code | package | path | filename
---|---|---|---|
__author__ = 'Paul Landes'
from typing import Tuple, List
from dataclasses import dataclass, field
import logging
import sys
from io import TextIOBase
from spacy.tokens.doc import Doc
from spacy.tokens import Token
from zensols.persist import persisted, PersistableContainer
from zensols.config import Dictable
from zensols.nlp import FeatureDocument, FeatureToken, FeatureSentence
logger = logging.getLogger(__name__)
@dataclass
class SequenceAnnotation(PersistableContainer, Dictable):
"""An annotation of a pair matching feature and spaCy tokens.
"""
label: str = field()
"""The string label of this annotation."""
doc: FeatureDocument = field()
"""The feature document associated with this annotation."""
tokens: Tuple[FeatureToken] = field()
"""The tokens annotated with ``label``."""
@property
@persisted('_sent', transient=True)
def sent(self) -> FeatureSentence:
"""The sentence containing the annotated tokens."""
sents = self.doc.sentences_for_tokens(self.tokens)
assert len(sents) == 1
return sents[0]
@property
@persisted('_token_matches', transient=True)
def token_matches(self) -> Tuple[FeatureToken, Token]:
"""Pairs of matching feature token to token mapping. This is useful for
annotating spaCy documents.
"""
matches = []
sdoc: Doc = self.doc.spacy_doc
tok: FeatureToken
for tok in self.tokens:
stok: Token = sdoc[tok.i]
matches.append((tok, stok))
return tuple(matches)
@property
def mention(self) -> str:
"""The mention text."""
return ' '.join(map(str, self.tokens))
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
short: bool = False):
if short:
s = f'{self.mention}: {self.label} ({self.tokens[0].i})'
self._write_line(s, depth, writer)
else:
self._write_line(f'label: {self.label}', depth, writer)
tok: FeatureToken
for tok in self.tokens:
sent = ''
if hasattr(tok, 'sent_i'):
sent = f'sent index={tok.sent_i}, '
self._write_line(f'{tok.text}: {sent}index in doc={tok.i}',
depth + 1, writer)
def __str__(self):
return f'{self.mention} ({self.label})'
@dataclass
class SequenceDocumentAnnotation(Dictable):
"""Contains token annotations for a :class:`~zensols.nlp.FeatureDocument` as a
duple of :class:`.SequenceAnnotation`.
"""
doc: FeatureDocument = field()
"""The feature document associated with this annotation."""
sequence_anons: Tuple[SequenceAnnotation] = field()
"""The annotations for the respective :obj:`doc`."""
@property
def spacy_doc(self) -> Doc:
"""The spaCy document associated with this annotation."""
return self.doc.spacy_doc
@property
@persisted('_token_matches', transient=True)
def token_matches(self) -> Tuple[str, FeatureToken, Token]:
"""Triple of matching feature token to token mapping in the form (``label``,
``feature token``, ``spacy token``). This is useful for annotating
spaCy documents.
"""
matches: List[Tuple[str, Tuple[FeatureToken, Token]]] = []
for sanon in self.sequence_anons:
for tok_matches in sanon.token_matches:
matches.append((sanon.label, *tok_matches))
return tuple(matches)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
short: bool = False):
self._write_line(f'doc: {self.doc} S={short}', depth, writer)
for anon in self.sequence_anons:
anon.write(depth + 1, writer, short=short)
@dataclass
class BioSequenceAnnotationMapper(object):
"""Matches feature documents/tokens with spaCy document/tokens and entity
labels.
"""
begin_tag: str = field(default='B')
"""The sequence ``begin`` tag class."""
in_tag: str = field(default='I')
"""The sequence ``in`` tag class."""
out_tag: str = field(default='O')
"""The sequence ``out`` tag class."""
def _map_entities(self, classes: Tuple[List[str]],
docs: Tuple[FeatureDocument]) -> \
Tuple[str, int, Tuple[int, int]]:
"""Map BIO entities and documents to a pairing of both.
:param classes: the classes (labels, or usually, predictions)
:param docs: the feature documents to assign labels
:return: a tuple of label, sentence index and lexical feature document
index interval of tokens
"""
ents: Tuple[str, int, Tuple[int, int]] = []
doc: FeatureDocument
# tok.i is not reliable since holes exist from filtered space and
# possibly other removed tokens
for six, (cls, doc) in enumerate(zip(classes, docs)):
tok: FeatureToken
start_ix = None
start_lab = None
ent: str
for stix, (ent, tok) in enumerate(zip(cls, doc.tokens)):
pos: int = ent.find('-')
bio, lab = None, None
if pos > -1:
bio, lab = ent[0:pos], ent[pos+1:]
if bio == self.begin_tag:
start_ix = stix
start_lab = lab
if ent == self.out_tag and start_ix is not None:
ents.append((start_lab, six, (start_ix, stix)))
start_ix = None
start_lab = None
return ents
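# Illustrative trace (not part of the original source): for a document whose
# tokens align with classes = [['B-PER', 'I-PER', 'O', 'B-LOC']], the loop
# above records ('PER', 0, (0, 2)) when the 'O' tag closes the entity; the
# trailing 'B-LOC' is never closed by an 'O' tag, so it is not emitted.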
def _collate(self, docs: Tuple[FeatureDocument],
ents: Tuple[str, int, Tuple[int, int]]) -> \
List[SequenceAnnotation]:
"""Collate entity tokens in to groups.
:param docs: the feature documents to assign labels
:param ents: a tuple of label, sentence index and lexical feature
document index interval of tokens
:return: a tuple ``(feature document, label, (start feature token, end
feature token))``
"""
anons: List[SequenceAnnotation] = []
for lab, six, loc in ents:
doc: FeatureDocument = docs[six]
ftoks: Tuple[FeatureToken] = doc.tokens
ent_toks: Tuple[FeatureToken] = ftoks[loc[0]:loc[1]]
anons.append(SequenceAnnotation(lab, doc, ent_toks))
return anons
def map(self, classes: Tuple[List[str]],
docs: Tuple[FeatureDocument]) -> Tuple[SequenceDocumentAnnotation]:
"""Map BIO entities and documents to pairings as annotations.
:param classes: a tuple of lists, each list containing the class of the
token in BIO format
:param docs: the feature documents to assign labels
:return: a tuple of annotation instances, each with coupling of label,
feature token and spaCy token
"""
ents: Tuple[str, int, Tuple[int, int]] = \
self._map_entities(classes, docs)
sanons: List[SequenceAnnotation] = self._collate(docs, ents)
col_sanons: List[SequenceAnnotation] = []
danons: List[SequenceDocumentAnnotation] = []
last_doc: FeatureDocument = None
sanon: SequenceAnnotation
for sanon in sanons:
# flush the previous document's annotations before starting a new group
if last_doc is not None and sanon.doc != last_doc:
danons.append(SequenceDocumentAnnotation(
last_doc, tuple(col_sanons)))
col_sanons.clear()
col_sanons.append(sanon)
last_doc = sanon.doc
if len(col_sanons) > 0:
danons.append(SequenceDocumentAnnotation(
last_doc, tuple(col_sanons)))
return danons
| zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/model/sequence.py | sequence.py |
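A minimal usage sketch of the mapper above, under stated assumptions: the FeatureDocument comes from an already configured zensols.nlp parser, the BIO tags align one per document token, the import path follows the wheel path in this row, and the helper name is hypothetical.

from typing import Tuple, List
from zensols.nlp import FeatureDocument
from zensols.deepnlp.model.sequence import BioSequenceAnnotationMapper

def annotate(doc: FeatureDocument, classes: Tuple[List[str]]):
    # classes holds one list of BIO tags per document, aligned with doc.tokens
    mapper = BioSequenceAnnotationMapper()
    for danon in mapper.map(classes, (doc,)):
        # writes the document line followed by each match in short form
        danon.write(short=True)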
__author__ = 'Paul Landes'
from typing import Iterable, Tuple, Type, Dict, Any
from dataclasses import dataclass, field
from torch import Tensor
from zensols.persist import persisted
from zensols.nlp import TokenContainer
from zensols.nlp.score import ScoreContext, ScoreMethod, FloatScore
from bert_score import BERTScorer
from zensols.deepnlp import transformer
@dataclass
class BERTScoreScoreMethod(ScoreMethod):
"""A scoring method that uses BERTScore. Sentence pairs are ordered as
``(<references>, <candidates>)``.
Citation:
.. code:: none
Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav
Artzi. 2020. BERTScore: Evaluating Text Generation with BERT. In
Proceedings of the 8th International Conference on Learning
Representations, Addis Ababa, Ethiopia, March.
"""
use_norm: bool = field(default=True)
"""Whether to compare with
:obj:`~zensols.nlp.container.TokenContainer.norm` or
:obj:`~zensols.nlp.container.TokenContainer.text`.
"""
bert_score_params: Dict[str, Any] = field(
default_factory=lambda: dict(
model_type='microsoft/deberta-xlarge-mnli'))
"""The parameters given to :class:`bert_score.scorer.BERTScorer`."""
@classmethod
def _get_external_modules(cls: Type) -> Tuple[str, ...]:
transformer.suppress_warnings()
return ('bert_score',)
@property
@persisted('_bert_scorer')
def bert_scorer(self) -> BERTScorer:
return BERTScorer(**self.bert_score_params)
def _score(self, meth: str, context: ScoreContext) -> Iterable[FloatScore]:
def container_to_str(container: TokenContainer) -> str:
return container.norm if self.use_norm else container.text
refs: Tuple[str] = tuple(map(
lambda p: container_to_str(p[0]), context.pairs))
cands: Tuple[str] = tuple(map(
lambda p: container_to_str(p[1]), context.pairs))
scorer: BERTScorer = self.bert_scorer
scores: Tuple[Tensor] = scorer.score(cands=cands, refs=refs)
return map(FloatScore, scores[0].tolist())
| zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/score/bertscore.py | bertscore.py |
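For reference, a hedged sketch of the underlying bert_score call that _score above delegates to; the sentences are made up and the model weights download on first use.

from bert_score import BERTScorer

scorer = BERTScorer(model_type='microsoft/deberta-xlarge-mnli')
refs = ['The patient was discharged home in stable condition.']
cands = ['Patient discharged home, condition stable.']
# score() returns (precision, recall, F1) tensors, one element per sentence pair
P, R, F1 = scorer.score(cands, refs)
print(P.tolist(), R.tolist(), F1.tolist())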
__author__ = 'Paul Landes'
import logging
from functools import reduce
from torch import nn
from zensols.actioncli import persisted
logger = logging.getLogger(__name__)
class Im2DimCalculator(object):
"""
Convolution matrix dimension calculation utility.
http://cs231n.github.io/convolutional-networks/#conv
Implementation as Matrix Multiplication section.
Example (im2col):
W_in = H_in = 227
Ch_in = D_in = 3
Ch_out = D_out = 3
K = 96
F = (11, 11)
S = 4
P = 0
W_out = H_out = ((227 - 11 + (2 * 0)) / 4) + 1 = 55 output locations
X_col = Fw^2 * D_out x W_out * H_out = 11^2 * 3 x 55 * 55 = 363 x 3025
Example (im2row):
W_row = 96 filters of size 11 x 11 x 3 => K x 11 * 11 * 3 = 96 x 363
Result of convolution: transpose(W_row) dot X_col
Must reshape back to 55 x 55 x 96
"""
def __init__(self, W, H, D=1, K=1, F=(2, 2), S=1, P=0):
"""Initialize.
:param W: width
:param H: height
:param D: depth [of volume] (usually same as K)
:param K: number of filters
:param F: tuple of kernel/filter (width, height)
:param S: stride
:param P: padding
"""
self.W = W
self.H = H
self.D = D
self.K = K
self.F = F
self.S = S
self.P = P
def clone(self, W=None, H=None, **kwargs):
nkw = {'D': self.D,
'K': self.K,
'F': self.F,
'S': self.S,
'P': self.P}
nkw.update(kwargs)
W = self.W if W is None else W
H = self.H if H is None else H
return self.__class__(W, H, **nkw)
def validate(self):
W, H, F, P, S = self.W, self.H, self.F, self.P, self.S
if ((W - F[0] + (2 * P)) % S):
raise ValueError('incongruous convolution width layer parameters')
if ((H - F[1] + (2 * P)) % S):
raise ValueError('incongruous convolution height layer parameters')
if (F[0] > (W + (2 * P))):
raise ValueError(f'kernel/filter {F} must be <= width {W} + 2 * padding {P}')
if (F[1] > (H + (2 * P))):
raise ValueError(f'kernel/filter {F} must be <= height {H} + 2 * padding {P}')
if self.W_row[1] != self.X_col[0]:
raise ValueError(f'columns of W_row {self.W_row} do not match ' +
f'rows of X_col {self.X_col}')
@property
@persisted('_W_out')
def W_out(self):
return int(((self.W - self.F[0] + (2 * self.P)) / self.S) + 1)
@property
@persisted('_H_out')
def H_out(self):
return int(((self.H - self.F[1] + (2 * self.P)) / self.S) + 1)
@property
@persisted('_X_col')
def X_col(self):
# TODO: not supported for non-square filters
return (self.F[0] ** 2 * self.D, self.W_out * self.H_out)
@property
@persisted('_W_row')
def W_row(self):
# TODO: not supported for non-square filters
return (self.K, (self.F[0] ** 2) * self.D)
@property
@persisted('_out_shape')
def out_shape(self):
return (self.K, self.W_out, self.H_out)
@property
@persisted('_flatten_dim')
def flatten_dim(self):
return reduce(lambda x, y: x * y, self.out_shape)
def flatten(self, axis=1):
fd = self.flatten_dim
W, H = (1, fd) if axis else (fd, 1)
return self.__class__(W, H, F=(1, 1), D=1, K=1)
def __str__(self):
attrs = 'W H D K F S P W_out H_out W_row X_col out_shape'.split()
return ', '.join(map(lambda x: f'{x}={getattr(self, x)}', attrs))
def __repr__(self):
return self.__str__()
class ConvolutionLayerFactory(object):
"""Create convolution layers.
"""
def __init__(self, *args, **kwargs):
"""Create a layer factory using the same arguments as given in
``Im2DimCalculator``.
"""
if len(args) > 0 and isinstance(args[0], Im2DimCalculator):
calc = args[0]
else:
calc = Im2DimCalculator(*args, **kwargs)
self.calc = calc
def flatten(self, *args, **kwargs):
"""Return a flat layer with arguments given to ``Im2DimCalculator``.
"""
return self.__class__(self.calc.flatten(*args, **kwargs))
@property
def flatten_dim(self):
"""Return the dimension of a flattened array of the convolution layer
represented by this instance.
"""
return self.calc.flatten_dim
def clone(self, *args, **kwargs):
"""Return a clone of this factory instance.
"""
return self.__class__(self.calc.clone(*args, **kwargs))
def conv1d(self):
"""Return a convolution layer in one dimension.
"""
c = self.calc
return nn.Conv1d(c.D, c.K, c.F, padding=c.P, stride=c.S)
def conv2d(self):
"""Return a convolution layer in two dimensions.
"""
c = self.calc
return nn.Conv2d(c.D, c.K, c.F, padding=c.P, stride=c.S)
def batch_norm2d(self):
"""Return a 2D batch normalization layer.
"""
return nn.BatchNorm2d(self.calc.K)
def max_pool1d(self):
"""Return a one dimensional max pooling layer.
"""
return nn.MaxPool1d(self.calc.F[1], stride=self.calc.S)
def __str__(self):
return str(self.calc)
| zensols.dltools | /zensols.dltools-0.0.3-py3-none-any.whl/zensols/dltools/conv.py | conv.py |
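The docstring example in Im2DimCalculator can be checked directly; a short sketch assuming the classes are importable from this module path:

from zensols.dltools.conv import Im2DimCalculator, ConvolutionLayerFactory

# AlexNet-style first layer: 227x227x3 input, 96 11x11 filters, stride 4, no padding
calc = Im2DimCalculator(W=227, H=227, D=3, K=96, F=(11, 11), S=4, P=0)
calc.validate()
print(calc.W_out, calc.H_out)    # 55 55
print(calc.W_row, calc.X_col)    # (96, 363) (363, 3025)
conv = ConvolutionLayerFactory(calc).conv2d()   # nn.Conv2d(3, 96, (11, 11), stride=4)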
__author__ = 'Paul Landes'
import logging
import pylab
from scipy.stats import gaussian_kde
import numpy as np
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
class PlotManager(object):
"""A Convenience class to give window geomtry placement and blocking.
"""
def __init__(self, geometry=(50, 0), size=(5, 5), block=False):
self.geometry = '+{}+{}'.format(*geometry)
logger.debug('using geometry: {} -> {}'.format(
geometry, self.geometry))
self.size = size
self.block = block
@staticmethod
def clear():
global _plot_mng_fig
if '_plot_mng_fig' in globals():
del globals()['_plot_mng_fig']
@property
def fig(self):
return self.get_fig()
def get_fig(self, *args, **kwargs):
if not hasattr(self, '_fig'):
global _plot_mng_fig
if '_plot_mng_fig' in globals():
plt.close(_plot_mng_fig)
_plot_mng_fig = self._fig = plt.figure(
*args, figsize=self.size, **kwargs)
return self._fig
@property
def ax(self):
return self.subplots()
def subplots(self, *args, **kwargs):
return self.fig.subplots(*args, **kwargs)
def subplot(self, *args, **kwargs):
# 1, 1, 1
return self.fig.add_subplot(*args, **kwargs)
def show(self):
mng = pylab.get_current_fig_manager()
mng.window.wm_geometry(self.geometry)
self.fig.tight_layout()
plt.show(block=self.block)
def save(self, fig_path=None, *args, **kwargs):
if fig_path is None:
fig_path = 'fig.png'
logger.info(f'saving output figure to {fig_path}')
plt.savefig(fig_path, *args, **kwargs)
class DensityPlotManager(PlotManager):
"""Create density plots.
"""
def __init__(self, data, covariance_factor=0.5, interval=None, margin=None,
*args, **kwargs):
"""
:param covariance_factor: smooth factor for visualization only
"""
super(DensityPlotManager, self).__init__(*args, **kwargs)
self.interval = interval
self.margin = margin
self.covariance_factor = covariance_factor
self.data = data
def plot(self):
data = self.data
ax = self.ax
density = gaussian_kde(data)
if ax is None:
ax = self.ax
if self.interval is None:
self.interval = (min(data), max(data))
if self.margin is None:
self.margin = 0.2 * abs(self.interval[0] - self.interval[1])
# create evenly spaced numbers over the probable range
xs = np.linspace(
self.interval[0] - self.margin, self.interval[1] + self.margin)
logger.debug(f'data size: {len(data)}, X graph points: {len(xs)}')
# smooth factor for visualization
density.covariance_factor = lambda: self.covariance_factor
# compute probability density and plot
density._compute_covariance()
logger.debug(f'plotting with ax: {ax}')
ax.plot(xs, density(xs))
class GraphPlotManager(PlotManager):
def __init__(self, graph, style='spring', pos=None, *args, **kwargs):
super(GraphPlotManager, self).__init__(*args, **kwargs)
self.graph = graph
self.style = style
self.pos = pos
self.set_draw_arguments()
def set_draw_arguments(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def _get_layout_function(self):
import networkx as nx
style = self.style
if style == 'spectral':
layoutfn = nx.spectral_layout
elif style == 'circular':
layoutfn = nx.circular_layout
elif style == 'spring':
layoutfn = nx.spring_layout
elif style == 'shell':
layoutfn = nx.shell_layout
elif style == 'kamada':
layoutfn = nx.kamada_kawai_layout
elif style == 'planar':
layoutfn = nx.layout.planar_layout
elif style == 'random':
layoutfn = nx.layout.random_layout
else:
raise ValueError(f'no such layout: {style}')
return layoutfn
def _get_pos(self):
if self.pos is None:
layoutfn = self._get_layout_function()
pos = layoutfn(self.graph)
else:
pos = self.pos
return pos
def show(self):
import networkx as nx
nxg = self.graph
ax = self.ax
pos = self._get_pos()
nx.draw_networkx(nxg, pos=pos, ax=ax, *self.args, **self.kwargs)
super(GraphPlotManager, self).show()
| zensols.dltools | /zensols.dltools-0.0.3-py3-none-any.whl/zensols/dltools/plot.py | plot.py |
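A small sketch of the density plot manager above; it assumes an interactive matplotlib backend (e.g. TkAgg), since show() positions the window through the pylab figure manager, and the sample data is synthetic.

import numpy as np
from zensols.dltools.plot import DensityPlotManager

data = np.random.normal(loc=0.0, scale=1.0, size=500)
mng = DensityPlotManager(data, covariance_factor=0.3, block=True)
mng.plot()
mng.show()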
__author__ = 'Paul Landes'
import logging
import torch
import sys
from zensols.actioncli import persisted
logger = logging.getLogger(__name__)
class CudaInfo(object):
"""A utility class that provides information about the CUDA configuration for
the current (hardware) environment.
"""
def num_devices(self):
"""Return number of devices connected.
"""
import pycuda.driver as cuda
return cuda.Device.count()
def devices(self):
"""Get info on all devices connected.
"""
import pycuda.driver as cuda
num = cuda.Device.count()
print('%d device(s) found:' % num)
for i in range(num):
print(f'{cuda.Device(i).name()} (Id: {i})')
def mem_info(self):
"""Get available and total memory of all devices.
"""
import pycuda.driver as cuda
available, total = cuda.mem_get_info()
print(f'Available: {available/1e9:.2f} GB\nTotal: {total/1e9:.2f} GB')
def attributes(self, device_id=0):
"""Get attributes of device with device Id = device_id
"""
import pycuda.driver as cuda
return cuda.Device(device_id).get_attributes()
def write_attributes(self, writer=sys.stdout):
for att in self.attributes():
writer.write(f'{att}\n')
def write(self, writer=sys.stdout):
"""Class representation as number of devices connected and about them.
"""
import pycuda.driver as cuda
num = cuda.Device.count()
writer.write(f'{num} device(s) found:\n')
for i in range(num):
writer.write(f' {i+1}) {cuda.Device(i).name()} (Id: {i})\n' +
f'{" " * 10}Memory: ' +
f'{cuda.Device(i).total_memory()/1e9:.2f} GB\n')
class CudaConfig(object):
"""A utility class that provides access to CUDA APIs. It provides information
on the current CUDA configuration and convenience methods to create, copy
and modify tensors. These are handy for any given CUDA configuration and
can back off to the CPU when CUDA isn't available.
"""
CPU_DEVICE = 'cpu'
def __init__(self, use_cuda=True, data_type=torch.float32):
self.use_cuda = use_cuda
self.data_type = data_type
@persisted('__init_device', cache_global=True)
def _init_device(self):
use_cuda = self.use_cuda and torch.cuda.is_available()
if use_cuda:
import pycuda.driver as cuda
import pycuda.autoinit
cuda.init()
cuda_dev = torch.cuda.current_device()
torch.cuda.set_device(cuda_dev)
device = f'cuda:{cuda_dev}'
else:
device = self.CPU_DEVICE
device = torch.device(device)
logger.info(f'using device: {device}')
return device
@property
def device(self):
if not hasattr(self, '_device'):
self.set_default_device()
return self._device
@device.setter
def device(self, device):
self._device = device
def set_default_device(self):
if self.use_cuda:
self._device = self._init_device()
else:
self._device = self.CPU_DEVICE
return self._device
def empty_cache(self):
torch.cuda.empty_cache()
@property
def info(self):
self._init_device()
return CudaInfo()
def same_device(self, tensor_or_model) -> bool:
device = self.device
return hasattr(tensor_or_model, 'device') and \
tensor_or_model.device == device
def to(self, tensor_or_model):
if not self.same_device(tensor_or_model):
tensor_or_model = tensor_or_model.to(self.device)
return tensor_or_model
def _populate_defaults(self, kwargs):
if 'dtype' not in kwargs:
kwargs['dtype'] = self.data_type
kwargs['device'] = self.device
def singleton(self, *args, **kwargs):
self._populate_defaults(kwargs)
return torch.tensor(*args, **kwargs)
def empty(self, *args, **kwargs):
self._populate_defaults(kwargs)
return torch.empty(*args, **kwargs)
def zeros(self, *args, **kwargs):
self._populate_defaults(kwargs)
return torch.zeros(*args, **kwargs)
def from_numpy(self, *args, **kwargs):
return self.to(torch.from_numpy(*args, **kwargs))
def cat(self, *args, **kwargs):
return self.to(torch.cat(*args, **kwargs))
def write(self, writer=sys.stdout):
self.info.write(writer)
def __str__(self):
return f'use cuda: {self.use_cuda}, device: {self.device}'
| zensols.dltools | /zensols.dltools-0.0.3-py3-none-any.whl/zensols/dltools/cuda_conf.py | cuda_conf.py |
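A sketch of the CUDA configuration wrapper above; when a GPU is present _init_device imports pycuda, so that package is assumed to be installed on CUDA hosts.

import torch
from zensols.dltools.cuda_conf import CudaConfig

conf = CudaConfig(use_cuda=True, data_type=torch.float32)
print(conf)              # e.g. 'use cuda: True, device: cuda:0' or a CPU fallback
x = conf.zeros(2, 3)     # allocated on the configured device with the default dtype
y = conf.singleton([1.0, 2.0, 3.0])
z = conf.cat((x, x), dim=0)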
__author__ = 'Paul Landes'
import logging
from torch import nn
from typing import List, Any
from torch.functional import F
from zensols.actioncli import persisted
logger = logging.getLogger(__name__)
class LinearLayerFactory(object):
"""Utility class to create linear layers.
"""
def __init__(self, in_shape, out=None, out_percent=None):
"""Initialize the factory.
:param in_shape: the shape of the layer
:param out: the output size of the reshaped layer or use
``out_percent`` if ``None``
:param out_percent: the output reshaped layer as a percentage of the
input size ``in_shape`` if not ``None``, otherwise use
``out``.
"""
self.in_shape = in_shape
if out is None:
self.out_features = int(self.flatten_dim * out_percent)
else:
self.out_features = out
@property
@persisted('_flatten_dim')
def flatten_dim(self):
"""Return the dimension of a flattened layer represened by the this instances's
layer.
"""
from functools import reduce
return reduce(lambda x, y: x * y, self.in_shape)
@property
@persisted('_out_shape')
def out_shape(self):
"""Return the shape of the output layer.
"""
return (1, self.flatten_dim)
def linear(self):
"""Return a new PyTorch linear layer.
"""
return nn.Linear(self.flatten_dim, self.out_features)
def __str__(self):
return f'{self.in_shape} -> {self.out_shape}'
class DeepLinearLayer(nn.Module):
"""A layer that has contains one more nested layers. The input and output
layer shapes are given and an optional 0 or more middle layers are given as
percent changes in size or exact numbers.
"""
def __init__(self, in_features: int, out_features: int,
middle_features: List[Any] = None, dropout: float = None,
activation_function=F.relu, proportions: bool = True):
"""Initialize the deep linear layer.
:param in_features: the number of features coming into the network
:param out_features: the number of output features leaving the network
:param middle_features: a list of percent differences or exact
parameter counts of each middle layer; if the
former, the next shape is a function of the
scalar multiplied by the previous layer; for
example ``[1.0]`` creates a nested layer with
the exact same shape as the input layer (see
``proportions`` parameter)
:param dropout: the dropout used in all layers or ``None`` to disable
:param activation_function: the function between all layers, or
``None`` for no activation
:param proportions: whether or not to interpret ``middle_features`` as
a proportion of the previous layer or use directly
as the size of the middle layer
"""
super(DeepLinearLayer, self).__init__()
self.layer_attrs = []
middle_features = () if middle_features is None else middle_features
last_feat = in_features
for mf in middle_features:
if proportions:
next_feat = int(last_feat * mf)
else:
next_feat = int(mf)
self._add_layer(last_feat, next_feat)
last_feat = next_feat
self._add_layer(last_feat, out_features)
self.dropout = None if dropout is None else nn.Dropout(dropout)
self.activation_function = activation_function
def _add_layer(self, in_features, out_features):
name = f'_layer_{len(self.layer_attrs)}'
logger.debug(f'{name}: in={in_features} out={out_features}')
setattr(self, name, nn.Linear(in_features, out_features))
self.layer_attrs.append(name)
def get_layers(self):
layers = []
for layer_name in self.layer_attrs:
layers.append(getattr(self, f'{layer_name}'))
return layers
def n_features_after_layer(self, nth_layer):
return self.get_layers()[nth_layer].out_features
def train(self, mode=True):
super(DeepLinearLayer, self).train(mode)
self.is_training = mode
def eval(self):
super(DeepLinearLayer, self).eval()
self.is_training = False
def forward(self, x):
for i, aname in enumerate(self.layer_attrs):
if i > 0:
x = self.activation_function(x)
layer = getattr(self, aname)
x = layer.forward(x)
if self.is_training and self.dropout is not None:
x = self.dropout(x)
return x
| zensols.dltools | /zensols.dltools-0.0.3-py3-none-any.whl/zensols/dltools/linear.py | linear.py |
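A minimal sketch of the deep linear layer above; eval() (or train()) must be called before forward() since it consults the is_training flag those methods set.

import torch
from zensols.dltools.linear import DeepLinearLayer

# 10 -> 5 -> 2 network: one middle layer at half the input size
layer = DeepLinearLayer(in_features=10, out_features=2,
                        middle_features=[0.5], dropout=0.1)
layer.eval()
out = layer.forward(torch.rand(8, 10))
print(out.shape)    # torch.Size([8, 2])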
__author__ = 'Paul Landes'
from typing import Dict, Any, Iterable, List, Tuple
from dataclasses import dataclass, field
from enum import Enum, auto
import logging
import sys
import json
import yaml
import itertools as it
from zensols.persist import Stash
from zensols.cli import ApplicationError
from . import AdmissionMatch
logger = logging.getLogger(__name__)
class Format(Enum):
text = auto()
json = auto()
yaml = auto()
@dataclass
class Application(object):
"""This library provides integrated MIMIC-III with discharge summary
provenance of data annotations and Pythonic classes.
"""
stash: Stash = field()
"""A stash that creates :class:`.AdmissionMatch` instances."""
def admissions(self, limit: int = None):
"""Print the annotated admission IDs.
:param limit: the limit on items to print
"""
limit = sys.maxsize if limit is None else limit
k: str
for k in it.islice(self.stash.keys(), limit):
print(k)
def show(self, limit: int = None, format: Format = Format.text,
ids: str = None, indent: int = None):
"""Print annotated matches
:param limit: the limit on items to print
:param format: the output format
:param ids: a comma separated list of hospital admission IDs (hadm_id)
:param indent: the indentation (if any)
"""
limit = sys.maxsize if limit is None else limit
ams: Iterable[AdmissionMatch]
if ids is None:
ams = it.islice(self.stash, limit)
else:
ams: List[Tuple[str, AdmissionMatch]] = []
for aid in ids.split(','):
if aid not in self.stash:
raise ApplicationError(
f'Admission (hadm_id) does not exist: {aid}')
ams.append((aid, self.stash[aid]))
if format == Format.text:
for ix, (i, am) in enumerate(ams):
if ix > 0:
print('=' * 80)
am.write()
else:
dct: Dict[str, Any] = dict(
map(lambda t: (t[0], t[1].asflatdict()['note_matches']), ams))
if format == Format.json:
print(json.dumps(dct, indent=indent))
elif format == Format.yaml:
print(yaml.dump(dct))
| zensols.dsprov | /zensols.dsprov-0.0.1-py3-none-any.whl/zensols/dsprov/app.py | app.py |
__author__ = 'Paul Landes'
from typing import Dict, Any, List, Iterable
from dataclasses import dataclass, field
import logging
import json
from pathlib import Path
from zensols.persist import persisted, ReadOnlyStash
from zensols.install import Installer
from zensols.nlp import LexicalSpan
from zensols.mimic import Note, HospitalAdmission, Corpus
from . import NoteSpan, NoteMatch, AdmissionMatch
logger = logging.getLogger(__name__)
@dataclass
class AnnotationStash(ReadOnlyStash):
"""A stash that create instances of :class:`.AdmissionMatch`.
"""
installer: Installer = field()
corpus: Corpus = field()
def _parse_match(self, adm: HospitalAdmission, ds_note: Note,
match: Dict[str, Any]) -> NoteMatch:
"""Parse discharge summary and note antecedent data."""
ant_spans: List[NoteSpan] = []
ds: Dict[str, Any] = match['ds']
ds_span = NoteSpan(
lexspan=LexicalSpan(**ds['note_span']),
note=ds_note)
ant: Dict[str, Any]
for ant in match['ant']:
ant_note: Note = adm.notes_by_id[ant['row_id']]
ant_spans.append(NoteSpan(
lexspan=LexicalSpan(**ant['note_span']),
note=ant_note))
return NoteMatch(
hadm_id=adm.hadm_id,
discharge_summary=ds_span,
antecedents=tuple(ant_spans),
source=match)
def _parse_adm(self, hadm_id: str, anon: Dict[str, Any]) -> AdmissionMatch:
"""Parse matches as a collection into an admission container."""
note_matches: List[NoteMatch] = []
adm: HospitalAdmission = self.corpus.get_hospital_adm_by_id(hadm_id)
matches: Dict[str, Any] = anon['matches']
ds_note: Note = adm.notes_by_id[anon['ds_row_id']]
mid: str
match: Dict[str, Any]
for mid, match in matches.items():
note_match: NoteMatch = self._parse_match(adm, ds_note, match)
note_matches.append(note_match)
return AdmissionMatch(tuple(note_matches))
@persisted('_source_anons')
def _get_source_anons(self) -> Dict[str, Any]:
"""Download the annotations (if not already) and return the JSON content
as an in memory dict of dicts having roughly the same structure as the
match object graph.
"""
self.installer()
anon_path: Path = self.installer.get_singleton_path()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'parsing {anon_path}')
with open(anon_path) as f:
return json.load(f)
def load(self, hadm_id: str) -> AdmissionMatch:
sanons: Dict[str, Any] = self._get_source_anons()
if hadm_id in sanons:
return self._parse_adm(hadm_id, sanons.get(hadm_id))
def keys(self) -> Iterable[str]:
sanons: Dict[str, Any] = self._get_source_anons()
return sanons.keys()
def exists(self, hadm_id: str) -> bool:
sanons: Dict[str, Any] = self._get_source_anons()
return hadm_id in sanons
| zensols.dsprov | /zensols.dsprov-0.0.1-py3-none-any.whl/zensols/dsprov/stash.py | stash.py |
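A hedged sketch of reading annotations through the stash above; it assumes stash is an AnnotationStash already wired up by the package's application context with a MIMIC-III Corpus and the annotation Installer, and the helper name is hypothetical.

import itertools as it
from zensols.dsprov import AdmissionMatch

def dump_matches(stash, limit: int = 2):
    # keys are hospital admission IDs (hadm_id)
    for hadm_id in it.islice(stash.keys(), limit):
        am: AdmissionMatch = stash[hadm_id]
        print(f'{hadm_id}: {len(am.note_matches)} matches')
        am.write()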
__author__ = 'Paul Landes'
from typing import ClassVar, Tuple, Dict, Any
from dataclasses import dataclass, field
import sys
from io import TextIOBase
from collections import OrderedDict
import textwrap as tw
from zensols.config import Dictable
from zensols.persist import persisted
from zensols.nlp import LexicalSpan, TokenContainer, FeatureSpan
from zensols.mimic import Note, Section
@dataclass
class MatchBase(Dictable):
"""A base class for match data containers that enforces no
pickling/serialization of note spans. This is not supported as subclasses
contain complex object graphs.
"""
def __getstate__(self):
raise ValueError('Pickling not supported')
def repr(self) -> str:
return self.__str__()
@dataclass
class NoteSpan(MatchBase):
"""A *tie* between two spans of semantically similar or copied text segments
between a note antecedent and a discharge summary This is the analog to
``MatchedNote`` in the reproducibility repo, but use paper terminology.
"""
lexspan: LexicalSpan = field()
"""The 0-index start and end offset in :obj:`note` the demarcates the span
lexically.
"""
note: Note = field()
"""The note that matches."""
@property
def text(self) -> str:
"""The span as text demarcated by the span."""
return self.note.text[self.lexspan.begin:self.lexspan.end]
@property
def norm_text(self) -> str:
"""The normalized as the span text spaced and without newlines."""
toks = filter(lambda t: not t.is_space, self.span.token_iter())
span = FeatureSpan(tokens=tuple(toks))
return span.norm
@property
def span(self) -> TokenContainer:
"""The span as features demarcated by the span."""
return self.note.doc.get_overlapping_span(self.lexspan)
@property
@persisted('_sections')
def sections(self) -> Dict[int, Section]:
"""The sections coverd by the span."""
overlaps: Dict[int, Section] = {}
i: int
sec: Section
for i, sec in self.note.sections.items():
if sec.lexspan.overlaps_with(self.lexspan):
overlaps[i] = sec
return overlaps
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_sections: bool = False):
if include_sections:
self._write_line('text:', depth, writer)
self._write_block(self.text, depth + 1, writer)
self._write_line('sections:', depth, writer)
for i, sec in sorted(self.sections.items(), key=lambda t: t[0]):
self._write_line(str(sec), depth + 1, writer)
else:
self._write_block(self.text, depth, writer)
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
return OrderedDict(
[['row_id', self.note.row_id],
['span', self.lexspan.asdict()],
['text', self.text]])
def __str__(self) -> str:
return str(self.note)
@dataclass
class NoteMatch(MatchBase):
"""A match between a text span in the discharge summary with the semanically
similar or copy/pasted text with the note antecedents. This is the analog
to the ``MatchedAnnotation`` in the reproducibility repo.
"""
STR_SPAN_WIDTH: ClassVar[int] = 30
hadm_id: int = field()
"""The admission unique identifier."""
discharge_summary: NoteSpan = field()
"""The discharge summary note and span."""
antecedents: Tuple[NoteSpan] = field()
"""The note antecedent note/spans."""
source: Dict[str, Any] = field()
"""The source annotation JSON that was used to construct this instance."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_sections: bool = False):
did: str = self.discharge_summary.note.row_id
self._write_line(f'discharge summary ({did}):', depth, writer)
self.discharge_summary.write(depth + 1, writer, include_sections)
if len(self.antecedents) == 1:
aid: str = self.antecedents[0].note.row_id
self._write_line(f'antecedent ({aid}):', depth, writer)
self.antecedents[0].write(depth + 1, writer, include_sections)
else:
self._write_line('antecedents:', depth, writer)
ant: NoteSpan
for ant in self.antecedents:
aid: str = self.antecedents[0].note.row_id
self._write_line(f'{aid}:', depth + 1, writer)
ant.write(depth + 2, writer, include_sections)
def desc(self, width: int) -> str:
"""A short description string of the match."""
def shorten(span: NoteSpan) -> str:
s: str = tw.shorten(span.text, width=width)
return f'({span.note.row_id}) {s}'
width = self.STR_SPAN_WIDTH
ds: str = shorten(self.discharge_summary)
ants: str = ', '.join(map(shorten, self.antecedents))
return f'{self.hadm_id}: {ds} -> {ants}'
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
return OrderedDict(
[['ds', self.discharge_summary.asdict()],
['ant', tuple(map(lambda a: a.asdict(), self.antecedents))]])
def __str__(self) -> str:
return self.desc(self.STR_SPAN_WIDTH)
@dataclass
class AdmissionMatch(MatchBase):
"""Contains match data for an admission.
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
note_matches: Tuple[NoteMatch] = field()
"""Contains match data for notes."""
@property
def hadm_id(self) -> int:
"""The admission unique identifier."""
return self.note_matches[0].hadm_id
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
nm: NoteMatch
self._write_line(f'{self}', depth, writer)
for i, nm in enumerate(self.note_matches):
if i > 0:
self._write_divider(depth, writer)
self._write_object(nm, depth, writer)
def __str__(self) -> str:
return f'{self.hadm_id}: {len(self.note_matches)} matches'
| zensols.dsprov | /zensols.dsprov-0.0.1-py3-none-any.whl/zensols/dsprov/domain.py | domain.py |
import logging
import sys
from pathlib import Path
import shutil
from zensols.persist import (
PersistedWork,
persisted
)
from zensols.garmdown import (
Backuper,
Persister,
Fetcher,
)
logger = logging.getLogger(__name__)
class Manager(object):
"""Manages downloading and database work. This includes downloading data from
the Garmin connect website and persisting status data in an SQLite
database.
"""
def __init__(self, config):
"""Initialize
:param config: the application configuration
"""
self.config = config
self.download = config.download
self._fetcher = PersistedWork('_fetcher', self, True)
@property
@persisted('_fetcher')
def fetcher(self):
return Fetcher(self.config)
@property
@persisted('_persister')
def persister(self):
return Persister(self.config)
@property
@persisted('_backuper')
def backuper(self):
return Backuper(self.config, self.persister)
def environment(self, writer=sys.stdout):
writer.write(f'database={self.config.db_file}\n')
writer.write(f'activities={self.config.activities_dir}\n')
writer.write(f'backup={self.config.db_backup_dir}\n')
def sync_activities(self, limit=None, start_index=0):
"""Download and add activities to the SQLite database. Note that this does not
download the TCX files.
:param limit: the number of activities to download
:param start_index: the 0 based activity index (not contiguous page
based)
"""
# acts will be an iterable
acts = self.fetcher.get_activities(limit, start_index)
self.persister.insert_activities(acts)
@staticmethod
def _tcx_filename(activity):
"""Format a (non-directory) file name for ``activity``."""
return f'{activity.start_date_str}_{activity.id}.tcx'
def sync_tcx(self, limit=None):
"""Download TCX files and record each succesful download as such in the
database.
:param limit: the maximum number of TCX files to download, which
defaults to all
"""
dl_dir = self.config.activities_dir
persister = self.persister
if not dl_dir.exists():
logger.info(f'creating download directory {dl_dir}')
dl_dir.mkdir(parents=True)
acts = persister.get_missing_downloaded(limit)
logger.info(f'downloading {len(acts)} tcx files')
for act in acts:
dl_path = Path(dl_dir, self._tcx_filename(act))
if dl_path.exists():
logger.warning(f'activity {act.id} is downloaded ' +
f'but not marked--marking now')
else:
logger.debug(f'downloading {dl_path}')
with open(dl_path, 'wb') as f:
self.fetcher.download_tcx(act, f)
sr = dl_path.stat()
logger.debug(f'{dl_path} has size {sr.st_size}')
if sr.st_size < self.download.min_size:
m = f'downloaded file {dl_path} has size ' + \
f'{sr.st_size} < {self.download.min_size}'
raise ValueError(m)
persister.mark_downloaded(act)
def import_tcx(self, limit=None):
"""Download TCX files and record each succesful download as such in the
database.
:param limit: the maximum number of TCX files to download, which
defaults to all
"""
persister = self.persister
dl_dir = self.config.activities_dir
import_dir = self.config.import_dir
if not import_dir.exists():
logger.info(f'creating imported directory {import_dir}')
import_dir.mkdir(parents=True)
acts = persister.get_missing_imported(limit)
logger.info(f'importing {len(acts)} activities')
for act in acts:
fname = self._tcx_filename(act)
dl_path = Path(dl_dir, fname)
import_path = Path(import_dir, fname)
if import_path.exists():
logger.warning(f'activity {act.id} is imported ' +
f'but not marked--marking now')
else:
logger.info(f'copying {dl_path} -> {import_path}')
shutil.copy(dl_path, import_path)
persister.mark_imported(act)
def sync(self, limit=None):
"""Sync activitives and TCX files.
:param limit: the number of activities to download and import, which
defaults to the configuration values
"""
self.sync_activities(limit)
self.sync_tcx(limit)
self.import_tcx()
def write_not_downloaded(self, detail=False, limit=None,
writer=sys.stdout):
"""Write human readable formatted data of all activities not yet downloaded.
:param detail: whether or not to give full information about the activity
:param limit: the number of activities to report on
:param writer: the stream to output, which defaults to stdout
"""
for act in self.persister.get_missing_downloaded(limit):
act.write(writer, detail=detail)
def write_not_imported(self, detail=False, limit=None, writer=sys.stdout):
"""Write human readable formatted data of all activities not yet imported.
:param detail: whether or not to give full information about the activity
:param limit: the number of activities to report on
:param writer: the stream to output, which defaults to stdout
"""
for act in self.persister.get_missing_imported(limit):
act.write(writer, detail=detail)
def close(self):
"""Close all allocated resources byt by the manager."""
self.fetcher.close()
self._fetcher.clear()
| zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/mng.py | mng.py |
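A hedged sketch of driving the manager above programmatically; the configuration file path is hypothetical, the Manager and AppConfig package exports are assumed from the intra-package imports, and the file itself must supply Garmin credentials and the directory options read by config.py below.

from pathlib import Path
from zensols.garmdown import AppConfig, Manager

config = AppConfig(Path('~/.garmdown.conf').expanduser())   # hypothetical path
mng = Manager(config)
try:
    mng.environment()     # print resolved database/activity/backup paths
    mng.sync(limit=10)    # download activities and TCX files, then import
finally:
    mng.close()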
import logging
from pathlib import Path
from datetime import datetime
import itertools as it
import httplib2 as hl
from oauth2client import file, client, tools
import googleapiclient.discovery as gd
from zensols.persist import persisted
from zensols.garmdown import Persister
logger = logging.getLogger(__name__)
class CompletedEntry(object):
"""Represents a row of training data (bike/swim/run) in the Google training
spreadsheet.
"""
def __init__(self, idx, row_offset, datestr,
swim=None, bike=None, run=None, strength=None):
"""Initialize.
:param idx: the 0-based index of the entry
:param row_offset: the number of rows where the first data entry starts
not including any header(s)
:param datestr: the date string in ``mm/dd/yyyy`` format
"""
self.idx = idx
self.row_offset = row_offset
self.date = datetime.strptime(datestr, '%m/%d/%Y')
self.exists = not (swim is None and bike is None and run is None)
self.swim = 0 if (swim is None or len(swim) == 0) else float(swim)
self.bike = 0 if (bike is None or len(bike) == 0) else float(bike)
self.run = 0 if (run is None or len(run) == 0) else float(run)
self.strength = 0 if (strength is None or len(strength) == 0) else float(strength)
@property
def rowidx(self):
return self.idx + self.row_offset
@property
@persisted('_row')
def row(self):
row = (self.swim, self.bike, self.run, self.strength)
row = tuple(map(lambda x: None if x == 0 else x, row))
return row
def update(self, activities, act_char_to_col_type):
by_sport = {}
for act in activities:
key = act_char_to_col_type[act.type_short]
logger.debug(f'act short: {act.type_short} => {key} for {act}')
if key != '<skip>':
if key not in by_sport:
by_sport[key] = []
by_sport[key].append(act)
for k, v in by_sport.items():
act_secs = sum(map(lambda x: x.move_time_seconds, v))
act_min = act_secs / 60
logger.debug(f'setting {k} => {act_min}')
setattr(self, k, act_min)
def __str__(self):
return (f'{self.rowidx}: date={self.date}: exist={self.exists}, ' +
f's={self.swim}, b={self.bike}, r={self.run}, ' +
f'w={self.strength}')
def __repr__(self):
return self.__str__()
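# Illustrative example (not part of the original source): spreadsheet strings
# for the first data row (idx=0) with a row_offset of 3:
#   entry = CompletedEntry(0, 3, '01/15/2019', swim='30', run='45.5')
#   entry.exists -> True, entry.rowidx -> 3
#   entry.row -> (30.0, None, 45.5, None)    # swim, bike, run, strength minutes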
class SheetUpdater(object):
"""Updates a Google Sheets spreadsheet with activity data from the activity
database.
"""
SECTION = 'google_sheets'
def __init__(self, config):
"""Initialize.
:param config: the application configuration
"""
self.config = config
config.populate(self, section=self.SECTION)
self.params = config.sheet
self.act_char_to_col_type = config.fetch_config.get_options(
section='activity_sheet')
@property
def persister(self):
"The DB DAO."
return Persister(self.config)
@property
@persisted('_service', cache_global=True)
def service(self):
"""The Google Sheets API wrapper service.
"""
cred_path = Path(self.google_cred_file).expanduser()
token_path = Path(self.token_file).expanduser()
logger.info(f'getting last update with {cred_path} ' +
f'with file {token_path}')
store = file.Storage(token_path)
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets(cred_path, self.params.scope)
creds = tools.run_flow(flow, store)
logger.info(f'logging in to Google sheets API')
return gd.build(self.params.api, self.params.version,
http=creds.authorize(hl.Http()),
cache_discovery=False)
@property
def sheet(self):
"""The Google Sheets API wrapper instance.
"""
return self.service.spreadsheets()
def _get_completed_cell_range(self, start, end):
"Return the completed cell range from rows ``start`` to ``end``."
return self.completed_cell_range_format.format(start, end)
@property
def completed_cell_range(self):
"The completed cell range per defaults in the settings."
return self._get_completed_cell_range(self.row_offset, self.maxdays)
def _get_data(self, range):
"Get data in the spreadsheet via the Google API."
sheet = self.sheet
result = sheet.values().get(spreadsheetId=self.sheet_id,
range=range).execute()
return result.get('values', ())
def _set_data(self, values, range):
"Set data in the spreadsheet via the Google API."
sheet = self.sheet
body = {
'values': values,
}
sheet.values().update(
spreadsheetId=self.sheet_id,
#valueInputOption='RAW',
valueInputOption='USER_ENTERED',
range=range,
body=body).execute()
@persisted('_completed_entries')
def _get_completed_entries(self):
"Return completed training entries from the spreadsheet."
logger.info('getting existing completed workout data')
dates = self._get_data(self.date_cell_range)
completed = self._get_data(self.completed_cell_range)
ldates = len(dates)
lcompleted = len(completed)
logger.debug(f'dates {ldates}, completed: {lcompleted}')
if ldates > lcompleted:
completed = it.chain(completed, ((),) * (ldates - lcompleted))
data = enumerate(zip(dates, completed))
return map(lambda x: CompletedEntry(
x[0], self.row_offset, x[1][0][0], *x[1][1]), data)
def _get_update_range_entries(self, last_idx=None, end_date=None):
"""Return an updated range.
:param last_idx: the last completed work out entry index (0 based), or
if ``None``, calculated from spreadsheet data
:param end_date: the last date to constrain the entry range
:type end_date: datetime.datetime
"""
entries = tuple(self._get_completed_entries())
if last_idx is None:
last_idx = -1
for last in filter(lambda x: x.exists, entries):
last_idx = last.idx
logger.debug(f'last existing completed row: {last_idx}')
if end_date is None:
end_date = datetime.now()
return filter(lambda x: x.idx > last_idx and x.date < end_date,
entries)
def _sync_entries_with_db(self, entries, clobber=False):
"""Populate database data in to ``entries``.
:param entries: the workout entries to populate
:type entries: CompletedEntry
:param clobber: if ``True`` populate data even if ``entry.exists`` is
``False``
"""
logger.info(f'syncing {len(entries)} with activity database')
for entry in entries:
if clobber or not entry.exists:
acts = self.persister.get_activities_by_date(entry.date)
if logger.isEnabledFor(logging.DEBUG):
types = ', '.join(map(lambda x: x.type, acts))
logger.debug(f'found {types} activities for {entry}')
entry.update(acts, self.act_char_to_col_type)
logger.debug(f'updated: {entry}')
def _upload_row_data(self, entries):
"""Upload workout data to Google.
:param entries: the workout data to upload
:type entries: iterable of CompletedEntry
"""
rows = tuple(map(lambda x: x.row, entries))
range = self._get_completed_cell_range(
entries[0].rowidx, entries[-1].rowidx)
logger.info(f'updating {len(rows)} rows with range {range}')
self._set_data(rows, range)
def sync(self):
entries = tuple(self._get_update_range_entries())
if len(entries) > 0:
self._sync_entries_with_db(entries)
self._upload_row_data(entries)
| zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/sheets.py | sheets.py |
import logging
from zensols.config import ExtendedInterpolationConfig
logger = logging.getLogger(__name__)
class AppConfig(ExtendedInterpolationConfig):
"""Application context simplifies some of the configuration.
"""
@property
def fetch_config(self):
if not hasattr(self, '_fetch_conf'):
path = self.resource_filename('resources/fetch.conf')
logger.debug(f'loading web configuration from {path}')
conf = ExtendedInterpolationConfig(path)
self._web = conf.populate(section='web')
self._sql = conf.populate(section='sql')
self._db = conf.populate(section='db')
self._download = conf.populate(section='download')
self._sheet = conf.populate(section='sheet')
self._fetch_conf = conf
return self._fetch_conf
def _assert_fetch_conf(self):
self.fetch_config
@property
def db_file(self):
"""Return the SQLite DB file."""
return self.get_option_path('db_file').expanduser()
@property
def activities_dir(self):
"""Return the directory where TCX files live.
TODO: fix BAD name. This name should be ``tcx_dir``.
"""
return self.get_option_path('activities_dir').expanduser()
@property
def import_dir(self):
"""Return the directory where the imported TCX files live."""
return self.get_option_path('import_dir').expanduser()
@property
def db_backup_dir(self):
"""Return the directory of the stored SQLite backup files."""
return self.get_option_path('db_backup_dir').expanduser()
@property
def web(self):
"""Return the web configuration used by the robot."""
self._assert_fetch_conf()
return self._web
@property
def sql(self):
"""Return the SQL statements object."""
self._assert_fetch_conf()
return self._sql
@property
def db(self):
"""Return the database configuration parameter object."""
self._assert_fetch_conf()
return self._db
@property
def download(self):
"""Return the download configuration parameter object."""
self._assert_fetch_conf()
return self._download
@property
def sheet(self):
"""Return the Google Sheets configuration parameter object."""
self._assert_fetch_conf()
return self._sheet
@property
def fetch(self):
"""Return the fetch (download) parameter object."""
if not hasattr(self, '_fetch'):
self._fetch = self.populate(section='download')
return self._fetch
| zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/config.py | config.py |
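The accessors above imply a user configuration file roughly like the following sketch; the [default] section name and the literal values are assumptions, while the option names (db_file, activities_dir, import_dir, db_backup_dir) are the ones the class reads.

[default]
db_file = ~/.garmdown/garmdown.sqlite3
activities_dir = ~/.garmdown/activities
import_dir = ~/.garmdown/import
db_backup_dir = ~/.garmdown/backup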
import sys
import itertools as it
from datetime import datetime
class Activity(object):
def __init__(self, raw, type_char):
self.id = str(raw['activityId'])
self.raw = raw
self.type_char = type_char
@staticmethod
def type_from_raw(raw):
return raw['activityType']['typeKey']
@staticmethod
def common_attributes():
return """
name location start_date_str duration move_time_seconds
heart_rate_average v02max stress_score calories
""".split()
def _attr_names(self):
return self.common_attributes()
@property
def start_time(self):
if not hasattr(self, '_datestr'):
datestr = self.raw['startTimeLocal']
self._date = datetime.strptime(datestr, '%Y-%m-%d %H:%M:%S')
return self._date
@property
def start_date_str(self):
return datetime.strftime(self.start_time, '%Y-%m-%d')
@property
def start_year_str(self):
return datetime.strftime(self.start_time, '%Y')
@property
def name(self):
return self.raw['activityName']
@property
def type_raw(self):
return self.type_from_raw(self.raw)
@property
def type(self):
return self.factory.char_to_type[self.type_char]
@property
def type_short(self):
return self.type_char
@property
def type_long(self):
return self.factory.char_to_name[self.type_char]
@property
def location(self):
return self.raw['locationName']
@property
def duration(self):
no_move_sports = 'indoor_cycling treadmill_running strength_training'
no_move_sports = set(no_move_sports.split())
key = 'duration' if self.type in no_move_sports else 'movingDuration'
return self.raw[key]
@property
def move_time_seconds(self):
if self.type_short == 's':
dur = self.raw['duration']
else:
dur = self.duration  # self.raw['movingDuration']
if dur is None:
dur = self.raw['duration']
if dur is None:
raise ValueError(f'no such duration: {self}')
return dur
@property
def heart_rate_average(self):
return self.raw['averageHR']
@property
def v02max(self):
return self.raw['vO2MaxValue']
@property
def stress_score(self):
return self.raw['trainingStressScore']
@property
def calories(self):
return self.raw['calories']
def write_raw(self, writer=sys.stdout):
from pprint import pprint
pprint(self.raw, stream=writer)
def write(self, writer=sys.stdout, detail=True):
if detail:
writer.write(f'{self.start_date_str}: {self.name}\n')
writer.write(f'type: {self.type}\n')
for attr in self._attr_names():
name = attr.replace('_', ' ')
aval = getattr(self, attr)
writer.write(f' {name}: {aval}\n')
else:
writer.write(
f'{self.id}: start: {self.start_time}, type: {self.type}\n')
def __str__(self):
return f'{self.id}: date={self.start_date_str}, sport={self.type_long}'
def __repr__(self):
return self.__str__()
class CyclingActivity(Activity):
@staticmethod
def cycling_attributes():
return """
cadence power_average power_norm power_max strokes
intensity
""".split()
def _attr_names(self):
pattrs = super(CyclingActivity, self)._attr_names()
return it.chain(pattrs, self.cycling_attributes())
@property
def intensity(self):
return self.raw['intensityFactor']
@property
def cadence(self):
return self.raw['averageBikingCadenceInRevPerMinute']
@property
def power_average(self):
return self.raw['avgPower']
@property
def power_norm(self):
return self.raw['normPower']
@property
def power_max(self):
return self.raw['maxPower']
@property
def strokes(self):
return self.raw['strokes']
class RunningActivity(Activity):
@staticmethod
def running_attributes():
return """
cadence_step_per_minute ground_contact_balance_average
ground_contact_time_average steps
""".split()
def _attr_names(self):
pattrs = super(RunningActivity, self)._attr_names()
return it.chain(pattrs, self.running_attributes())
@property
def cadence_step_per_minute(self):
return self.raw['averageRunningCadenceInStepsPerMinute']
@property
def stride_average(self):
return self.raw['avgStrideLength']
@property
def ground_contact_balance_average(self):
return self.raw['avgGroundContactBalance']
@property
def ground_contact_time_average(self):
return self.raw['avgGroundContactTime']
@property
def steps(self):
return self.raw['steps']
class ActivityFactory(object):
def __init__(self, config):
self.type_to_char = config.fetch_config.get_options(
section='activity_type')
self.char_to_name = config.fetch_config.get_options(
section='activity_name')
self.char_to_type = {v: k for k, v in self.type_to_char.items()}
def create(self, raw):
atype = Activity.type_from_raw(raw)
type_char = self.type_to_char[atype]
clsname = atype.capitalize() + 'Activity'
if clsname not in globals():
act = Activity(raw, type_char)
else:
act = globals()[clsname](raw, type_char)
act.factory = self
return act
class Backup(object):
def __init__(self, path, time=None):
self.path = path
if time is None:
self.time = datetime.now()
else:
self.time = time
@property
def datestr(self):
return datetime.strftime(self.time, '%Y-%m-%d')
@staticmethod
def timestr_from_datetime(time=None):
if time is None:
time = datetime.now()
return datetime.strftime(time, '%Y-%m-%d_%H-%M')
@property
def timestr(self):
return self.timestr_from_datetime(self.time)
def __str__(self):
return f'{self.timestr}: {self.path}'
| zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/domain.py | domain.py |
import logging
import sys
from pathlib import Path
from datetime import datetime
import json
import sqlite3
from zensols.persist import (
persisted,
resource
)
from zensols.garmdown import (
ActivityFactory,
Backup,
)
logger = logging.getLogger(__name__)
class connection(resource):
def __init__(self):
super(connection, self).__init__(
'_create_connection', '_dispose_connection')
class Persister(object):
"""CRUDs activities in the SQLite database.
"""
def __init__(self, config):
"""Initialize
:param config: the application configuration
"""
self.config = config
self.sql = config.sql
@property
@persisted('_activity_factory')
def activity_factory(self):
return ActivityFactory(self.config)
@property
def db_file(self):
return self.config.db_file
def _create_connection(self):
"""Create a connection to the SQLite database (file).
"""
logger.debug('creating connection')
db_file = self.db_file
created = False
if not db_file.exists():
if not db_file.parent.exists():
logger.info(f'creating sql db directory {db_file.parent}')
db_file.parent.mkdir(parents=True)
logger.info(f'creating sqlite db file: {db_file}')
created = True
types = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
conn = sqlite3.connect(str(db_file.absolute()), detect_types=types)
if created:
logger.info(f'initializing database...')
for sql_key in self.config.db.init_sql:
sql = getattr(self.sql, sql_key)
logger.debug(f'invoking sql: {sql}')
conn.execute(sql)
conn.commit()
return conn
def _dispose_connection(self, conn):
"""Close the connection to the database."""
logger.debug(f'closing connection {conn}')
conn.close()
def _activity_exists(self, conn, act):
""""Return whether or not an activity already lives in the database."""
cur = conn.cursor()
exists = None
try:
cur.execute(self.sql.exists_act, (act.id,))
exists = cur.fetchone() is not None
finally:
cur.close()
return exists
@connection()
def insert_activities(self, conn, activities):
"""Insert an activity in the database.
:param conn: the database connection (not provided by the client of
this class)
:param activities: the activities to add to the database
"""
logger.info(f'persisting activities')
logger.debug(f'connection: {conn}')
for act in activities:
if self._activity_exists(conn, act):
logger.debug(f'already found in database: {act}--skipping')
else:
raw = json.dumps(act.raw)
row = (act.id, act.start_time, act.type_short, raw,)
logger.info(f'adding activity to db {act}')
conn.execute(self.sql.insert_act, row)
conn.commit()
def _thaw_activity(self, conn, sql, *params):
"""Unpersist activities from the database.
:param conn: the database connection
:param sql: the string SQL used to query
:param params: the parameters used in the SQL call
"""
afactory = self.activity_factory
for raw in map(lambda x: x[0], conn.execute(sql, params)):
jobj = json.loads(raw)
yield afactory.create(jobj)
def _mark_state(self, conn, sql, action, act):
"""Mark something as downloaded or imported.
:param conn: the database connection
:param sql: the string SQL used to update
:param action: what we're marking--only used for logging
:param act: the activity to mark as updated for 'action' reason
"""
now = datetime.now()
logger.info(f'mark activity {act.id} to {action} {now}')
logger.debug(f'update sql: {sql}')
rc = conn.execute(sql, (now, act.id,)).rowcount
logger.debug(f'updated {rc} row(s)')
conn.commit()
return rc
@connection()
def get_missing_downloaded(self, conn, limit=None):
"""Return activities that have not yet been downloaded.
:param conn: the database connection (not provided by the client of
this class)
:param limit: the number of actions to return that haven't been
downloaded
"""
if limit is None:
limit = self.config.fetch.tcx_chunk_size
return tuple(self._thaw_activity(
conn, self.sql.missing_downloads, limit))
@connection()
def mark_downloaded(self, conn, activity):
"""Mark ``activity`` as having been downloaded
:param conn: the database connection (not provided on by the client of
this class)
:param activity: the activity to mark as downloaded
"""
update_sql = self.sql.update_downloaded
self._mark_state(conn, update_sql, 'downloaded', activity)
@connection()
def get_missing_imported(self, conn, limit=None):
"""Return activities that have not yet been imported.
        :param conn: the database connection (not provided by the client of
                     this class)
:param limit: the number of actions to return that haven't been
imported
"""
if limit is None:
limit = sys.maxsize
return tuple(self._thaw_activity(
conn, self.sql.missing_imported, limit))
@connection()
def mark_imported(self, conn, activity):
"""Mark ``activity`` as having been imported
:param conn: the database connection (not provided on by the client of
this class)
:param activity: the activity to mark as imported
"""
update_sql = self.sql.update_imported
self._mark_state(conn, update_sql, 'imported', activity)
@connection()
def insert_backup(self, conn, backup):
row = (backup.time, str(backup.path.absolute()))
logger.info(f'inserting backup {backup} to db')
conn.execute(self.sql.insert_back, row)
conn.commit()
def _thaw_backup(self, conn, sql):
for row in conn.execute(sql):
time, filename = row
yield Backup(Path(filename), time)
@connection()
def get_last_backup(self, conn):
backups = tuple(self._thaw_backup(conn, self.sql.last_back))
if len(backups) > 0:
return backups[0]
@connection()
    def get_activities_by_date(self, conn, date):
        datestr = date.strftime('%Y-%m-%d')
return tuple(self._thaw_activity(
conn, self.sql.activity_by_date, datestr)) | zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/db.py | db.py |
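# A minimal usage sketch of the Persister above, assuming ``config`` is the
# garmdown application configuration (providing the ``sql``, ``db`` and
# ``fetch`` sections plus ``db_file``) and ``activities`` is an iterable of
# activity objects produced by the fetcher.  The ``conn`` parameter of the
# persister methods is injected by the ``@connection`` decorator, so callers
# never pass it.
def _persister_example(config, activities):
    from zensols.garmdown import Persister
    persister = Persister(config)
    # add activity metadata; activities already in the database are skipped
    persister.insert_activities(activities)
    # iterate over activities whose TCX files still need to be downloaded
    for act in persister.get_missing_downloaded(limit=5):
        print(act)
        # record that the TCX file was fetched for this activity
        persister.mark_downloaded(act)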
import logging
import json
import re
import itertools as it
import urllib.parse as up
from robobrowser import RoboBrowser
from zensols.persist import persisted
from zensols.garmdown import ActivityFactory
logger = logging.getLogger(__name__)
class Fetcher(object):
"""Downloads Garmin TXC files and activities (metadata).
"""
def __init__(self, config):
"""Initialize
:param config: the application configuration
"""
self.config = config
self.web = self.config.web
self.fetch_params = self.config.fetch
self.login_state = 'loggedout'
@property
@persisted('_activity_factory')
def activity_factory(self):
return ActivityFactory(self.config)
@property
@persisted('_browser', cache_global=True)
def browser(self):
"The ``RoboBrowser`` instance."
import requests
start = requests.session()
start.headers = {'origin': 'https://sso.garmin.com'}
return RoboBrowser(
history=True, parser='lxml', user_agent=self.web.agent, session=start)
@property
@persisted('_hostname_url', cache_global=True)
def hostname_url(self):
"The API host name."
self.browser.open(self.web.gauth)
return json.loads(self.browser.parsed.html.body.p.text)['host']
# not sure why we need this (taken from Shannon's code)
@property
@persisted('_script_url', cache_global=True)
def script_url(self):
"Not used"
self.browser.open(self.web.base_url)
parsed = self.browser.parsed.decode()
pattern = r"'\S+sso\.garmin\.com\S+'"
return re.search(pattern, parsed).group()[1:-1]
@property
def login_request_data(self):
"""Return the data needed to log in to the garmin connect site.
"""
wconf = self.web
# Package the full login GET request...
data = {'service': wconf.redirect,
'webhost': self.hostname_url,
'source': wconf.base_url,
'redirectAfterAccountLoginUrl': wconf.redirect,
'redirectAfterAccountCreationUrl': wconf.redirect,
'gauthHost': wconf.sso,
'locale': 'en_US',
'id': 'gauth-widget',
'cssUrl': wconf.css,
'clientId': 'GarminConnect',
'rememberMeShown': 'true',
'rememberMeChecked': 'false',
'createAccountShown': 'true',
'openCreateAccount': 'false',
'usernameShown': 'false',
'displayNameShown': 'false',
'consumeServiceTicket': 'false',
'initialFocus': 'true',
'embedWidget': 'false',
'generateExtraServiceTicket': 'false'}
return data
def _get_last_login_state(self):
"""Return ``success`` if the login connection was successful, ``failed`` if
failed or ``unknown`` if it returned a response we don't understand.
"""
decoded = self.browser.parsed.decode()
if decoded.find('SUCCESS') > 0:
state = 'success'
elif decoded.find('Invalid') > 0:
state = 'failed'
else:
logger.warning(f'unknown login state: {decoded}')
state = 'unknown'
return state
def _login(self):
"""Login in the garmin connect site with athlete credentials.
"""
login = self.config.populate(section='login')
url = self.web.login_url + up.urlencode(self.login_request_data)
logger.info('logging in...')
logger.debug(f'login url: {url}')
self.browser.open(url)
form = self.browser.get_form(self.web.login_form)
form['username'] = login.username
form['password'] = login.password
logger.debug(f'submitting form: {form}')
self.browser.submit_form(form)
state = self._get_last_login_state()
if state == 'failed':
raise ValueError('login failed')
elif state == 'unknown':
raise ValueError('login status unknown')
self.login_state = state
def _assert_logged_in(self):
"""Log in if we're not and raise an error if we can't.
"""
if self.login_state != 'success':
self._login()
# the web site seems to need to have this URL touched, otherwise we get
# errors when downloading activities
if not hasattr(self, '_preempt'):
            logger.info('preempting login')
self.browser.open(self.web.preempt_activities)
self._preempt = True
def _download_activities(self, index, chunk_size):
"""Use robobrowser to download activities, parse JSON and return them.
:param index: the 0 based activity index (not contiguous page based)
        :param chunk_size: the size of the batch for each invocation
"""
self._assert_logged_in()
params = {'index': index,
'activity_chunk_size': chunk_size}
url = self.web.activities.format(**params)
logger.info(f'downloading {chunk_size} activities at index {index}')
self.browser.open(url)
jobj = json.loads(self.browser.parsed.html.body.p.text)
if isinstance(jobj, dict) and 'error' in jobj:
raise ValueError('can not parse activities')
return jobj
def _iterate_activities(self, index, chunk_size):
"""Yield downloaded activities.
:param index: the 0 based activity index (not contiguous page based)
        :param chunk_size: the size of the batch for each invocation
"""
self._assert_logged_in()
afactory = self.activity_factory
search = self._download_activities(index, chunk_size)
for item in search:
activity = afactory.create(item)
logger.debug(f'activity: {activity}')
yield activity
def get_activities(self, limit=None, start_index=0):
"""Download and return ``limit`` activities.
:param limit: the number of activities to download
:param start_index: the 0 based activity index (not contiguous page
based)
"""
activity_chunk_size = self.fetch_params.activity_chunk_size
activity_num = self.fetch_params.activity_num
if limit is None:
limit = activity_chunk_size
al = map(lambda x: self._iterate_activities(x, activity_chunk_size),
range(start_index, activity_num, activity_chunk_size))
return it.islice(it.chain(*al), limit)
def download_tcx(self, activity, writer):
"""Download the TCX file for ``activity`` and dump the contents to ``writer``.
"""
self._assert_logged_in()
url = self.web.tcx.format(**{'activity': activity})
logger.info(f'downloading TCX for {activity} at {url}')
self.browser.open(url)
writer.write(self.browser.response.content)
def close(self):
"""Close the fetcher. It appears robobrowser doesn't need to be closed so this
is a no-op for now.
"""
logger.debug('closing fetcher')
def tmp(self):
self._login() | zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/fetcher.py | fetcher.py |
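# A minimal usage sketch of the Fetcher above, assuming ``config`` is the
# garmdown application configuration with the ``web``, ``fetch`` and ``login``
# sections populated, and that activity objects expose the ``id`` attribute
# used by the persister.  Since ``download_tcx`` writes the raw response
# content, the output file is opened in binary mode.
def _fetcher_example(config, out_dir):
    from pathlib import Path
    from zensols.garmdown.fetcher import Fetcher
    fetcher = Fetcher(config)
    # download metadata for the five most recent activities
    for act in fetcher.get_activities(limit=5):
        tcx_path = Path(out_dir, f'{act.id}.tcx')
        with open(tcx_path, 'wb') as f:
            fetcher.download_tcx(act, f)
    fetcher.close()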
import logging
from datetime import datetime
from zensols.cli import OneConfPerActionOptionsCliEnv
from zensols.garmdown import (
Manager,
Backuper,
SheetUpdater,
Reporter,
AppConfig,
)
class InfoCli(object):
def __init__(self, config, detail=False, limit=None):
self.mng = Manager(config)
self.detail = detail
self.limit = limit
def environment(self):
self.mng.environment()
def fetch_config(self):
self.mng.config.fetch_config.write()
def write_not_downloaded(self):
self.mng.write_not_downloaded(self.detail, self.limit)
def write_not_imported(self):
self.mng.write_not_imported(self.detail, self.limit)
class DownloadCli(object):
def __init__(self, config, limit=None):
self.mng = Manager(config)
self.config = config
self.limit = limit
def sync_activities(self):
self.mng.sync_activities(self.limit)
def sync_tcx(self):
self.mng.sync_tcx(self.limit)
def import_tcx(self):
self.mng.import_tcx()
def sync(self):
self.mng.sync(self.limit)
backuper = Backuper(self.config)
backuper.backup()
class SheetCli(object):
def __init__(self, config):
self.config = config
def sync(self):
su = SheetUpdater(self.config)
su.sync()
class BackupCli(object):
def __init__(self, config):
self.backer = Backuper(config)
def backup(self):
self.backer.backup(True)
class ReporterCli(object):
FORMAT_TYPES = set('detail summary json'.split())
def __init__(self, config, format, datestr=None):
self.config = config
self.format = format
self.datestr = datestr
if format not in self.FORMAT_TYPES:
fopts = self.format_type_string()
raise ValueError(
f"unknown format type '{format}' not one of {fopts}")
@classmethod
def format_type_string(cls):
return '|'.join(cls.FORMAT_TYPES)
@property
def date(self):
if self.datestr is None:
date = datetime.now()
else:
date = datetime.strptime(self.datestr, '%Y-%m-%d')
return date
def report(self):
reporter = Reporter(self.config)
getattr(reporter, f'write_{self.format}')(self.date)
class SyncCli(object):
def __init__(self, config):
self.config = config
def sync(self):
DownloadCli(self.config).sync()
SheetCli(self.config).sync()
class ConfAppCommandLine(OneConfPerActionOptionsCliEnv):
def __init__(self):
detail_op = ['-d', '--detail', False,
{'dest': 'detail',
'action': 'store_true', 'default': False,
'help': 'report details of missing data'}]
limit_op = ['-l', '--limit', False,
{'dest': 'limit', 'metavar': 'INTEGER',
'type': 'int',
'help': 'the limit'}]
        date_op = ['-a', '--date', False,
                   {'dest': 'datestr', 'metavar': 'yyyy-mm-dd',
                    'help': 'the date to report, which defaults to today'}]
format_op = ['-f', '--format', False,
{'dest': 'format',
'default': 'detail',
'metavar': ReporterCli.format_type_string(),
'help': 'the format'}]
cnf = {'executors':
[{'name': 'info',
'executor': lambda params: InfoCli(**params),
'actions': [{'name': 'env',
'meth': 'environment',
'doc': 'print environment',
'opts': [detail_op]},
{'name': 'fetchconf',
'meth': 'fetch_config',
'doc': 'print fetch configuration',
'opts': [detail_op]},
{'name': 'notdown',
'meth': 'write_not_downloaded',
'doc': 'print activities not downloaded',
'opts': [detail_op]},
{'name': 'notimport',
'meth': 'write_not_imported',
'doc': 'print activities not imported',
'opts': [detail_op]}]},
{'name': 'down',
'executor': lambda params: DownloadCli(**params),
'actions': [{'name': 'activity',
'meth': 'sync_activities',
                         'doc': 'download outstanding activities',
'opts': [limit_op]},
{'name': 'tcx',
'meth': 'sync_tcx',
'doc': 'download outstanding TCX files',
'opts': [limit_op]},
{'name': 'import',
'meth': 'import_tcx',
'doc': 'import TCX file',
'opts': [limit_op]},
{'name': 'download',
'doc': 'download all outstanding data',
'opts': [limit_op]}]},
{'name': 'backup',
'executor': lambda params: BackupCli(**params),
'actions': [{'name': 'backup',
                         'doc': 'backup (force) the activities database',
'opts': []}]},
{'name': 'report',
'executor': lambda params: ReporterCli(**params),
'actions': [{'name': 'report',
'doc': 'report activities for a day',
'opts': [date_op, format_op]}]},
{'name': 'sheet',
'executor': lambda params: SheetCli(**params),
'actions': [{'name': 'sheet',
'meth': 'sync',
'doc': 'update Google Docs training spreadsheet',
'opts': []}]},
{'name': 'sync',
'executor': lambda params: SyncCli(**params),
'actions': [{'name': 'sync',
'doc': 'equivalent to actions download and sheet',
'opts': []}]}],
'config_option': {'name': 'config',
'expect': True,
'opt': ['-c', '--config', False,
{'dest': 'config',
'metavar': 'FILE',
'help': 'configuration file'}]},
'whine': 0}
super(ConfAppCommandLine, self).__init__(
cnf, config_env_name='garmdownrc', pkg_dist='zensols.garmdown',
config_type=AppConfig, default_action='sync')
def main():
logging.basicConfig(format='%(module)s: %(message)s', level=logging.INFO)
logging.getLogger('zensols.actioncli').setLevel(logging.WARNING)
cl = ConfAppCommandLine()
cl.invoke() | zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/cli.py | cli.py |
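# A minimal sketch of driving one of the action classes above outside the
# command line framework, assuming ``config`` is the garmdown application
# configuration normally supplied with the -c/--config option and that the
# reporter supports the ``summary`` format.
def _report_example(config):
    from zensols.garmdown.cli import ReporterCli
    # the format must be one of ReporterCli.FORMAT_TYPES (detail|summary|json)
    cli = ReporterCli(config, format='summary', datestr='2020-01-15')
    cli.report()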
import logging
from pathlib import Path
from datetime import datetime
import shutil as su
from zensols.persist import persisted
from zensols.garmdown import (
Backup,
Persister,
)
logger = logging.getLogger(__name__)
class Backuper(object):
"""Backup the SQLite database on a periodic basis.
"""
def __init__(self, config):
"""Initialize.
:param config: the application configuration
"""
self.config = config
self.backup_params = self.config.populate(section='backup')
@property
@persisted('_persister')
def persister(self):
return Persister(self.config)
@property
@persisted('__backup_dir', cache_global=False)
def _backup_dir(self):
"""Return the directory to where we back up."""
backup_dir = self.config.db_backup_dir
if not backup_dir.exists():
logger.info(f'creating backup directory {backup_dir}')
backup_dir.mkdir(parents=True)
return backup_dir
def _execute(self):
"""Execute the backup of the SQLite database."""
persister = self.persister
backup_dir = self._backup_dir
src = persister.db_file
dst = Path(backup_dir, f'{src.name}-{Backup.timestr_from_datetime()}')
backup = Backup(dst)
logger.info(f'backing up database {src} -> {dst}')
su.copy(src, dst)
persister.insert_backup(backup)
def backup(self, force=False):
"""Backup the SQLite if the last backup time is older than what's specified in
the configuration.
:param force: if True, execute the backup regardless
"""
backup = self.persister.get_last_backup()
if force:
do_backup = True
else:
if backup is None:
logger.info('no recorded backup')
do_backup = True
else:
logger.debug(f'last backup: {backup}')
diff = datetime.now() - backup.time
diff_days = diff.days
logger.info(f'days since last backup: {diff_days} and we ' +
f'backup every {self.backup_params.days} days')
do_backup = diff_days >= self.backup_params.days
logger.debug(f'backing up: {do_backup}')
if do_backup:
self._execute() | zensols.garmdown | /zensols.garmdown-0.0.8-py3-none-any.whl/zensols/garmdown/backup.py | backup.py |
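# A minimal usage sketch of the Backuper above, assuming ``config`` is the
# garmdown application configuration with a ``backup`` section that defines the
# number of ``days`` between backups.
def _backup_example(config):
    from zensols.garmdown import Backuper
    backuper = Backuper(config)
    # copies the SQLite file only if the last backup is older than the
    # configured number of days; pass force=True to back up unconditionally
    backuper.backup()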
__author__ = 'Paul Landes'
from typing import List, Dict, Any
import logging
import sys
from pathlib import Path
from git import Repo
from zensols.persist import persisted
from zensols.grsync import LinkEntry, RemoteSpec, PathTranslator
logger = logging.getLogger(__name__)
MASTER_SECTION = 'branch "master"'
class RepoSpec(object):
"""This class represents a git repository and all the symbolic links from the
distribution (usually the user's home directory) that link into it.
"""
DEFAULT_FORMAT = '{name}: {path}, remotes={remotes}, dirty={dirty}'
SHORT_FORMAT = '{name}: {path} ({remotes})'
def __init__(self, path: Path, path_translator: PathTranslator,
repo: Repo = None):
"""Create with the path to the repo and optionally a git.Repo.
:param path: the directory where the repository will be thawed (cloned)
:param path_translator: translates the distribution root
:param repo: the git repository to use in this instance, otherwise it
will be created that points to what the
``path_translator`` resolves
"""
self.path = path
self.path_translator = path_translator
self._repo = repo
self.links = ()
@property
def name(self) -> str:
"""Return the name of the repository.
"""
return self.path.name
@property
def repo(self) -> Repo:
"""Return the Git repository instance.
"""
if self._repo is None:
self._repo = Repo(str(self.path.resolve()))
return self._repo
@property
def master_remote(self) -> str:
"""Return the first (preferred) remote that is used as the master for pull,
fetch and push.
"""
if not hasattr(self, '_master_remote'):
config = self.repo.config_reader()
if config.has_section(MASTER_SECTION) and \
config.has_option(MASTER_SECTION, 'remote'):
self._master_remote = config.get(MASTER_SECTION, 'remote')
else:
self._master_remote = None
logger.debug('path: {}, master remote: {}'.
format(self.path.resolve(), self._master_remote))
return self._master_remote
@property
def remotes(self) -> List[RemoteSpec]:
"""Return a list or remote specs used as the repo's remotes.
"""
remotes = []
master_remote = self.master_remote
for remote in self.repo.remotes:
is_master = remote.name == master_remote
remotes.append(RemoteSpec(remote, is_master))
return remotes
def _is_linked_to(self, link):
is_linked = str(link.target).startswith(str(self.path))
if is_linked:
link.increment_use_count()
return is_linked
def add_linked(self, links):
self.links = tuple(filter(lambda l: self._is_linked_to(l), links))
def freeze(self) -> Dict[str, Any]:
"""Freeze the data in this instance in to a tree of dicts usable in a JSON
dump.
"""
return {'name': self.name,
'path': str(self.path_translator.relative_to(self.path)),
'links': [lk.freeze() for lk in self.links],
'remotes': [r.freeze() for r in self.remotes]}
def format(self, fmt=None, writer=sys.stdout):
"""Human readable format.
"""
if fmt is None:
fmt = self.DEFAULT_FORMAT
remotes = map(lambda x: x.name, self.remotes)
remotes = ' '.join(sorted(remotes))
rs = {'name': self.name,
'path': self.path_translator.to_relative(self.path),
'dirty': str(self.repo.is_dirty()).lower(),
'remotes': remotes}
return fmt.format(**rs)
def write(self, writer=sys.stdout):
"""Human readable output.
"""
path = self.path_translator.to_relative(self.path)
untracked = self.repo.untracked_files
diffs = self.repo.index.diff(None)
writer.write(f'{self.name}:\n')
writer.write(f' path: {path}\n')
writer.write(f' dirty: {str(self.repo.is_dirty()).lower()}\n')
writer.write(' remotes:\n')
for r in self.remotes:
writer.write(f' {r.name}: {r.url}\n')
if len(self.links) > 0:
writer.write(' links:\n')
for lk in self.links:
source = self.path_translator.to_relative(lk.source)
target = self.path_translator.to_relative(lk.target)
writer.write(f' {source} -> {target}\n')
if len(diffs) > 0:
writer.write(' diffs:\n')
for d in diffs:
writer.write(f' {d.a_path}\n')
if len(untracked) > 0:
writer.write(' untracked:\n')
for f in untracked:
writer.write(f' {f}\n')
def __str__(self):
return self.format()
def __repr__(self):
return self.__str__()
class FrozenRepo(object):
def __init__(self, remotes: List[Dict[str, str]], links: List[LinkEntry],
target_dir: Path, path: Path, repo_pref: str,
path_translator: PathTranslator):
"""Initialize.
:param remotes: a list of dicts with keys ``name``, ``url`` and
``is_master`` representing a git repository remote
:param links: symbol links that link in to what will become the new
repository after thawed (cloned)
:param target_dir: the root target directory of where the repository
will be thawed (cloned)
:param path: the directory where the repository will be thawed (cloned)
:param repo_pref: the remote to use as the first remote when thawed
:param path_translator: translates the distribution root
"""
self.remotes = remotes
self.links = links
self.target_dir = target_dir
self.path = path
self.repo_pref = repo_pref
self.path_translator = path_translator
@property
@persisted('_repo_spec')
def repo_spec(self) -> RepoSpec:
"""Return the repo spec for this frozen repo.
"""
return RepoSpec(self.path, self.path_translator)
@property
def exists(self) -> bool:
"""Return whether or not the repo represented by this frozen repo already
exists.
"""
return self.path.exists()
def _split_master_remote_defs(self):
not_masters = []
master = None
for rmd in self.remotes:
if rmd['name'] == self.repo_pref:
master = rmd
else:
not_masters.append(rmd)
if master is None:
not_masters.clear()
for rmd in self.remotes:
if rmd['is_master']:
master = rmd
else:
not_masters.append(rmd)
if master is None:
master = not_masters[0]
not_masters = not_masters[1:]
return master, not_masters
def thaw(self) -> RepoSpec:
"""Thaw a RepoSpec object, which does a clone and then creates the (remaining
if any) remotes. This also creates the symbol links that link into
this repo. Then return the object represented by the new repo.
"""
if self.path.exists():
logger.warning('path already exists: {}--skipping repo clone'.
format(self.path))
repo_spec = self.repo_spec
else:
master, not_masters = self._split_master_remote_defs()
name = master['name']
url = master['url']
logger.info(f'cloning repo: {url} -> {self.path}')
repo = Repo.clone_from(url, self.path, recursive=True)
repo.remotes[0].rename(name)
for rmd in not_masters:
repo.create_remote(rmd['name'], rmd['url'])
repo_spec = RepoSpec(self.path, self.path_translator, repo)
for link in self.links:
logger.info(f'thawing link {link}')
if link.source.exists():
logger.warning(f'refusing to overwrite link: {link.source}')
else:
par = link.source.parent
if not par.exists():
logger.info(f'creating link directory: {par}')
par.mkdir(parents=True)
link.source.symlink_to(link.target)
repo_spec.links = self.links
return repo_spec
def __str__(self):
return f'{self.path} -> {self.target_dir}: {self.remotes}'
def __repr__(self):
return self.__str__() | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/repospec.py | repospec.py |
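# A minimal usage sketch of RepoSpec above; the repository path is only an
# example and must point to an existing git working copy under the home
# directory.
def _repo_spec_example():
    from pathlib import Path
    from zensols.grsync import PathTranslator, RepoSpec
    translator = PathTranslator(Path.home())
    spec = RepoSpec(Path.home() / 'view/someproject', translator)
    # human readable summary, then the dict form used by the freeze process
    spec.write()
    print(spec.freeze())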
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Dict, Any, Iterable
import logging
from pathlib import Path
import platform
import zipfile
import json
from zensols.persist import persisted, PersistedWork
from zensols.grsync import (
FrozenRepo,
FileEntry,
LinkEntry,
PathTranslator,
Discoverer,
)
logger = logging.getLogger(__name__)
class Distribution(object):
"""Represents a frozen distribution.
"""
def __init__(self, path: Path, defs_file: Path, target_dir: Path,
path_translator: PathTranslator):
"""Initialize the distribution instance.
:param path: points to the distribution file itself
:param target_dir: points to the directory where we thaw the
distribution
:param path_translator: translates relative paths to the thaw directory
"""
self.path = path
self.defs_file = defs_file
self.target_dir = target_dir
self.path_translator = path_translator
self.params = {'os': platform.system().lower()}
@classmethod
def from_struct(cls: type, struct: Dict[str, Any],
target_dir: Path) -> Distribution:
"""Return a distrbution directly from the data structure created from
:class:`.Discoverer`.
:param struct: the data structure given by :meth:`.Discoverer.freeze`
using ``flatten=True``
:param target_dir: where the distribution will be *thawed*
"""
self = cls(None, None, target_dir, PathTranslator(target_dir))
self._struct = PersistedWork('_struct', self, initial_value=struct)
return self
@classmethod
def from_discoverer(cls: type, discoverer: Discoverer,
target_dir: Path) -> Distribution:
"""Return a distrbution directly from the data structure created from
:class:`.Discoverer`.
:param discoverer: a discoverer instance created by the *freeze* state
:param target_dir: where the distribution will be *thawed*
"""
fspec = discoverer.freeze(True)
return cls.from_struct(fspec, target_dir)
@property
@persisted('_struct')
def struct(self) -> Dict[str, Any]:
"""Return the JSON deserialized (meta data) of the distribution.
"""
with zipfile.ZipFile(str(self.path.resolve())) as zf:
with zf.open(self.defs_file) as f:
jstr = f.read().decode('utf-8')
struct = json.loads(jstr)
return struct
@property
def version(self) -> str:
"""Get the distribution format version, which for now, is just the application
version.
"""
if 'app_version' in self.struct:
return self.struct['app_version']
@property
@persisted('_files')
def files(self) -> Iterable[FileEntry]:
"""Get the files in the distribution.
"""
return map(lambda fi: FileEntry(self, fi), self.struct['files'])
@property
@persisted('_empty_dirs')
def empty_dirs(self) -> Iterable[FileEntry]:
"""Get empty directories defined in the dist configuration.
"""
return map(lambda fi: FileEntry(self, fi), self.struct['empty_dirs'])
@property
@persisted('_links')
def links(self) -> Iterable[LinkEntry]:
"""Pattern links and symbolic links not pointing to repositories.
"""
return map(lambda fi: LinkEntry(self, fi), self.struct['links'])
@property
@persisted('_repos')
def repos(self) -> Iterable[FrozenRepo]:
"""Repository specifications.
"""
repos = []
repo_pref = self.struct['repo_pref']
for rdef in self.struct['repo_specs']:
links = tuple(map(lambda fi: LinkEntry(self, fi),
rdef['links']))
repo = FrozenRepo(rdef['remotes'], links, self.target_dir,
self.path_translator.expand(rdef['path']),
repo_pref, self.path_translator)
repos.append(repo)
return repos | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/distribution.py | distribution.py |
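# A minimal sketch of reading a previously frozen distribution; the zip path
# and target directory are placeholders for whatever the freeze step produced
# (the metadata entry name ``conf/dist.json`` matches the default used by
# DistManager).
def _distribution_example():
    from pathlib import Path
    from zensols.grsync import PathTranslator, Distribution
    target = Path.home()
    dist = Distribution(Path('~/grsync/dist.zip').expanduser(),
                        'conf/dist.json', target, PathTranslator(target))
    print(dist.version)
    for repo in dist.repos:
        print(repo)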
__author__ = 'Paul Landes'
from typing import List
import logging
from pathlib import Path
from zensols.config import YamlConfig
from zensols.persist import persisted
from zensols.grsync import (
Discoverer,
Distribution,
FreezeManager,
ThawManager,
PathTranslator,
DistributionMover,
)
logger = logging.getLogger(__name__)
class DistManager(object):
"""The main entry point that supports saving user home directory information
(freezing) so that the data can later be restored (thawed). It does this
by finding git repositories and saving the remotes. It also finds symbolic
links, files and empty directories specified in the configuration.
"""
def __init__(self, config: YamlConfig, dist_dir: Path = None,
target_dir: Path = None, profiles: List[str] = None,
repo_preference: str = None, dry_run: bool = False):
"""Initialize.
:param config: the app config
:param dist_dir: the location of the frozen distribution
        :param target_dir: the location of the distribution to freeze or where
                           to expand (defaults to the user home)
:param profiles: the (maven like) profiles that define what to freeze
:param repo_preference: the repository to make master on thaw (default
to configuration file)
"""
self.config = config
# config will be missing on thaw
if config is None:
if dist_dir is None:
raise ValueError('missing dist file option')
self.dist_dir = Path(dist_dir)
else:
if dist_dir is not None:
self.config.dist_dir = dist_dir
self.dist_dir = self.config.dist_dir
if target_dir is not None:
self.target_dir = Path(target_dir).expanduser().absolute()
else:
self.target_dir = Path.home().absolute()
self.profiles = profiles
self.repo_preference = repo_preference
self.dry_run = dry_run
# configuration directory in the zip distribution
self.config_dir = 'conf'
# definitions file contains all the metadata (files, links etc)
self.defs_file = '{}/dist.json'.format(self.config_dir)
# the main distribution compressed file that will have the
# configuration needed to thaw, all saved files and symbolic links.
self.dist_file = Path(self.dist_dir, 'dist.zip')
        # resolve paths to and from the target directory
self.path_translator = PathTranslator(self.target_dir)
self._app_version = None
@property
def app_version(self) -> str:
return self._app_version
@app_version.setter
def app_version(self, app_version: str):
self._app_version = app_version
@property
@persisted('_discoverer')
def discoverer(self) -> Discoverer:
return Discoverer(
self.config, self.profiles, self.path_translator,
self.repo_preference)
@property
def distribution(self):
return Distribution(
self.dist_file, self.defs_file, self.target_dir,
self.path_translator)
def discover_info(self):
"""Proviate information about what's found in the user home directory. This is
later used to freeze the data.
"""
from pprint import pprint
pprint(self.discoverer.freeze(flatten=True))
def print_repos(self, fmt=None):
for repo_spec in self.discoverer.discover(False)['repo_specs']:
print(repo_spec.format(fmt=fmt))
def print_repo_info(self, names=None):
struct = self.discoverer.discover(flatten=True)
specs = {x.name: x for x in struct['repo_specs']}
if names is None:
names = sorted(specs.keys())
for name in names:
if name not in specs:
raise ValueError(f'no such repository: {name}')
specs[name].write()
def freeze(self, wheel_dependency=None):
"""Freeze the current configuration and file set to the distribution zip.
"""
fmng = FreezeManager(
self.config, self.dist_file, self.defs_file, self.discoverer,
self.app_version,
dry_run=self.dry_run)
fmng.freeze(wheel_dependency)
def thaw(self):
"""Expand the distribution zip on to the file system.
"""
tmng = ThawManager(self.distribution, self.path_translator,
self.app_version, self.dry_run)
tmng.thaw()
def move(self, destination_path, dir_reduce=True):
"""Move a thawed file set to ``destination_path``. If ``dir_reduce`` is
``True`` then recursively remove directories.
"""
mv = DistributionMover(
self.distribution, self.target_dir,
destination_path, dry_run=self.dry_run)
mv.move()
if dir_reduce:
mv.dir_reduce()
def copy(self):
"""Copy a local distribution to a different directory on the local file
system.
"""
disc = self.discoverer
dist = Distribution.from_discoverer(disc, self.dist_dir)
tmng = ThawManager(dist, self.path_translator,
self.app_version, self.dry_run)
tmng.thaw_from_in_memory(self.target_dir)
def tmp(self):
destination_path = Path('target/thaw').absolute()
mv = DistributionMover(
self.distribution, self.target_dir,
destination_path, dry_run=self.dry_run)
mv.dir_reduce() | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/distmng.py | distmng.py |
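# A minimal sketch of a freeze driven directly through DistManager; the
# configuration and directory paths are placeholders.  A thaw on the target
# host needs no configuration, only the distribution directory:
# ``DistManager(None, dist_dir=..., target_dir=...).thaw()``.
def _dist_manager_example():
    from pathlib import Path
    from zensols.grsync import AppConfig, DistManager
    config = AppConfig(Path('~/.grsync.yml').expanduser())
    # capture repos, files and links into dist.zip under dist_dir
    mng = DistManager(config, dist_dir=Path('~/grsync-dist').expanduser(),
                      dry_run=True)
    mng.discover_info()
    mng.freeze()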
__author__ = 'Paul Landes'
from typing import List, Dict, Any
from dataclasses import dataclass, field
from abc import ABCMeta
import logging
from pathlib import Path
from zensols.persist import persisted
from zensols.cli import LogConfigurator, ActionCliManager
from . import DistManager, AppConfig
logger = logging.getLogger(__name__)
@dataclass
class DistManagerFactory(object):
"""Creates instances of :class:`.DistManager`.
"""
path: Path = field()
"""The path to the YAML application configuration file."""
@property
@persisted('_config')
def config(self) -> AppConfig:
return None if self.path is None else AppConfig(self.path)
def __call__(self, **kwargs) -> DistManager:
params = dict(kwargs)
params['config'] = self.config
return DistManager(**params)
@dataclass
class Application(object):
"""Application base class.
"""
CLASS_INSPECTOR = {}
CLI_META = {'option_excludes': {'dist_mng_factory'},
'option_overrides': {'repo_pref': {'short_name': 'r'}}}
dist_mng_factory: DistManagerFactory = field()
"""The main class that freezes/thaws and provides repo information."""
def __post_init__(self):
self._params: Dict[str, Any] = {}
@property
@persisted('_dist_mng')
def dist_mng(self) -> DistManager:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'params: {self._params}')
return self.dist_mng_factory(**self._params)
@dataclass
class InfoApplication(Application):
"""Captures and syncronizes a user home and its Git repositories with other
hosts.
"""
CLI_META = ActionCliManager.combine_meta(
Application,
{'option_excludes': {'log_config'},
'mnemonic_overrides': {'list_profiles': 'profiles'}})
log_config: LogConfigurator = field()
"""The application log configurator."""
profiles: str = field(default=None)
"""Comma spearated list of profiles in config."""
def __post_init__(self):
super().__post_init__()
if self.profiles is not None:
self._params['profiles'] = AppConfig.split_profiles(self.profiles)
self.log_config.level = 'err'
self.log_config()
def info(self):
"""Pretty print discovery information."""
self.dist_mng.discover_info()
def repoinfo(self, names: str = None):
"""Get information on repositories.
:param names: the last component of the repo's directory name
"""
names: List[str] = None if names is None else names.split(',')
self.dist_mng.print_repo_info(names)
def repos(self, format: str = '{path}'):
"""Output all repository top level info.
:param format: format string (ie {name}: {path}, remotes={remotes},
dirty={dirty})
"""
self.dist_mng.print_repos(format)
def list_profiles(self):
"""Print the profiles """
config: AppConfig = self.dist_mng_factory.config
print('\n'.join(config.get_profiles()))
@dataclass
class ModifyApplication(Application, metaclass=ABCMeta):
"""Super class for applications that modify the file system.
"""
CLI_META = ActionCliManager.combine_meta(
Application,
{'option_overrides': {'dist_dir': {'metavar': 'DIRECTORY',
'short_name': 'd'},
'dry_run': {'short_name': None},
'profiles': {'short_name': 'p'}}})
dist_dir: Path = field(default=None)
"""The location of build out distribution."""
profiles: str = field(default=None)
"""Comma spearated list of profiles in config."""
dry_run: bool = field(default=False)
"""Do not do anything, just act like it."""
def __post_init__(self):
super().__post_init__()
for attr in 'dist_dir profiles dry_run'.split():
if hasattr(self, attr):
self._params[attr] = getattr(self, attr)
@dataclass
class TargetApplication(ModifyApplication, metaclass=ABCMeta):
CLI_META = ActionCliManager.combine_meta(
ModifyApplication,
{'option_overrides': {'target_dir': {'metavar': 'DIRECTORY',
'short_name': 't'}}})
target_dir: Path = field(default=None)
"""The location of build out target dir."""
def __post_init__(self):
super().__post_init__()
if self.target_dir is not None:
self._params['target_dir'] = self.target_dir
@dataclass
class FreezeApplication(ModifyApplication):
CLI_META = ModifyApplication.CLI_META
def freeze(self, wheel_dep: Path = Path('zensols.grsync'),
repo_pref: str = None):
"""Create a distribution.
        :param wheel_dep: used to create the wheel dependency files
:param repo_pref: the repository to make primary on thaw
"""
self.dist_mng.freeze(wheel_dep)
@dataclass
class ThawApplication(TargetApplication):
CLI_META = TargetApplication.CLI_META
def thaw(self):
"""Build out a distribution.
"""
self.dist_mng.thaw()
@dataclass
class CopyMoveApplication(TargetApplication):
CLI_META = ActionCliManager.combine_meta(
TargetApplication,
{'option_overrides':
{'move_dir': {'metavar': 'DIRECTORY'},
'dir_reduce': {'long_name': 'reduce', 'short_name': None}}})
def copy(self, repo_pref: str = None):
"""Build out a distribution.
:param repo_pref: the repository to make primary on thaw
"""
        self.dist_mng.copy()
def move(self, move_dir: Path = None, dir_reduce: bool = False):
"""Move a distribution to another root (easy to delete).
        :param move_dir: the location of the build out move directory
        :param dir_reduce: whether to remove empty directories after the move
"""
        self.dist_mng.move(move_dir, dir_reduce)
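# A minimal sketch of how the factory above builds a manager outside the CLI;
# the configuration path is a placeholder.
def _factory_example():
    from pathlib import Path
    from zensols.grsync.app import DistManagerFactory
    factory = DistManagerFactory(Path('~/.grsync.yml').expanduser())
    # keyword arguments are passed straight through to DistManager
    mng = factory(dry_run=True)
    mng.discover_info()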
__author__ = 'Paul Landes'
from typing import List, Union
import logging
from pathlib import Path
import itertools as it
import re
from zensols.config import YamlConfig
logger = logging.getLogger(__name__)
class AppConfig(YamlConfig):
"""Application specific configuration access and parsing.
Since much of the application centers around configuration of what to
    persist, this class does more heavy lifting than most configuration-like
classes.
"""
ROOT = 'discover'
OBJECTS_PATH = f'{ROOT}.objects'
PROFILES_PATH = f'{ROOT}.profiles'
EMPTY_DIR_PATH = f'{ROOT}.empty_dirs'
OBJECTS_PROFILE_PATH = f'{PROFILES_PATH}.{{}}.objects'
EMPTY_DIR_PROFILE_PATH = f'{PROFILES_PATH}.{{}}.empty_dirs'
def __init__(self, config_file=None, default_vars=None):
super(AppConfig, self).__init__(
config_file, delimiter='^', default_vars=default_vars)
@property
def _find_profiles(self):
if not self.has_option(self.PROFILES_PATH):
opts = ()
else:
opts = self.get_options(self.PROFILES_PATH)
return opts
@staticmethod
def split_profiles(profile_str):
return re.split(r'\s*,\s*', profile_str)
@property
def _default_profiles(self):
strlist = self.get_option(f'{self.ROOT}.default_profiles')
if strlist is not None:
return self.split_profiles(strlist)
def get_profiles(self, profile_overide: Union[str, List[str]] = None):
if profile_overide is None:
profiles = self._default_profiles
else:
if isinstance(profile_overide, str):
profiles = self.split_profiles(profile_overide)
else:
profiles = profile_overide
if profiles is None:
profiles = self._find_profiles
profiles = list(profiles)
# protect user error
if 'default' not in profiles:
profiles = ['default'] + list(profiles)
if 'nodefault' in profiles:
profiles.pop(profiles.index('default'))
profiles.pop(profiles.index('nodefault'))
return profiles
def _iterate_objects(self, profile):
if profile == 'default':
path = self.OBJECTS_PATH
else:
path = self.OBJECTS_PROFILE_PATH.format(profile)
opts = self.get_options(path)
if opts is None and profile == 'default':
opts = ()
if opts is None:
logger.warning(
f'no such profile for objects: {profile} for path {path}' +
'--maybe entries exist in other profiles')
opts = ()
return map(lambda x: x.strip(), opts)
def get_discoverable_objects(self, profiles):
return it.chain(*map(self._iterate_objects, profiles))
def get_empty_dirs(self, profiles):
paths = []
for profile in profiles:
if profile == 'default':
path = self.EMPTY_DIR_PATH
else:
path = self.EMPTY_DIR_PROFILE_PATH.format(profile)
opts = self.get_options(path)
if opts is None:
## warnings for missing empty directory entries is worth it
# logger.warning(
# f'no such profile for objects: {profile} for path {path}' +
# '--maybe entries exist in other profiles')
pass
else:
paths.extend(opts)
return map(lambda x: Path(x).expanduser().absolute(), paths)
def _get_path(self, name):
path = self.get_option(name)
if path is None:
            raise ValueError(f'no path defined for option: {name}')
return Path(path).expanduser().absolute()
@property
def dist_dir(self):
return self._get_path(f'{self.ROOT}.local.dist_dir')
@dist_dir.setter
def dist_dir(self, dist_dir):
if self.default_vars is None:
self.default_vars = {}
self.default_vars[f'{self.ROOT}.local.dist_dir'] = dist_dir
@property
def wheel_dir_name(self):
return self._get_path(f'{self.ROOT}.local.wheels_dir')
@property
def bootstrap_script_file(self):
return Path(self.dist_dir, 'bootstrap.sh') | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/config.py | config.py |
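# A minimal usage sketch of AppConfig above; the YAML path and profile names
# are placeholders.
def _config_example():
    from pathlib import Path
    from zensols.grsync import AppConfig
    config = AppConfig(Path('~/.grsync.yml').expanduser())
    # 'default' is always prepended unless 'nodefault' is given
    profiles = config.get_profiles('laptop,work')
    for pattern in config.get_discoverable_objects(profiles):
        print(pattern)
    print(config.dist_dir)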
__author__ = 'Paul Landes'
import logging
from git import Remote
from pathlib import Path
from zensols.persist import persisted
logger = logging.getLogger(__name__)
class PathTranslator(object):
"""Utility class around helping with paths.
"""
def __init__(self, target_path):
self.target_path = target_path
def relative_to(self, path):
"""Return a path that's relative to the user's home directory."""
return path.relative_to(self.target_path.resolve())
def to_relative(self, path):
return str(Path(self.target_path, path).absolute())
def expand(self, path):
"""Return the user's home directory as a ``pathlib.Path`` object."""
return Path.joinpath(self.target_path, path)
class SymbolicLink(object):
"""This classs represents a file system symbolic link. The class also freezes
created symbol links.
"""
def __init__(self, source: Path, path_translator: PathTranslator):
"""Create.
:param source: the :class:`.Path` that points to the symbolic link on
the local file system.
"""
self.source = source
self.path_translator = path_translator
self.use_count = 0
@property
def target(self):
"""The target (where it point to).
"""
return self.source.resolve()
@property
def source_relative(self):
"""The relative location source (to the user's home).
"""
if not hasattr(self, '_src'):
self._src = self.path_translator.relative_to(self.source)
return self._src
@property
def target_relative(self):
"""The relative location (where it points to) relative to the user's home.
"""
if not hasattr(self, '_dst'):
self._dst = self.path_translator.relative_to(self.target)
return self._dst
def increment_use_count(self):
"""Indicate this symblic link is used (linked) to another target.
"""
self.use_count += 1
def freeze(self):
"""Create and return an object graph as a dict of the link.
"""
return {'source': str(self.source_relative),
'target': str(self.target_relative)}
def __str__(self):
return '{} -> {}'.format(self.source, self.target)
def __repr__(self):
return self.__str__()
class FileEntry(object):
"""Represents a file based entry in the frozen version of the distribution zip.
"""
def __init__(self, dist, finfo: dict):
self.dist = dist
self.finfo = finfo
def _str_to_path(self, pathstr: str):
return Path(pathstr)
def _target_relative(self, path):
return self.dist.path_translator.expand(path)
@property
@persisted('_rel')
def relative(self):
"""Return the relative path of the file.
"""
return Path(self._str_to_path(self.finfo['rel']))
@property
@persisted('_path')
def path(self):
"""Return the absolute path of the file.
"""
return self._target_relative(self.relative)
@property
@persisted('_mode')
def mode(self):
"""Return the numeric mode of the file
"""
return self.finfo['mode']
@property
@persisted('_modestr')
def modestr(self):
"""Return a human readable string of the mode of the file.
"""
return self.finfo['modestr']
@property
@persisted('_modify_time')
def modify_time(self):
"""Return the numeric modify time of the file
"""
return self.finfo['modify_time']
def __str__(self):
return f'{self.relative} -> {self.path}: {self.mode} ({self.modestr})'
def __repr__(self):
return self.__str__()
class LinkEntry(FileEntry):
"""Represents a symbolic link in the frozen version of the distribution zip.
"""
def __init__(self, dist, finfo: dict, target_dir=None):
super(LinkEntry, self).__init__(dist, finfo)
self.target_dir = target_dir
def _str_to_path(self, pathstr: str):
return Path(pathstr.format(**self.dist.params))
def _target_relative(self, path):
if self.target_dir is not None:
return Path(self.target_dir, path)
else:
return super(LinkEntry, self)._target_relative(path)
@property
@persisted('_rel')
def relative(self):
return Path(self._str_to_path(self.finfo['source']))
@property
def source(self):
return self.path
@property
def target(self):
rel_path = Path(self._str_to_path(self.finfo['target']))
rel_path = self._target_relative(rel_path)
return rel_path
def __str__(self):
return f'{self.source} -> {self.target}'
class RemoteSpec(object):
"""This class represents a remote for a git repo.
"""
def __init__(self, remote: Remote, is_master=None):
"""Initialize.
:param remote: a remote object from the git repo
:param is_master: whether or not the remote is the primary (upstream)
remote
"""
self.remote = remote
with remote.config_reader as cr:
self.url = cr.get('url')
self.is_master = is_master
@property
def name(self):
"""Return the remote's name.
"""
return self.remote.name
def rename(self, name, url=None):
"""Rename the remote in the git repository itself, along with the class
instance.
"""
remote = self.remote
remote.rename(name)
with remote.config_writer as cw:
if url is not None:
cw.set('url', url)
        self.remote.repo.git.config('branch.master.pushremote', name)
def freeze(self):
"""Freeze/create an object graph representation of the remote as a dict.
"""
return {'name': self.name,
'url': self.url,
'is_master': self.is_master}
def __str__(self):
return '{}: {}'.format(self.name, self.url)
def __repr__(self):
return self.__str__() | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/domain.py | domain.py |
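# A minimal usage sketch of the path helpers above; the link path is a
# placeholder and must point to an existing symbolic link whose target lives
# under the home directory.
def _symlink_example():
    from pathlib import Path
    from zensols.grsync import PathTranslator, SymbolicLink
    translator = PathTranslator(Path.home())
    link = SymbolicLink(Path.home() / '.emacs', translator)
    # the frozen form records source and target relative to the home directory
    print(link.freeze())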
__author__ = 'Paul Landes'
from typing import Dict, Any
import os
import stat
import socket
import logging
import json
import zipfile
from pathlib import Path
from datetime import datetime
from zensols.persist import persisted
from zensols.grsync import (
RepoSpec,
SymbolicLink,
BootstrapGenerator,
PathTranslator,
AppConfig,
)
logger = logging.getLogger(__name__)
class Discoverer(object):
"""Discover git repositories, links, files and directories to save to
reconstitute a user home directory later.
"""
CONF_TARG_KEY = 'discover.target.config'
TARG_LINKS = 'discover.target.links'
REPO_PREF = 'discover.repo.remote_pref'
def __init__(self, config: AppConfig, profiles: list,
path_translator: PathTranslator, repo_preference: str):
self.config = config
self.profiles_override = profiles
self.path_translator = path_translator
self._repo_preference = repo_preference
def _get_repo_paths(self, paths):
"""Recusively find git repository root directories."""
git_paths = []
logger.debug('repo root search paths {}'.format(paths))
for path in paths:
logger.debug('searching git paths in {}'.format(path.resolve()))
for root, dirs, files in os.walk(path.resolve()):
rootpath = Path(root)
if rootpath.name == '.git':
git_paths.append(rootpath.parent)
return git_paths
def _discover_repo_specs(self, paths, links):
"""Return a list of RepoSpec objects.
:param paths: a list of paths each of which start a new RepoSpec
:param links: a list of symlinks to check if they point to the
repository, and if so, add them to the RepoSpec
"""
repo_specs = []
logger.debug(f'repo spec paths: {paths}')
for path in paths:
logger.debug(f'found repo at path {path}')
repo_spec = RepoSpec(path, self.path_translator)
repo_spec.add_linked(links)
if len(repo_spec.remotes) == 0:
logger.warning(f'repo {repo_spec} has no remotes--skipping...')
else:
repo_specs.append(repo_spec)
return repo_specs
@property
@persisted('_profiles')
def profiles(self):
if self.config is None:
raise ValueError('no configuration given; use the --help option')
return self.config.get_profiles(self.profiles_override)
def get_discoverable_objects(self):
"""Find git repos, files, sym links and directories to reconstitute
later.
"""
paths = []
if logger.isEnabledFor(logging.INFO):
            path: Path = self.config.config_file
            logger.info(f'finding objects to persist defined in {path}')
for fname in self.config.get_discoverable_objects(self.profiles):
path = Path(fname).expanduser().absolute()
logger.debug(f'file pattern {fname} -> {path}')
bname = path.name
dname = path.parent.expanduser()
files = list(dname.glob(bname))
logger.debug(f'expanding {path} -> {dname} / {bname}: {files}')
paths.extend(files)
return paths
def _create_file(self, src, dst=None, no_path_obj=False, robust=False):
"""Return a file object, which has the relative (rel) to home dir path,
absolute path (abs) used later to zip the file, and mode (mode and
modestr) information.
"""
dst = src if dst is None else dst
if src.exists():
mode = src.stat().st_mode
modestr = stat.filemode(mode)
modify_time = os.path.getmtime(src)
create_time = os.path.getctime(src)
elif not robust:
raise OSError(f'no such file: {src}')
else:
logger.warning(f'missing file: {src}--robustly skipping')
mode, modestr, create_time, modify_time = None, None, None, None
# the mode string is used as documentation and currently there is no
# way to convert from a mode string to an octal mode, which would be
# nice to allow modification of the dist.json file.
fobj = {'modestr': modestr,
'mode': mode,
'create_time': create_time,
'modify_time': modify_time}
if no_path_obj:
fobj['rel'] = str(self.path_translator.relative_to(dst))
else:
fobj['abs'] = src
fobj['rel'] = self.path_translator.relative_to(dst)
return fobj
def discover(self, flatten) -> Dict[str, Any]:
"""Main worker method to capture all the user home information (git repos,
files, sym links and empty directories per the configuration file).
:param flatten: if ``True`` then return a data structure appropriate
for pretty printing; this will omit data needed to
                        create the distribution so it shouldn't be used for the
freeze task
"""
files = []
dirs = []
empty_dirs = []
pattern_links = []
path_trans = self.path_translator
# find all things to persist (repos, symlinks, files, etc)
dobjs = self.get_discoverable_objects()
# all directories are either repositories or base directories to
# persist files in the distribution file
dirs_or_gits = tuple(
filter(lambda x: x.is_dir() and not x.is_symlink(), dobjs))
# find the directories that have git repos in them (recursively)
git_paths = self._get_repo_paths(dirs_or_gits)
# create symbolic link objects from those objects that are links
links = tuple(map(lambda l: SymbolicLink(l, self.path_translator),
filter(lambda x: x.is_symlink(), dobjs)))
#logger.debug(f'links: {links}')
# normal files are kept track of so we can compress them later
for f in filter(lambda x: x.is_file() and not x.is_symlink(), dobjs):
files.append(self._create_file(f))
# create RepoSpec objects that capture information about the git repo
repo_specs = self._discover_repo_specs(git_paths, links)
# these are the Path objects to where the repo lives on the local fs
repo_paths = set(map(lambda x: x.path, repo_specs))
# add the configuration used to freeze so the target can freeze again
if self.config.has_option(self.CONF_TARG_KEY):
config_targ = self.config.get_option(self.CONF_TARG_KEY)
src = Path(self.config.config_file)
dst = Path(config_targ).expanduser()
files.append(self._create_file(dst, dst))
logger.debug(f'files: {files}')
        # recursively find files that don't belong to git repos
def gather(par):
for c in par.iterdir():
if c.is_dir() and c not in repo_paths:
gather(c)
elif c.is_file():
files.append(self._create_file(c))
# find files that don't belong to git repos
for path in filter(lambda x: x not in repo_paths, dirs_or_gits):
logger.debug('dir {}'.format(path))
dirs.append({'abs': path, 'rel': path_trans.relative_to(path)})
gather(path)
        # configured empty directories are added only if they exist so we can
# recreate with the correct mode
logger.info(f'using profiles: {", ".join(self.profiles)}')
for ed in self.config.get_empty_dirs(self.profiles):
logger.debug('empty dir: {}'.format(str(ed)))
empty_dirs.append(self._create_file(
ed, no_path_obj=True, robust=True))
# pattern symlinks are special links that can change name based on
# variables like the platform name so each link points to a
# configuration file for that platform.
if self.config.has_option(self.TARG_LINKS):
dec_links = self.config.get_option(self.TARG_LINKS)
for link in map(lambda x: x['link'],
filter(lambda x: 'link' in x, dec_links)):
src = Path(link['source']).expanduser().absolute()
targ = Path(link['target']).expanduser().absolute()
pattern_links.append(
{'source': str(path_trans.relative_to(src)),
'target': str(path_trans.relative_to(targ))})
# create data structures for symbolic link integrity
files_by_name = {f['abs']: f for f in files}
for f in files:
if f['abs'].is_file():
dname = f['abs'].parent
files_by_name[dname] = dname
if flatten:
del f['abs']
f['rel'] = str(f['rel'])
# unused links pointing to repositories won't get created, so those not
# used by repos are added explicitly to pattern links
for link in links:
if link.use_count == 0:
try:
pattern_links.append(
{'source': str(link.source_relative),
'target': str(link.target_relative)})
except ValueError as e:
logger.error(f'couldn\'t create link: {link}')
raise e
if link.target in files_by_name:
dst = files_by_name[link.target]
# follow links enhancement picks up here
logger.debug(f'source {link.source} -> {dst}')
else:
logger.warning(f'hanging link with no target: {link}--' +
'proceeding anyway')
return {'repo_specs': repo_specs,
'empty_dirs': empty_dirs,
'files': files,
'links': pattern_links}
@property
def repo_preference(self):
"""Return the preference for which repo to make primary on thaw
"""
return self._repo_preference or \
(self.config.has_option(self.REPO_PREF) and
self.config.get_option(self.REPO_PREF))
def freeze(self, flatten=False):
"""Main entry point method that creates an object graph of all the data that
needs to be saved (freeze) in the user home directory to reconstitute
later (thaw).
:param flatten: if ``True`` then return a data structure appropriate
for pretty printing; this will omit data needed to
                        create the distribution so it shouldn't be used for the
freeze task
"""
disc = self.discover(flatten)
repo_specs = tuple(x.freeze() for x in disc['repo_specs'])
files = disc['files']
        logger.info('freezing with git repository ' +
f'preference: {self.repo_preference}')
disc.update({'repo_specs': repo_specs,
'repo_pref': self.repo_preference,
'files': files,
'source': socket.gethostname(),
'create_date': datetime.now().isoformat(
timespec='minutes')})
return disc
class FreezeManager(object):
"""Invoked by a client to create *frozen* distribution .
"""
CREATE_WHEEL = 'discover.wheel.create'
def __init__(self, config, dist_file, defs_file, discoverer, app_version,
dry_run: bool):
self.config = config
self.dist_file = dist_file
self.defs_file = defs_file
self.discoverer = discoverer
self.app_version = app_version
self.dry_run = dry_run
def _create_wheels(self, wheel_dependency):
"""Create wheel dependencies on this software so the host doesn't need Internet
connectivity. Currently the YAML dependency breaks this since only
binary per host wheels are available for download and the wrong was is
given of spanning platforms (i.e. OSX to Linux).
"""
wheel_dir_name = self.config.wheel_dir_name
        wheel_dir = Path(self.dist_file.parent, wheel_dir_name)
logger.info(f'creating wheels from dependency {wheel_dependency} in {wheel_dir}')
if not wheel_dir.exists():
wheel_dir.mkdir(parents=True, exist_ok=True)
from pip._internal import main
pip_cmd = f'wheel --wheel-dir={wheel_dir} --no-cache-dir {wheel_dependency}'
logger.debug('pip cmd: {}'.format(pip_cmd))
main(pip_cmd.split())
def _freeze_dist(self):
"""Freeze the distribution (see the class documentation).
"""
dist_dir = self.dist_file.parent
if not self.dry_run and not dist_dir.exists():
dist_dir.mkdir(parents=True, exist_ok=True)
data = self.discoverer.freeze()
data['app_version'] = self.app_version
if not self.dry_run:
with zipfile.ZipFile(self.dist_file, mode='w') as zf:
for finfo in data['files']:
fabs = finfo['abs']
frel = str(Path(finfo['rel']))
logger.debug(f'adding file: {fabs}')
zf.write(fabs, arcname=frel)
del finfo['abs']
finfo['rel'] = frel
logger.info(f'writing distribution defs to {self.defs_file}')
zf.writestr(self.defs_file, json.dumps(data, indent=2))
logger.info(f'created frozen distribution in {self.dist_file}')
def freeze(self, wheel_dependency=None):
"""Freeze the distribution by saving creating a script to thaw along with all
artifacts (i.e. repo definitions) in a zip file.
"""
self._freeze_dist()
script_file = self.config.bootstrap_script_file
if not self.dry_run:
bg = BootstrapGenerator(self.config)
bg.generate(script_file)
script_file.chmod(0o755)
        # wheel creation last since pip clobbers/reconfigures logging
if self.config.has_option(self.CREATE_WHEEL):
create_wheel = self.config.get_option(self.CREATE_WHEEL)
if create_wheel and wheel_dependency is not None:
self._create_wheels(wheel_dependency) | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/freeze.py | freeze.py |
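# A minimal sketch that ties the discoverer to the freeze manager the way
# DistManager does; ``config`` is assumed to be the grsync AppConfig and the
# output paths are placeholders.
def _freeze_example(config, app_version='0.0.0'):
    from pathlib import Path
    from zensols.grsync import PathTranslator, Discoverer, FreezeManager
    translator = PathTranslator(Path.home())
    disc = Discoverer(config, profiles=None, path_translator=translator,
                      repo_preference=None)
    fmng = FreezeManager(config, Path('/tmp/dist/dist.zip'), 'conf/dist.json',
                         disc, app_version, dry_run=True)
    fmng.freeze()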
import logging
import itertools as it
import shutil
from pathlib import Path
from zensols.grsync import (
Distribution,
RepoSpec,
FileEntry,
FrozenRepo,
)
logger = logging.getLogger(__name__)
class DistributionMover(object):
"""This class moves thawed files that are defined in a distribution zip. If
the file is not defined in the distribution then it doesn't move it.
In situations where you've already deleted the distribution zip, you'll
have to create a new distribution by freezing what you have. For this
reason it is recommended that you always include the original `grsync.yml`
configuration file in your distribution so it *migrates* along with each of
your freeze/thaw iterations.
"""
def __init__(self, dist: Distribution, target_dir=None,
destination_dir: Path = None,
force_repo=False, force_dirs=False, dry_run=False):
"""Initialize.
:param dist: the distrbution that represent the distribution zip
:param target_dir: the directory with the thawed files
:param destination_dir: where the thawed files/repos will be moved
:param force_repo: if ``True`` move repositories even if they're dirty
:param force_dirs: if ``True`` move directories even if they're not empty
:param dry_run: don't actually do anything, but log like we are
"""
self.dist = dist
self.target_dir = target_dir
if destination_dir is None:
destination_dir = Path('old_dist').absolute()
self.destination_dir = destination_dir
self.force_repo = force_repo
self.force_dirs = force_dirs
self.dry_run = dry_run
def _get_paths(self):
dist = self.dist
objs = (dist.links, dist.repos, dist.files, dist.empty_dirs)
paths = it.chain(map(lambda x: (x.path, x), it.chain(*objs)),
map(lambda l: (l.path, l),
it.chain(*map(lambda r: r.links, dist.repos))))
return sorted(paths, key=lambda x: len(x[0].parts), reverse=True)
def _dir_empty(self, path):
return sum(map(lambda x: 1, path.iterdir())) == 0
def _get_moves(self):
for src, obj in self._get_paths():
if not src.exists() and not src.is_symlink():
logger.warning(f'no longer exists: {src}')
else:
if isinstance(obj, FrozenRepo):
try:
grepo = obj.repo_spec.repo
except Exception:
# git.exc.InvalidGitRepositoryError
logger.error(f'invalid repository: {obj}--skipping')
continue
if grepo.is_dirty():
name = obj.repo_spec.format(RepoSpec.SHORT_FORMAT)
if self.force_repo:
logger.warning(f'repo is dirty: {name}; moving anyway')
else:
logger.warning(f'repo is dirty: {name}--skipping')
continue
elif isinstance(obj, FileEntry) and src.is_dir() and not src.is_symlink():
if not self._dir_empty(src):
if self.force_dirs:
logger.warning(f'directory not empty: {src}; ' +
'moving anyway')
else:
logger.warning(f'directory not empty: {src}--skipping')
continue
dst = self.destination_dir / src.relative_to(self.target_dir)
yield (src, dst.absolute())
def move(self):
"Move the files over."
logger.info(f'moving installed distribution to {self.destination_dir}')
for src, dst in self._get_moves():
logger.info(f'move {src} -> {dst}')
if not self.dry_run:
if src.exists() or src.is_symlink():
dst.parent.mkdir(parents=True, exist_ok=True)
shutil.move(str(src), str(dst))
else:
logger.warning(f'no longer exists: {src}')
def dir_reduce(self, parent=None):
"Remove empty directories recursively starting at ``parent``."
try:
if parent is None:
parent = self.target_dir
for child in parent.iterdir():
logger.debug(f'descending: {child}')
if child.is_dir() and not child.is_symlink():
self.dir_reduce(child)
if parent != self.target_dir and parent.is_dir():
if self._dir_empty(parent):
logger.info(f'deleting empty directory: {parent}')
if not self.dry_run:
parent.rmdir()
else:
logger.info(f'skipping non-empty directory delete: {parent}')
except Exception as e:
# be robust
logger.error(f"couldn't delete {parent}: {e}") | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/mover.py | mover.py |
__author__ = 'Paul Landes'
import logging
import traceback
import os
import zipfile
import shutil
from pathlib import Path
from git.exc import GitCommandError
from zensols.grsync import PathTranslator, Distribution
logger = logging.getLogger(__name__)
class ThawManager(object):
def __init__(self, dist: Distribution, path_translator: PathTranslator,
app_version: str, dry_run: bool = False):
self.dist = dist
self.path_translator = path_translator
self.app_version = app_version
self.dry_run = dry_run
def assert_version(self):
logger.info(f'app version: {self.app_version} =? {self.dist.version}')
if self.app_version is None:
raise ValueError('could not determine the application version')
if self.dist.version is None:
raise ValueError('distribution has incompatible version')
if self.app_version != self.dist.version:
raise ValueError('distribution has incompatible version: ' +
self.dist.version)
def _thaw_empty_dirs(self):
"""Create empty directories on the file system.
"""
for entry in self.dist.empty_dirs:
path = entry.path
if path.exists():
logger.warning(f'path already exists: {path}')
else:
logger.info(f'creating path {path}')
if not self.dry_run:
# we store the mode of the directory, but we don't want
# that to apply to all children dirs that might not exist
# yet
if entry.mode is None:
# use the default mode for missing directories during
# the freeze phase
path.mkdir(parents=True, exist_ok=True)
else:
path.mkdir(mode=entry.mode, parents=True, exist_ok=True)
def _thaw_files(self, zf):
"""Thaw files in the distribution by extracting from the zip file ``zf``. File
definitions are found in ``struct``.
"""
for entry in self.dist.files:
path = entry.path
parent = path.parent
if not parent.exists():
logger.info(f'creating parent directory: {parent}')
if not self.dry_run:
parent.mkdir(parents=True)
logger.debug(f'thawing file: {path}')
if path.exists():
logger.warning(f'path already exists: {path}')
else:
logger.info(f'{path}: mode={entry.modestr}, ' +
f'time={entry.modify_time}')
if not self.dry_run:
with zf.open(str(entry.relative)) as fin:
with open(str(path), 'wb') as fout:
shutil.copyfileobj(fin, fout)
logger.debug(f'setting mode of {path} to {entry.mode} ' +
f'({entry.modestr}, {entry.modify_time})')
if not self.dry_run:
path.chmod(entry.mode)
if entry.modify_time is not None:
os.utime(path, (entry.modify_time, entry.modify_time))
def _thaw_repos(self):
"""Thaw repositories in the config, which does a clone and then creates the
(remaining if any) remotes.
"""
for repo in self.dist.repos:
repo_path = repo.path
parent = repo_path.parent
logger.info(f'thawing repo: {repo}')
if not parent.exists():
logger.info(f'creating parent directory: {parent}')
if not self.dry_run:
parent.mkdir(parents=True, exist_ok=True)
try:
if not self.dry_run:
try:
thawed = repo.thaw()
logger.debug(f'thawed: {thawed}')
except Exception as e:
# robust
traceback.print_exc()
logger.error(f'could not thaw {repo}: {e}')
except GitCommandError as err:
logger.warning(f'couldn\'t create repo {repo_path}--skipping: {err}')
def _thaw_pattern_links(self):
"""Method to call other thaw methods based on type.
"""
for link in self.dist.links:
if link.source.exists():
logger.warning(f'link source already exists: {link.source}')
elif not link.target.exists():
logger.warning(
f'link target does not exist: {link}--skipping')
else:
logger.info(f'linking: {link}')
if not self.dry_run:
par = link.source.parent
if not par.exists():
logger.info(f'creating link directory: {par}')
par.mkdir(parents=True)
link.source.symlink_to(link.target)
def thaw(self):
"""Thaw the distribution, which includes creating git repositories, extracting
(frozen) files, creating symbolic links, and creating empty directories
that were captured/configured during the freezing phase.
"""
logger.info(f'expanding distribution in {self.dist.path}')
with zipfile.ZipFile(str(self.dist.path.resolve())) as zf:
self._thaw_empty_dirs()
self._thaw_files(zf)
self._thaw_repos()
self._thaw_pattern_links()
def _thaw_files_from_local(self, local_dir: Path):
"""Thaw files by copying from the local file system.
"""
for entry in self.dist.files:
path = entry.path
parent = path.parent
if not parent.exists():
logger.info(f'creating parent directory: {parent}')
if not self.dry_run:
parent.mkdir(parents=True)
logger.debug(f'copying file: {path}')
if path.exists():
logger.warning(f'path already exists: {path}')
else:
src = local_dir / entry.relative
logger.info(f'{src} -> {path}: mode={entry.modestr}, ' +
f'time={entry.modify_time}')
if not self.dry_run:
shutil.copy2(src, path, follow_symlinks=False)
def thaw_from_in_memory(self, source_dir: Path):
"""Copy a local distribution to a different directory on the local file system.
:param source_dir: the distribution directory from where to copy files
"""
self._thaw_empty_dirs()
self._thaw_files_from_local(source_dir)
self._thaw_repos()
self._thaw_pattern_links() | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/thaw.py | thaw.py |
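# Illustrative usage sketch (not part of the original module): it assumes the
# `Distribution` and `PathTranslator` instances are created elsewhere (as the
# CLI does) and that '0.3.0' stands in for the real application version.
def _example_thaw(dist: Distribution, path_translator: PathTranslator):
    mng = ThawManager(dist, path_translator, app_version='0.3.0', dry_run=True)
    # raises ValueError on a version mismatch, then expands the zip contents
    mng.assert_version()
    mng.thaw()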
__author__ = 'Paul Landes'
import logging
logger = logging.getLogger(__name__)
class BootstrapGenerator(object):
"""Generate the script that creates the distribution on the target machine.
"""
SCRIPT = """\
#!/bin/sh
if [ $# -eq 0 ] ; then
echo "usage: $0 <python_dir> [grsync dir] [python<version>]"
echo "where: python_dir is the bin directory where python is installed"
echo " grsync_dir is the distribution directory copied from the source"
echo " python<version> is the version of python (i.e. python3.6)"
exit 1
fi
NATIVE_PYTHON_BIN_DIR=$1
if [ $# -ge 2 ]; then
echo "setting inst dir: $2"
GRSYNC_INST_DIR=$2
else
GRSYNC_INST_DIR=`pwd`
fi
if [ $# -ge 3 ]; then
echo "setting python ver: $3"
PYTHON_VER=$3
else
PYTHON_VER=$NATIVE_PYTHON_BIN_DIR
fi
PYTHON_DIR=${HOME}/opt/lib/python3
PIP=${PYTHON_DIR}/bin/pip3
PYTHON_PAR=`dirname $PYTHON_DIR`
WHEELS_DIR=${GRSYNC_INST_DIR}/%(wheel_dir)s
WHEELS=${WHEELS_DIR}/*.whl
if [ -f ${PIP} ] ; then
PIP=${PYTHON_DIR}/bin/pip3
fi
echo "GRSYNC_INST_DIR=${GRSYNC_INST_DIR}"
echo "PYTHON_DIR=${PYTHON_DIR}"
echo "PYTHON_VER=${PYTHON_VER}"
echo "PIP=${PIP}"
echo "PYTHON_PAR=${PYTHON_PAR}"
echo "WHEELS_DIR=${WHEELS_DIR}"
echo "WHEELS=${WHEELS}"
echo "bootstrapping python env in ${PYTHON_DIR}, wheels: ${WHEELS}"
rm -rf $PYTHON_PAR
cmd="${NATIVE_PYTHON_BIN_DIR}/bin/python3 -m venv --copies ${PYTHON_DIR}"
echo "invoke $cmd"
mkdir -p $PYTHON_PAR && \
cd $PYTHON_PAR && \
$cmd && \
cd - || exit 1
if [ -d ${WHEELS_DIR} ] ; then
echo "installing from wheel"
${PIP} install ${GRSYNC_INST_DIR}/%(wheel_dir)s/zensols.grsync*
else
echo "installing from net"
${PIP} install zensols.grsync
fi
# ${PIP} install ${WHEELS}
rm ${HOME}/.bash* ${HOME}/.profile*
# echo to thaw the repo: ${PYTHON_DIR}/bin/grsync thaw -d ${GRSYNC_INST_DIR}
${PYTHON_DIR}/bin/grsync thaw -d ${GRSYNC_INST_DIR}
"""
PARAM_PATH = 'discover.bootstrap'
def __init__(self, config):
self.config = config
def generate(self, path):
params = self.config.get_options(self.PARAM_PATH)
script = self.SCRIPT % params
logger.info('creating bootstrap script at: {}'.format(path))
with open(path, 'w') as f:
f.write(script) | zensols.grsync | /zensols.grsync-0.3.0-py3-none-any.whl/zensols/grsync/bootstrap.py | bootstrap.py |
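# Illustrative usage sketch (not part of the original module): the config
# object here is a stand-in; the only behavior used by the class above is that
# `get_options('discover.bootstrap')` returns a dict with a 'wheel_dir' key,
# which is substituted into the script template.
if __name__ == '__main__':
    class _StubConfig(object):
        def get_options(self, path):
            return {'wheel_dir': 'wheels'}

    BootstrapGenerator(_StubConfig()).generate('bootstrap.sh')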
import logging
import socket
import re
import os
import sys
import getpass
from subprocess import call
from zensols.actioncli import SimpleActionCli
from zensols.actioncli import Config
logger = logging.getLogger('zensols.env.conn')
class Connector(object):
"""Connect to a host via xterm, ssh login, etc.
"""
def __init__(self, config, host_name=None, user_name=None, dry_run=False,
domain=None, output_file=None):
self.config = config
self.host_name = host_name or config.get_option('host_name', expect=True)
self._user_name = user_name
self.dry_run = dry_run
self._domain = domain
self.output_file = output_file
@property
def domain(self):
if self._domain is not None:
return self._domain
else:
hname = socket.getfqdn()
logger.debug('fqdn: %s' % hname)
if hname == '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa':
return None
elif hname == '1.0.0.127.in-addr.arpa':
return None
else:
match = re.match(r'^[^.]+\.(.+)$', hname)
return match.group(1) if match else hname
@property
def user_name(self):
if self._user_name is not None:
return self._user_name
else:
return getpass.getuser()
def _get_params_section(self, section):
sections = self.config.sections
logger.debug('param search in %s for sections: %s' % (section, sections))
if sections and section in sections:
host_params = self.config.get_options(section, vars=os.environ)
return host_params
def _get_params_from_config(self, section_prefix=None):
domain = self.domain
logger.debug('domain: %s' % domain)
if section_prefix: section_prefix = section_prefix + ' '
else: section_prefix = ''
domain = '.' + domain if domain else ''
init_section = '%s%s%s' % (section_prefix, self.host_name, domain)
section = init_section
logger.debug('section: %s' % section)
host_params = self._get_params_section(section)
if not host_params:
section = '%s %s' % (self.host_name, self.config.default_section)
host_params = self._get_params_section(section)
if not host_params:
# for error message
section = '<%s>, <%s %s> or <%s>' % (init_section, self.host_name, self.config.default_section, self.config.default_section)
host_params = self.config.get_options(vars=os.environ)
if not 'user_name' in host_params:
host_params['user_name'] = self.user_name
if not 'host_name' in host_params:
host_params['host_name'] = self.host_name
host_params['section'] = section
logger.debug('params: %s' % host_params)
return host_params
@property
def command_keys(self):
return 'host_name domain user_name ssh_port ssh_switches type'.split(' ')
@property
def mount_keys(self):
return 'remote_mount_point local_mount_point'.split(' ')
@property
def context_keys(self):
return 'ssh_switches'.split(' ')
def get_params(self, section_prefix=None, command_keys=None, optional_keys=[]):
params = self._get_params_from_config(section_prefix)
logger.debug('params: <%s>, domain: %s' % (params, self.domain))
params['domain'] = self.domain
context_keys = self.context_keys
for k in context_keys:
params[k] = None
params.update(self.config.get_options(opt_keys=context_keys, vars=os.environ))
command_keys = command_keys if command_keys else self.command_keys
logger.debug('command keys: %s' % command_keys)
cmd_keys = set(command_keys) - {'type'}
opt_keys = params.keys() & set(optional_keys)
logger.debug('params: <%s>, command keys: <%s>' % (params, cmd_keys))
params_sub = {k: params[k] for k in (params.keys() & cmd_keys) | opt_keys}
logger.debug('params_sub: %s' % params_sub)
for p in cmd_keys:
logger.debug('cmd key set: %s' % p)
if not p in params_sub:
raise ValueError('missing configuration key \'%s\' for section: %s' % (p, params['section']))
return params_sub
def print_info(self):
params = self.get_params()
ctx_keys = set(self.context_keys)
for k in sorted(self.command_keys):
if k in params and not k in ctx_keys:
kname = k.replace('_', '-')
print('%s: %s' % (kname, params[k]))
def print_environment(self):
params = self.get_params()
ctx_keys = set(self.context_keys)
for k in sorted(self.command_keys):
if k in params and not k in ctx_keys:
kname = k.upper()
print('export %s=%s' % (kname, params[k]))
def get_command_args_lists(self, conn_type):
if conn_type == 'mount': return self._get_command_mount_args()
elif conn_type == 'umount': return self._get_command_umount_args()
else: return self._get_command_default_args(conn_type)
def _get_command_mount_args(self):
params = self.get_params(optional_keys=['mounts'])
mounts = params['mounts'] if 'mounts' in params else ''
mounts = re.split('[ ,]', mounts)
cmds = []
for mname in mounts:
logger.debug('mount found: %s' % mname)
params.update(self.get_params(section_prefix=mname, command_keys=self.mount_keys))
addr = '%(user_name)s@%(host_name)s:%(remote_mount_point)s' % params
args = ['sshfs', addr]
args.append(params['local_mount_point'])
mount_opts = self.config.get_option('mount_options')
ssh_opts = '-oport=' + params['ssh_port']
if mount_opts: ssh_opts = ssh_opts + ',' + mount_opts
ssh_opts = ssh_opts + ',volname=' + mname
args.append(ssh_opts)
cmds.append(args)
return cmds
def _get_command_umount_args(self):
params = self.get_params(optional_keys=['mounts'])
mounts = params['mounts'] if 'mounts' in params else ''
mounts = re.split('[ ,]', mounts)
cmds = []
for mname in mounts:
logger.debug('mount found: %s' % mname)
params.update(self.get_params(section_prefix=mname, command_keys=self.mount_keys))
args = ['umount', params['local_mount_point']]
cmds.append(args)
return cmds
def _get_command_default_args(self, conn_type):
params = self.get_params()
conn_cfg = {'xterm': ['/usr/bin/xterm', 'ssh', ['-f']],
'login': [None, 'ssh', []],
'emacs': ['/usr/local/emacs/bin/emacs', 'ssh', ['-f']],
}[conn_type]
conn_type, bin_name, extra_args = conn_cfg
args = [bin_name]
params['type'] = conn_type
if params['ssh_switches']:
args.extend(params['ssh_switches'].split(' '))
args.extend(extra_args)
addr = '%(user_name)s@%(host_name)s' % params
args.extend(['-p', params['ssh_port'], addr, conn_type])
args = list(filter(lambda x: x is not None, args))
return [args]
def _args_to_command(self, args):
return ' '.join(args)
def get_commands(self, conn_type, single_command=False):
args_list = self.get_command_args_lists(conn_type)
if single_command: return args_list
else: return list(map(lambda x: self._args_to_command(x), args_list))
def exec_commands(self, conn_type):
args_list = self.get_commands(conn_type, single_command=True)
for args in args_list:
cmd = self._args_to_command(args)
logger.info('invoking %s' % cmd)
if not self.dry_run:
logger.debug('args: %s' % args)
call(args)
return args_list
def _create_bourne(self, writer=sys.stdout):
cmds = 'mount umount login xterm emacs'.split()
writer.write('#!/bin/sh\n\n')
writer.write('USAGE="usage: hostcon.sh <{}>"\n\n'.format('|'.join(cmds)))
writer.write('case "$1" in\n')
for conn_type in cmds:
cmd = self.get_commands(conn_type)[0]
writer.write('{}{})\n'.format(' ' * 4, conn_type))
writer.write('{}{}\n'.format(' ' * 8, cmd))
writer.write('{};;\n\n'.format(' ' * 8))
#print(args_list)
writer.write('{}*)\n'.format(' ' * 4))
writer.write('{}echo $USAGE\n'.format(' ' * 8))
writer.write('{};;\n'.format(' ' * 8))
writer.write('esac')
def create_bourne(self):
writer = sys.stdout
try:
if self.output_file is not None:
writer = open(self.output_file, 'w')
logger.info('writing output to file: {}...'.format(self.output_file))
self._create_bourne(writer)
if writer != sys.stdout:
os.chmod(self.output_file, 0o755)
finally:
if writer != sys.stdout:
logger.info('wrote script to {}'.format(self.output_file))
def exec_mount(self):
self.exec_commands('mount')
def exec_umount(self):
self.exec_commands('umount')
def exec_login(self):
self.exec_commands('login')
def exec_xterm(self):
self.exec_commands('xterm')
def exec_emacs(self):
self.exec_commands('emacs') | zensols.hostcon | /zensols.hostcon-0.5.0-py3-none-any.whl/zensols/hostcon/connector.py | connector.py |
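# Illustrative usage sketch (not part of the original module): it assumes
# `config` is a hostcon configuration (e.g. an `AppConfig`) that defines a
# section for a made up 'devbox' host with ssh_port, ssh_switches and the
# other required keys; only the `Connector` calls come from the class above.
def _example_connect(config):
    conn = Connector(config, host_name='devbox', dry_run=True)
    conn.print_info()
    # with dry_run=True this only logs the ssh command it would invoke
    conn.exec_login()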
from zensols.actioncli import OneConfPerActionOptionsCliEnv
from zensols.hostcon import Connector, AppConfig
class ConfAppCommandLine(OneConfPerActionOptionsCliEnv):
"""Command line entry point.
"""
def __init__(self):
host_op = ['-n', '--hostname', False,
{'dest': 'host_name',
'help': 'the host to connect to'}]
dryrun_op = ['-d', '--dryrun', False,
{'dest': 'dry_run',
'action': 'store_true',
'help': 'don\'t actually connect, but act like it'}]
output_file_op = ['-o', '--output', False,
{'dest': 'output_file',
'metavar': 'FILE',
'help': 'output file for the script actions'}]
cnf = {'executors':
[{'name': 'fixer',
'executor': lambda params: Connector(**params),
'actions': [{'name': 'info',
'meth': 'print_info',
'doc': 'print configuration info',
'opts': [host_op, dryrun_op]},
{'name': 'env',
'meth': 'print_environment',
'doc': 'print info as environment variables',
'opts': [host_op, dryrun_op]},
{'name': 'script',
'meth': 'create_bourne',
'doc': 'create a script using current network',
'opts': [host_op, output_file_op]},
{'name': 'xterm',
'meth': 'exec_xterm',
'doc': 'start an xterm on host',
'opts': [host_op, dryrun_op]},
{'name': 'emacs',
'meth': 'exec_emacs',
'doc': 'start emacs on the host',
'opts': [host_op, dryrun_op]},
{'name': 'mount',
'meth': 'exec_mount',
'doc': 'mount directories from host locally',
'opts': [host_op, dryrun_op]},
{'name': 'umount',
'meth': 'exec_umount',
'doc': 'un-mount directories',
'opts': [host_op, dryrun_op]},
{'name': 'login',
'meth': 'exec_login',
'doc': 'slogin to host',
'opts': [host_op, dryrun_op]}]}],
'config_option': {'name': 'config',
'opt': ['-c', '--config', False,
{'dest': 'config', 'metavar': 'FILE',
'help': 'configuration file'}]},
'whine': 1}
super(ConfAppCommandLine, self).__init__(
cnf, config_env_name='hostconrc', config_type=AppConfig,
pkg_dist='zensols.hostcon')
def config_parser(self):
super(ConfAppCommandLine, self).config_parser()
self._add_short_option(self.parser)
if self.default_config_file is not None:
config = AppConfig(self.default_config_file)
self.default_action = config.get_option('action')
def main():
cl = ConfAppCommandLine()
cl.invoke() | zensols.hostcon | /zensols.hostcon-0.5.0-py3-none-any.whl/zensols/hostcon/cli.py | cli.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Sequence, Union
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
import logging
import re
import urllib
import shutil
from pathlib import Path
from urllib.parse import ParseResult
import patoolib
from zensols.config import Dictable
from . import InstallError
logger = logging.getLogger(__name__)
@dataclass
class FileSystemUpdateContext(Dictable):
"""The context given to a :class:`.FileSystemUpdate`.
"""
resource: Resource = field()
"""The :class:`.Resource` that created this context and updating the file
system.
"""
check_path: Path = field()
"""The installer relative :obj:`.Resource.check_path`."""
target: Path = field()
"""The installer relative target path from :class:`.Resource`."""
@dataclass
class FileSystemUpdate(Dictable, metaclass=ABCMeta):
"""A command (GoF pattern) to udpate the file system after a resource has
decompressed a file. First experiment with :class:`.ListUpdate`, then find
the corresponding command with :obj:`dry_run` turned on, then turn it off
once you've validated its doing the right thing.
Path fields (i.e. :obj:`.ListUpdate.path`) are formatted with the dictionary
version of :class:`.FileSystemUpdateContext` and also a ``target`` property
with the uncompressed path.
"""
dry_run: bool = field()
"""If ``True`` don't do anything, just act like it."""
def _format_str(self, context: FileSystemUpdateContext, val: str) -> str:
return val.format(**context.asdict())
def _format_path(self, context: FileSystemUpdateContext,
attr: str, make_path: bool = True) -> \
Union[Path, str]:
val: str = getattr(self, attr)
path_str: str = self._format_str(context, val)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{attr}({val}) -> {path_str}')
return Path(path_str) if make_path else path_str
@abstractmethod
def invoke(self, context: FileSystemUpdateContext):
pass
@dataclass
class ListUpdate(FileSystemUpdate):
"""Lists the contents of :obj:`path`.
"""
path: str = field()
"""A file or directory to list."""
def invoke(self, context: FileSystemUpdateContext):
pdir: Path = self._format_path(context, 'path')
if logger.isEnabledFor(logging.INFO):
logger.info(f'listing {pdir}')
for path in pdir.iterdir():
logger.info(f'list: {path.absolute()}')
@dataclass
class MoveUpdate(FileSystemUpdate):
"""Move file(s) from :obj:`source` to :obj:`target`.
"""
source: str = field()
"""The source glob (i.e. ``{target}/../*)`` from where to move the files."""
target: str = field()
"""The target directory where the files end up."""
def invoke(self, context: FileSystemUpdateContext):
source: str = self._format_path(context, 'source').resolve().absolute()
target: Path = self._format_path(context, 'target').resolve().absolute()
source: Path
if logger.isEnabledFor(logging.INFO):
logger.info(f'moving {source} -> {target}')
for source in source.parent.glob(source.name):
source = source.resolve().absolute()
if logger.isEnabledFor(logging.INFO):
logger.info(f'renaming {source} -> {target}')
if not self.dry_run:
shutil.move(source, target)
@dataclass
class RemoveUpdate(FileSystemUpdate):
"""Remove/clean up files to help keep the file system "clean".
"""
paths: Sequence[str] = field()
"""The list of path formatted files to remove. For example
``{target}/../__MACOSX``.
"""
def _rm_path(self, path: Path):
if path.is_dir():
if logger.isEnabledFor(logging.INFO):
logger.info(f'removing clean up dir: {path}')
if not self.dry_run:
shutil.rmtree(path)
elif path.is_file():
if logger.isEnabledFor(logging.INFO):
logger.info(f'removing clean up file: {path}')
if not self.dry_run:
path.unlink()
elif logger.isEnabledFor(logging.INFO):
logger.info(f'skipping non-existent clean up dir: {path}')
def invoke(self, context: FileSystemUpdateContext):
for path_str in self.paths:
path = Path(self._format_str(context, path_str))
path = path.resolve().absolute()
self._rm_path(path)
@dataclass
class Resource(Dictable):
"""A resource that is installed by downloading from the Internet and then
optionally uncompressed. Once the file is downloaded, it is only
uncompressed if it is an archive file. This is determined by the file
extension.
"""
_DICTABLE_ATTRIBUTES = 'remote_name is_compressed compressed_name'.split()
_FILE_REGEX = re.compile(r'^(.+)\.(tar\.gz|tgz|tar\.bz2|gz|bz2|' +
'|'.join(patoolib.ArchiveFormats) + ')$')
_NO_FILE_REGEX = re.compile(r'^(?:.+/)?(.+?)\.(.+)?$')
url: str = field()
"""The URL that locates the file to install."""
name: str = field(default=None)
"""Used for local file naming."""
remote_name: str = field(default=None)
"""The name of extracted file (or root directory if a compressed file) after
being downloaded. If this isn't set, it is taken from the file name portion
of the path of the URL.
"""
is_compressed: bool = field(default=None)
"""Whether or not the file is compressed. If this isn't set, it is derived
from the file name.
"""
rename: bool = field(default=True)
"""If ``True`` then rename the directory to the :obj:`name`."""
check_path: str = field(default=None)
"""The file to check for existance before doing uncompressing."""
sub_path: Path = field(default=None)
"""The path to a file in the compressed file after it is extracted. This is
only used to obtain the file name in :meth:`get_file_name` when used to
locate the uncompressed resource file.
"""
clean_up: bool = field(default=True)
"""Whether or not to remove the downloaded compressed after finished."""
updates: Sequence[FileSystemUpdate] = field(default=())
"""The file system updates to apply after the file has been decompressed."""
def __post_init__(self):
url: ParseResult = urllib.parse.urlparse(self.url)
remote_path: Path = Path(url.path)
remote_name: str
m = self._FILE_REGEX.match(remote_path.name)
if m is None:
m = self._NO_FILE_REGEX.match(remote_path.name)
self._extension = None
if m is None:
remote_name = remote_path.name
else:
remote_name = m.group(1)
if self.name is None:
self.name = remote_path.name
else:
remote_name, self._extension = m.groups()
if self.name is None:
self.name = remote_name
if self.remote_name is None:
self.remote_name = remote_name
if self.is_compressed is None:
self.is_compressed = self._extension is not None
def uncompress(self, path: Path = None, out_dir: Path = None) -> bool:
"""Uncompress the file.
:param path: the file to uncompress
:param out_dir: where the uncompressed files are extracted
"""
uncompressed = False
if path is None:
src = Path(self.compressed_name)
out_dir = Path('.')
else:
src = path
if out_dir is None:
out_dir = path.parent
# the target is the name we want after the process completes
target: Path = out_dir / self.name
# this is the name of the resulting file we expect; the user can
# override it if they know what the real resulting file is
if self.check_path is None:
check_path = target
else:
check_path = out_dir / self.check_path
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'check path: {check_path}')
# uncompress if we can't find where the output is supposed to go
if not check_path.exists():
if logger.isEnabledFor(logging.INFO):
logger.info(f'uncompressing {src} to {out_dir}')
out_dir.mkdir(parents=True, exist_ok=True)
patoolib.extract_archive(str(src), outdir=str(out_dir))
uncompressed = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'rename: {self.rename}, ' +
f'path ({check_path}) exists: {check_path.exists()}')
# the extracted data can either be a file (gz/bz2) or a directory;
# compared to what we want to rename it to (the target directory)
#
# note: the check path has to be what it extracts as, otherwise it will
# extract it again the next time it checks; if the directory extracts as
# something other than the file name, set both the name and the check
# path to whatever that path is
if self.rename and not check_path.exists():
# the source is where it was extracted
extracted: Path = out_dir / self.remote_name
if not extracted.exists():
raise InstallError(f'Trying to create {check_path} but ' +
f'missing extracted path: {extracted}')
if logger.isEnabledFor(logging.INFO):
logger.info(f'renaming {extracted} to {target}')
extracted.rename(target)
if self.clean_up:
if logger.isEnabledFor(logging.INFO):
logger.info(f'cleaning up downloaded file: {src}')
src.unlink()
update_context = FileSystemUpdateContext(self, check_path, target)
update: FileSystemUpdate
for update in self.updates:
logger.info(f'updating: {update}')
update.invoke(update_context)
return uncompressed
@property
def compressed_name(self) -> str:
"""The file name with the extension and used to uncompress. If the
resource isn't compressed, just the name is returned.
"""
if self.is_compressed:
name = f'{self.name}'
if self._extension is not None:
name = f'{name}.{self._extension}'
else:
name = self.name
return name
def get_file_name(self, compressed: bool = False) -> str:
"""Return the path where a resource is installed.
:param compressed: if ``True``, return the path where its compressed
file (if any) lives
:return: the path of the resource
"""
fname = self.compressed_name if compressed else self.name
if fname is None:
fname = self.remote_name
if not compressed and self.sub_path is not None:
fname = str(Path(fname, self.sub_path))
return fname | zensols.install | /zensols.install-1.0.0-py3-none-any.whl/zensols/install/resource.py | resource.py |
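# Illustrative sketch (not part of the original module) showing how the
# derived fields fall out of ``__post_init__``; the URL is a placeholder and
# no network access happens here since only the URL string is parsed.
if __name__ == '__main__':
    res = Resource(url='https://example.com/corpora/model.zip')
    # with no explicit name, the name is derived from the URL ('model'), the
    # '.zip' extension marks it compressed, and compressed_name is 'model.zip'
    print(res.name, res.remote_name, res.is_compressed, res.compressed_name)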
from typing import Dict, Any, Optional
from dataclasses import dataclass, field
import logging
from pathlib import Path
import urllib.request
from urllib.request import Request
from urllib import parse
from urllib.error import HTTPError
from http.client import HTTPResponse
from ssl import SSLContext
import base64
from tqdm import tqdm
from . import InstallError
logger = logging.getLogger(__name__)
class _DownloadProgressBar(tqdm):
"""Tqdm progress automoation.
"""
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update((b * bsize) - self.n)
@dataclass
class Downloader(object):
"""A utility class to download a file and (optionally) display a progress
bar as it downloads.
"""
DEFAULT_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:101.0) Gecko/20100101 Firefox/101.0'
use_progress_bar: bool = field(default=True)
"""Whether or not to render the progress bar as the file downloads."""
skip_if_exists: bool = field(default=True)
"""Skip download if the file exists as the target path."""
mkdir: bool = field(default=True)
"""Recursively create directories for the target path if they do not already
exist.
"""
buffer_size: int = field(default=((2 ** 20) * 10))
"""The size in bytes of the read buffer with a default of 10MB."""
tqdm_params: Dict[str, Any] = field(
default_factory=lambda: {'unit': 'B', 'unit_scale': True})
"""Parameters given to :mod:`tqdm` for the progress bar when downloading.
"""
user_agent: str = field(default=DEFAULT_AGENT)
"""The user agent header used for the file request."""
def _create_context(self, request: Request) -> Optional[Any]:
pass
def _download_result(self, result: Request, output_path: Path,
tqdm: tqdm = None):
with open(output_path, 'wb') as f:
while True:
data: bytearray = result.read(self.buffer_size)
if not data:
break
if tqdm is not None:
tqdm.update(len(data))
f.write(data)
def _start_download(self, url: str, output_path: Path):
headers: Dict[str, str] = {}
if self.user_agent is not None:
headers['User-Agent'] = self.user_agent
url_info: parse.ParseResult = parse.urlparse(url)
request: Request
result: HTTPResponse
context: SSLContext
try:
request = Request(url, headers=headers)
context = self._create_context(request)
except Exception as e:
raise InstallError(f"Could not access '{url}' in {self}: {e}", e)
try:
result = urllib.request.urlopen(request, context=context)
except HTTPError as e:
raise InstallError(f"Could not acceess '{url}: {e}'")
if self.use_progress_bar and url_info.scheme != 'file':
flen = result.length
params = dict(self.tqdm_params)
params.update({'miniters': 1, 'desc': url.split('/')[-1]})
if flen is not None and flen > 0:
params['total'] = flen
with tqdm(**params) as t:
self._download_result(result, output_path, t)
else:
self._download_result(result, output_path)
def download(self, url: str, output_path: Path):
if self.skip_if_exists and output_path.is_file():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{output_path} is already downloaded')
else:
parent = output_path.parent
if self.mkdir and not parent.is_dir():
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating directory: {parent}')
parent.mkdir(parents=True)
if logger.isEnabledFor(logging.INFO):
logger.info(f'downloading {url} to {output_path}')
self._start_download(url, output_path)
@dataclass
class AuthenticationDownloader(Downloader):
"""Download by first authenticating by login with the server.
"""
user: str = field(default=None)
"""The user for the authentication."""
password: str = field(default=None)
"""The password for the authentication."""
check_certificate: bool = field(default=False)
"""Whether to check the server's certification for validity."""
def _create_context(self, request: Request) -> Optional[Any]:
lstr = base64.b64encode(bytes(f'{self.user}:{self.password}', 'ascii'))
decoded = lstr.decode('utf-8')
request.add_header('Authorization', f'Basic {decoded}')
if not self.check_certificate:
return SSLContext() | zensols.install | /zensols.install-1.0.0-py3-none-any.whl/zensols/install/download.py | download.py |
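# Illustrative usage sketch (not part of the original module): the URL and
# output path are placeholders, and the progress bar is turned off so the
# example stays quiet in non-interactive runs.
if __name__ == '__main__':
    downloader = Downloader(use_progress_bar=False)
    downloader.download('https://example.com/files/data.csv',
                        Path('/tmp/data.csv'))
    # the authenticating variant adds an HTTP basic auth header to the request
    auth = AuthenticationDownloader(user='someuser', password='changeme')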
__author__ = 'Paul Landes'
from typing import Union, Tuple, Dict, List
from dataclasses import dataclass, field
import logging
import sys
from pathlib import Path
from io import TextIOBase
from frozendict import frozendict
from zensols.util import PackageResource
from zensols.persist import persisted
from zensols.config import Dictable
from zensols.install import Downloader
from . import InstallError, Resource
logger = logging.getLogger(__name__)
@dataclass
class Status(Dictable):
"""Tells of what was installed and how.
"""
resource: Resource = field()
"""The resource that might have been installed."""
downloaded_path: Path = field()
"""The path where :obj:`resource` was downloaded, or None if it wasn't
downloaded.
"""
target_path: Path = field()
"""Where the resource was installed and/or downloaded on the file system.
"""
uncompressed: bool = field()
"""Whether or not the resource was uncompressed."""
@dataclass
class Installer(Dictable):
"""Downloads files from the internet and optionally extracts them.
The files are extracted to either :obj:`base_directory` or a path resolved
from the home directory and the package name (i.e. ``~/.cache/zensols/someappname``).
If the ``~/.cache`` directory does not yet exist, it will base the installs
in the home directory per the :obj:`DEFAULT_BASE_DIRECTORIES` attribute.
Finally, the :obj:`sub_directory` is also added to the path if set.
Instances of this class are resource path iterable and indexable by name.
:see: :class:`.Resource`
"""
DEFAULT_BASE_DIRECTORIES = ('~/.cache', '~/', '/tmp')
"""Contains a list of directories to look as the default base when
:obj:`base_directory` is not given.
:see: :obj:`base_directory`
:see: :obj:`package_resource`
"""
resources: Tuple[Resource] = field()
"""The list of resources to install and track."""
package_resource: Union[str, PackageResource] = field(default=None)
"""Package resource (i.e. ``zensols.someappname``). This field is
converted to a :class:`~zensols.util.PackageResource` if given as a string
during post initialization.
This is used to set :obj:`base_directory` using the package name from the
home directory if given. Otherwise, :obj:`base_directory` is used. One
must be set.
"""
base_directory: Path = field(default=None)
"""The directory to base relative resource paths. If this is not set, then
this attribute is set from :obj:`package_resource` on initialization.
:see: :obj:`package_resource`
:see: :obj:`DEFAULT_BASE_DIRECTORIES`
"""
sub_directory: Path = field(default=None)
"""A path that is added to :obj:`base_directory` or the path referred by
:obj:`package_resource` if set. Setting this is useful to allow for more
directory structure in the installation (see class docs). This is only
useful when using it with :obj:`package_resource`.
"""
downloader: Downloader = field(default_factory=Downloader)
"""Used to download the file from the Internet."""
def __post_init__(self):
if self.package_resource is None and self.base_directory is None:
raise InstallError(
'Either package_resource or base_directory must be set')
if isinstance(self.package_resource, str):
self.package_resource = PackageResource(self.package_resource)
if self.base_directory is None:
self.base_directory = self._get_default_base()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'resolved base directory: {self.base_directory}')
if self.sub_directory is not None:
self.base_directory = self.base_directory / self.sub_directory
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'install base directory: {self.base_directory}')
def _get_default_base(self) -> Path:
existing = tuple(filter(lambda p: p.is_dir(),
map(lambda p: Path(p).expanduser(),
self.DEFAULT_BASE_DIRECTORIES)))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'existing default base directories: {existing}')
if len(existing) == 0:
raise InstallError('No default base directories found ' +
f'in: {self.DEFAULT_BASE_DIRECTORIES}')
base: Path = existing[0]
parts: List[str] = self.package_resource.name.split('.')
is_home: bool = (base == Path('~/').expanduser())
if is_home:
# make a UNIX 'hidden' file if home directory based
parts[0] = '.' + parts[0]
pkg_path: Path = Path(*parts)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating base path from home={base}/' +
f'sub={self.sub_directory}/pkg_path={pkg_path}')
return base / pkg_path
def get_path(self, resource: Resource, compressed: bool = False) -> Path:
"""Return the path where a resource is installed.
:param resource: the resource to find
:param compressed: if ``True``, return the path where its compressed
file (if any) lives
:return: the path of the resource
"""
fname = resource.get_file_name(compressed)
return self.base_directory / fname
def get_singleton_path(self, compressed: bool = False) -> Path:
"""Return the path of resource, which is expected to be the only one.
:param compressed: if ``True``, return the path where its compressed
file (if any) lives
:raises: InstallError if the number of :obj:`resources` length isn't 1
:return: the resource's path
"""
rlen = len(self.resources)
if rlen != 1:
raise InstallError(
f'Expecting configured resources to be one, but got {rlen}')
return self.get_path(self.resources[0], compressed)
@property
@persisted('_by_name')
def by_name(self) -> Dict[str, Resource]:
"""All resources as a dict with keys as their respective names."""
return frozendict({i.name: i for i in self.resources})
@property
@persisted('_paths_by_name')
def paths_by_name(self) -> Dict[str, Path]:
"""All resource paths as a dict with keys as their respective names."""
return frozendict({i.name: self.get_path(i) for i in self.resources})
def _install(self, inst: Resource, dst_path: Path) -> Status:
uncompressed: bool = False
downloaded_path: Path = None
target_path: Path = None
if inst.is_compressed:
comp_path = self.get_path(inst, True)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'needs decompression: {comp_path}')
if not comp_path.is_file():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'missing compressed file {comp_path}')
self.downloader.download(inst.url, comp_path)
downloaded_path = comp_path
uncompressed = inst.uncompress(comp_path)
target_path = comp_path
if uncompressed:
if logger.isEnabledFor(logging.INFO):
logger.info(f'uncompressed to {comp_path}')
else:
if logger.isEnabledFor(logging.INFO):
logger.info(f'downloading: {inst.url} -> {dst_path}')
self.downloader.download(inst.url, dst_path)
downloaded_path = dst_path
target_path = dst_path
return Status(inst, downloaded_path, target_path, uncompressed)
def install(self) -> List[Status]:
"""Download and install all resources.
:return: a list of statuses for each resource downloaded
"""
statuses: List[Status] = []
res: Resource
for res in self.resources:
local_path: Path = self.get_path(res, False)
check_path: Path = None
status: Status = None
if res.check_path is not None:
check_path = self.base_directory / res.check_path
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'local path: {local_path}, ' +
f'check path: {check_path}, ' +
f'res check path: {res.check_path}, ' +
f'compressed: {res.is_compressed}')
# we can skip installation if we already find it on the file system;
# however, we have to re-check compressed files in cases where we've
# downloaded but not uncompressed between life-cycles (i.e. raised
# exceptions)
if (check_path is not None and check_path.exists()) or \
(local_path.exists() and not
(res.is_compressed and res.check_path is not None)):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found: {local_path}--skipping')
comp_path = self.get_path(res, True)
status = Status(res, None, comp_path, False)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'missing {local_path}')
status = self._install(res, local_path)
statuses.append(status)
return statuses
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
dct = self.asdict()
del dct['resources']
self._write_dict(dct, depth, writer)
self._write_line('resources:', depth, writer)
res: Resource
for res in self.resources:
dct = res.asdict()
del dct['name']
self._write_line(res.name, depth + 1, writer)
self._write_dict(dct, depth + 2, writer)
def __call__(self) -> List[Status]:
return self.install()
def __getitem__(self, resource: Union[str, Resource]):
if isinstance(resource, str):
resource = self.by_name[resource]
return self.get_path(resource)
def __iter__(self):
return map(lambda r: self.get_path(r), self.resources)
def __len__(self):
return len(self.resources) | zensols.install | /zensols.install-1.0.0-py3-none-any.whl/zensols/install/installer.py | installer.py |
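# Illustrative usage sketch (not part of the original module): the resource
# URL and base directory are placeholders; the calls mirror the API defined
# above (install everything, then look up an installed path by resource name).
if __name__ == '__main__':
    inst = Installer(
        resources=(Resource(url='https://example.com/corpora/model.zip'),),
        base_directory=Path('/tmp/example-install'))
    statuses: List[Status] = inst()
    # the resource name derived from the URL is 'model'
    print(inst['model'])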
import dill
import json
import logging
import aiofiles
import numpy as np
from typing import Dict, Set, Optional, List, Union, cast
from functools import partial
from medcat import __version__
from medcat.utils.hasher import Hasher
from medcat.utils.matutils import unitvec
from medcat.utils.ml_utils import get_lr_linking
from medcat.config import Config, weighted_average, workers
class CDB(object):
""" Concept DataBase - holds all information necessary for NER+L.
Properties:
name2cuis (`Dict[str, List[str]]`):
Map from concept name to CUIs - one name can map to multiple CUIs.
name2cuis2status (`Dict[str, Dict[str, str]]`):
What is the status for a given name and cui pair - each name can be:
P - Preferred, A - Automatic (e.g. let medcat decide), N - Not common.
snames (`Set[str]`):
All possible subnames for all concepts
cui2names (`Dict[str, Set[str]]`):
From cui to all names assigned to it. Mainly used for subsetting (maybe even only).
cui2snames (`Dict[str, Set[str]]`):
From cui to all sub-names assigned to it. Only used for subsetting.
cui2context_vectors (`Dict[str, Dict[str, np.array]]`):
From cui to a dictionary of different kinds of context vectors. Normally you would have here
a short and a long context vector - they are calculated separately.
cui2count_train (`Dict[str, int]`):
From CUI to the number of training examples seen.
cui2tags (`Dict[str, List[str]]`):
From CUI to a list of tags. This can be used to tag concepts for grouping or whatever.
cui2type_ids (`Dict[str, Set[str]]`):
From CUI to type id (e.g. TUI in UMLS).
cui2preferred_name (`Dict[str, str]`):
From CUI to the preferred name for this concept.
cui2average_confidence (`Dict[str, float]`):
Used for dynamic thresholding. Holds the average confidence for this CUI given the training examples.
name2count_train (`Dict[str, int]`):
Counts how often a name appeared during training.
addl_info (`Dict[str, Dict[]]`):
Any additional maps that are not part of the core CDB. These are usually not needed
for the base NER+L use-case, but can be useful for debugging or some special cases.
vocab (`Dict[str, int]`):
Stores all the words that appear in this CDB and the count for each one.
"""
log = logging.getLogger(__name__)
def __init__(self, config: Union[Config, None] = None) -> None:
if config is None:
self.config = Config()
else:
self.config = config
self.name2cuis: Dict = {}
self.name2cuis2status: Dict = {}
self.snames: Set = set()
self.cui2names: Dict = {}
self.cui2snames: Dict = {}
self.cui2context_vectors: Dict = {}
self.cui2count_train: Dict = {}
self.cui2info: Dict = {}
self.cui2tags: Dict = {} # Used to add custom tags to CUIs
self.cui2type_ids: Dict = {}
self.cui2preferred_name: Dict = {}
self.cui2average_confidence: Dict = {}
self.name2count_train: Dict = {}
self.name_isupper: Dict = {}
self.addl_info: Dict= {
'cui2icd10': {},
'cui2opcs4': {},
'cui2ontologies': {},
'cui2original_names': {},
'cui2description': {},
'type_id2name': {},
'type_id2cuis': {},
'cui2group': {},
# Can be extended with whatever is necessary
}
self.vocab: Dict = {} # Vocabulary of all words ever in our cdb
self._optim_params = None
def get_name(self, cui: str) -> str:
r''' Returns the preferred name if it exists, otherwise it will return
the longest name assigned to the concept.
Args:
cui
'''
name = cui # In case we do not find anything it will just return the CUI
if cui in self.cui2preferred_name and self.cui2preferred_name[cui]:
name = self.cui2preferred_name[cui]
elif cui in self.cui2names and self.cui2names[cui]:
name = " ".join(str(max(self.cui2names[cui], key=len)).split(self.config.general.get('separator', '~'))).title()
return name
def update_cui2average_confidence(self, cui: str, new_sim: float) -> None:
self.cui2average_confidence[cui] = (self.cui2average_confidence.get(cui, 0) * self.cui2count_train.get(cui, 0) + new_sim) / \
(self.cui2count_train.get(cui, 0) + 1)
def remove_names(self, cui: str, names: Dict) -> None:
r''' Remove names from an existing concept - the effect is that these names will never again be used to link to this concept.
This will only remove the name from the linker (namely name2cuis and name2cuis2status); the name will still be present everywhere else.
Why? Because it is bothersome to remove it from everywhere, but
could also be useful to keep the removed names in e.g. cui2names.
Args:
cui (`str`):
Concept ID or unique identifier in this database.
names (`Dict[str, Dict]`):
Names to be removed, should look like: `{'name': {'tokens': tokens, 'snames': snames, 'raw_name': raw_name}, ...}`
'''
for name in names.keys():
if name in self.name2cuis:
if cui in self.name2cuis[name]:
self.name2cuis[name].remove(cui)
if len(self.name2cuis[name]) == 0:
del self.name2cuis[name]
# Remove from name2cuis2status
if name in self.name2cuis2status:
if cui in self.name2cuis2status[name]:
_ = self.name2cuis2status[name].pop(cui)
if len(self.name2cuis2status[name]) == 0:
del self.name2cuis2status[name]
# Set to disamb always if name2cuis2status is now only one CUI
if name in self.name2cuis2status:
if len(self.name2cuis2status[name]) == 1:
for _cui in self.name2cuis2status[name]:
if self.name2cuis2status[name][_cui] == 'A':
self.name2cuis2status[name][_cui] = 'N'
elif self.name2cuis2status[name][_cui] == 'P':
self.name2cuis2status[name][_cui] = 'PD'
def add_names(self, cui: str, names: Dict, name_status: str = 'A', full_build: bool = False) -> None:
r''' Adds a name to an existing concept.
Args:
cui (`str`):
Concept ID or unique identifier in this database, all concepts that have
the same CUI will be merged internally.
names (`Dict[str, Dict]`):
Names for this concept, or the value that if found in free text can be linked to this concept.
Names is a dict like: `{name: {'tokens': tokens, 'snames': snames, 'raw_name': raw_name}, ...}`
name_status (`str`):
One of `P`, `N`, `A`
full_build (`bool`, defaults to `False`):
If True the dictionary self.addl_info will also be populated, contains a lot of extra information
about concepts, but can be very memory consuming. This is not necessary for normal functioning of MedCAT.
'''
name_status = name_status.upper()
if name_status not in ['P', 'A', 'N']:
# Name status must be one of the three
name_status = 'A'
self.add_concept(cui=cui, names=names, ontologies=set(), name_status=name_status, type_ids=set(), description='', full_build=full_build)
def add_concept(self,
cui: str,
names: Dict,
ontologies: set,
name_status: str,
type_ids: Set[str],
description: str,
full_build: bool = False) -> None:
r'''
Add a concept to internal Concept Database (CDB). Depending on what you are providing
this will add a large number of properties for each concept.
Args:
cui (`str`):
Concept ID or unique identifier in this database, all concepts that have
the same CUI will be merged internally.
names (`Dict[str, Dict]`):
Names for this concept, or the value that if found in free text can be linked to this concept.
Names is a dict like: `{name: {'tokens': tokens, 'snames': snames, 'raw_name': raw_name}, ...}`
ontologies(`Set[str]`):
ontologies in which the concept exists (e.g. SNOMEDCT, HPO)
name_status (`str`):
One of `P`, `N`, `A`
type_ids (`Set[str]`):
Semantic type identifier (have a look at TUIs in UMLS or SNOMED-CT)
description (`str`):
Description of this concept.
full_build (`bool`, defaults to `False`):
If True the dictionary self.addl_info will also be populated, contains a lot of extra information
about concepts, but can be very memory consuming. This is not necessary for normal functioning of MedCAT.
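Examples:
    A minimal, illustrative call (the CUI, names and type id below are
    made up, and the `names` dict is built by hand rather than by the
    usual preprocessing helpers):

    >>> cdb.add_concept(cui='C0000001',
    ...                 names={'epilepsy': {'tokens': ['epilepsy'],
    ...                                     'snames': {'epilepsy'},
    ...                                     'raw_name': 'Epilepsy',
    ...                                     'is_upper': False}},
    ...                 ontologies={'SNOMEDCT'},
    ...                 name_status='P',
    ...                 type_ids={'T047'},
    ...                 description='A neurological disorder.',
    ...                 full_build=False)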
'''
# Add CUI to the required dictionaries
if cui not in self.cui2names:
# Create placeholders
self.cui2names[cui] = set()
self.cui2snames[cui] = set()
# Add type_ids
self.cui2type_ids[cui] = type_ids
else:
# If the CUI is already in update the type_ids
self.cui2type_ids[cui].update(type_ids)
# Add names to the required dictionaries
name_info = None
for name in names:
name_info = names[name]
# Extend snames
self.snames.update(name_info['snames'])
# Add name to cui2names
self.cui2names[cui].add(name)
# Extend cui2snames, but check whether the cui is already in it
if cui in self.cui2snames:
self.cui2snames[cui].update(name_info['snames'])
else:
self.cui2snames[cui] = name_info['snames']
# Add whether concept is uppercase
self.name_isupper[name] = names[name]['is_upper']
if name in self.name2cuis:
# Means we have already seen this name
if cui not in self.name2cuis[name]:
# If CUI is not already linked do it
self.name2cuis[name].append(cui)
# At the same time it means the cui is also missing from name2cuis2status,
# but the name is there
self.name2cuis2status[name][cui] = name_status
elif name_status == 'P':
# If name_status is P overwrite whatever was the old status
self.name2cuis2status[name][cui] = name_status
else:
# Means we never saw this name
self.name2cuis[name] = [cui]
# Add name2cuis2status
self.name2cuis2status[name] = {cui: name_status}
# Add tokens to vocab
for token in name_info['tokens']:
if token in self.vocab:
self.vocab[token] += 1
else:
self.vocab[token] = 1
# Check if this is a preferred name for the concept; this takes the name_info
# dict which must have a value (but still have to check it, just in case).
if name_info is not None:
if name_status == 'P' and cui not in self.cui2preferred_name:
# Do not overwrite old preferred names
self.cui2preferred_name[cui] = name_info['raw_name']
# Add other fields if full_build
if full_build:
# Use original_names as the base check because they must be added
if cui not in self.addl_info['cui2original_names']:
if ontologies:
self.addl_info['cui2ontologies'][cui] = ontologies
if description:
self.addl_info['cui2description'][cui] = description
self.addl_info['cui2original_names'][cui] = set([v['raw_name'] for k, v in names.items()])
else:
# Update existing ones
if ontologies:
self.addl_info['cui2ontologies'][cui].update(ontologies)
if description:
self.addl_info['cui2description'][cui] = description
self.addl_info['cui2original_names'][cui].update([v['raw_name'] for k,v in names.items()])
for type_id in type_ids:
# Add type_id2cuis link
if type_id in self.addl_info['type_id2cuis']:
self.addl_info['type_id2cuis'][type_id].add(cui)
else:
self.addl_info['type_id2cuis'][type_id] = {cui}
def add_addl_info(self, name: str, data: Dict, reset_existing: bool = False) -> None:
r''' Add data to the addl_info dictionary. This is done in a function to
not directly access the addl_info dictionary.
Args:
name (`str`):
What key should be used in the `addl_info` dictionary.
data (`Dict[<whatever>]`):
What will be added as the value for the key `name`
reset_existing (`bool`):
Should old data be removed if it exists
'''
if reset_existing:
self.addl_info[name] = {}
self.addl_info[name].update(data)
def update_context_vector(self,
cui: str,
vectors: Dict[str, np.ndarray],
negative: bool = False,
lr: Optional[float] = None,
cui_count: int = 0) -> None:
r''' Add the vector representation of a context for this CUI.
Args:
cui (`str`):
The concept in question.
vectors (`Dict[str, numpy.ndarray]`):
Vector representation of the context, must have the format: {'context_type': np.array(<vector>), ...}
context_type - is usually one of: ['long', 'medium', 'short']
negative (`bool`, defaults to `False`):
Is this a negative or a positive context.
lr (`int`, optional):
If set it will override the base value from the config file.
cui_count (`int`, defaults to 0):
The learning rate will be calculated based on the count for the provided CUI + cui_count.
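Examples:
    An illustrative update (the CUI and the 300 dimensional random vector
    are made up; real vectors come from the context encoder):

    >>> import numpy as np
    >>> cdb.update_context_vector(cui='C0000001',
    ...                           vectors={'short': np.random.rand(300)},
    ...                           negative=False)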
'''
if cui not in self.cui2context_vectors:
self.cui2context_vectors[cui] = {}
self.cui2count_train[cui] = 0
similarity = None
for context_type, vector in vectors.items():
# Get the right context
if context_type in self.cui2context_vectors[cui]:
cv = self.cui2context_vectors[cui][context_type]
similarity = np.dot(unitvec(cv), unitvec(vector))
# Get the learning rate if None
if lr is None:
lr = get_lr_linking(self.config, self.cui2count_train[cui] + cui_count)
if negative:
# Add negative context
b = max(0, similarity) * lr
self.cui2context_vectors[cui][context_type] = cv*(1-b) - vector*b
else:
b = (1 - max(0, similarity)) * lr
self.cui2context_vectors[cui][context_type] = cv*(1-b) + vector*b
# DEBUG
self.log.debug("Updated vector embedding.\n" +
"CUI: %s, Context Type: %s, Similarity: %.2f, Is Negative: %s, LR: %.5f, b: %.3f", cui, context_type,
similarity, negative, lr, b)
cv = self.cui2context_vectors[cui][context_type]
similarity_after = np.dot(unitvec(cv), unitvec(vector))
self.log.debug("Similarity before vs after: %.5f vs %.5f", similarity, similarity_after)
else:
if negative:
self.cui2context_vectors[cui][context_type] = -1 * vector
else:
self.cui2context_vectors[cui][context_type] = vector
# DEBUG
self.log.debug("Added new context type with vectors.\n" +
"CUI: %s, Context Type: %s, Is Negative: %s", cui, context_type, negative)
if not negative:
# Increase counter only for positive examples
self.cui2count_train[cui] += 1
def save(self, path: str) -> None:
r''' Saves model to file (in fact it saves variables of this class).
Args:
path (`str`):
Path to a file where the model will be saved
'''
with open(path, 'wb') as f:
# No idea how to do this correctly
to_save = {}
to_save['config'] = self.config.__dict__
to_save['cdb'] = {k:v for k,v in self.__dict__.items() if k != 'config'}
dill.dump(to_save, f)
async def save_async(self, path: str) -> None:
r''' Async version of saving model to file (in fact it saves variables of this class).
Args:
path (`str`):
Path to a file where the model will be saved
'''
async with aiofiles.open(path, 'wb') as f:
to_save = {
'config': self.config.__dict__,
'cdb': {k: v for k, v in self.__dict__.items() if k != 'config'}
}
await f.write(dill.dumps(to_save))
@classmethod
def load(cls, path: str, config_dict: Optional[Dict] = None) -> "CDB":
r''' Load and return a CDB. This allows partial loads in probably not the right way at all.
Args:
path (`str`):
Path to a `cdb.dat` from which to load data.
config_dict:
A dictionary that will be used to overwrite existing fields in the config of this CDB
'''
with open(path, 'rb') as f:
# Again no idea
data = dill.load(f)
cls._check_medcat_version(data['config'])
config = cast(Config, Config.from_dict(data['config']))
cls._ensure_backward_compatibility(config)
# Create an instance of the CDB (empty)
cdb = cls(config=config)
# Load data into the new cdb instance
for k in cdb.__dict__:
if k in data['cdb']:
cdb.__dict__[k] = data['cdb'][k]
# Overwrite the config with new data
if config_dict is not None:
cdb.config.merge_config(config_dict)
return cdb
def import_training(self, cdb: "CDB", overwrite: bool = True) -> None:
r''' This will import vector embeddings from another CDB. No new concepts will be added.
IMPORTANT: it will not import name maps (cui2names, name2cuis or anything else), only vectors.
Args:
cdb (`medcat.cdb.CDB`):
Concept database from which to import training vectors
overwrite (`bool`, defaults to `True`):
If True all training data in the existing CDB will be overwritten, else
the average between the two training vectors will be taken.
Examples:
>>> new_cdb.import_training(cdb=old_cdb, overwrite=True)
'''
# Import vectors and counts
for cui in cdb.cui2context_vectors:
if cui in self.cui2names:
for context_type, vector in cdb.cui2context_vectors[cui].items():
if overwrite or context_type not in self.cui2context_vectors[cui]:
self.cui2context_vectors[cui][context_type] = vector
else:
self.cui2context_vectors[cui][context_type] = (vector + self.cui2context_vectors[cui][context_type]) / 2
# Increase the vector count
self.cui2count_train[cui] = self.cui2count_train.get(cui, 0) + cdb.cui2count_train[cui]
def reset_cui_count(self, n: int = 10) -> None:
r''' Reset the CUI count for all concepts that received training, used when starting new unsupervised training
or for supervised training with annealing.
Args:
n (`int`, optional, defaults to 10):
This will be set as the CUI count for all cuis in this CDB.
Examples:
>>> cdb.reset_cui_count()
'''
for cui in self.cui2count_train.keys():
self.cui2count_train[cui] = n
def reset_training(self) -> None:
r''' Will remove all training efforts - in other words all embeddings that are learnt
for concepts in the current CDB. Please note that this does not remove synonyms (names) that were
potentially added during supervised/online learning.
'''
self.cui2count_train = {}
self.cui2context_vectors = {}
self.reset_concept_similarity()
def filter_by_cui(self, cuis_to_keep: Union[List[str], Set[str]]) -> None:
        ''' Subset the core CDB fields (dictionaries/maps). Note that this will potentially keep a few more CUIs
        than those in cuis_to_keep. It will first find all names that link to the cuis_to_keep and then
        find all CUIs that link to those names and keep all of them.
This also will not remove any data from cdb.addl_info - as this field can contain data of
unknown structure.
Args:
cuis_to_keep (`List[str]`):
CUIs that will be kept, the rest will be removed (not completely, look above).
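        Example (illustrative sketch; the CUIs are hypothetical):
            >>> cdb.filter_by_cui(cuis_to_keep={'C0011849', 'C0020538'})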
'''
if not self.cui2snames:
raise Exception("This CDB does not support subsetting - most likely because it is a `small/medium` version of a CDB")
        # First get all names/snames that should be kept based on these CUIs
names_to_keep = set()
snames_to_keep = set()
for cui in cuis_to_keep:
names_to_keep.update(self.cui2names.get(cui, []))
snames_to_keep.update(self.cui2snames.get(cui, []))
# Based on the names get also the indirect CUIs that have to be kept
all_cuis_to_keep = set()
for name in names_to_keep:
all_cuis_to_keep.update(self.name2cuis.get(name, []))
new_name2cuis = {}
new_name2cuis2status = {}
new_cui2names = {}
new_cui2snames = {}
new_cui2context_vectors = {}
new_cui2count_train = {}
new_cui2tags = {} # Used to add custom tags to CUIs
new_cui2type_ids = {}
new_cui2preferred_name = {}
# Subset cui2<whatever>
for cui in all_cuis_to_keep:
if cui in self.cui2names:
new_cui2names[cui] = self.cui2names[cui]
new_cui2snames[cui] = self.cui2snames[cui]
if cui in self.cui2context_vectors:
new_cui2context_vectors[cui] = self.cui2context_vectors[cui]
# We assume that it must have the cui2count_train if it has a vector
new_cui2count_train[cui] = self.cui2count_train[cui]
if cui in self.cui2tags:
new_cui2tags[cui] = self.cui2tags[cui]
new_cui2type_ids[cui] = self.cui2type_ids[cui]
if cui in self.cui2preferred_name:
new_cui2preferred_name[cui] = self.cui2preferred_name[cui]
# Subset name2<whatever>
for name in names_to_keep:
if name in self.name2cuis:
new_name2cuis[name] = self.name2cuis[name]
new_name2cuis2status[name] = self.name2cuis2status[name]
# Replace everything
self.name2cuis = new_name2cuis
self.snames = snames_to_keep
self.name2cuis2status = new_name2cuis2status
self.cui2names = new_cui2names
self.cui2snames = new_cui2snames
self.cui2context_vectors = new_cui2context_vectors
self.cui2count_train = new_cui2count_train
self.cui2tags = new_cui2tags
self.cui2type_ids = new_cui2type_ids
self.cui2preferred_name = new_cui2preferred_name
def _make_stats(self):
stats = {}
stats["Number of concepts"] = len(self.cui2names)
stats["Number of names"] = len(self.name2cuis)
stats["Number of concepts that received training"] = len([cui for cui in self.cui2count_train if self.cui2count_train[cui] > 0])
stats["Number of seen training examples in total"] = sum(self.cui2count_train.values())
stats["Average training examples per concept"] = np.average(
[self.cui2count_train[cui] for cui in self.cui2count_train if self.cui2count_train[cui] > 0])
return stats
def print_stats(self) -> None:
r'''Print basic statistics for the CDB.
'''
self.log.info(json.dumps(self._make_stats(), indent=2))
def reset_concept_similarity(self) -> None:
r''' Reset concept similarity matrix.
'''
self.addl_info['similarity'] = {}
def most_similar(self,
cui: str,
context_type: str,
type_id_filter: List[str] = [],
min_cnt: int = 0,
topn: int = 50,
force_build: bool = False) -> Dict:
r''' Given a concept it will calculate what other concepts in this CDB have the most similar
embedding.
Args:
cui (`str`):
The concept ID for the base concept for which you want to get the most similar concepts.
context_type (`str`):
On what vector type from the cui2context_vectors map will the similarity be calculated.
type_id_filter (`List[str]`):
                A list of type_ids that will be used to filter out the returned results. Using this it is possible
to limit the similarity calculation to only disorders/symptoms/drugs/...
min_cnt (`int`):
Minimum training examples (unsupervised+supervised) that a concept must have to be considered
for the similarity calculation.
topn (`int`):
How many results to return
force_build (`bool`, defaults to `False`):
Do not use cached sim matrix
Return:
results (Dict):
A dictionary with topn results like: {<cui>: {'name': <name>, 'sim': <similarity>, 'type_name': <type_name>,
'type_id': <type_id>, 'cnt': <number of training examples the concept has seen>}, ...}
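        Example (illustrative sketch; the CUI and type_id are hypothetical):
            >>> cdb.most_similar('C0020538', 'medium', type_id_filter=['T047'], min_cnt=10, topn=10)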
'''
if 'similarity' in self.addl_info:
if context_type not in self.addl_info['similarity']:
self.addl_info['similarity'][context_type] = {}
else:
self.addl_info['similarity'] = {context_type: {}}
sim_data = self.addl_info['similarity'][context_type]
# Create the matrix if necessary
if 'sim_vectors' not in sim_data or force_build:
self.log.info("Building similarity matrix")
sim_vectors = []
sim_vectors_counts = []
sim_vectors_type_ids = []
sim_vectors_cuis = []
for _cui in self.cui2context_vectors:
if context_type in self.cui2context_vectors[_cui]:
sim_vectors.append(unitvec(self.cui2context_vectors[_cui][context_type]))
sim_vectors_counts.append(self.cui2count_train.get(_cui, 0))
sim_vectors_type_ids.append(self.cui2type_ids.get(_cui, {'unk'}))
sim_vectors_cuis.append(_cui)
sim_data['sim_vectors'] = np.array(sim_vectors)
sim_data['sim_vectors_counts'] = np.array(sim_vectors_counts)
sim_data['sim_vectors_type_ids'] = np.array(sim_vectors_type_ids)
sim_data['sim_vectors_cuis'] = np.array(sim_vectors_cuis)
# Select appropriate concepts
type_id_inds = np.arange(0, len(sim_data['sim_vectors_type_ids']))
if len(type_id_filter) > 0:
type_id_inds = np.array([], dtype=np.int32)
for type_id in type_id_filter:
type_id_inds = np.union1d(np.array([ind for ind, type_ids in enumerate(sim_data['sim_vectors_type_ids']) if type_id in type_ids]),
type_id_inds)
cnt_inds = np.arange(0, len(sim_data['sim_vectors_counts']))
if min_cnt > 0:
cnt_inds = np.where(sim_data['sim_vectors_counts'] >= min_cnt)[0]
# Intersect cnt and type_id
inds = np.intersect1d(type_id_inds, cnt_inds)
mtrx = sim_data['sim_vectors'][inds]
cuis = sim_data['sim_vectors_cuis'][inds]
sims = np.dot(mtrx, unitvec(self.cui2context_vectors[cui][context_type]))
sims_srt = np.argsort(-1*sims)
# Create the return dict
res = {}
for ind, _cui in enumerate(cuis[sims_srt[0:topn]]):
res[_cui] = {'name': self.cui2preferred_name.get(_cui, list(self.cui2names[_cui])[0]), 'sim': sims[sims_srt][ind],
'type_names': [self.addl_info['type_id2name'].get(cui, 'unk') for cui in self.cui2type_ids.get(_cui, ['unk'])],
'type_ids': self.cui2type_ids.get(_cui, 'unk'),
'cnt': self.cui2count_train.get(_cui, 0)}
return res
@staticmethod
def _ensure_backward_compatibility(config: Config) -> None:
# Hacky way of supporting old CDBs
weighted_average_function = config.linking['weighted_average_function']
if callable(weighted_average_function) and getattr(weighted_average_function, "__name__", None) == "<lambda>":
config.linking['weighted_average_function'] = partial(weighted_average, factor=0.0004)
if config.general.get('workers', None) is None:
config.general['workers'] = workers()
disabled_comps = config.general.get('spacy_disabled_components', [])
if 'tagger' in disabled_comps and 'lemmatizer' not in disabled_comps:
config.general['spacy_disabled_components'].append('lemmatizer')
@classmethod
def _check_medcat_version(cls, config_data: Dict) -> None:
cdb_medcat_version = config_data.get('version', {}).get('medcat_version', None)
if cdb_medcat_version is None:
cls.log.warning('The CDB was exported by an unknown version of MedCAT.')
elif __version__.split(".")[:1] != cdb_medcat_version.split(".")[:1]:
cls.log.warning(
f"""You have MedCAT version '{__version__}' installed while the CDB was exported by MedCAT version '{cdb_medcat_version}'.
Please reinstall MedCAT or download the compatible model."""
)
elif __version__.split(".")[:2] != cdb_medcat_version.split(".")[:2]:
cls.log.warning(
f"""You have MedCAT version '{__version__}' installed while the CDB was exported by MedCAT version '{cdb_medcat_version}',
which may or may not work. If you experience any compatibility issues, please reinstall MedCAT
or download the compatible model."""
)
def get_hash(self):
hasher = Hasher()
for k,v in self.__dict__.items():
            if k in ['cui2context_vectors', 'name2cuis']:
hasher.update(v, length=False)
elif k != 'config':
hasher.update(v, length=True)
return hasher.hexdigest() | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/cdb.py | cdb.py |
import re
import logging
import jsonpickle
from functools import partial
from multiprocessing import cpu_count
from medcat.utils.hasher import Hasher
from typing import Optional, Iterable, Tuple, Dict, Any
def weighted_average(step: int, factor: float) -> float:
return max(0.1, 1 - (step ** 2 * factor))
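# Illustrative behaviour of the annealing weight above (example values only):
#   weighted_average(step=0, factor=0.0004)   -> 1.0
#   weighted_average(step=30, factor=0.0004)  -> 0.64
#   weighted_average(step=100, factor=0.0004) -> 0.1  (the 0.1 floor is reached)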
def workers(workers_override: Optional[int] = None) -> int:
return max(cpu_count() - 1, 1) if workers_override is None else workers_override
class ConfigMixin(object):
jsonpickle.set_encoder_options('json', sort_keys=True, indent=2)
def __iter__(self) -> Iterable[Tuple]:
for attr, value in self.__dict__.items():
yield attr, value
def save(self, save_path: str) -> None:
r''' Save the config into a .json file
Args:
save_path (`str`):
Where to save the created json file
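        Example (illustrative sketch; the path is hypothetical):
            >>> config.save("./config.json")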
'''
# We want to save the dict here, not the whole class
json_string = jsonpickle.encode(self.__dict__)
with open(save_path, 'w') as f:
f.write(json_string)
def merge_config(self, config_dict: Dict) -> None:
r''' Merge a config_dict with the existing config object.
Args:
config_dict (`dict`):
A dictionary which key/values should be added to this class.
'''
for key in config_dict.keys():
if key in self.__dict__ and isinstance(self.__dict__[key], dict):
self.__dict__[key].update(config_dict[key])
else:
self.__dict__[key] = config_dict[key]
def parse_config_file(self, path: str) -> None:
r'''
Parses a configuration file in text format. Must be like:
cat.<variable>.<key> = <value>
...
- variable: linking, general, ner, ...
- key: a key in the config dict e.g. subsample_after for linking
- value: the value for the key, will be parsed with `eval`
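        Example file content (illustrative; keys exist in the default config):
            cat.general.spell_check = False
            cat.linking.similarity_threshold = 0.3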
'''
with open(path, 'r') as f:
for line in f:
if line.strip() and line.startswith("cat."):
line = line[4:]
left, right = line.split("=")
variable, key = left.split(".")
variable = variable.strip()
key = key.strip()
value = eval(right.strip())
attr = getattr(self, variable)
attr[key] = value
self.rebuild_re()
def rebuild_re(self) -> None:
pass
def get_hash(self):
hasher = Hasher()
for k, v in self.__dict__.items():
hasher.update(v)
return hasher.hexdigest()
def __str__(self) -> str:
json_obj = {}
for attr, value in self: # type: ignore
json_obj[attr] = value
return jsonpickle.encode(json_obj)
@classmethod
def load(cls, save_path: str) -> "ConfigMixin":
        r''' Load config from a json file, note that fields that
        did not exist in the old config but do exist in the current
        version of the config class will be kept.
Args:
save_path (`str`):
Path to the json file to load
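        Example (illustrative sketch; the path is hypothetical):
            >>> config = Config.load("./config.json")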
'''
config = cls()
# Read the jsonpickle string
with open(save_path) as f:
config_dict = jsonpickle.decode(f.read())
config.merge_config(config_dict)
return config
@classmethod
def from_dict(cls, config_dict: Dict) -> "ConfigMixin":
config = cls()
config.merge_config(config_dict)
return config
class Config(ConfigMixin):
def __init__(self) -> None:
self.version: Dict[str, Any] = {
'id': None, # Will be: hash of most things
            'last_modified': None, # Populated automatically when the model is versioned
            'location': None, # Path/URL to where this CDB is located
'history': [], # Populated automatically
'description': "No description", # General description and what it was trained on
'meta_cats': {}, # Populated automatically
'cdb_info': {}, # Populated automatically, output from cdb.print_stats
'performance': {'ner': {}, 'meta': {}}, # NER general performance, meta should be: {'meta': {'model_name': {'f1': <>, 'p': <>, ...}, ...}}
'ontology': None, # What was used to build the CDB, e.g. SNOMED_202009
'medcat_version': None, # Which version of medcat was used to build the CDB
}
# CDB Maker
self.cdb_maker: Dict[str, Any] = {
            # If multiple names or type_ids for a concept are present in one row of a CSV, they are separated
# by the character below.
'multi_separator': '|',
# Name versions to be generated.
'name_versions': ['LOWER', 'CLEAN'],
            # Should preferred names with parenthesis be cleaned; 0 means no, otherwise clean if the remaining name is longer than or equal to this value,
# e.g. Head (Body part) -> Head
'remove_parenthesis': 5,
# Minimum number of letters required in a name to be accepted for a concept
'min_letters_required': 2,
}
# Used mainly to configure the output of the get_entities function, and in that also the output of
#get_json and multiprocessing
self.annotation_output: Dict[str, Any] = {
'doc_extended_info': False,
'context_left': -1,
'context_right': -1,
'lowercase_context': True,
'include_text_in_output': False,
}
self.general: Dict[str, Any] = {
# Logging config for everything | 'tagger' can be disabled, but will cause a drop in performance
'log_level': logging.INFO,
'log_format': '%(levelname)s:%(name)s: %(message)s',
'log_path': './medcat.log',
'spacy_disabled_components': ['ner', 'parser', 'vectors', 'textcat',
'entity_linker', 'sentencizer', 'entity_ruler', 'merge_noun_chunks',
'merge_entities', 'merge_subtokens'],
# What model will be used for tokenization
'spacy_model': 'en_core_web_md',
# Separator that will be used to merge tokens of a name. Once a CDB is built this should
#always stay the same.
'separator': '~',
# Should we check spelling - note that this makes things much slower, use only if necessary. The only thing necessary
#for the spell checker to work is vocab.dat and cdb.dat built with concepts in the respective language.
'spell_check': True,
# Should we process diacritics - for languages other than English, symbols such as 'é, ë, ö' can be relevant.
# Note that this makes spell_check slower.
'diacritics': False,
# If True the spell checker will try harder to find mistakes, this can slow down
#things drastically.
'spell_check_deep': False,
# Spelling will not be checked for words with length less than this
'spell_check_len_limit': 7,
# If set to True functions like get_entities and get_json will return nested_entities and overlaps
'show_nested_entities': False,
# When unlinking a name from a concept should we do full_unlink (means unlink a name from all concepts, not just the one in question)
'full_unlink': False,
# Number of workers used by a parallelizable pipeline component
'workers': workers(),
            # Should the labels of entities (shown in displacy) be pretty or just 'concept'. Slows down the annotation pipeline and
            #should not be used when annotating millions of documents. If `None` it will be the string "concept", if `short` it will be the CUI,
            #if `long` it will be CUI | Name | Confidence
'make_pretty_labels': None,
            # If the cdb.addl_info['cui2group'] is provided and this option enabled, each CUI will be mapped to the group
'map_cui_to_group': False,
# Checkpointing config
'checkpoint': {
# When doing training this is the name of the directory where checkpoints will be saved
'output_dir': 'checkpoints',
# When training how often to save the checkpoint (one step represents one document), if None no ckpts will be created
'steps': None,
# When training the maximum checkpoints will be kept on the disk
"max_to_keep": 1,
},
}
self.preprocessing: Dict[str, Any] = {
            # Should stopwords be skipped/ignored when processing input
'skip_stopwords': False,
            # These words will be completely ignored from concepts and from the text (must be a Set)
'words_to_skip': set(['nos']),
# All punct will be skipped by default, here you can set what will be kept
'keep_punct': {'.', ':'},
# Nothing below this length will ever be normalized (input tokens or concept names), normalized means lemmatized in this case
'min_len_normalize': 5,
            # If None the default set of stopwords from spacy will be used. This must be a Set.
'stopwords': None,
# Documents longer than this will be trimmed
'max_document_length': 1000000,
# Should specific word types be normalized: e.g. running -> run
'do_not_normalize': {'VBD', 'VBG', 'VBN', 'VBP', 'JJS', 'JJR'},
}
self.ner: Dict[str, Any] = {
# Do not detect names below this limit, skip them
'min_name_len': 3,
            # When checking tokens for concepts you can have skipped tokens in between
            #used ones (usually spaces, new lines etc). This number tells you how many skipped tokens you can have.
'max_skip_tokens': 2,
# Check uppercase to distinguish uppercase and lowercase words that have a different meaning.
'check_upper_case_names': False,
# Any name shorter than this must be uppercase in the text to be considered. If it is not uppercase
#it will be skipped.
'upper_case_limit_len': 4,
# Try reverse word order for short concepts (2 words max), e.g. heart disease -> disease heart
'try_reverse_word_order': False,
}
self.linking: Dict[str, Any] = {
            # Should it train or not, this is set automatically; ignore in 99% of cases and do not set manually
'train': True,
# Linear anneal
'optim': {'type': 'linear', 'base_lr': 1, 'min_lr': 0.00005},
# If <1 during unsupervised training the detected term will be randomly replaced with a probability of 1 - random_replacement_unsupervised
#Replaced with a synonym used for that term
'random_replacement_unsupervised': 0.80,
# 'optim': {'type': 'standard', 'lr': 1},
# 'optim': {'type': 'moving_avg', 'alpha': 0.99, 'e': 1e-4, 'size': 100},
# All concepts below this will always be disambiguated
'disamb_length_limit': 3,
# Context vector sizes that will be calculated and used for linking
'context_vector_sizes': {'xlong': 27, 'long': 18, 'medium': 9, 'short': 3},
# Weight of each vector in the similarity score - make trainable at some point. Should add up to 1.
'context_vector_weights': {'xlong': 0.1, 'long': 0.4, 'medium': 0.4, 'short': 0.1},
# If True it will filter before doing disamb. Useful for the trainer.
'filter_before_disamb': False,
# Concepts that have seen less training examples than this will not be used for
#similarity calculation and will have a similarity of -1.
'train_count_threshold': 1,
# Do we want to calculate context similarity even for concepts that are not ambigous.
'always_calculate_similarity': False,
# Weights for a weighted average
#'weighted_average_function': partial(weighted_average, factor=0.02),
'weighted_average_function': partial(weighted_average, factor=0.0004),
# Concepts below this similarity will be ignored. Type can be static/dynamic - if dynamic each CUI has a different TH
            #and it is calculated as the average confidence for that CUI * similarity_threshold. Take care that dynamic works only
#if the cdb was trained with calculate_dynamic_threshold = True.
'calculate_dynamic_threshold': False,
'similarity_threshold_type': 'static',
'similarity_threshold': 0.25,
# Probability for the negative context to be added for each positive addition
'negative_probability': 0.5,
# Do we ignore punct/num when negative sampling
'negative_ignore_punct_and_num': True,
# If >0 concepts for which a detection is its primary name will be preferred by that amount (0 to 1)
'prefer_primary_name': 0.35,
            # If >0 concepts that are more frequent will be preferred by a multiple of this amount
'prefer_frequent_concepts': 0.35,
            # DISABLED in code permanently: Subsample during unsupervised training if a concept has received more than
'subsample_after': 30000,
            # When adding a positive example, should it also be treated as negative for concepts
            #which link to the positive one via names (ambiguous names).
'devalue_linked_concepts': False,
            # If true when the context of a concept is calculated (embedding) the words making that concept are not taken into account
'context_ignore_center_tokens': False,
# Filters
'filters': {
'cuis': set(), # CUIs in this filter will be included, everything else excluded, must be a set, if empty all cuis will be included
},
}
# Some regex that we will need
self.word_skipper = re.compile('^({})$'.format('|'.join(self.preprocessing['words_to_skip'])))
        # Very aggressive punct checker, input will be lowercased
self.punct_checker = re.compile(r'[^a-z0-9]+')
# Override
def rebuild_re(self) -> None:
# Some regex that we will need
self.word_skipper = re.compile('^({})$'.format('|'.join(self.preprocessing['words_to_skip'])))
        # Very aggressive punct checker, input will be lowercased
self.punct_checker = re.compile(r'[^a-z0-9]+')
def get_hash(self):
hasher = Hasher()
for k, v in self.__dict__.items():
if k not in ['version', 'general', 'linking']:
hasher.update(v, length=True)
elif k == 'general':
for k2, v2 in v.items():
if k2 != 'spacy_model':
hasher.update(v2, length=False)
else:
# Ignore spacy model
pass
elif k == 'linking':
for k2, v2 in v.items():
if k2 != "filters":
hasher.update(v2, length=False)
else:
hasher.update(v2, length=True)
return hasher.hexdigest() | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/config.py | config.py |
from typing import Dict, Any
from medcat.config import ConfigMixin
class ConfigMetaCAT(ConfigMixin):
def __init__(self) -> None:
self.general: Dict[str, Any] = {
'device': 'cpu',
'disable_component_lock': False,
'seed': 13,
'description': "No description", # Should provide a basic description of this MetaCAT model
'category_name': None, # What category is this meta_cat model predicting/training
'category_value2id': {}, # Map from category values to ID, if empty it will be autocalculated during training
'vocab_size': None, # Will be set automatically if the tokenizer is provided during meta_cat init
'lowercase': True, # If true all input text will be lowercased
'cntx_left': 15, # Number of tokens to take from the left of the concept
'cntx_right': 10, # Number of tokens to take from the right of the concept
'replace_center': None, # If set the center (concept) will be replaced with this string
'batch_size_eval': 5000, # Number of annotations to be meta-annotated at once in eval
            'annotate_overlapping': False, # If set meta_anns will be calculated for doc._.ents, otherwise for doc.ents
            'tokenizer_name': 'bbpe', # Tokenizer name used with MetaCAT
# This is a dangerous option, if not sure ALWAYS set to False. If set, it will try to share the pre-calculated
#context tokens between MetaCAT models when serving. It will ignore differences in tokenizer and context size,
#so you need to be sure that the models for which this is turned on have the same tokenizer and context size, during
#a deployment.
'save_and_reuse_tokens': False,
'pipe_batch_size_in_chars': 20000000, # How many characters are piped at once into the meta_cat class
}
self.model: Dict[str, Any] = {
'model_name': 'lstm',
'num_layers': 2,
'input_size': 300,
'hidden_size': 300,
'dropout': 0.5,
'num_directions': 2, # 2 - bidirectional model, 1 - unidirectional
'nclasses': 2, # Number of classes that this model will output
'padding_idx': -1,
'emb_grad': True, # If True the embeddings will also be trained
            'ignore_cpos': False, # If set to True center positions will be ignored when calculating representation
}
self.train: Dict[str, Any] = {
'batch_size': 100,
'nepochs': 50,
'lr': 0.001,
'test_size': 0.1,
'shuffle_data': True, # Used only during training, if set the dataset will be shuffled before train/test split
'class_weights': None,
'score_average': 'weighted', # What to use for averaging F1/P/R across labels
'prerequisites': {},
'cui_filter': None, # If set only this CUIs will be used for training
            'auto_save_model': True, # Should the model be saved during training for best results
'last_train_on': None, # When was the last training run
'metric': {'base': 'weighted avg', 'score': 'f1-score'}, # What metric should be used for choosing the best model
} | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/config_meta_cat.py | config_meta_cat.py |
import pandas as pd
import numpy as np
import datetime
import logging
import re
from typing import Optional, List, Dict, Union
from medcat.pipe import Pipe
from medcat.cdb import CDB
from medcat.config import Config
from medcat.preprocessing.tokenizers import spacy_split_all
from medcat.preprocessing.cleaners import prepare_name
from medcat.preprocessing.taggers import tag_skip_and_punct
PH_REMOVE = re.compile(r"(\s)\([a-zA-Z]+[^\)\(]*\)($)")
class CDBMaker(object):
    r''' Given a CSV as shown in https://github.com/CogStack/MedCAT/tree/master/examples/<example> it creates a CDB or
    updates an existing one.
Args:
config (`medcat.config.Config`):
Global config for MedCAT.
cdb (`medcat.cdb.CDB`, optional):
            If set the `CDBMaker` will update the existing `CDB` with new concepts in the CSV.
name_max_words (`int`, defaults to `20`):
Names with more words will be skipped during the build of a CDB
'''
log = logging.getLogger(__package__)
def __init__(self, config: Config, cdb: Optional[CDB] = None) -> None:
self.config = config
# Set log level
self.log.setLevel(self.config.general['log_level'])
# To make life a bit easier
self.cnf_cm = config.cdb_maker
if cdb is None:
self.cdb = CDB(config=self.config)
else:
self.cdb = cdb
# Build the required spacy pipeline
self.pipe = Pipe(tokenizer=spacy_split_all, config=config)
self.pipe.add_tagger(tagger=tag_skip_and_punct,
name='skip_and_punct',
additional_fields=['is_punct'])
def prepare_csvs(self,
csv_paths: Union[pd.DataFrame, List[str]],
sep: str = ',',
encoding: Optional[str] = None,
escapechar: Optional[str] = None,
index_col: bool = False,
full_build: bool = False,
only_existing_cuis: bool = False, **kwargs) -> CDB:
r''' Compile one or multiple CSVs into a CDB.
Args:
csv_paths (`Union[pd.DataFrame, List[str]]`):
An array of paths to the csv files that should be processed. Can also be an array of pd.DataFrames
            full_build (`bool`, defaults to `False`):
If False only the core portions of the CDB will be built (the ones required for
the functioning of MedCAT). If True, everything will be added to the CDB - this
usually includes concept descriptions, various forms of names etc (take care that
this option produces a much larger CDB).
sep (`str`, defaults to `,`):
If necessary a custom separator for the csv files
encoding (`str`, optional):
Encoding to be used for reading the CSV file
escapechar (`str`, optional):
Escape char for the CSV
            index_col (`bool`, defaults to `False`):
Index column for pandas read_csv
only_existing_cuis (`bool`, defaults to False):
If True no new CUIs will be added, but only linked names will be extended. Mainly used when
enriching names of a CDB (e.g. SNOMED with UMLS terms).
Return:
`medcat.cdb.CDB` with the new concepts added.
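        Example (illustrative sketch; the CSV path is hypothetical):
            >>> maker = CDBMaker(config=Config())
            >>> cdb = maker.prepare_csvs(csv_paths=['./concepts.csv'], full_build=False)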
Note:
\*\*kwargs:
Will be passed to pandas for CSV reading
csv:
Examples of the CSV used to make the CDB can be found on [GitHub](link)
'''
useful_columns = ['cui', 'name', 'ontologies', 'name_status', 'type_ids', 'description']
name_status_options = {'A', 'P', 'N'}
for csv_path in csv_paths:
# Read CSV, everything is converted to strings
if isinstance(csv_path, str):
self.log.info("Started importing concepts from: {}".format(csv_path))
                df = pd.read_csv(csv_path, sep=sep, encoding=encoding, escapechar=escapechar, index_col=index_col, dtype=str, **kwargs)
else:
# Not very clear, but csv_path can be a pre-loaded csv
df = csv_path
df = df.fillna('')
# Find which columns to use from the CSV
cols: List = []
col2ind = {}
for col in list(df.columns):
if str(col).lower().strip() in useful_columns:
col2ind[str(col).lower().strip()] = len(cols)
cols.append(col)
_time = None # Used to check speed
_logging_freq = np.ceil(len(df[cols]) / 100)
for row_id, row in enumerate(df[cols].values):
if row_id % _logging_freq == 0:
# Print some stats
if _time is None:
# Add last time if it does not exist
_time = datetime.datetime.now()
# Get current time
ctime = datetime.datetime.now()
# Get time difference
timediff = ctime - _time
self.log.info("Current progress: {:.0f}% at {:.3f}s per {} rows".format(
(row_id / len(df)) * 100, timediff.microseconds/10**6 + timediff.seconds, (len(df[cols]) // 100)))
# Set previous time to current time
_time = ctime
# This must exist
cui = row[col2ind['cui']].strip().upper()
if not only_existing_cuis or (only_existing_cuis and cui in self.cdb.cui2names):
if 'ontologies' in col2ind:
ontologies = set([ontology.strip() for ontology in row[col2ind['ontologies']].upper().split(self.cnf_cm['multi_separator']) if
len(ontology.strip()) > 0])
else:
ontologies = set()
if 'name_status' in col2ind:
name_status = row[col2ind['name_status']].strip().upper()
# Must be allowed
if name_status not in name_status_options:
name_status = 'A'
else:
# Defaults to A - meaning automatic
name_status = 'A'
if 'type_ids' in col2ind:
type_ids = set([type_id.strip() for type_id in row[col2ind['type_ids']].upper().split(self.cnf_cm['multi_separator']) if
len(type_id.strip()) > 0])
else:
type_ids = set()
# Get the ones that do not need any changing
if 'description' in col2ind:
description = row[col2ind['description']].strip()
else:
description = ""
# We can have multiple versions of a name
names: Dict = {} # {'name': {'tokens': [<str>], 'snames': [<str>]}}
raw_names = [raw_name.strip() for raw_name in row[col2ind['name']].split(self.cnf_cm['multi_separator']) if
len(raw_name.strip()) > 0]
for raw_name in raw_names:
raw_name = raw_name.strip()
prepare_name(raw_name, self.pipe.spacy_nlp, names, self.config)
if self.config.cdb_maker.get('remove_parenthesis', 0) > 0 and name_status == 'P':
# Should we remove the content in parenthesis from primary names and add them also
raw_name = PH_REMOVE.sub(" ", raw_name).strip()
if len(raw_name) >= self.config.cdb_maker['remove_parenthesis']:
prepare_name(raw_name, self.pipe.spacy_nlp, names, self.config)
self.cdb.add_concept(cui=cui, names=names, ontologies=ontologies, name_status=name_status, type_ids=type_ids,
description=description, full_build=full_build)
# DEBUG
self.log.debug("\n\n**** Added\n CUI: %s\n Names: %s\n Ontologies: %s\n Name status: %s\n Type IDs: %s\n Description: %s\n Is full build: %s",
cui, names, ontologies, name_status, type_ids, description, full_build)
return self.cdb
def destroy_pipe(self) -> None:
self.pipe.destroy() | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/cdb_maker.py | cdb_maker.py |
import os
import shutil
import pickle
import traceback
import json
import logging
import math
import time
import psutil
from time import sleep
from copy import deepcopy
from multiprocess import Process, Manager, cpu_count
from multiprocess.queues import Queue
from multiprocess.synchronize import Lock
from typing import Union, List, Tuple, Optional, Dict, Iterable, Set
from itertools import islice, chain, repeat
from datetime import date
from tqdm.autonotebook import tqdm, trange
from spacy.tokens import Span, Doc, Token
from spacy.language import Language
from medcat import __version__
from medcat.preprocessing.tokenizers import spacy_split_all
from medcat.pipe import Pipe
from medcat.preprocessing.taggers import tag_skip_and_punct
from medcat.cdb import CDB
from medcat.utils.matutils import intersect_nonempty_set
from medcat.utils.data_utils import make_mc_train_test, get_false_positives
from medcat.utils.normalizers import BasicSpellChecker
from medcat.utils.checkpoint import Checkpoint, CheckpointConfig, CheckpointManager
from medcat.utils.helpers import tkns_from_doc, get_important_config_parameters
from medcat.utils.hasher import Hasher
from medcat.ner.vocab_based_ner import NER
from medcat.linking.context_based_linker import Linker
from medcat.utils.filters import get_project_filters, check_filters
from medcat.preprocessing.cleaners import prepare_name
from medcat.meta_cat import MetaCAT
from medcat.utils.meta_cat.data_utils import json_to_fake_spacy
from medcat.config import Config
from medcat.vocab import Vocab
from medcat.utils.decorators import deprecated
from medcat.ner.transformers_ner import TransformersNER
class CAT(object):
r"""
The main MedCAT class used to annotate documents, it is built on top of spaCy
    and works as a spaCy pipeline. Creates an instance of a spaCy pipeline that can
be used as a spacy nlp model.
Args:
cdb (medcat.cdb.CDB):
The concept database that will be used for NER+L
config (medcat.config.Config):
Global configuration for medcat
vocab (medcat.vocab.Vocab, optional):
Vocabulary used for vector embeddings and spelling. Default: None
meta_cats (list of medcat.meta_cat.MetaCAT, optional):
A list of models that will be applied sequentially on each
detected annotation.
Attributes (limited):
cdb (medcat.cdb.CDB):
Concept database used with this CAT instance, please do not assign
this value directly.
config (medcat.config.Config):
The global configuration for medcat. Usually cdb.config will be used for this
field. WILL BE REMOVED - TEMPORARY PLACEHOLDER
vocab (medcat.utils.vocab.Vocab):
The vocabulary object used with this instance, please do not assign
this value directly.
Examples:
>>> cat = CAT(cdb, vocab)
>>> spacy_doc = cat("Put some text here")
        >>> print(spacy_doc.ents) # Detected entities
"""
# Add file and console handlers
log = logging.getLogger(__package__)
DEFAULT_MODEL_PACK_NAME = "medcat_model_pack"
def __init__(self,
cdb: CDB,
vocab: Union[Vocab, None] = None,
config: Optional[Config] = None,
meta_cats: List[MetaCAT] = [],
addl_ner: Union[TransformersNER, List[TransformersNER]] = []) -> None:
self.cdb = cdb
self.vocab = vocab
if config is None:
# Take config from the cdb
self.config = cdb.config
else:
# Take the new config and assign it to the CDB also
self.config = config
self.cdb.config = config
self._meta_cats = meta_cats
self._addl_ner = addl_ner if isinstance(addl_ner, list) else [addl_ner]
self._create_pipeline(self.config)
def _create_pipeline(self, config):
# Set log level
self.log.setLevel(config.general['log_level'])
# Build the pipeline
self.pipe = Pipe(tokenizer=spacy_split_all, config=config)
self.pipe.add_tagger(tagger=tag_skip_and_punct,
name='skip_and_punct',
additional_fields=['is_punct'])
if self.vocab is not None:
spell_checker = BasicSpellChecker(cdb_vocab=self.cdb.vocab, config=config, data_vocab=self.vocab)
self.pipe.add_token_normalizer(spell_checker=spell_checker, config=config)
# Add NER
self.ner = NER(self.cdb, config)
self.pipe.add_ner(self.ner)
# Add LINKER
self.linker = Linker(self.cdb, self.vocab, config)
self.pipe.add_linker(self.linker)
# Add addl_ner if they exist
for ner in self._addl_ner:
self.pipe.add_addl_ner(ner, ner.config.general['name'])
# Add meta_annotaiton classes if they exist
for meta_cat in self._meta_cats:
self.pipe.add_meta_cat(meta_cat, meta_cat.config.general['category_name'])
# Set max document length
self.pipe.spacy_nlp.max_length = config.preprocessing.get('max_document_length', 1000000)
@deprecated(message="Replaced with cat.pipe.spacy_nlp.")
def get_spacy_nlp(self) -> Language:
""" Returns the spacy pipeline with MedCAT
"""
return self.pipe.spacy_nlp
def get_hash(self):
        r""" Will not be a deep hash but will try to catch all the changing parts during training.
"""
hasher = Hasher()
hasher.update(self.cdb.get_hash())
hasher.update(self.config.get_hash())
for mc in self._meta_cats:
hasher.update(mc.get_hash())
for trf in self._addl_ner:
hasher.update(trf.get_hash())
return hasher.hexdigest()
def get_model_card(self, as_dict=False):
"""
A minimal model card for MedCAT model packs.
Args:
as_dict: return the model card as a dictionary instead of a str.
Returns:
By default a str - indented JSON object.
"""
card = {
'Model ID': self.config.version['id'],
'Last Modified On': self.config.version['last_modified'],
'History (from least to most recent)': self.config.version['history'],
'Description': self.config.version['description'],
'Source Ontology': self.config.version['ontology'],
'Location': self.config.version['location'],
'MetaCAT models': self.config.version['meta_cats'],
'Basic CDB Stats': self.config.version['cdb_info'],
'Performance': self.config.version['performance'],
'Important Parameters (Partial view, all available in cat.config)': get_important_config_parameters(self.config),
'MedCAT Version': self.config.version['medcat_version']
}
if as_dict:
return card
else:
return json.dumps(card, indent=2, sort_keys=False)
def _versioning(self):
# Check version info and do not allow without it
if self.config.version['description'] == 'No description':
self.log.warning("Please consider populating the version information [description, performance, location, ontology] in cat.config.version")
# Fill the stuff automatically that is needed for versioning
m = self.get_hash()
version = self.config.version
if version['id'] is None or m != version['id']:
if version['id'] is not None:
version['history'].append(version['id'])
version['id'] = m
version['last_modified'] = date.today().strftime("%d %B %Y")
version['cdb_info'] = self.cdb._make_stats()
version['meta_cats'] = [meta_cat.get_model_card(as_dict=True) for meta_cat in self._meta_cats]
version['medcat_version'] = __version__
self.log.warning("Please consider updating [description, performance, location, ontology] in cat.config.version")
def create_model_pack(self, save_dir_path: str, model_pack_name: str = DEFAULT_MODEL_PACK_NAME) -> str:
        r""" Will create a .zip file containing all the models in the currently running instance
        of MedCAT. This is not the most efficient way, for sure, but good enough for now.
        Args:
            model_pack_name (`str`, defaults to `DEFAULT_MODEL_PACK_NAME`):
                The base name of the model pack - an id will be appended to this name
        Returns:
Model pack name
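        Example (illustrative sketch; the directory is hypothetical):
            >>> pack_name = cat.create_model_pack(save_dir_path="./models")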
"""
# Spacy model always should be just the name, but during loading it can be reset to path
self.config.general['spacy_model'] = os.path.basename(self.config.general['spacy_model'])
# Versioning
self._versioning()
model_pack_name += "_{}".format(self.config.version['id'])
self.log.warning("This will save all models into a zip file, can take some time and require quite a bit of disk space.")
_save_dir_path = save_dir_path
save_dir_path = os.path.join(save_dir_path, model_pack_name)
# expand user path to make this work with '~'
os.makedirs(os.path.expanduser(save_dir_path), exist_ok=True)
# Save the used spacy model
spacy_path = os.path.join(save_dir_path, self.config.general['spacy_model'])
if str(self.pipe.spacy_nlp._path) != spacy_path:
# First remove if something is there
shutil.rmtree(spacy_path, ignore_errors=True)
shutil.copytree(str(self.pipe.spacy_nlp._path), spacy_path)
# Save the CDB
cdb_path = os.path.join(save_dir_path, "cdb.dat")
self.cdb.save(cdb_path)
# Save the Vocab
vocab_path = os.path.join(save_dir_path, "vocab.dat")
if self.vocab is not None:
# We will allow creation of modelpacks without vocabs
self.vocab.save(vocab_path)
# Save addl_ner
for comp in self.pipe.spacy_nlp.components:
if isinstance(comp[1], TransformersNER):
trf_path = os.path.join(save_dir_path, "trf_" + comp[1].config.general['name'])
comp[1].save(trf_path)
# Save all meta_cats
for comp in self.pipe.spacy_nlp.components:
if isinstance(comp[1], MetaCAT):
name = comp[0]
meta_path = os.path.join(save_dir_path, "meta_" + name)
comp[1].save(meta_path)
# Add a model card also, why not
model_card_path = os.path.join(save_dir_path, "model_card.json")
json.dump(self.get_model_card(as_dict=True), open(model_card_path, 'w'), indent=2)
# Zip everything
shutil.make_archive(os.path.join(_save_dir_path, model_pack_name), 'zip', root_dir=save_dir_path)
# Log model card and return new name
self.log.info(self.get_model_card()) # Print the model card
return model_pack_name
@classmethod
def load_model_pack(cls, zip_path: str, meta_cat_config_dict: Optional[Dict] = None) -> "CAT":
r"""Load everything within the 'model pack', i.e. the CDB, config, vocab and any MetaCAT models
(if present)
Args:
zip_path:
path to model pack zip.
meta_cat_config_dict:
A config dict that will overwrite existing configs in meta_cat.
e.g. meta_cat_config_dict = {'general': {'device': 'cpu'}}
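        Example (illustrative sketch; the path is hypothetical):
            >>> cat = CAT.load_model_pack("./medcat_model_pack.zip")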
"""
from medcat.cdb import CDB
from medcat.vocab import Vocab
from medcat.meta_cat import MetaCAT
base_dir = os.path.dirname(zip_path)
filename = os.path.basename(zip_path)
foldername = filename.replace(".zip", '')
model_pack_path = os.path.join(base_dir, foldername)
if os.path.exists(model_pack_path):
            cls.log.info("Found an existing unzipped model pack at: {}, the provided zip will not be touched.".format(model_pack_path))
else:
            cls.log.info("Unzipping the model pack and loading models.")
shutil.unpack_archive(zip_path, extract_dir=model_pack_path)
# Load the CDB
cdb_path = os.path.join(model_pack_path, "cdb.dat")
cdb = CDB.load(cdb_path)
# TODO load addl_ner
# Modify the config to contain full path to spacy model
cdb.config.general['spacy_model'] = os.path.join(model_pack_path, os.path.basename(cdb.config.general['spacy_model']))
# Load Vocab
vocab_path = os.path.join(model_pack_path, "vocab.dat")
if os.path.exists(vocab_path):
vocab = Vocab.load(vocab_path)
else:
vocab = None
        # Find transformers (addl_ner) models in the model_pack
trf_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('trf_')]
addl_ner = []
for trf_path in trf_paths:
trf = TransformersNER.load(save_dir_path=trf_path)
trf.cdb = cdb # Set the cat.cdb to be the CDB of the TRF model
addl_ner.append(trf)
# Find meta models in the model_pack
meta_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('meta_')]
meta_cats = []
for meta_path in meta_paths:
meta_cats.append(MetaCAT.load(save_dir_path=meta_path,
config_dict=meta_cat_config_dict))
cat = cls(cdb=cdb, config=cdb.config, vocab=vocab, meta_cats=meta_cats, addl_ner=addl_ner)
cls.log.info(cat.get_model_card()) # Print the model card
return cat
def __call__(self, text: Optional[str], do_train: bool = False) -> Optional[Doc]:
r"""
Push the text through the pipeline.
Args:
text (string):
The text to be annotated, if the text length is longer than
self.config.preprocessing['max_document_length'] it will be trimmed to that length.
do_train (bool, defaults to `False`):
This causes so many screwups when not there, so I'll force training
to False. To run training it is much better to use the self.train() function
but for some special cases I'm leaving it here also.
Returns:
A single spacy document or multiple spacy documents with the extracted entities
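        Example (illustrative sketch):
            >>> spacy_doc = cat("The patient was diagnosed with hypertension.")
            >>> print(spacy_doc.ents)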
"""
# Should we train - do not use this for training, unless you know what you are doing. Use the
#self.train() function
self.config.linking['train'] = do_train
if text is None:
self.log.error("The input text should be either a string or a sequence of strings but got %s", type(text))
return None
else:
text = self._get_trimmed_text(str(text))
return self.pipe(text)
def __repr__(self):
"""
Prints the model_card for this CAT instance.
Returns:
the 'Model Card' for this CAT instance. This includes NER+L config and any MetaCATs
"""
return self.get_model_card(as_dict=False)
def _print_stats(self,
data: Dict,
epoch: int = 0,
use_project_filters: bool = False,
use_overlaps: bool = False,
use_cui_doc_limit: bool = False,
use_groups: bool = False,
extra_cui_filter: Optional[Set] = None) -> Tuple:
r""" TODO: Refactor and make nice
Print metrics on a dataset (F1, P, R), it will also print the concepts that have the most FP,FN,TP.
Args:
data (list of dict):
The json object that we get from MedCATtrainer on export.
epoch (int):
Used during training, so we know what epoch is it.
use_project_filters (boolean):
Each project in medcattrainer can have filters, do we want to respect those filters
when calculating metrics.
use_overlaps (boolean):
                Allow overlapping entities, nearly always False as it is very difficult to annotate overlapping entities.
use_cui_doc_limit (boolean):
If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words
if the document was annotated for that CUI. Useful in very specific situations when during the annotation
process the set of CUIs changed.
use_groups (boolean):
If True concepts that have groups will be combined and stats will be reported on groups.
extra_cui_filter(Optional[Set]):
This filter will be intersected with all other filters, or if all others are not set then only this one will be used.
Returns:
fps (dict):
False positives for each CUI
fns (dict):
False negatives for each CUI
tps (dict):
True positives for each CUI
cui_prec (dict):
Precision for each CUI
cui_rec (dict):
Recall for each CUI
cui_f1 (dict):
F1 for each CUI
cui_counts (dict):
                Number of occurrences for each CUI
examples (dict):
Examples for each of the fp, fn, tp. Format will be examples['fp']['cui'][<list_of_examples>]
"""
tp = 0
fp = 0
fn = 0
fps: Dict = {}
fns: Dict = {}
tps: Dict = {}
cui_prec: Dict = {}
cui_rec: Dict = {}
cui_f1: Dict = {}
cui_counts: Dict = {}
examples: Dict = {'fp': {}, 'fn': {}, 'tp': {}}
fp_docs: Set = set()
fn_docs: Set = set()
# reset and back up filters
_filters = deepcopy(self.config.linking['filters'])
filters = self.config.linking['filters']
for pind, project in tqdm(enumerate(data['projects']), desc="Stats project", total=len(data['projects']), leave=False):
filters['cuis'] = set()
# Add extrafilter if set
if isinstance(extra_cui_filter, set):
filters['cuis'] = extra_cui_filter
if use_project_filters:
project_filter = get_project_filters(cuis=project.get('cuis', None),
type_ids=project.get('tuis', None),
cdb=self.cdb,
project=project)
# Intersect project filter with existing if it has something
if project_filter:
filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
for dind, doc in tqdm(
enumerate(project["documents"]),
desc="Stats document",
total=len(project["documents"]),
leave=False,
):
anns = self._get_doc_annotations(doc)
# Apply document level filtering, in this case project_filter is ignored while the extra_cui_filter is respected still
if use_cui_doc_limit:
_cuis = set([ann['cui'] for ann in anns])
if _cuis:
filters['cuis'] = intersect_nonempty_set(_cuis, extra_cui_filter)
else:
filters['cuis'] = {'empty'}
spacy_doc: Doc = self(doc['text'])
if use_overlaps:
p_anns = spacy_doc._.ents
else:
p_anns = spacy_doc.ents
anns_norm = []
anns_norm_neg = []
anns_examples = []
anns_norm_cui = []
for ann in anns:
cui = ann['cui']
if check_filters(cui, filters):
if use_groups:
cui = self.cdb.addl_info['cui2group'].get(cui, cui)
if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)):
anns_norm.append((ann['start'], cui))
anns_examples.append({"text": doc['text'][max(0, ann['start']-60):ann['end']+60],
"cui": cui,
"source value": ann['value'],
"acc": 1,
"project index": pind,
"document inedex": dind})
elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)):
anns_norm_neg.append((ann['start'], cui))
if ann.get("validated", True):
                            # This is used to test whether someone was annotating for this CUI in this document
anns_norm_cui.append(cui)
cui_counts[cui] = cui_counts.get(cui, 0) + 1
p_anns_norm = []
p_anns_examples = []
for ann in p_anns:
cui = ann._.cui
if use_groups:
cui = self.cdb.addl_info['cui2group'].get(cui, cui)
p_anns_norm.append((ann.start_char, cui))
p_anns_examples.append({"text": doc['text'][max(0, ann.start_char-60):ann.end_char+60],
"cui": cui,
"source value": ann.text,
"acc": float(ann._.context_similarity),
"project index": pind,
"document inedex": dind})
for iann, ann in enumerate(p_anns_norm):
cui = ann[1]
if ann in anns_norm:
tp += 1
tps[cui] = tps.get(cui, 0) + 1
example = p_anns_examples[iann]
examples['tp'][cui] = examples['tp'].get(cui, []) + [example]
else:
fp += 1
fps[cui] = fps.get(cui, 0) + 1
fp_docs.add(doc.get('name', 'unk'))
# Add example for this FP prediction
example = p_anns_examples[iann]
if ann in anns_norm_neg:
# Means that it really was annotated as negative
example['real_fp'] = True
examples['fp'][cui] = examples['fp'].get(cui, []) + [example]
for iann, ann in enumerate(anns_norm):
if ann not in p_anns_norm:
cui = ann[1]
fn += 1
fn_docs.add(doc.get('name', 'unk'))
fns[cui] = fns.get(cui, 0) + 1
examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]]
try:
prec = tp / (tp + fp)
rec = tp / (tp + fn)
f1 = 2*(prec*rec) / (prec + rec)
print("Epoch: {}, Prec: {}, Rec: {}, F1: {}\n".format(epoch, prec, rec, f1))
print("Docs with false positives: {}\n".format("; ".join([str(x) for x in list(fp_docs)[0:10]])))
print("Docs with false negatives: {}\n".format("; ".join([str(x) for x in list(fn_docs)[0:10]])))
            # Sort fps, fns & tps
fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}
fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}
tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}
# F1 per concept
for cui in tps.keys():
prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))
rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))
f1 = 2*(prec*rec) / (prec + rec)
cui_prec[cui] = prec
cui_rec[cui] = rec
cui_f1[cui] = f1
# Get top 10
pr_fps = [(self.cdb.cui2preferred_name.get(cui,
list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]
pr_fns = [(self.cdb.cui2preferred_name.get(cui,
list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]
pr_tps = [(self.cdb.cui2preferred_name.get(cui,
list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]
print("\n\nFalse Positives\n")
for one in pr_fps:
print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
print("\n\nFalse Negatives\n")
for one in pr_fns:
print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
print("\n\nTrue Positives\n")
for one in pr_tps:
print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
print("*"*110 + "\n")
except Exception:
traceback.print_exc()
# restore filters to original state
self.config.linking['filters'] = _filters
return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples
def _init_ckpts(self, is_resumed, checkpoint):
if self.config.general['checkpoint']['steps'] is not None or checkpoint is not None:
checkpoint_config = CheckpointConfig(**self.config.general.get('checkpoint', {}))
checkpoint_manager = CheckpointManager('cat_train', checkpoint_config)
if is_resumed:
# TODO: probably remove is_resumed mark and always resume if a checkpoint is provided,
#but I'll leave it for now
checkpoint = checkpoint or checkpoint_manager.get_latest_checkpoint()
self.log.info(f"Resume training on the most recent checkpoint at {checkpoint.dir_path}...")
self.cdb = checkpoint.restore_latest_cdb()
self.cdb.config.merge_config(self.config.__dict__)
self.config = self.cdb.config
self._create_pipeline(self.config)
else:
checkpoint = checkpoint or checkpoint_manager.create_checkpoint()
self.log.info(f"Start new training and checkpoints will be saved at {checkpoint.dir_path}...")
return checkpoint
def train(self,
data_iterator: Iterable,
nepochs: int = 1,
fine_tune: bool = True,
progress_print: int = 1000,
checkpoint: Optional[Checkpoint] = None,
is_resumed: bool = False) -> None:
""" Runs training on the data, note that the maximum length of a line
or document is 1M characters. Anything longer will be trimmed.
Args:
data_iterator (Iterable):
                Simple iterator over sentences/documents, e.g. an open file
or an array or anything that we can use in a for loop.
nepochs (int):
Number of epochs for which to run the training.
fine_tune (bool):
If False old training will be removed.
progress_print (int):
Print progress after N lines.
            checkpoint (Optional[medcat.utils.checkpoint.Checkpoint]):
The MedCAT checkpoint object
is_resumed (bool):
If True resume the previous training; If False, start a fresh new training.
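        Example (illustrative sketch; the corpus file is hypothetical):
            >>> with open("unsupervised_corpus.txt") as corpus:
            ...     cat.train(corpus, nepochs=1)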
"""
if not fine_tune:
self.log.info("Removing old training data!")
self.cdb.reset_training()
checkpoint = self._init_ckpts(is_resumed, checkpoint)
latest_trained_step = checkpoint.count if checkpoint is not None else 0
epochal_data_iterator = chain.from_iterable(repeat(data_iterator, nepochs))
for line in islice(epochal_data_iterator, latest_trained_step, None):
if line is not None and line:
# Convert to string
line = str(line).strip()
try:
_ = self(line, do_train=True)
except Exception as e:
self.log.warning("LINE: '%s...' \t WAS SKIPPED", line[0:100])
self.log.warning("BECAUSE OF: %s", str(e))
else:
self.log.warning("EMPTY LINE WAS DETECTED AND SKIPPED")
latest_trained_step += 1
if latest_trained_step % progress_print == 0:
self.log.info("DONE: %s", str(latest_trained_step))
if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
checkpoint.save(cdb=self.cdb, count=latest_trained_step)
self.config.linking['train'] = False
def add_cui_to_group(self, cui: str, group_name: str) -> None:
r"""
        Adds a CUI to a group, will appear in cdb.addl_info['cui2group']
Args:
cui (str):
The concept to be added
group_name (str):
                The group to which the concept will be added
Examples:
>>> cat.add_cui_to_group("S-17", 'pain')
"""
# Add group_name
self.cdb.addl_info['cui2group'][cui] = group_name
def unlink_concept_name(self, cui: str, name: str, preprocessed_name: bool = False) -> None:
r"""
Unlink a concept name from the CUI (or all CUIs if full_unlink), removes the link from
the Concept Database (CDB). As a consequence medcat will never again link the `name`
to this CUI - meaning the name will not be detected as a concept in the future.
Args:
cui (str):
The CUI from which the `name` will be removed
name (str):
The span of text to be removed from the linking dictionary
Examples:
>>> # To never again link C0020538 to HTN
>>> cat.unlink_concept_name('C0020538', 'htn', False)
"""
cuis = [cui]
if preprocessed_name:
names = {name: 'nothing'}
else:
names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
# If full unlink find all CUIs
if self.config.general.get('full_unlink', False):
for n in names:
cuis.extend(self.cdb.name2cuis.get(n, []))
# Remove name from all CUIs
for c in cuis:
self.cdb.remove_names(cui=c, names=names)
def add_and_train_concept(self,
cui: str,
name: str,
spacy_doc: Optional[Doc] = None,
spacy_entity: Optional[Union[List[Token], Span]] = None,
ontologies: Set = set(),
name_status: str = 'A',
type_ids: Set = set(),
description: str = '',
full_build: bool = True,
negative: bool = False,
devalue_others: bool = False,
do_add_concept: bool = True) -> None:
r""" Add a name to an existing concept, or add a new concept, or do not do anything if the name or concept already exists. Perform
training if spacy_entity and spacy_doc are set.
Args:
cui (str):
CUI of the concept
name (str):
Name to be linked to the concept (in the case of MedCATtrainer this is simply the
selected value in text, no preprocessing or anything needed).
spacy_doc (spacy.tokens.Doc):
                Spacy representation of the document that was manually annotated.
spacy_entity (Optional[Union[List[Token], Span]]):
Given the spacy document, this is the annotated span of text - list of annotated tokens that are marked with this CUI.
negative (bool):
Is this a negative or positive example.
devalue_others:
If set, cuis to which this name is assigned and are not `cui` will receive negative training given
that negative=False.
\*\*other:
Refer to medcat.cat.cdb.CDB.add_concept
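        Example (illustrative sketch; the CUI and name are hypothetical):
            >>> cat.add_and_train_concept(cui='C0020538', name='hypertension')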
"""
names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
# Only if not negative, otherwise do not add the new name if in fact it should not be detected
if do_add_concept and not negative:
self.cdb.add_concept(cui=cui, names=names, ontologies=ontologies, name_status=name_status, type_ids=type_ids, description=description,
full_build=full_build)
if spacy_entity is not None and spacy_doc is not None:
# Train Linking
self.linker.context_model.train(cui=cui, entity=spacy_entity, doc=spacy_doc, negative=negative, names=names)
if not negative and devalue_others:
# Find all cuis
cuis = set()
for n in names:
cuis.update(self.cdb.name2cuis.get(n, []))
# Remove the cui for which we just added positive training
if cui in cuis:
cuis.remove(cui)
# Add negative training for all other CUIs that link to these names
for _cui in cuis:
self.linker.context_model.train(cui=_cui, entity=spacy_entity, doc=spacy_doc, negative=True)
def train_supervised(self,
data_path: str,
reset_cui_count: bool = False,
nepochs: int = 1,
print_stats: int = 0,
use_filters: bool = False,
terminate_last: bool = False,
use_overlaps: bool = False,
use_cui_doc_limit: bool = False,
test_size: int = 0,
devalue_others: bool = False,
use_groups: bool = False,
never_terminate: bool = False,
train_from_false_positives: bool = False,
extra_cui_filter: Optional[Set] = None,
checkpoint: Optional[Checkpoint] = None,
is_resumed: bool = False) -> Tuple:
r""" TODO: Refactor, left from old
Run supervised training on a dataset from MedCATtrainer. Please take care that this is more a simulated
        online training than supervised training.
Args:
data_path (str):
The path to the json file that we get from MedCATtrainer on export.
reset_cui_count (boolean):
Used for training with weight_decay (annealing). Each concept has a count that is there
from the beginning of the CDB; that count is used for annealing. Resetting the count will
significantly increase the training impact. This will reset the count only for concepts
that exist in the training data.
nepochs (int):
Number of epochs for which to run the training.
print_stats (int):
If > 0 it will print stats every print_stats epochs.
use_filters (boolean):
Each project in MedCATtrainer can have filters; if True, those filters are respected
when calculating metrics.
terminate_last (boolean):
If true, concept termination will be done after all training.
use_overlaps (boolean):
Allow overlapping entities, nearly always False as it is very difficult to annotate overlapping entities.
use_cui_doc_limit (boolean):
If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words
if the document was annotated for that CUI. Useful in very specific situations when during the annotation
process the set of CUIs changed.
test_size (float):
If > 0 the data set will be split into train/test based on this ratio. Should be between 0 and 1.
Usually 0.1 is fine.
devalue_others(bool):
Check add_and_train_concept for more details.
use_groups (boolean):
If True concepts that have groups will be combined and stats will be reported on groups.
never_terminate (boolean):
If True no termination will be applied
train_from_false_positives (boolean):
If True it will use false positive examples detected by medcat and train from them as negative examples.
extra_cui_filter(Optional[Set]):
This filter will be intersected with all other filters, or if all others are not set then only this one will be used.
checkpoint (Optional[medcat.utils.checkpoint.Checkpoint]):
The MedCAT Checkpoint object
is_resumed (bool):
If True resume the previous training; If False, start a fresh new training.
Returns:
fp (dict):
False positives for each CUI
fn (dict):
False negatives for each CUI
tp (dict):
True positives for each CUI
p (dict):
Precision for each CUI
r (dict):
Recall for each CUI
f1 (dict):
F1 for each CUI
cui_counts (dict):
Number of occurrences for each CUI
examples (dict):
FP/FN examples of sentences for each CUI
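Examples:
A minimal sketch; assumes ``cat`` is a loaded CAT instance and that
'MedCAT_Export.json' is a placeholder for a MedCATtrainer export on disk.
>>> fp, fn, tp, p, r, f1, cui_counts, examples = cat.train_supervised(
...     data_path='MedCAT_Export.json', nepochs=1, test_size=0.1, print_stats=1)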
"""
checkpoint = self._init_ckpts(is_resumed, checkpoint)
# Backup filters
_filters = deepcopy(self.config.linking['filters'])
filters = self.config.linking['filters']
fp = fn = tp = p = r = f1 = examples = {}
with open(data_path) as f:
data = json.load(f)
cui_counts = {}
if test_size == 0:
self.log.info("Running without a test set, or train==test")
test_set = data
train_set = data
else:
train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size)
if print_stats > 0:
fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
use_project_filters=use_filters,
use_cui_doc_limit=use_cui_doc_limit,
use_overlaps=use_overlaps,
use_groups=use_groups,
extra_cui_filter=extra_cui_filter)
if reset_cui_count:
# Get all CUIs
cuis = []
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
cuis.append(ann['cui'])
for cui in set(cuis):
if cui in self.cdb.cui2count_train:
self.cdb.cui2count_train[cui] = 100
# Remove entities that were terminated
if not never_terminate:
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if ann.get('killed', False):
self.unlink_concept_name(ann['cui'], ann['value'])
latest_trained_step = checkpoint.count if checkpoint is not None else 0
current_epoch, current_project, current_document = self._get_training_start(train_set, latest_trained_step)
for epoch in trange(current_epoch, nepochs, initial=current_epoch, total=nepochs, desc='Epoch', leave=False):
# Print acc before training
for idx_project in trange(current_project, len(train_set['projects']), initial=current_project, total=len(train_set['projects']), desc='Project', leave=False):
project = train_set['projects'][idx_project]
# Set filters in case we are using train_from_false_positives
filters['cuis'] = set()
if isinstance(extra_cui_filter, set):
filters['cuis'] = extra_cui_filter
if use_filters:
project_filter = get_project_filters(cuis=project.get('cuis', None),
type_ids=project.get('tuis', None),
cdb=self.cdb,
project=project)
if project_filter:
filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
for idx_doc in trange(current_document, len(project['documents']), initial=current_document, total=len(project['documents']), desc='Document', leave=False):
doc = project['documents'][idx_doc]
spacy_doc: Doc = self(doc['text'])
# Compatibility with old output where annotations are a list
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if not ann.get('killed', False):
cui = ann['cui']
start = ann['start']
end = ann['end']
spacy_entity = tkns_from_doc(spacy_doc=spacy_doc, start=start, end=end)
deleted = ann.get('deleted', False)
self.add_and_train_concept(cui=cui,
name=ann['value'],
spacy_doc=spacy_doc,
spacy_entity=spacy_entity,
negative=deleted,
devalue_others=devalue_others)
if train_from_false_positives:
fps: List[Span] = get_false_positives(doc, spacy_doc)
for fp in fps:
fp_: Span = fp
self.add_and_train_concept(cui=fp_._.cui,
name=fp_.text,
spacy_doc=spacy_doc,
spacy_entity=fp_,
negative=True,
do_add_concept=False)
latest_trained_step += 1
if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
checkpoint.save(self.cdb, latest_trained_step)
if terminate_last and not never_terminate:
# Remove entities that were terminated, but after all training is done
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if ann.get('killed', False):
self.unlink_concept_name(ann['cui'], ann['value'])
if print_stats > 0 and (epoch + 1) % print_stats == 0:
fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
epoch=epoch + 1,
use_project_filters=use_filters,
use_cui_doc_limit=use_cui_doc_limit,
use_overlaps=use_overlaps,
use_groups=use_groups,
extra_cui_filter=extra_cui_filter)
# Set the filters again
self.config.linking['filters'] = _filters
return fp, fn, tp, p, r, f1, cui_counts, examples
def get_entities(self,
text: str,
only_cui: bool = False,
addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed']) -> Dict:
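r""" Get entities for a single text and return them as a dictionary.
Example (a minimal sketch; assumes ``cat`` is a loaded CAT instance and the text is a placeholder):
>>> out = cat.get_entities("Patient has a history of hypertension.")
"""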
doc = self(text)
out = self._doc_to_out(doc, only_cui, addl_info)
return out
def get_entities_multi_texts(self,
texts: Union[Iterable[str], Iterable[Tuple]],
only_cui: bool = False,
addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed'],
n_process: Optional[int] = None,
batch_size: Optional[int] = None) -> List[Dict]:
r""" Get entities for multiple texts, optionally using multiple processes.
texts: texts to be annotated, either plain strings or (id, text) tuples
n_process: number of processes; if None the texts are processed sequentially
batch_size: number of texts to buffer when multiprocessing
return: a list of entity dictionaries, one per input text
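Example (a minimal sketch; assumes ``cat`` is a loaded CAT instance and the texts are placeholders):
>>> out = cat.get_entities_multi_texts(['He has hypertension.',
...                                     'No sign of diabetes.'])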
"""
out: List[Dict] = []
if n_process is None:
texts_ = self._generate_trimmed_texts(texts)
for text in texts_:
out.append(self._doc_to_out(self(text), only_cui, addl_info))
else:
self.pipe.set_error_handler(self._pipe_error_handler)
try:
texts_ = self._get_trimmed_texts(texts)
docs = self.pipe.batch_multi_process(texts_, n_process, batch_size)
for doc in tqdm(docs, total=len(texts_)):
doc = None if doc.text.strip() == '' else doc
out.append(self._doc_to_out(doc, only_cui, addl_info, out_with_text=True))
# Currently spaCy cannot mark which pieces of text failed within the pipe, hence this workaround,
# which also assumes the texts are different from each other.
if len(out) < len(texts_):
self.log.warning("Found at least one failed batch and set output for enclosed texts to empty")
for i, text in enumerate(texts_):
if i == len(out):
out.append(self._doc_to_out(None, only_cui, addl_info))
elif out[i].get('text', '') != text:
out.insert(i, self._doc_to_out(None, only_cui, addl_info))
cnf_annotation_output = getattr(self.config, 'annotation_output', {})
if not(cnf_annotation_output.get('include_text_in_output', False)):
for o in out:
if o is not None:
o.pop('text', None)
finally:
self.pipe.reset_error_handler()
return out
def get_json(self, text: str, only_cui: bool = False, addl_info=['cui2icd10', 'cui2ontologies']) -> str:
""" Get output in json format
text: text to be annotated
return: json with fields {'annotations': <entities>, 'text': text}
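Example (a minimal sketch; assumes ``cat`` is a loaded CAT instance and the text is a placeholder):
>>> json_str = cat.get_json("Patient has a history of hypertension.")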
"""
ents = self.get_entities(text, only_cui, addl_info=addl_info)['entities']
out = {'annotations': ents, 'text': text}
return json.dumps(out)
@staticmethod
def _get_training_start(train_set, latest_trained_step):
total_steps_per_epoch = sum([1 for project in train_set['projects'] for _ in project['documents']])
if total_steps_per_epoch == 0:
raise ValueError("MedCATtrainer export contains no documents")
current_epoch, last_step_in_epoch = divmod(latest_trained_step, total_steps_per_epoch)
document_count = 0
current_project = 0
current_document = 0
for idx_project, project in enumerate(train_set['projects']):
for idx_doc, _ in enumerate(project['documents']):
document_count += 1
if document_count == last_step_in_epoch:
current_project = idx_project
current_document = idx_doc
break
if current_project > 0:
break
current_document = 0
return current_epoch, current_project, current_document
def _separate_nn_components(self):
# Loop through the pipe components and separate out the neural-network (GPU) ones
nn_components = []
for component in self.pipe.spacy_nlp.components:
if isinstance(component[1], MetaCAT) or isinstance(component[1], TransformersNER):
self.pipe.spacy_nlp.disable_pipe(component[0])
nn_components.append(component)
return nn_components
def _run_nn_components(self, docs: Dict, nn_components: List, id2text: Dict) -> None:
r""" This will add meta_anns in-place to the docs dict.
"""
self.log.debug("Running GPU components separately")
# First convert the docs into the fake spacy doc format
spacy_docs = json_to_fake_spacy(docs, id2text=id2text)
# Disable component locks also
for name, component in nn_components:
component.config.general['disable_component_lock'] = True
# For meta_cat components
for name, component in [c for c in nn_components if isinstance(c[1], MetaCAT)]:
spacy_docs = component.pipe(spacy_docs)
for spacy_doc in spacy_docs:
for ent in spacy_doc.ents:
docs[spacy_doc.id]['entities'][ent._.id]['meta_anns'].update(ent._.meta_anns)
def _batch_generator(self, data: Iterable, batch_size_chars: int, skip_ids: Set = set()):
docs = []
char_count = 0
for doc in data:
if doc[0] not in skip_ids:
char_count += len(str(doc[1]))
docs.append(doc)
if char_count < batch_size_chars:
continue
yield docs
docs = []
char_count = 0
if len(docs) > 0:
yield docs
def _save_docs_to_file(self, docs: Iterable, annotated_ids: List[str], save_dir_path: str, annotated_ids_path: Optional[str], part_counter: int = 0) -> int:
path = os.path.join(save_dir_path, 'part_{}.pickle'.format(part_counter))
pickle.dump(docs, open(path, "wb"))
self.log.info("Saved part: %s, to: %s", part_counter, path)
part_counter = part_counter + 1 # Increase after saving, as it should point to the next part
if annotated_ids_path is not None:
pickle.dump((annotated_ids, part_counter), open(annotated_ids_path, 'wb'))
return part_counter
def multiprocessing(self,
data: Union[List[Tuple], Iterable[Tuple]],
nproc: int = 2,
batch_size_chars: int = 5000 * 1000,
only_cui: bool = False,
addl_info: List[str] = [],
separate_nn_components: bool = True,
out_split_size_chars: Optional[int] = None,
save_dir_path: str = os.path.abspath(os.getcwd()),
min_free_memory=0.1) -> Dict:
r""" Run multiprocessing for inference. If save_dir_path and out_split_size_chars are used, this will also continue annotating
documents if something is already saved in that directory.
Args:
data:
Iterator or array with format: [(id, text), (id, text), ...]
nproc (`int`, defaults to 2):
Number of processes
batch_size_chars (`int`, defaults to 5000000):
Size of a batch in number of characters; this should be around: NPROC * average_document_length * 200
separate_nn_components (`bool`, defaults to True):
If set the medcat pipe will be broken up into NN and not-NN components and
they will be run sequentially. This is useful as the NN components
have batching and like to process many docs at once, while the rest of the pipeline
runs the documents one by one.
out_split_size_chars (`int`, None):
If set, once more than out_split_size_chars characters have been annotated
they will be saved to a file (save_dir_path) and the memory cleared. Recommended
value is 20*batch_size_chars.
save_dir_path(`str`, defaults to the current working directory):
Where to save the annotated documents if splitting.
min_free_memory(`float`, defaults to 0.1):
If set a process will not start unless there is at least this much RAM memory left,
should be a range between [0, 1] meaning how much of the memory has to be free. Helps when annotating
very large datasets because spacy is not the best with memory management and multiprocessing.
Returns:
A dictionary: {id: doc_json, id2: doc_json2, ...}. In case out_split_size_chars is used,
the last batch will be returned while that and all previous batches will be
written to disk (save_dir_path).
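Example:
A minimal sketch; assumes ``cat`` is a loaded CAT instance and the
(id, text) pairs are placeholders.
>>> data = [(1, 'Patient has hypertension.'), (2, 'No sign of diabetes.')]
>>> docs = cat.multiprocessing(data, nproc=2)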
"""
for comp in self.pipe.spacy_nlp.components:
if isinstance(comp[1], TransformersNER):
raise Exception("Please do not use multiprocessing when running a transformer model for NER, run sequentially.")
# Set max document length
self.pipe.spacy_nlp.max_length = self.config.preprocessing.get('max_document_length', 1000000)
if self._meta_cats and not separate_nn_components:
# Hack for torch multithreading, which causes problems when not using
# separate_nn_components; needed for CPU runs only
import torch
torch.set_num_threads(1)
nn_components = []
if separate_nn_components:
nn_components = self._separate_nn_components()
if save_dir_path is not None:
os.makedirs(save_dir_path, exist_ok=True)
# Note: the divisor "5" below is a magic number; the reason for this choice is not documented.
internal_batch_size_chars = batch_size_chars // (5 * nproc)
annotated_ids_path = os.path.join(save_dir_path, 'annotated_ids.pickle') if save_dir_path is not None else None
if annotated_ids_path is not None and os.path.exists(annotated_ids_path):
annotated_ids, part_counter = pickle.load(open(annotated_ids_path, 'rb'))
else:
annotated_ids = []
part_counter = 0
docs = {}
_start_time = time.time()
_batch_counter = 0 # Used for splitting the output, counts batches in between saves
for batch in self._batch_generator(data, batch_size_chars, skip_ids=set(annotated_ids)):
self.log.info("Annotated until now: %s docs; Current BS: %s docs; Elapsed time: %.2f minutes",
len(annotated_ids),
len(batch),
(time.time() - _start_time)/60)
try:
_docs = self._multiprocessing_batch(data=batch,
nproc=nproc,
only_cui=only_cui,
batch_size_chars=internal_batch_size_chars,
addl_info=addl_info,
nn_components=nn_components,
min_free_memory=min_free_memory)
docs.update(_docs)
annotated_ids.extend(_docs.keys())
_batch_counter += 1
del _docs
if out_split_size_chars is not None and (_batch_counter * batch_size_chars) > out_split_size_chars:
# Save to file and reset the docs
part_counter = self._save_docs_to_file(docs=docs,
annotated_ids=annotated_ids,
save_dir_path=save_dir_path,
annotated_ids_path=annotated_ids_path,
part_counter=part_counter)
del docs
docs = {}
_batch_counter = 0
except Exception as e:
self.log.warning("Failed an outer batch in the multiprocessing script")
self.log.warning(e, exc_info=True, stack_info=True)
# Save the last batch
if out_split_size_chars is not None and len(docs) > 0:
# Save to file and reset the docs
self._save_docs_to_file(docs=docs,
annotated_ids=annotated_ids,
save_dir_path=save_dir_path,
annotated_ids_path=annotated_ids_path,
part_counter=part_counter)
# Enable the GPU Components again
if separate_nn_components:
for name, _ in nn_components:
# No need to do anything else as it was already in the pipe
self.pipe.spacy_nlp.enable_pipe(name)
return docs
def _multiprocessing_batch(self,
data: Union[List[Tuple], Iterable[Tuple]],
nproc: int = 8,
batch_size_chars: int = 1000000,
only_cui: bool = False,
addl_info: List[str] = [],
nn_components: List = [],
min_free_memory: int = 0) -> Dict:
r""" Run multiprocessing on one batch
Args:
data:
Iterator or array with format: [(id, text), (id, text), ...]
nproc (`int`, defaults to 8):
Number of processors
batch_size_chars (`int`, defaults to 1000000):
Size of a batch in number of characters
Returns:
A dictionary: {id: doc_json, id2: doc_json2, ...}
"""
# Create the input output for MP
with Manager() as manager:
out_list = manager.list()
lock = manager.Lock()
in_q = manager.Queue(maxsize=10*nproc)
id2text = {}
for batch in self._batch_generator(data, batch_size_chars):
if nn_components:
# We need this for the json_to_fake_spacy
id2text.update({k:v for k,v in batch})
in_q.put(batch)
# Final data point for workers
for _ in range(nproc):
in_q.put(None)
sleep(2)
# Create processes
procs = []
for i in range(nproc):
p = Process(target=self._mp_cons,
kwargs={'in_q': in_q,
'out_list': out_list,
'pid': i,
'only_cui': only_cui,
'addl_info': addl_info,
'min_free_memory': min_free_memory,
'lock': lock})
p.start()
procs.append(p)
# Join processes
for p in procs:
p.join()
docs = {}
# Convert the (id, doc) tuples into a dict
docs.update({k:v for k,v in out_list})
# If we have separate GPU components, run them now
if nn_components:
try:
self._run_nn_components(docs, nn_components, id2text=id2text)
except Exception as e:
self.log.warning(e, exc_info=True, stack_info=True)
return docs
def multiprocessing_pipe(self,
in_data: Union[List[Tuple], Iterable[Tuple]],
nproc: Optional[int] = None,
batch_size: Optional[int] = None,
only_cui: bool = False,
addl_info: List[str] = [],
return_dict: bool = True,
batch_factor: int = 2) -> Union[List[Tuple], Dict]:
r""" Run multiprocessing NOT FOR TRAINING
in_data: a list with format: [(id, text), (id, text), ...]
nproc: the number of processors
batch_size: the number of texts to buffer
return_dict: a flag for returning either a dict or a list of tuples
return: a dict: {id: doc_json, id: doc_json, ...} or if return_dict is False, a list of tuples: [(id, doc_json), (id, doc_json), ...]
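Example (a minimal sketch; assumes ``cat`` is a loaded CAT instance and the (id, text) pairs are placeholders):
>>> data = [(1, 'Patient has hypertension.'), (2, 'No sign of diabetes.')]
>>> out = cat.multiprocessing_pipe(data, nproc=2)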
"""
out: Union[Dict, List[Tuple]]
if nproc == 0:
raise ValueError("nproc cannot be set to zero")
in_data = list(in_data) if isinstance(in_data, Iterable) else in_data
n_process = nproc if nproc is not None else min(max(cpu_count() - 1, 1), math.ceil(len(in_data) / batch_factor))
batch_size = batch_size if batch_size is not None else math.ceil(len(in_data) / (batch_factor * abs(n_process)))
start_method = None
try:
if self._meta_cats:
import torch
if torch.multiprocessing.get_start_method() != "spawn":
start_method = torch.multiprocessing.get_start_method()
torch.multiprocessing.set_start_method("spawn", force=True)
entities = self.get_entities_multi_texts(texts=in_data, only_cui=only_cui, addl_info=addl_info,
n_process=n_process, batch_size=batch_size)
finally:
if start_method is not None:
import torch
torch.multiprocessing.set_start_method(start_method, force=True)
if return_dict:
out = {}
for idx, data in enumerate(in_data):
out[data[0]] = entities[idx]
else:
out = []
for idx, data in enumerate(in_data):
out.append((data[0], entities[idx]))
return out
def _mp_cons(self, in_q: Queue, out_list: List, min_free_memory: int, lock: Lock, pid: int = 0, only_cui: bool = False, addl_info: List = []) -> None:
out: List = []
while True:
if not in_q.empty():
if psutil.virtual_memory().available / psutil.virtual_memory().total < min_free_memory:
with lock:
out_list.extend(out)
# Stop a process if there is not enough memory left
break
data = in_q.get()
if data is None:
with lock:
out_list.extend(out)
break
for i_text, text in data:
try:
# Annotate document
doc = self.get_entities(text=text, only_cui=only_cui, addl_info=addl_info)
out.append((i_text, doc))
except Exception as e:
self.log.warning("PID: %s failed one document in _mp_cons, running will continue normally. \n" +
"Document length in chars: %s, and ID: %s", pid, len(str(text)), i_text)
self.log.warning(str(e))
sleep(2)
def _doc_to_out(self,
doc: Doc,
only_cui: bool,
addl_info: List[str],
out_with_text: bool = False) -> Dict:
out: Dict = {'entities': {}, 'tokens': []}
cnf_annotation_output = getattr(self.config, 'annotation_output', {})
if doc is not None:
out_ent: Dict = {}
if self.config.general.get('show_nested_entities', False):
_ents = []
for _ent in doc._.ents:
entity = Span(doc, _ent['start'], _ent['end'], label=_ent['label'])
entity._.cui = _ent['cui']
entity._.detected_name = _ent['detected_name']
entity._.context_similarity = _ent['context_similarity']
entity._.id = _ent['id']
if 'meta_anns' in _ent:
entity._.meta_anns = _ent['meta_anns']
_ents.append(entity)
else:
_ents = doc.ents
if cnf_annotation_output.get("lowercase_context", True):
doc_tokens = [tkn.text_with_ws.lower() for tkn in list(doc)]
else:
doc_tokens = [tkn.text_with_ws for tkn in list(doc)]
if cnf_annotation_output.get('doc_extended_info', False):
# Add tokens if extended info
out['tokens'] = doc_tokens
context_left = cnf_annotation_output.get('context_left', -1)
context_right = cnf_annotation_output.get('context_right', -1)
doc_extended_info = cnf_annotation_output.get('doc_extended_info', False)
for _, ent in enumerate(_ents):
cui = str(ent._.cui)
if not only_cui:
out_ent['pretty_name'] = self.cdb.get_name(cui)
out_ent['cui'] = cui
out_ent['type_ids'] = list(self.cdb.cui2type_ids.get(cui, ''))
out_ent['types'] = [self.cdb.addl_info['type_id2name'].get(tui, '') for tui in out_ent['type_ids']]
out_ent['source_value'] = ent.text
out_ent['detected_name'] = str(ent._.detected_name)
out_ent['acc'] = float(ent._.context_similarity)
out_ent['context_similarity'] = float(ent._.context_similarity)
out_ent['start'] = ent.start_char
out_ent['end'] = ent.end_char
for addl in addl_info:
tmp = self.cdb.addl_info.get(addl, {}).get(cui, [])
out_ent[addl.split("2")[-1]] = list(tmp) if type(tmp) == set else tmp
out_ent['id'] = ent._.id
out_ent['meta_anns'] = {}
if doc_extended_info:
out_ent['start_tkn'] = ent.start
out_ent['end_tkn'] = ent.end
if context_left > 0 and context_right > 0:
out_ent['context_left'] = doc_tokens[max(ent.start - context_left, 0):ent.start]
out_ent['context_right'] = doc_tokens[ent.end:min(ent.end + context_right, len(doc_tokens))]
out_ent['context_center'] = doc_tokens[ent.start:ent.end]
if hasattr(ent._, 'meta_anns') and ent._.meta_anns:
out_ent['meta_anns'] = ent._.meta_anns
out['entities'][out_ent['id']] = dict(out_ent)
else:
out['entities'][ent._.id] = cui
if cnf_annotation_output.get('include_text_in_output', False) or out_with_text:
out['text'] = doc.text
return out
def _get_trimmed_text(self, text: Optional[str]) -> str:
return text[0:self.config.preprocessing.get('max_document_length')] if text is not None and len(text) > 0 else ""
def _generate_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> Iterable[str]:
text_: str
for text in texts:
text_ = text[1] if isinstance(text, tuple) else text
yield self._get_trimmed_text(text_)
def _get_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> List[str]:
trimmed: List = []
text_: str
for text in texts:
text_ = text[1] if isinstance(text, tuple) else text
trimmed.append(self._get_trimmed_text(text_))
return trimmed
@staticmethod
def _pipe_error_handler(proc_name: str, proc: "Pipe", docs: List[Doc], e: Exception) -> None:
CAT.log.warning("Exception raised when applying component %s to a batch of docs.", proc_name)
CAT.log.warning(e, exc_info=True, stack_info=True)
if docs is not None:
CAT.log.warning("Docs contained in the batch:")
for doc in docs:
if hasattr(doc, "text"):
CAT.log.warning("%s...", doc.text[:50])
@staticmethod
def _get_doc_annotations(doc: Doc):
if type(doc['annotations']) == list:
return doc['annotations']
if type(doc['annotations']) == dict:
return doc['annotations'].values()
return None
def destroy_pipe(self):
self.pipe.destroy() | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/cat.py | cat.py |
import os
import json
import logging
import torch
import numpy
from multiprocessing import Lock
from torch import nn, Tensor
from spacy.tokens import Doc
from datetime import datetime
from typing import Iterable, Iterator, Optional, Dict, List, Tuple, cast, Union
from medcat.utils.hasher import Hasher
from medcat.config_meta_cat import ConfigMetaCAT
from medcat.utils.meta_cat.ml_utils import predict, train_model, set_all_seeds, eval_model
from medcat.utils.meta_cat.data_utils import prepare_from_json, encode_category_values
from medcat.pipeline.pipe_runner import PipeRunner
from medcat.tokenizers.meta_cat_tokenizers import TokenizerWrapperBase
from medcat.utils.meta_cat.data_utils import Doc as FakeDoc
# It should be safe to do this always, as all other multiprocessing
# will be finished before data comes to meta_cat
os.environ["TOKENIZERS_PARALLELISM"] = "true"
class MetaCAT(PipeRunner):
r""" A spaCy pipeline component that trains and applies meta-annotation
(context classification) models on top of detected entities. (TODO: expand documentation.)
"""
# Custom pipeline component name
name = 'meta_cat'
_component_lock = Lock()
# Add file and console handlers
log = logging.getLogger(__package__)
# Override
def __init__(self,
tokenizer: Optional[TokenizerWrapperBase] = None,
embeddings: Optional[Union[Tensor, numpy.ndarray]] = None,
config: Optional[ConfigMetaCAT] = None) -> None:
if config is None:
config = ConfigMetaCAT()
self.config = config
set_all_seeds(config.general['seed'])
if tokenizer is not None:
# Set it in the config
config.general['tokenizer_name'] = tokenizer.name
config.general['vocab_size'] = tokenizer.get_size()
# We will also set the padding
config.model['padding_idx'] = tokenizer.get_pad_id()
self.tokenizer = tokenizer
self.embeddings = torch.tensor(embeddings, dtype=torch.float32) if embeddings is not None else None
self.model = self.get_model(embeddings=self.embeddings)
def get_model(self, embeddings: Optional[Tensor]) -> nn.Module:
config = self.config
if config.model['model_name'] == 'lstm':
from medcat.utils.meta_cat.models import LSTM
model = LSTM(embeddings, config)
else:
raise ValueError("Unknown model name %s" % config.model['model_name'])
return model
def get_hash(self):
r""" A partial hash trying to catch differences between models
"""
hasher = Hasher()
# Set last_train_on if None
if self.config.train['last_train_on'] is None:
self.config.train['last_train_on'] = datetime.now().timestamp()
hasher.update(self.config.get_hash())
return hasher.hexdigest()
def train(self, json_path: Union[str, list], save_dir_path: Optional[str] = None) -> Dict:
r""" Train or continue training a model given a json_path containing a MedCATtrainer export. It will
continue training if an existing model is loaded or start new training if the model is blank/new.
Args:
json_path (`str` or `list`):
Path/Paths to a MedCATtrainer export containing the meta_annotations we want to train for.
save_dir_path (`str`, optional, defaults to `None`):
In case we have auto_save_model (meaning during the training the best model will be saved)
we need to set a save path.
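Example:
A minimal sketch; assumes ``mc`` is a MetaCAT instance with a tokenizer and
embeddings, and that the export path and save directory are placeholders.
>>> report = mc.train(json_path='medcattrainer_export.json',
...                   save_dir_path='meta_cat_model')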
"""
g_config = self.config.general
t_config = self.config.train
# Load the medcattrainer export
if isinstance(json_path, str):
json_path = [json_path]
def merge_data_loaded(base, other):
if not base:
return other
elif other is None:
return base
else:
for p in other['projects']:
base['projects'].append(p)
return base
# Merge data from all different data paths
data_loaded: Dict = {}
for path in json_path:
with open(path, 'r') as f:
data_loaded = merge_data_loaded(data_loaded, json.load(f))
# Create directories if they don't exist
if t_config['auto_save_model']:
if save_dir_path is None:
raise Exception("The `save_dir_path` argument is required if `auto_save_model` is `True` in the config")
else:
os.makedirs(save_dir_path, exist_ok=True)
# Prepare the data
assert self.tokenizer is not None
data = prepare_from_json(data_loaded, g_config['cntx_left'], g_config['cntx_right'], self.tokenizer,
cui_filter=t_config['cui_filter'],
replace_center=g_config['replace_center'], prerequisites=t_config['prerequisites'],
lowercase=g_config['lowercase'])
# Check if the category name is there
category_name = g_config['category_name']
if category_name not in data:
raise Exception(
"The category name does not exist in this json file. You've provided '{}', while the possible options are: {}".format(
category_name, " | ".join(list(data.keys()))))
data = data[category_name]
category_value2id = g_config['category_value2id']
if not category_value2id:
# Encode the category values
data, category_value2id = encode_category_values(data)
g_config['category_value2id'] = category_value2id
else:
# We already have everything, just get the data
data, _ = encode_category_values(data, existing_category_value2id=category_value2id)
# Make sure the config number of classes is the same as the one found in the data
if len(category_value2id) != self.config.model['nclasses']:
self.log.warning(
"The number of classes set in the config is not the same as the one found in the data: {} vs {}".format(
self.config.model['nclasses'], len(category_value2id)))
self.log.warning("Auto-setting the nclasses value in config and rebuilding the model.")
self.config.model['nclasses'] = len(category_value2id)
self.model = self.get_model(embeddings=self.embeddings)
report = train_model(self.model, data=data, config=self.config, save_dir_path=save_dir_path)
# If autosave, then load the best model here
if t_config['auto_save_model']:
if save_dir_path is None:
raise Exception("The `save_dir_path` argument is required if `auto_save_model` is `True` in the config")
else:
path = os.path.join(save_dir_path, 'model.dat')
device = torch.device(g_config['device'])
self.model.load_state_dict(torch.load(path, map_location=device))
# Save everything now
self.save(save_dir_path=save_dir_path)
self.config.train['last_train_on'] = datetime.now().timestamp()
return report
def eval(self, json_path: str) -> Dict:
g_config = self.config.general
t_config = self.config.train
with open(json_path, 'r') as f:
data_loaded: Dict = json.load(f)
# Prepare the data
assert self.tokenizer is not None
data = prepare_from_json(data_loaded, g_config['cntx_left'], g_config['cntx_right'], self.tokenizer,
cui_filter=t_config['cui_filter'],
replace_center=g_config['replace_center'], prerequisites=t_config['prerequisites'],
lowercase=g_config['lowercase'])
# Check if the category name is there
category_name = g_config['category_name']
if category_name not in data:
raise Exception("The category name does not exist in this json file.")
data = data[category_name]
# We already have everything, just get the data
category_value2id = g_config['category_value2id']
data, _ = encode_category_values(data, existing_category_value2id=category_value2id)
# Run evaluation
assert self.tokenizer is not None
result = eval_model(self.model, data, config=self.config, tokenizer=self.tokenizer)
return result
def save(self, save_dir_path: str) -> None:
r""" Save all components of this class to a file
Args:
save_dir_path(`str`):
Path to the directory where everything will be saved.
"""
# Create dirs if they do not exist
os.makedirs(save_dir_path, exist_ok=True)
# Save tokenizer
assert self.tokenizer is not None
self.tokenizer.save(save_dir_path)
# Save config
self.config.save(os.path.join(save_dir_path, 'config.json'))
# Save the model
model_save_path = os.path.join(save_dir_path, 'model.dat')
torch.save(self.model.state_dict(), model_save_path)
# This is everything we need to save from the class, we do not
# save the class itself.
@classmethod
def load(cls, save_dir_path: str, config_dict: Optional[Dict] = None) -> "MetaCAT":
r""" Load a meta_cat object.
Args:
save_dir_path (`str`):
The directory where all was saved.
config_dict (`dict`):
This can be used to overwrite saved parameters for this meta_cat
instance. It is needed in certain cases, for example when models are auto-deployed.
Returns:
meta_cat (`medcat.MetaCAT`):
The loaded MetaCAT instance.
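Example (a minimal sketch; the directory path is a placeholder):
>>> mc = MetaCAT.load('path/to/meta_cat_dir')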
"""
# Load config
config = cast(ConfigMetaCAT, ConfigMetaCAT.load(os.path.join(save_dir_path, 'config.json')))
# Overwrite loaded parameters with something new
if config_dict is not None:
config.merge_config(config_dict)
tokenizer: Optional[TokenizerWrapperBase] = None
# Load tokenizer (TODO: This should be converted into a factory or something)
if config.general['tokenizer_name'] == 'bbpe':
from medcat.tokenizers.meta_cat_tokenizers import TokenizerWrapperBPE
tokenizer = TokenizerWrapperBPE.load(save_dir_path)
elif config.general['tokenizer_name'] == 'bert-tokenizer':
from medcat.tokenizers.meta_cat_tokenizers import TokenizerWrapperBERT
tokenizer = TokenizerWrapperBERT.load(save_dir_path)
# Create meta_cat
meta_cat = cls(tokenizer=tokenizer, embeddings=None, config=config)
# Load the model
model_save_path = os.path.join(save_dir_path, 'model.dat')
device = torch.device(config.general['device'])
if not torch.cuda.is_available() and device.type == 'cuda':
MetaCAT.log.warning('Loading a MetaCAT model without GPU availability, stored config used GPU')
config.general['device'] = 'cpu'
device = torch.device('cpu')
meta_cat.model.load_state_dict(torch.load(model_save_path, map_location=device))
return meta_cat
def prepare_document(self, doc: Doc, input_ids: List, offset_mapping: List, lowercase: bool) -> Tuple:
r""" Prepare one document for meta-annotation prediction.
Args:
doc (spacy.tokens.Doc): the spacy document
input_ids (List): token IDs produced by the tokenizer for the document text
offset_mapping (List): character offset pairs for each token ID
lowercase (bool): whether the text was lowercased before tokenization
"""
config = self.config
cntx_left = config.general['cntx_left']
cntx_right = config.general['cntx_right']
replace_center = config.general['replace_center']
# Should we annotate overlapping entities
if config.general['annotate_overlapping']:
ents = doc._.ents
else:
ents = doc.ents
samples = []
last_ind = 0
ent_id2ind = {} # Map from entity ID to its position in the samples array
for ent in sorted(ents, key=lambda ent: ent.start_char):
start = ent.start_char
end = ent.end_char
ind = 0
# Start where the last ent was found, cannot be before it as we've sorted
for ind, pair in enumerate(offset_mapping[last_ind:]):
if start >= pair[0] and start < pair[1]:
break
ind = last_ind + ind # If we did not start from 0 in the for loop
last_ind = ind
_start = max(0, ind - cntx_left)
_end = min(len(input_ids), ind + 1 + cntx_right)
tkns = input_ids[_start:_end]
cpos = cntx_left + min(0, ind - cntx_left)
if replace_center is not None:
if lowercase:
replace_center = replace_center.lower()
# We start from ind
s_ind = ind
e_ind = ind
for _ind, pair in enumerate(offset_mapping[ind:]):
if end > pair[0] and end <= pair[1]:
e_ind = _ind + ind
break
ln = e_ind - s_ind # Length of the concept in tokens
assert self.tokenizer is not None
tkns = tkns[:cpos] + self.tokenizer(replace_center)['input_ids'] + tkns[cpos + ln + 1:]
samples.append([tkns, cpos])
ent_id2ind[ent._.id] = len(samples) - 1
return ent_id2ind, samples
@staticmethod
def batch_generator(stream: Iterable[Doc], batch_size_chars: int) -> Iterable[List[Doc]]:
docs = []
char_count = 0
for doc in stream:
char_count += len(doc.text)
docs.append(doc)
if char_count < batch_size_chars:
continue
yield docs
docs = []
char_count = 0
# If there is anything left return that also
if len(docs) > 0:
yield docs
# Override
def pipe(self, stream: Iterable[Union[Doc, FakeDoc]], *args, **kwargs) -> Iterator[Doc]:
r""" Process many documents at once.
Args:
stream (Iterable[spacy.tokens.Doc]):
List of spacy documents.
"""
# Just in case
if stream is None or not stream:
return stream
config = self.config
id2category_value = {v: k for k, v in config.general['category_value2id'].items()}
batch_size_chars = config.general['pipe_batch_size_in_chars']
if config.general['device'] == 'cpu' or config.general['disable_component_lock']:
yield from self._set_meta_anns(stream, batch_size_chars, config, id2category_value)
else:
with MetaCAT._component_lock:
yield from self._set_meta_anns(stream, batch_size_chars, config, id2category_value)
def _set_meta_anns(self,
stream: Iterable[Union[Doc, FakeDoc]],
batch_size_chars: int,
config: ConfigMetaCAT,
id2category_value: Dict) -> Iterator[Optional[Doc]]:
for docs in self.batch_generator(stream, batch_size_chars):
try:
if not config.general['save_and_reuse_tokens'] or docs[0]._.share_tokens is None:
if config.general['lowercase']:
all_text = [doc.text.lower() for doc in docs]
else:
all_text = [doc.text for doc in docs]
assert self.tokenizer is not None
all_text_processed = self.tokenizer(all_text)
doc_ind2positions = {}
data: List = [] # The thing that goes into the model
for i, doc in enumerate(docs):
ent_id2ind, samples = self.prepare_document(doc, input_ids=all_text_processed[i]['input_ids'],
offset_mapping=all_text_processed[i]['offset_mapping'],
lowercase=config.general['lowercase'])
doc_ind2positions[i] = (len(data), len(data) + len(samples), ent_id2ind) # Needed so we know where is what in the big data array
data.extend(samples)
if config.general['save_and_reuse_tokens']:
doc._.share_tokens = (samples, doc_ind2positions[i])
else:
# This means another model has already processed the data and we can just use it. This is a
# dangerous option - as it assumes the other model has the same tokenizer and context size.
data = []
doc_ind2positions = {}
for i, doc in enumerate(docs):
data.extend(doc._.share_tokens[0])
doc_ind2positions[i] = doc._.share_tokens[1]
all_predictions, all_confidences = predict(self.model, data, config)
for i, doc in enumerate(docs):
start_ind, end_ind, ent_id2ind = doc_ind2positions[i]
predictions = all_predictions[start_ind:end_ind]
confidences = all_confidences[start_ind:end_ind]
if config.general['annotate_overlapping']:
ents = doc._.ents
else:
ents = doc.ents
for ent in ents:
ent_ind = ent_id2ind[ent._.id]
value = id2category_value[predictions[ent_ind]]
confidence = confidences[ent_ind]
if ent._.meta_anns is None:
ent._.meta_anns = {config.general['category_name']: {'value': value,
'confidence': float(confidence),
'name': config.general['category_name']}}
else:
ent._.meta_anns[config.general['category_name']] = {'value': value,
'confidence': float(confidence),
'name': config.general['category_name']}
yield from docs
except Exception as e:
self.get_error_handler()(self.name, self, docs, e)
yield from [None] * len(docs)
# Override
def __call__(self, doc: Doc) -> Doc:
""" Process one document, used in the spacy pipeline for sequential
document processing.
Args:
doc (spacy.tokens.Doc):
A spacy document
"""
# Just call the pipe method
doc = next(self.pipe(iter([doc])))
return doc
def get_model_card(self, as_dict: bool = False):
"""A minimal model card.
Args:
as_dict (bool): return the model card as a dictionary instead of a str.
Returns:
By default a str - indented JSON object.
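Example (a minimal sketch; assumes ``mc`` is a MetaCAT instance):
>>> print(mc.get_model_card())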
"""
card = {
'Category Name': self.config.general['category_name'],
'Description': self.config.general['description'],
'Classes': self.config.general['category_value2id'],
'Model': self.config.model['model_name']
}
if as_dict:
return card
else:
return json.dumps(card, indent=2, sort_keys=False)
def __repr__(self):
"""
Returns the model card for this MetaCAT instance.
Returns:
the 'Model Card' for this MetaCAT instance as an indented JSON string.
"""
return self.get_model_card(as_dict=False) | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/meta_cat.py | meta_cat.py |
import numpy as np
import pickle
from typing import Optional, List, Dict
class Vocab(object):
r''' Vocabulary used to store word embeddings for context similarity
calculation. Also used by the spell checker - but only for checking whether
something is spelled correctly, not for fixing it.
Properties:
vocab (dict):
Map from word to attributes, e.g. {'house': {'vec': <np.array>, 'cnt': <int>, ...}, ...}
index2word (dict):
From word to an index - used for negative sampling
vec_index2word (dict):
Same as index2word but only words that have vectors
unigram_table (numpy.ndarray):
Table used for negative sampling.
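Example (a minimal sketch; the path is a placeholder for a saved vocabulary):
>>> vocab = Vocab.load('vocab.dat')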
'''
def __init__(self) -> None:
self.vocab: Dict = {}
self.index2word: Dict = {}
self.vec_index2word: Dict = {}
self.unigram_table: np.ndarray = np.array([])
def inc_or_add(self, word: str, cnt: int = 1, vec: Optional[np.ndarray] = None):
r''' Add a word or increase its count.
Args:
word (str):
Word to be added
cnt (int):
By how much the count should be increased, or what it
should be set to if the word is new.
vec (numpy.ndarray):
Word vector
'''
if word not in self.vocab:
self.add_word(word, cnt, vec)
else:
self.inc_wc(word, cnt)
def remove_all_vectors(self) -> None:
r''' Remove all stored vector representations
'''
self.vec_index2word = {}
for word in self.vocab:
self.vocab[word]['vec'] = None
def remove_words_below_cnt(self, cnt: int) -> None:
r''' Remove all words with frequency below cnt.
Args:
cnt (int):
Word count limit.
'''
print("Words before removal: " + str(len(self.vocab)))
for word in list(self.vocab.keys()):
if self.vocab[word]['cnt'] < cnt:
del self.vocab[word]
print("Words after removal : " + str(len(self.vocab)))
# Rebuild index2word and vec_index2word
self.index2word = {}
self.vec_index2word = {}
for word in self.vocab.keys():
ind = len(self.index2word)
self.index2word[ind] = word
self.vocab[word]['ind'] = ind
if self.vocab[word]['vec'] is not None:
self.vec_index2word[ind] = word
def inc_wc(self, word: str, cnt: int = 1):
r''' Increase word count by cnt.
Args:
word (str):
For which word to increase the count
cnt (int):
By how much to increase the count
'''
self.item(word)['cnt'] += cnt
def add_vec(self, word: str, vec: np.ndarray) -> None:
r''' Add vector to a word.
Args:
word (str):
To which word to add the vector.
vec (numpy.ndarray):
The vector to add.
'''
self.vocab[word]['vec'] = vec
ind = self.vocab[word]['ind']
if ind not in self.vec_index2word:
self.vec_index2word[ind] = word
def reset_counts(self, cnt: int = 1) -> None:
r''' Reset the count for all words to cnt.
Args:
cnt (int):
New count for all words in the vocab.
'''
for word in self.vocab.keys():
self.vocab[word]['cnt'] = cnt
def update_counts(self, tokens: List[str]) -> None:
r''' Given a list of tokens update counts for words in the vocab.
Args:
tokens (List[str]):
Usually a large block of text split into tokens/words.
'''
for token in tokens:
if token in self:
self.inc_wc(token, 1)
def add_word(self, word: str, cnt: int = 1, vec: Optional[np.ndarray] = None, replace: bool = True) -> None:
"""Add a word to the vocabulary
Args:
word (str):
the word to be added, it should be lemmatized and lowercased
cnt (int):
count of this word in your dataset
vec (numpy.ndarray):
the vector representation of the word
replace (bool):
will replace old vector representation
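Example (a minimal sketch; the word, count and vector values are arbitrary placeholders):
>>> import numpy as np
>>> vocab = Vocab()
>>> vocab.add_word('house', cnt=34444, vec=np.random.rand(300))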
"""
if word not in self.vocab:
ind = len(self.index2word)
self.index2word[ind] = word
item = {'vec': vec, 'cnt': cnt, 'ind': ind}
self.vocab[word] = item
if vec is not None:
self.vec_index2word[ind] = word
elif replace and vec is not None:
self.vocab[word]['vec'] = vec
self.vocab[word]['cnt'] = cnt
# If this word didn't have a vector before
ind = self.vocab[word]['ind']
if ind not in self.vec_index2word:
self.vec_index2word[ind] = word
def add_words(self, path: str, replace: bool = True) -> None:
"""Adds words to the vocab from a file, the file
is required to have the following format (vec being optional):
<word>\t<cnt>[\t<vec_space_separated>]
e.g. one line: the word house with 3 dimensional vectors
house 34444 0.3232 0.123213 1.231231
Args:
path (str):
path to the file with words and vectors
replace (bool):
existing words in the vocabulary will be replaced
"""
with open(path) as f:
for line in f:
parts = line.split("\t")
word = parts[0]
cnt = int(parts[1].strip())
vec = None
if len(parts) == 3:
vec = np.array([float(x) for x in parts[2].strip().split(" ")])
self.add_word(word, cnt, vec, replace)
def make_unigram_table(self, table_size: int = 100000000) -> None:
r''' Make unigram table for negative sampling, look at the paper if interested
in details.
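Example (a minimal sketch; assumes ``vocab`` already holds words with vectors):
>>> vocab.make_unigram_table()
>>> negative_inds = vocab.get_negative_samples(n=6)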
'''
freqs = []
unigram_table = []
words = list(self.vec_index2word.values())
for word in words:
freqs.append(self[word])
freqs = np.array(freqs)
freqs = np.power(freqs, 3/4)
sm = np.sum(freqs)
for ind in self.vec_index2word.keys():
word = self.vec_index2word[ind]
f_ind = words.index(word)
p = freqs[f_ind] / sm
unigram_table.extend([ind] * int(p * table_size))
self.unigram_table = np.array(unigram_table)
def get_negative_samples(self, n: int = 6, ignore_punct_and_num: bool = False) -> List[int]:
r''' Get N negative samples.
Args:
n (int):
How many words to return
ignore_punct_and_num (bool):
When returning words, should we skip punctuation and numbers.
Returns:
inds (List[int]):
Indices for words in this vocabulary.
'''
if len(self.unigram_table) == 0:
raise Exception("No unigram table present, please run the function vocab.make_unigram_table() first.")
inds = np.random.randint(0, len(self.unigram_table), n)
inds = self.unigram_table[inds]
if ignore_punct_and_num:
# Do not return anything that does not have letters in it
inds = [ind for ind in inds if self.index2word[ind].upper().isupper()]
return inds
def __getitem__(self, word: str) -> int:
return self.count(word)
def vec(self, word: str) -> np.ndarray:
return self.vocab[word]['vec']
def count(self, word: str) -> int:
return self.vocab[word]['cnt']
def item(self, word: str) -> Dict:
return self.vocab[word]
def __contains__(self, word: str) -> bool:
if word in self.vocab:
return True
return False
def save(self, path: str) -> None:
with open(path, 'wb') as f:
pickle.dump(self.__dict__, f)
@classmethod
def load(cls, path: str) -> "Vocab":
with open(path, 'rb') as f:
vocab = cls()
vocab.__dict__ = pickle.load(f)
return vocab | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/vocab.py | vocab.py |
import types
import spacy
import gc
import logging
from typing import List, Optional, Union, Iterable, Callable
from multiprocessing import cpu_count
from spacy.tokens import Token, Doc, Span
from spacy.tokenizer import Tokenizer
from spacy.language import Language
from spacy.util import raise_error
from tqdm.autonotebook import tqdm
from medcat.linking.context_based_linker import Linker
from medcat.meta_cat import MetaCAT
from medcat.ner.vocab_based_ner import NER
from medcat.utils.normalizers import TokenNormalizer, BasicSpellChecker
from medcat.config import Config
from medcat.pipeline.pipe_runner import PipeRunner
from medcat.preprocessing.taggers import tag_skip_and_punct
from medcat.ner.transformers_ner import TransformersNER
class Pipe(object):
r""" A wrapper around the standard spacy pipeline.
Args:
tokenizer (`spacy.tokenizer.Tokenizer`):
What will be used to split text into tokens, can be anything built as a spacy tokenizer.
config (`medcat.config.Config`):
Global config for medcat.
Properties:
nlp (spacy.language.<lng>):
The base spacy NLP pipeline.
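Example:
A minimal sketch; assumes ``config`` is a medcat Config and a tokenizer
factory such as medcat.preprocessing.tokenizers.spacy_split_all (as used
when CAT builds its own pipeline).
>>> # pipe = Pipe(tokenizer=spacy_split_all, config=config)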
"""
# Add file and console handlers
log = logging.getLogger(__package__)
def __init__(self, tokenizer: Tokenizer, config: Config) -> None:
self._nlp = spacy.load(config.general['spacy_model'], disable=config.general['spacy_disabled_components'])
if config.preprocessing['stopwords'] is not None:
self._nlp.Defaults.stop_words = set(config.preprocessing['stopwords'])
self._nlp.tokenizer = tokenizer(self._nlp, config)
# Set max document length
self._nlp.max_length = config.preprocessing.get('max_document_length', 1000000)
self.config = config
# Set log level
self.log.setLevel(self.config.general['log_level'])
def add_tagger(self, tagger: Callable, name: Optional[str] = None, additional_fields: List[str] = []) -> None:
r""" Add any kind of a tagger for tokens.
Args:
tagger (`object/function`):
Any object/function that takes a spacy doc as an input, does something
and returns the same doc.
name (`str`):
Name for this component in the pipeline.
additional_fields (`List[str]`):
Fields to be added to the `_` properties of a token.
"""
component_factory_name = spacy.util.get_object_name(tagger)
name = name if name is not None else component_factory_name
Language.factory(name=component_factory_name, default_config={"config": self.config}, func=tagger)
self._nlp.add_pipe(component_factory_name, name=name, first=True)
# Add custom fields needed for this usecase
Token.set_extension('to_skip', default=False, force=True)
# Add any additional fields that are required
for field in additional_fields:
Token.set_extension(field, default=False, force=True)
def add_token_normalizer(self, config: Config, name: Optional[str] = None, spell_checker: Optional[BasicSpellChecker] = None) -> None:
token_normalizer = TokenNormalizer(config=config, spell_checker=spell_checker)
component_name = spacy.util.get_object_name(token_normalizer)
name = name if name is not None else component_name
Language.component(name=component_name, func=token_normalizer)
self._nlp.add_pipe(component_name, name=name, last=True)
# Add custom fields needed for this usecase
Token.set_extension('norm', default=None, force=True)
def add_ner(self, ner: NER, name: Optional[str] = None) -> None:
r""" Add NER from CAT to the pipeline, will also add the necessary fields
to the document and Span objects.
"""
component_name = spacy.util.get_object_name(ner)
name = name if name is not None else component_name
Language.component(name=component_name, func=ner)
self._nlp.add_pipe(component_name, name=name, last=True)
Doc.set_extension('ents', default=[], force=True)
Span.set_extension('confidence', default=-1, force=True)
Span.set_extension('id', default=0, force=True)
# Do not set this property if a vocabulary approach is not used; this name must
# refer to a name in name2cuis in the cdb.
Span.set_extension('detected_name', default=None, force=True)
Span.set_extension('link_candidates', default=None, force=True)
def add_linker(self, linker: Linker, name: Optional[str] = None) -> None:
r""" Add entity linker to the pipeline, will also add the necessary fields
to Span object.
linker (object/function):
Any object/function created based on the requirements for a spaCy pipeline component. Have
a look at https://spacy.io/usage/processing-pipelines#custom-components
"""
component_name = spacy.util.get_object_name(linker)
name = name if name is not None else component_name
Language.component(name=component_name, func=linker)
self._nlp.add_pipe(component_name, name=name, last=True)
Span.set_extension('cui', default=-1, force=True)
Span.set_extension('context_similarity', default=-1, force=True)
def add_meta_cat(self, meta_cat: MetaCAT, name: Optional[str] = None) -> None:
component_name = spacy.util.get_object_name(meta_cat)
name = name if name is not None else component_name
Language.component(name=component_name, func=meta_cat)
self._nlp.add_pipe(component_name, name=name, last=True)
# meta_anns is a dictionary like {category_name: value, ...}
Span.set_extension('meta_anns', default=None, force=True)
# Used for sharing pre-processed data/tokens
Doc.set_extension('share_tokens', default=None, force=True)
def add_addl_ner(self, addl_ner: TransformersNER, name: Optional[str] = None) -> None:
component_name = spacy.util.get_object_name(addl_ner)
name = name if name is not None else component_name
Language.component(name=component_name, func=addl_ner)
self._nlp.add_pipe(component_name, name=name, last=True)
Doc.set_extension('ents', default=[], force=True)
Span.set_extension('confidence', default=-1, force=True)
Span.set_extension('id', default=0, force=True)
Span.set_extension('cui', default=-1, force=True)
Span.set_extension('context_similarity', default=-1, force=True)
Span.set_extension('detected_name', default=None, force=True)
def batch_multi_process(self,
texts: Iterable[str],
n_process: Optional[int] = None,
batch_size: Optional[int] = None) -> Iterable[Doc]:
r""" Batch process a list of texts in parallel.
Args:
texts (`Iterable[str]`):
The input sequence of texts to process.
n_process (`int`):
The number of processes running in parallel. Defaults to max(mp.cpu_count() - 1, 1).
batch_size (`int`):
The number of texts to buffer. Defaults to 1000.
Return:
Generator[Doc]:
The output sequence of spacy documents with the extracted entities
"""
instance_name = "ensure_serializable"
try:
self._nlp.get_pipe(instance_name)
except KeyError:
component_name = spacy.util.get_object_name(self._ensure_serializable)
Language.component(name=component_name, func=self._ensure_serializable)
self._nlp.add_pipe(component_name, name=instance_name, last=True)
n_process = n_process if n_process is not None else max(cpu_count() - 1, 1)
batch_size = batch_size if batch_size is not None else 1000
# If n_process < 0, multiprocessing will either be conducted inside pipeline components based on the config (when
# 'parallel' is set to True) or not happen at all (when 'parallel' is set to False). Otherwise, multiprocessing
# will be conducted at the pipeline level, i.e., texts will be processed sequentially by each pipeline component.
if n_process < 0:
inner_parallel = True
n_process = 1
else:
inner_parallel = False
component_cfg = {
tag_skip_and_punct.name: { # type: ignore
'parallel': inner_parallel
},
TokenNormalizer.name: {
'parallel': inner_parallel
},
NER.name: {
'parallel': inner_parallel
},
Linker.name: {
'parallel': inner_parallel
}
}
return self._nlp.pipe(texts, # type: ignore
n_process=n_process,
batch_size=batch_size,
component_cfg=component_cfg)
def set_error_handler(self, error_handler: Callable) -> None:
self._nlp.set_error_handler(error_handler)
def reset_error_handler(self) -> None:
self._nlp.set_error_handler(raise_error)
def force_remove(self, component_name: str) -> None:
try:
self._nlp.remove_pipe(component_name)
except ValueError:
pass
def destroy(self) -> None:
del self._nlp
gc.collect()
@property
def spacy_nlp(self) -> Language:
""" The spaCy Language object
"""
return self._nlp
@staticmethod
def _ensure_serializable(doc: Doc) -> Doc:
return PipeRunner.serialize_entities(doc)
def __call__(self, text: Union[str, Iterable[str]]) -> Union[Doc, List[Doc]]:
if isinstance(text, str):
return self._nlp(text) if len(text) > 0 else None
elif isinstance(text, Iterable):
docs = []
for t in text if isinstance(text, types.GeneratorType) else tqdm(text, total=len(list(text))):
try:
doc = self._nlp(t) if isinstance(t, str) and len(t) > 0 else None
except Exception as e:
self.log.warning("Exception raised when processing text: %s", t[:50] + "..." if isinstance(t, str) else t)
self.log.warning(e, exc_info=True, stack_info=True)
doc = None
docs.append(doc)
return docs
else:
self.log.error("The input text should be either a string or a sequence of strings but got: %s", type(text))
return None | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/pipe.py | pipe.py |
import logging
import gc
from joblib import Parallel, delayed
from typing import Iterable, Generator, Tuple, Callable, Union, Iterator
from spacy.tokens import Doc, Span
from spacy.tokens.underscore import Underscore
from spacy.pipeline import Pipe
from spacy.util import minibatch
class PipeRunner(Pipe):
log = logging.getLogger(__name__)
_execute = None
_delayed = None
_time_out_in_secs = 3600
def __init__(self, workers: int):
self.workers = workers
def __call__(self, doc: Doc):
raise NotImplementedError("Method __call__ has not been implemented.")
# Override
def pipe(self, stream: Iterable[Doc], batch_size: int, **kwargs) -> Union[Generator[Doc, None, None], Iterator[Doc]]:
error_handler = self.get_error_handler()
if kwargs.get("parallel", False):
PipeRunner._execute, PipeRunner._delayed = self._lazy_init_pool()
for docs in minibatch(stream, size=self.workers):
docs = [PipeRunner.serialize_entities(doc) for doc in docs]
try:
tasks = (PipeRunner._delayed(self.__call__, doc, Underscore.get_state()) for doc in docs)
for output_doc in PipeRunner._execute(tasks):
yield PipeRunner.deserialize_entities(output_doc)
except Exception as e:
error_handler(self.name, self, docs, e)
yield from [None] * len(docs)
else:
for doc in stream:
try:
yield self(doc)
except Exception as e:
error_handler(self.name, self, [doc], e)
yield None
@staticmethod
def serialize_entities(doc: Doc):
new_ents = []
for ent in doc._.ents:
serializable = {
"start": ent.start,
"end": ent.end,
"label": ent.label_,
"cui": ent._.cui,
"detected_name": ent._.detected_name,
"context_similarity": ent._.context_similarity,
"link_candidates": ent._.link_candidates,
"confidence": ent._.confidence,
"id": ent._.id
}
if hasattr(ent._, 'meta_anns') and ent._.meta_anns:
serializable['meta_anns'] = ent._.meta_anns
new_ents.append(serializable)
doc._.ents.clear()
gc.collect()
doc._.ents = new_ents
return doc
@staticmethod
def deserialize_entities(doc: Doc):
new_ents = []
for ent in doc._.ents:
ent_span = Span(doc, ent['start'], ent['end'], label=ent['label'])
ent_span._.cui = ent['cui']
ent_span._.detected_name = ent['detected_name']
ent_span._.context_similarity = ent['context_similarity']
ent_span._.link_candidates = ent['link_candidates']
ent_span._.confidence = ent['confidence']
ent_span._.id = ent['id']
if 'meta_anns' in ent:
ent_span._.meta_anns = ent['meta_anns']
new_ents.append(ent_span)
doc._.ents.clear()
gc.collect()
doc._.ents = new_ents
return doc
@staticmethod
def _run_pipe_on_one(call: Callable, doc: Doc, underscore_state: Tuple) -> Doc:
Underscore.load_state(underscore_state)
doc = PipeRunner.deserialize_entities(doc)
doc = call(doc)
doc = PipeRunner.serialize_entities(doc)
return doc
def _lazy_init_pool(self) -> Tuple:
if PipeRunner._execute is None or self.workers > PipeRunner._execute.n_jobs:
PipeRunner._execute = Parallel(n_jobs=self.workers, timeout=PipeRunner._time_out_in_secs)
if PipeRunner._delayed is None:
PipeRunner._delayed = delayed(PipeRunner._run_pipe_on_one)
return PipeRunner._execute, PipeRunner._delayed | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/pipeline/pipe_runner.py | pipe_runner.py |
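# Hedged sketch (illustration only): PipeRunner.pipe(parallel=True) above fans work out
# with joblib's Parallel/delayed and serialises the custom Span extensions to plain dicts
# so they survive pickling. The snippet shows the same Parallel/delayed pattern on plain
# dicts; `fake_component` is a hypothetical stand-in for a pipeline component.
from joblib import Parallel, delayed
def fake_component(doc: dict) -> dict:
    doc['processed'] = True
    return doc
if __name__ == '__main__':
    docs = [{'text': t} for t in ('a', 'b', 'c')]
    execute = Parallel(n_jobs=2)
    tasks = (delayed(fake_component)(d) for d in docs)
    print(execute(tasks))  # -> three dicts, each with 'processed': True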
import numpy as np
import logging
from typing import Tuple, Dict, List, Union
from spacy.tokens import Span, Doc
from medcat.utils.matutils import unitvec
from medcat.utils.filters import check_filters
from medcat.cdb import CDB
from medcat.vocab import Vocab
from medcat.config import Config
import random
class ContextModel(object):
    r''' Used to learn embeddings for concepts and to calculate context similarities in new documents.
    Args:
        cdb (medcat.cdb.CDB):
            The concept database.
        vocab (medcat.vocab.Vocab):
            Vocabulary with word embeddings.
        config (medcat.config.Config):
            Global medcat config.
    '''
log = logging.getLogger(__name__)
def __init__(self, cdb: CDB, vocab: Vocab, config: Config) -> None:
self.cdb = cdb
self.vocab = vocab
self.config = config
def get_context_tokens(self, entity: Span, doc: Doc, size: int) -> Tuple:
r''' Get context tokens for an entity, this will skip anything that
is marked as skip in token._.to_skip
Args:
entity
doc
size
'''
start_ind = entity[0].i
end_ind = entity[-1].i
tokens_left = [tkn for tkn in doc[max(0, start_ind-size):start_ind] if not tkn._.to_skip
and not tkn.is_stop and not tkn.is_digit and not tkn.is_punct]
# Reverse because the first token should be the one closest to center
tokens_left.reverse()
tokens_center = list(entity)
tokens_right = [tkn for tkn in doc[end_ind+1:end_ind + 1 + size] if not tkn._.to_skip
and not tkn.is_stop and not tkn.is_digit and not tkn.is_punct]
return tokens_left, tokens_center, tokens_right
def get_context_vectors(self, entity: Span, doc: Doc, cui=None) -> Dict:
r''' Given an entity and the document it will return the context representation for the
given entity.
Args:
entity
doc
'''
vectors = {}
for context_type in self.config.linking['context_vector_sizes'].keys():
size = self.config.linking['context_vector_sizes'][context_type]
tokens_left, tokens_center, tokens_right = self.get_context_tokens(entity, doc, size)
values = []
# Add left
values.extend([self.config.linking['weighted_average_function'](step) * self.vocab.vec(tkn.lower_)
for step, tkn in enumerate(tokens_left) if tkn.lower_ in self.vocab and self.vocab.vec(tkn.lower_) is not None])
if not self.config.linking['context_ignore_center_tokens']:
# Add center
if cui is not None and random.random() > self.config.linking['random_replacement_unsupervised'] and self.cdb.cui2names.get(cui, []):
new_tokens_center = random.choice(list(self.cdb.cui2names[cui])).split(self.config.general['separator'])
values.extend([self.vocab.vec(tkn) for tkn in new_tokens_center if tkn in self.vocab and self.vocab.vec(tkn) is not None])
else:
values.extend([self.vocab.vec(tkn.lower_) for tkn in tokens_center if tkn.lower_ in self.vocab and self.vocab.vec(tkn.lower_) is not None])
# Add right
values.extend([self.config.linking['weighted_average_function'](step) * self.vocab.vec(tkn.lower_)
for step, tkn in enumerate(tokens_right) if tkn.lower_ in self.vocab and self.vocab.vec(tkn.lower_) is not None])
if len(values) > 0:
value = np.average(values, axis=0)
vectors[context_type] = value
return vectors
def similarity(self, cui: str, entity: Span, doc: Doc) -> float:
r''' Calculate the similarity between the learnt context for this CUI and the context
in the given `doc`.
Args:
cui
entity
doc
'''
vectors = self.get_context_vectors(entity, doc)
sim = self._similarity(cui, vectors)
return sim
def _similarity(self, cui: str, vectors: Dict) -> float:
r''' Calculate similarity once we have vectors and a cui.
Args:
cui
vectors
'''
cui_vectors = self.cdb.cui2context_vectors.get(cui, {})
if cui_vectors and self.cdb.cui2count_train[cui] >= self.config.linking['train_count_threshold']:
similarity = 0
for context_type in self.config.linking['context_vector_weights']:
# Can be that a certain context_type does not exist for a cui/context
if context_type in vectors and context_type in cui_vectors:
weight = self.config.linking['context_vector_weights'][context_type]
s = np.dot(unitvec(vectors[context_type]), unitvec(cui_vectors[context_type]))
similarity += weight * s
# DEBUG
self.log.debug("Similarity for CUI: %s, Count: %s, Context Type: %.10s, Weight: %s.2f, Similarity: %s.3f, S*W: %s.3f",
cui, self.cdb.cui2count_train[cui], context_type, weight, s, s*weight)
return similarity
else:
return -1
def disambiguate(self, cuis: List, entity: Span, name: str, doc: Doc) -> Tuple:
vectors = self.get_context_vectors(entity, doc)
filters = self.config.linking['filters']
        # When used from the trainer we want to filter concepts before disambiguation
        # (required for correct behaviour).
if self.config.linking['filter_before_disamb']:
# DEBUG
self.log.debug("Is trainer, subsetting CUIs")
self.log.debug("CUIs before: %s", cuis)
cuis = [cui for cui in cuis if check_filters(cui, filters)]
# DEBUG
self.log.debug("CUIs after: %s", cuis)
if cuis: # Maybe none are left after filtering
# Calculate similarity for each cui
similarities = [self._similarity(cui, vectors) for cui in cuis]
# DEBUG
self.log.debug("Similarities: %s", [(sim, cui) for sim, cui in zip(cuis, similarities)])
# Prefer primary
if self.config.linking.get('prefer_primary_name', 0) > 0:
self.log.debug("Preferring primary names")
for i, cui in enumerate(cuis):
if similarities[i] > 0:
if self.cdb.name2cuis2status.get(name, {}).get(cui, '') in {'P', 'PD'}:
old_sim = similarities[i]
similarities[i] = min(0.99, similarities[i] + similarities[i] * self.config.linking.get('prefer_primary_name', 0))
# DEBUG
self.log.debug("CUI: %s, Name: %s, Old sim: %.3f, New sim: %.3f", cui, name, old_sim, similarities[i])
if self.config.linking.get('prefer_frequent_concepts', 0) > 0:
self.log.debug("Preferring frequent concepts")
#Prefer frequent concepts
cnts = [self.cdb.cui2count_train.get(cui, 0) for cui in cuis]
m = min(cnts) if min(cnts) > 0 else 1
scales = [np.log10(cnt/m)*self.config.linking.get('prefer_frequent_concepts', 0) if cnt > 10 else 0 for cnt in cnts]
similarities = [min(0.99, sim + sim*scales[i]) for i, sim in enumerate(similarities)]
            # Pick the candidate with the highest similarity
            mx = np.argmax(similarities)
return cuis[mx], similarities[mx]
else:
return None, 0
def train(self, cui: str, entity: Span, doc: Doc, negative: bool = False, names: Union[List[str], Dict] = []) -> None:
        r''' Update the context representation for this CUI, given its correct location (entity)
in a document (doc).
Args:
names (List[str]/Dict):
Optionally used to update the `status` of a name-cui pair in the CDB.
'''
# Context vectors to be calculated
if len(entity) > 0: # Make sure there is something
vectors = self.get_context_vectors(entity, doc, cui=cui)
self.cdb.update_context_vector(cui=cui, vectors=vectors, negative=negative)
# Debug
self.log.debug("Updating CUI: %s with negative=%s", cui, negative)
if not negative:
# Update the name count, if possible
                if isinstance(entity, Span):
self.cdb.name2count_train[entity._.detected_name] = self.cdb.name2count_train.get(entity._.detected_name, 0) + 1
if self.config.linking.get('calculate_dynamic_threshold', False):
# Update average confidence for this CUI
sim = self.similarity(cui, entity, doc)
self.cdb.update_cui2average_confidence(cui=cui, new_sim=sim)
if negative:
# Change the status of the name so that it has to be disambiguated always
for name in names:
if self.cdb.name2cuis2status.get(name, {}).get(cui, '') == 'P':
# Set this name to always be disambiguated, even though it is primary
self.cdb.name2cuis2status.get(name, {})[cui] = 'PD'
# Debug
self.log.debug("Updating status for CUI: %s, name: %s to <PD>", cui, name)
elif self.cdb.name2cuis2status.get(name, {}).get(cui, '') == 'A':
# Set this name to always be disambiguated instead of A
self.cdb.name2cuis2status.get(name, {})[cui] = 'N'
self.log.debug("Updating status for CUI: %s, name: %s to <N>", cui, name)
if not negative and self.config.linking.get('devalue_linked_concepts', False):
#Find what other concepts can be disambiguated against this one
_cuis = set()
for name in self.cdb.cui2names[cui]:
_cuis.update(self.cdb.name2cuis.get(name, []))
# Remove the cui of the current concept
_cuis = _cuis - {cui}
for _cui in _cuis:
self.cdb.update_context_vector(cui=_cui, vectors=vectors, negative=True)
self.log.debug("Devalued via names.\n\tBase cui: %s \n\tTo be devalued: %s\n", cui, _cuis)
else:
self.log.warning("The provided entity for cui <%s> was empty, nothing to train", cui)
def train_using_negative_sampling(self, cui: str) -> None:
vectors = {}
# Get vectors for each context type
for context_type in self.config.linking['context_vector_sizes'].keys():
size = self.config.linking['context_vector_sizes'][context_type]
# While it should be size*2 it is already too many negative examples, so we leave it at size
inds = self.vocab.get_negative_samples(size, ignore_punct_and_num=self.config.linking['negative_ignore_punct_and_num'])
values = [self.vocab.vec(self.vocab.index2word[ind]) for ind in inds]
if len(values) > 0:
vectors[context_type] = np.average(values, axis=0)
# Debug
self.log.debug("Updating CUI: %s, with %s negative words", cui, len(inds))
# Do the update for all context types
self.cdb.update_context_vector(cui=cui, vectors=vectors, negative=True) | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/linking/vector_context_model.py | vector_context_model.py |
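# Hedged sketch (illustration only): _similarity above combines one cosine similarity per
# context type with the weights from config.linking['context_vector_weights']. A minimal
# numpy re-statement; the context type names and weights below are hypothetical.
import numpy as np
def _unitvec(v: np.ndarray) -> np.ndarray:
    return v / np.linalg.norm(v)
def combined_similarity(doc_vecs: dict, cui_vecs: dict, weights: dict) -> float:
    sim = 0.0
    for context_type, weight in weights.items():
        if context_type in doc_vecs and context_type in cui_vecs:
            sim += weight * float(np.dot(_unitvec(doc_vecs[context_type]), _unitvec(cui_vecs[context_type])))
    return sim
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    d = {'short': rng.normal(size=300), 'long': rng.normal(size=300)}
    c = {'short': d['short'] + 0.1 * rng.normal(size=300), 'long': rng.normal(size=300)}
    print(combined_similarity(d, c, {'short': 0.6, 'long': 0.4}))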
import random
import logging
from spacy.tokens import Span, Doc
from typing import Dict
from medcat.utils.filters import check_filters
from medcat.linking.vector_context_model import ContextModel
from medcat.pipeline.pipe_runner import PipeRunner
from medcat.cdb import CDB
from medcat.vocab import Vocab
from medcat.config import Config
from medcat.utils.postprocessing import map_ents_to_groups, make_pretty_labels, create_main_ann, LabelStyle
class Linker(PipeRunner):
    r''' Link detected entities to concepts in a biomedical concept database (CDB).
    Args:
        cdb (medcat.cdb.CDB):
            The concept database.
        vocab (medcat.vocab.Vocab):
            Vocabulary with word embeddings.
        config (medcat.config.Config):
            Global medcat config.
    '''
log = logging.getLogger(__name__)
# Custom pipeline component name
name = 'cat_linker'
# Override
def __init__(self, cdb: CDB, vocab: Vocab, config: Config) -> None:
self.cdb = cdb
self.vocab = vocab
self.config = config
self.context_model = ContextModel(self.cdb, self.vocab, self.config)
# Counter for how often did a pair (name,cui) appear and was used during training
self.train_counter: Dict = {}
super().__init__(self.config.general['workers'])
def _train(self, cui: str, entity: Span, doc: Doc, add_negative: bool = True) -> None:
name = "{} - {}".format(entity._.detected_name, cui)
""" TODO: Disable for now
if self.train_counter.get(name, 0) > self.config.linking['subsample_after']:
if random.random() < 1 / math.sqrt(self.train_counter.get(name) - self.config.linking['subsample_after']):
self.context_model.train(cui, entity, doc, negative=False)
if add_negative and self.config.linking['negative_probability'] >= random.random():
self.context_model.train_using_negative_sampling(cui)
self.train_counter[name] = self.train_counter.get(name, 0) + 1
else:
"""
# Always train
self.context_model.train(cui, entity, doc, negative=False)
if add_negative and self.config.linking['negative_probability'] >= random.random():
self.context_model.train_using_negative_sampling(cui)
self.train_counter[name] = self.train_counter.get(name, 0) + 1
# Override
def __call__(self, doc: Doc) -> Doc:
        r''' Annotate the document: either train the context model on the detected entities
        or link each detected entity to the best matching CUI.
        '''
doc.ents = [] # Reset main entities, will be recreated later
cnf_l = self.config.linking
linked_entities = []
if cnf_l["train"]:
# Run training
for entity in doc._.ents:
# Check does it have a detected name
if entity._.detected_name is not None:
name = entity._.detected_name
cuis = entity._.link_candidates
if len(name) >= cnf_l['disamb_length_limit']:
if len(cuis) == 1:
                            # N - means the name must always be disambiguated: it is not the preferred
                            # name of the concept and links to other concepts as well.
if self.cdb.name2cuis2status[name][cuis[0]] != 'N':
self._train(cui=cuis[0], entity=entity, doc=doc)
entity._.cui = cuis[0]
entity._.context_similarity = 1
linked_entities.append(entity)
else:
for cui in cuis:
if self.cdb.name2cuis2status[name][cui] in {'P', 'PD'}:
self._train(cui=cui, entity=entity, doc=doc)
# It should not be possible that one name is 'P' for two CUIs,
#but it can happen - and we do not care.
entity._.cui = cui
entity._.context_similarity = 1
linked_entities.append(entity)
else:
for entity in doc._.ents:
self.log.debug("Linker started with entity: %s", entity)
# Check does it have a detected name
if entity._.link_candidates is not None:
if entity._.detected_name is not None:
name = entity._.detected_name
cuis = entity._.link_candidates
if len(cuis) > 0:
do_disambiguate = False
if len(name) < cnf_l['disamb_length_limit']:
do_disambiguate = True
elif len(cuis) == 1 and self.cdb.name2cuis2status[name][cuis[0]] in {'N', 'PD'}:
# PD means it is preferred but should still be disambiguated and N is disamb always
do_disambiguate = True
elif len(cuis) > 1:
do_disambiguate = True
if do_disambiguate:
cui, context_similarity = self.context_model.disambiguate(cuis, entity, name, doc)
else:
cui = cuis[0]
if self.config.linking['always_calculate_similarity']:
context_similarity = self.context_model.similarity(cui, entity, doc)
else:
context_similarity = 1 # Direct link, no care for similarity
else:
# No name detected, just disambiguate
cui, context_similarity = self.context_model.disambiguate(entity._.link_candidates, entity, 'unk-unk', doc)
# Add the annotation if it exists and if above threshold and in filters
if cui and check_filters(cui, self.config.linking['filters']):
th_type = self.config.linking.get('similarity_threshold_type', 'static')
if (th_type == 'static' and context_similarity >= self.config.linking['similarity_threshold']) or \
(th_type == 'dynamic' and context_similarity >= self.cdb.cui2average_confidence[cui] * self.config.linking['similarity_threshold']):
entity._.cui = cui
entity._.context_similarity = context_similarity
linked_entities.append(entity)
doc._.ents = linked_entities
create_main_ann(self.cdb, doc)
if self.config.general['make_pretty_labels'] is not None:
make_pretty_labels(self.cdb, doc, LabelStyle[self.config.general['make_pretty_labels']])
if self.config.general['map_cui_to_group'] is not None and self.cdb.addl_info.get('cui2group', {}):
map_ents_to_groups(self.cdb, doc)
return doc | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/linking/context_based_linker.py | context_based_linker.py |
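# Hedged sketch (illustration only): the Linker above keeps a candidate only if its context
# similarity clears either a static threshold or a dynamic one scaled by the CUI's average
# training confidence. The helper below re-states that acceptance rule on plain floats.
def passes_threshold(similarity: float, threshold: float, threshold_type: str = 'static',
                     cui_average_confidence: float = 1.0) -> bool:
    if threshold_type == 'static':
        return similarity >= threshold
    if threshold_type == 'dynamic':
        return similarity >= cui_average_confidence * threshold
    raise ValueError(f"Unknown threshold type: {threshold_type}")
if __name__ == '__main__':
    assert passes_threshold(0.3, 0.25)
    assert not passes_threshold(0.3, 0.25, 'dynamic', cui_average_confidence=1.5)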
import os
from abc import ABC, abstractmethod
from typing import List, Dict, Optional, Union, overload
from tokenizers import Tokenizer, ByteLevelBPETokenizer
from transformers.models.bert.tokenization_bert_fast import BertTokenizerFast
class TokenizerWrapperBase(ABC):
name: str
def __init__(self, hf_tokenizer: Optional[Tokenizer] = None) -> None:
self.hf_tokenizers = hf_tokenizer
@overload
def __call__(self, text: str) -> Dict: ...
@overload
def __call__(self, text: List[str]) -> List[Dict]: ...
@abstractmethod
def __call__(self, text: Union[str, List[str]]) -> Union[Dict, List[Dict]]: ...
@abstractmethod
def save(self, dir_path: str) -> None: ...
@classmethod
@abstractmethod
def load(cls, dir_path: str, **kwargs) -> Tokenizer: ...
@abstractmethod
def get_size(self) -> int: ...
@abstractmethod
def token_to_id(self, token: str) -> Union[int, List[int]]: ...
@abstractmethod
def get_pad_id(self) -> Union[Optional[int], List[int]]: ...
def ensure_tokenizer(self) -> Tokenizer:
if self.hf_tokenizers is None:
raise ValueError("The tokenizer is not loaded yet")
return self.hf_tokenizers
class TokenizerWrapperBPE(TokenizerWrapperBase):
''' Wrapper around a huggingface tokenizer so that it works with the
MetaCAT models.
Args:
hf_tokenizers (`tokenizers.ByteLevelBPETokenizer`):
A huggingface BBPE tokenizer.
'''
name = 'bbpe'
def __init__(self, hf_tokenizers: Optional[ByteLevelBPETokenizer] = None) -> None:
super().__init__(hf_tokenizers)
if self.hf_tokenizers is not None:
            # Added tokens do not persist with this tokenizer, so the padding token is (re-)added here.
self.hf_tokenizers.add_tokens(['<PAD>'])
@overload
def __call__(self, text: str) -> Dict: ...
@overload
def __call__(self, text: List[str]) -> List[Dict]: ...
def __call__(self, text: Union[str, List[str]]) -> Union[Dict, List[Dict]]:
r''' Tokenize some text
Args:
text (`Union(str, List[str])`):
Text/texts to be tokenized.
Returns:
res (`Union(dict, List[dict])`):
Dictionary/ies containing `offset_mapping`, `input_ids` and `tokens` corresponding to the
input text/s.
'''
self.hf_tokenizers = self.ensure_tokenizer()
if isinstance(text, str):
result = self.hf_tokenizers.encode(text)
return {'offset_mapping': result.offsets,
'input_ids': result.ids,
'tokens': result.tokens,
}
elif isinstance(text, list):
results = self.hf_tokenizers.encode_batch(text)
output = []
for result in results:
output.append({'offset_mapping': result.offsets,
'input_ids': result.ids,
'tokens': result.tokens,
})
return output
else:
raise Exception("Unsuported input type, supported: text/list, but got: {}".format(type(text)))
def save(self, dir_path: str) -> None:
self.hf_tokenizers = self.ensure_tokenizer()
if self.hf_tokenizers is None:
raise ValueError("The tokenizer is not loaded yet")
self.hf_tokenizers.save_model(dir_path, prefix=self.name)
@classmethod
def load(cls, dir_path: str, **kwargs) -> "TokenizerWrapperBPE":
tokenizer = cls()
vocab_file = os.path.join(dir_path, f'{tokenizer.name}-vocab.json')
merges_file = os.path.join(dir_path, f'{tokenizer.name}-merges.txt')
tokenizer.hf_tokenizers = ByteLevelBPETokenizer.from_file(vocab_filename=vocab_file,
merges_filename=merges_file,
**kwargs)
        # Added tokens do not persist with this tokenizer, so the padding token is re-added on each load
tokenizer.hf_tokenizers.add_tokens(['<PAD>'])
return tokenizer
def get_size(self) -> int:
self.hf_tokenizers = self.ensure_tokenizer()
return self.hf_tokenizers.get_vocab_size()
def token_to_id(self, token: str) -> Union[int, List[int]]:
self.hf_tokenizers = self.ensure_tokenizer()
return self.hf_tokenizers.token_to_id(token)
def get_pad_id(self) -> Union[int, List[int]]:
pad = self.token_to_id('<PAD>')
if pad is None:
raise Exception("No <PAD> token in the vocabulary of the tokenizer, please add it")
return pad
class TokenizerWrapperBERT(TokenizerWrapperBase):
''' Wrapper around a huggingface BERT tokenizer so that it works with the
MetaCAT models.
Args:
hf_tokenizers (`transformers.models.bert.tokenization_bert_fast.BertTokenizerFast`):
            A huggingface fast BERT tokenizer.
'''
name = 'bert-tokenizer'
def __init__(self, hf_tokenizers: Optional[BertTokenizerFast] = None) -> None:
super().__init__(hf_tokenizers)
@overload
def __call__(self, text: str) -> Dict: ...
@overload
def __call__(self, text: List[str]) -> List[Dict]: ...
def __call__(self, text: Union[str, List[str]]) -> Union[Dict, List[Dict]]:
self.hf_tokenizers = self.ensure_tokenizer()
if isinstance(text, str):
result = self.hf_tokenizers.encode_plus(text, return_offsets_mapping=True,
add_special_tokens=False)
return {'offset_mapping': result['offset_mapping'],
'input_ids': result['input_ids'],
'tokens': self.hf_tokenizers.convert_ids_to_tokens(result['input_ids']),
}
elif isinstance(text, list):
results = self.hf_tokenizers._batch_encode_plus(text, return_offsets_mapping=True,
add_special_tokens=False)
output = []
for ind in range(len(results['input_ids'])):
output.append({'offset_mapping': results['offset_mapping'][ind],
'input_ids': results['input_ids'][ind],
'tokens': self.hf_tokenizers.convert_ids_to_tokens(results['input_ids'][ind]),
})
return output
else:
raise Exception("Unsuported input type, supported: text/list, but got: {}".format(type(text)))
def save(self, dir_path: str) -> None:
self.hf_tokenizers = self.ensure_tokenizer()
path = os.path.join(dir_path, self.name)
self.hf_tokenizers.save_pretrained(path)
@classmethod
def load(cls, dir_path: str, **kwargs) -> "TokenizerWrapperBERT":
tokenizer = cls()
path = os.path.join(dir_path, cls.name)
tokenizer.hf_tokenizers = BertTokenizerFast.from_pretrained(path, **kwargs)
return tokenizer
def get_size(self) -> int:
self.hf_tokenizers = self.ensure_tokenizer()
return len(self.hf_tokenizers.vocab)
def token_to_id(self, token: str) -> Union[int, List[int]]:
self.hf_tokenizers = self.ensure_tokenizer()
return self.hf_tokenizers.convert_tokens_to_ids(token)
def get_pad_id(self) -> Optional[int]:
self.hf_tokenizers = self.ensure_tokenizer()
return self.hf_tokenizers.pad_token_id | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/tokenizers/meta_cat_tokenizers.py | meta_cat_tokenizers.py |
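# Hedged usage sketch (illustration only; requires the 'bert-base-uncased' weights to be
# available locally or downloadable): both wrapper classes expose the same __call__
# contract, returning 'offset_mapping', 'input_ids' and 'tokens' for a string or a list.
if __name__ == '__main__':
    hf = BertTokenizerFast.from_pretrained('bert-base-uncased')
    wrapper = TokenizerWrapperBERT(hf)
    single = wrapper('Patient denies chest pain')
    print(single['tokens'], single['input_ids'][:5], single['offset_mapping'][:5])
    batch = wrapper(['chest pain', 'no acute distress'])
    print(len(batch))  # -> 2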
import dill
from typing import Optional, Dict
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
class TransformersTokenizerNER(object):
r'''
Args:
hf_tokenizer
Must be able to return token offsets
max_len:
Max sequence length, if longer it will be split into multiple examples
id2type:
            Can be ignored in most cases. A map from token id to 'start' or 'sub', indicating whether the
            token is a subword or the start of a full word. For BERT, 'start' is every token that does not begin with ##.
cui2name:
Map from CUI to full name for labels.
'''
def __init__(self,
hf_tokenizer: Optional[PreTrainedTokenizerBase] = None,
max_len: int = 512,
id2type: Optional[Dict] = None,
cui2name: Optional[Dict] = None) -> None:
self.hf_tokenizer = hf_tokenizer
self.max_len = max_len
self.label_map = {'O': 0, 'X': 1} # We'll keep the 'X' in case id2type is provided
self.id2type = id2type
self.cui2name = cui2name
def calculate_label_map(self, dataset) -> None:
for cuis in dataset['ent_cuis']:
for cui in cuis:
if cui not in self.label_map:
self.label_map[cui] = len(self.label_map)
def encode(self, examples: Dict, ignore_subwords: bool = False) -> Dict:
r''' Used with huggingface datasets map function to convert medcat_ner dataset into the
appropriate form for NER with BERT. It will split long text segments into max_len sequences.
Args:
examples:
Stream of examples
ignore_subwords:
If set to `True` subwords of any token will get the special label `X`
'''
self.hf_tokenizer = self.ensure_tokenizer()
old_ids = examples['id']
old_names = examples['name']
examples['input_ids'] = []
examples['labels'] = []
examples['id'] = []
examples['name'] = []
for _ind, example in enumerate(zip(examples['text'], examples['ent_starts'],
examples['ent_ends'], examples['ent_cuis'])):
tokens = self.hf_tokenizer(example[0], return_offsets_mapping=True, add_special_tokens=False)
entities = [(start, end, cui) for start, end, cui in zip(example[1],
example[2], example[3])]
entities.sort(key=lambda x: x[0])
input_ids = []
labels = []
tkn_part_of_entity = False
for ind in range(len(tokens['offset_mapping'])):
offset = tokens['offset_mapping'][ind]
input_ids.append(tokens['input_ids'][ind])
if entities and (offset[0] >= entities[0][0] and offset[1] <= entities[0][1]):
# Means this token is part of entity at position 0
tkn_part_of_entity = True
if not ignore_subwords or (self.id2type is not None and self.id2type[tokens['input_ids'][ind]] == 'start'):
labels.append(self.label_map[entities[0][2]])
else:
labels.append(self.label_map['X'])
if entities[0][1] <= offset[1]:
# If it is the last token of the entity, remove the entity as it is done
del entities[0]
tkn_part_of_entity = False # Set this so the next token is not removed
else:
if tkn_part_of_entity:
del entities[0]
tkn_part_of_entity = False
if not ignore_subwords or (self.id2type is not None and self.id2type[tokens['input_ids'][ind]] == 'start'):
labels.append(self.label_map["O"])
else:
labels.append(self.label_map['X'])
if len(input_ids) >= self.max_len:
# Split into multiple examples if too long
examples['input_ids'].append(input_ids)
examples['labels'].append(labels)
examples['id'].append(old_ids[_ind])
examples['name'].append(old_names[_ind])
input_ids = []
labels = []
if input_ids:
examples['input_ids'].append(input_ids)
examples['labels'].append(labels)
examples['id'].append(old_ids[_ind])
examples['name'].append(old_names[_ind])
return examples
def save(self, path: str) -> None:
with open(path, 'wb') as f:
dill.dump(self.__dict__, f)
def ensure_tokenizer(self) -> PreTrainedTokenizerBase:
if self.hf_tokenizer is None:
raise ValueError("The tokenizer is not loaded yet")
return self.hf_tokenizer
@classmethod
def load(cls, path: str) -> 'TransformersTokenizerNER':
tokenizer = cls()
with open(path, 'rb') as f:
d = dill.load(f)
for k in tokenizer.__dict__:
if k in d:
tokenizer.__dict__[k] = d[k]
return tokenizer | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/tokenizers/transformers_ner.py | transformers_ner.py |
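# Hedged sketch (illustration only): calculate_label_map above simply assigns consecutive
# integer ids to every CUI found in the dataset's 'ent_cuis' column, on top of the fixed
# 'O' and 'X' labels. The toy dataset and CUIs below are hypothetical.
if __name__ == '__main__':
    tokenizer = TransformersTokenizerNER()
    toy_dataset = {'ent_cuis': [['C0008031', 'C0018802'], ['C0008031']]}
    tokenizer.calculate_label_map(toy_dataset)
    print(tokenizer.label_map)  # -> {'O': 0, 'X': 1, 'C0008031': 2, 'C0018802': 3}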
import os
import pandas as pd
def get_index_queries():
    r''' Index-creation queries; run these before loading any data to speed things up
'''
return ['CREATE INDEX patientId FOR (p:Patient) ON (p.patientId);',
'CREATE INDEX conceptId FOR (c:Concept) ON (c.conceptId);',
'CREATE INDEX documentId FOR (d:Document) ON (d.documentId);']
def create_neo_csv(data, columns, output_dir='/etc/lib/neo4j/import/',
base_name='patients'):
    r''' Creates a CSV in the format expected by the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data
columns:
What data to use from the dataframe
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
base_name:
Name of the csv
'''
if isinstance(data, pd.DataFrame):
df = data
else:
df = pd.read_csv(data)
# Remove duplicates
df = df.drop_duplicates(subset=columns)
out_df = df[columns]
data_path = os.path.join(output_dir, f"{base_name}.csv")
out_df.to_csv(data_path, index=False)
def create_patients_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='patients'):
r''' Creates a patients CSV for neo4j load csv function
Args:
data:
A dataframe or path to a dataframe with the required data: patientId,
sex, ethnicity, dob
output_dir:
            Where to save the CSVs; ideally the neo4j import path,
            although writing there may require admin permissions.
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'CREATE (:Patient {patientId: toString(row.patientId), \n'
' sex: toString(row.sex), \n'
' ethnicity: toString(row.ethnicity), \n'
' dob: datetime(row.dob)}) \n'
)
create_neo_csv(data=data, columns=['patientId', 'sex', 'ethnicity', 'dob'],
output_dir=output_dir, base_name=base_name)
return query
def create_documents_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='documents'):
    r''' Creates a documents CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: documentId
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'CREATE (:Document {documentId: toString(row.documentId)}) \n'
)
create_neo_csv(data=data, columns=['documentId'],
output_dir=output_dir, base_name=base_name)
return query
def create_concepts_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='concepts'):
    r''' Creates a concepts CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: conceptId,
name and type
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'CREATE (:Concept {conceptId: toString(row.conceptId), \n'
' type: toString(row.type), \n'
' name: toString(row.name)}) \n'
)
create_neo_csv(data=data, columns=['conceptId', 'name', 'type'],
output_dir=output_dir, base_name=base_name)
return query
def create_document2patient_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='document2patient'):
    r''' Creates a document-to-patient relationship CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: patientId and
documentId
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'MATCH (pt:Patient {patientId: toString(row.patientId)}) \n'
'MATCH (doc:Document {documentId: toString(row.documentId)}) \n'
'CREATE (pt)-[:HAS]->(doc); \n'
)
create_neo_csv(data=data, columns=['patientId', 'documentId'],
output_dir=output_dir, base_name=base_name)
return query
def create_concept_ontology_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='concept_ontology'):
    r''' Creates a concept ontology (IS_A) CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: child, parent
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'MATCH (child:Concept {conceptId: toString(row.child)}) \n'
'MATCH (parent:Concept {conceptId: toString(row.parent)}) \n'
'CREATE (child)-[:IS_A]->(parent); \n'
)
create_neo_csv(data=data, columns=['child', 'parent'],
output_dir=output_dir, base_name=base_name)
return query
def create_document2concept_csv(data, output_dir='/etc/lib/neo4j/import/',
base_name='document2concepts'):
    r''' Creates a document-to-concept relationship CSV for the neo4j LOAD CSV function
Args:
data:
A dataframe or path to a dataframe with the required data: 'conceptId',
'documentId', 'contextSimilarity', 'start', 'end', 'timestamp',
'metaSubject', 'metaPresence', 'metaTime'
output_dir:
Where to save the CSVs, should be the neo4j imports path if possible
'''
query = (
'USING PERIODIC COMMIT 100000 \n'
f'LOAD CSV WITH HEADERS FROM "file:///{base_name}.csv" AS row \n'
'MATCH (doc:Document{documentId: toString(row.documentId)}) \n'
'MATCH (concept:Concept {conceptId: toString(row.conceptId)}) \n'
'CREATE (doc)-[:HAS {start: toInteger(row.start), \n'
' end: toInteger(row.end), \n'
' timestamp: toInteger(row.timestamp), \n'
' contextSimilarity: toFloat(row.contextSimilarity), \n'
' metaSubject: toString(row.metaSubject), \n'
' metaPresence: toString(row.metaPresence), \n'
' metaTime: toString(row.metaTime) \n'
' }]->(concept); \n'
)
columns = ['conceptId', 'documentId', 'contextSimilarity', 'start',
'end', 'timestamp', 'metaSubject', 'metaPresence', 'metaTime']
create_neo_csv(data=data, columns=columns,
output_dir=output_dir, base_name=base_name)
return query
def get_data_from_docs(docs, doc2pt, doc2time=None):
data = [['conceptId', 'documentId', 'contextSimilarity',
'start', 'end', 'timestamp', 'metaSubject',
'metaPresence', 'metaTime']]
for doc_id, doc in docs.items():
row = []
for ent in doc['entities'].values():
#if ent['meta_anns']['Subject']['value'] == 'Patient' and \
# ent['meta_anns']['Presence']['value'] == 'True':
if doc2time is not None:
t = doc2time[doc_id]
else:
t = ent['document_timestamp']
row = [ent['cui'], doc_id,
ent['context_similarity'],
ent['start'], ent['end'],
t,
ent['meta_anns'].get('Subject', {}).get('value', None),
ent['meta_anns'].get('Presence', {}).get('value', None),
ent['meta_anns'].get('Time', {}).get('value', None)]
data.append(row)
row = []
return data | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/neo/data_preparation.py | data_preparation.py |
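# Hedged usage sketch (illustration only): each create_* helper writes a CSV into the Neo4j
# import directory and returns the LOAD CSV Cypher statement that ingests it; the output
# directory and patient row below are hypothetical.
if __name__ == '__main__':
    os.makedirs('/tmp/neo4j_import/', exist_ok=True)
    patients = pd.DataFrame([{'patientId': 'P1', 'sex': 'F', 'ethnicity': 'unknown',
                              'dob': '1970-01-01T00:00:00'}])
    query = create_patients_csv(patients, output_dir='/tmp/neo4j_import/')
    print(query)  # Run this (and get_index_queries()) against the Neo4j instance to load the data.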
from py2neo import Graph
import getpass
from collections import defaultdict
class NeoConnector:
def __init__(self, uri, user, password=None):
if password is None:
password = getpass.getpass("Password:")
self.graph = Graph(uri, auth=(user, password))
def execute(self, query):
r = self.graph.run(query)
return r
def bucket_concepts(self, data, bucket_size_seconds):
entities = data['entities']
_bucket = []
_concepts = set()
start_time = -1
new_stream = []
# Sort entities
entities.sort(key=lambda ent: ent['timestamp'])
for ent in entities:
if start_time == -1:
start_time = ent['timestamp']
if ent['timestamp'] - start_time >= bucket_size_seconds:
# Add to stream
new_stream.extend(_bucket)
_bucket = []
_concepts = set()
start_time = ent['timestamp']
t_ent = dict(new_stream[-1])
t_ent['timestamp'] += 1
t_ent['name'] = '<SEP>'
t_ent['conceptId'] = '<SEP>'
new_stream.append(t_ent)
if ent['conceptId'] not in _concepts:
_bucket.append(ent)
_concepts.add(ent['conceptId'])
if _bucket:
new_stream.extend(_bucket)
data['entities'] = new_stream
def get_all_patients(self, concepts, limit=1000, require_time=False, ignore_meta=False):
r''' Return all patients having all concepts
Args:
concepts
limit
require_time:
If set only concepts that have the timestamp property will be used.
'''
q = "WITH [{}] AS cs ".format(",".join(["'{}'".format(c) for c in concepts]))
if not require_time:
q += '''MATCH (c:Concept)<-[:HAS '''
if not ignore_meta:
q += '''{metaPresence: 'True', metaSubject: 'Patient'}'''
q += ''']-(:Document)<-[:HAS]-(pt:Patient)
WHERE c.conceptId in cs
WITH pt, size(cs) as inputCnt, count(DISTINCT c) as cnt
WHERE cnt = inputCnt
'''
else:
q += '''MATCH (c:Concept)<-[r:HAS {metaPresence: 'True', metaSubject:
'Patient'}]-(:Document)<-[:HAS]-(pt:Patient) \n
WHERE c.conceptId in cs AND exists(r.timestamp) \n
WITH pt, size(cs) as inputCnt, count(DISTINCT c) as cnt \n
WHERE cnt = inputCnt \n
'''
q += ' RETURN pt LIMIT {}'.format(limit)
data = self.execute(q).data() # Do not like this too much
return [n['pt']['patientId'] for n in data], q
def get_all_concepts_from(self, patient_id=None, document_id=None,
limit=1000, bucket_size_seconds=None, min_count=0, meta_requirements=None, require_time=True):
r''' Returns all concepts belonging to a document or patient
        given the concept type (if none, all are returned).
'''
if patient_id is not None:
q = 'MATCH (patient:Patient {patientId: "%s"})-[:HAS]->' % patient_id \
+ '(document:Document)-[has:HAS]->(concept:Concept) \n'
elif document_id is not None:
q = 'MATCH (patient:Patient)-[:HAS]->(document:Document {documentId: "%s"})' % document_id \
+ '-[has:HAS]->(concept:Concept) \n'
else:
raise Exception("patient_id or document_id are required")
q += 'RETURN patient, document, concept, has LIMIT %s \n' % limit
data = self.execute(q).data() # Do not like this too much
out = None
if len(data) > 0:
out = {'patient': dict(data[0]['patient']),
'entities': []}
cnt = defaultdict(int)
for row in data:
if meta_requirements is None or \
all([row['has'][meta] == value for meta,value in meta_requirements.items()]):
if not require_time or 'timestamp' in row['has']:
ent = dict(row['concept']) # Take everything from concept
ent['documentId'] = row['document']['documentId']
ent.update(row['has']) # add all the stuff from the meta ann
out['entities'].append(ent)
cnt[ent['conceptId']] += 1
# Cleanup based on min_count
new_ents = []
for ent in out['entities']:
if cnt[ent['conceptId']] >= min_count:
ent['count'] = cnt[ent['conceptId']]
new_ents.append(ent)
out['entities'] = new_ents
if bucket_size_seconds is not None:
self.bucket_concepts(data=out, bucket_size_seconds=bucket_size_seconds)
return out, q
def get_all_patients_descend(self, concepts, limit=1000, require_time=False):
r''' Return all patients having all descendant concepts under the ancestor concept
Args:
concepts - ancestor top-level concepts
limit
require_time:
If set only concepts that have the timestamp property will be used.
Output:
lists of patients with attached SNOMED concepts
'''
q = "WITH [{}] AS ancestor ".format(",".join(["'{}'".format(c) for c in concepts]))
if not require_time:
q += '''MATCH (n:Concept)-[:IS_A*0..5]->(m:Concept)
            WHERE m.conceptId IN ancestor // get the ancestor and its children
            WITH [n.conceptId] AS lineage // pass the lineage to the patient match
MATCH (c:Concept)<-[r:HAS {metaPresence: 'True', metaSubject: 'Patient'}]-(d:Document)<-[q:HAS]-(pt:Patient)
WHERE c.conceptId in lineage
'''
else:
q += '''MATCH (n:Concept)-[:IS_A*0..5]->(m:Concept)
            WHERE m.conceptId IN ancestor // get the ancestor and its children
            WITH [n.conceptId] AS lineage // pass the lineage to the patient match
MATCH (c:Concept)<-[r:HAS {metaPresence: 'True', metaSubject: 'Patient'}]-(d:Document)<-[q:HAS]-(pt:Patient)
WHERE c.conceptId in lineage AND exists(r.timestamp)
'''
q += ' RETURN pt.patientId, pt.sex, c.conceptId, c.name, r.timestamp LIMIT {}'.format(limit)
data = self.execute(q).data() # Do not like this too much
        return [n['pt.patientId'] for n in data], q | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/neo/neo_connector.py | neo_connector.py
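# Hedged usage sketch (illustration only; requires a reachable Neo4j instance - the URI,
# credentials and concept id below are hypothetical):
if __name__ == '__main__':
    conn = NeoConnector('bolt://localhost:7687', user='neo4j', password='secret')
    patient_ids, query = conn.get_all_patients(concepts=['49436004'], limit=10)
    print(query)
    if patient_ids:
        stream, _ = conn.get_all_concepts_from(patient_id=patient_ids[0],
                                               bucket_size_seconds=30 * 24 * 60 * 60)
        print(len(stream['entities']) if stream else 0)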
import getpass
import elasticsearch
import elasticsearch.helpers
from IPython.display import display, HTML
from datetime import datetime
from typing import Dict, Optional
class CogStackConn(object):
def __init__(self, host, port=9200, username: Optional[str] = None, password: Optional[str] = None, scheme: str='https',
timeout: int = 360, max_retries: int = 10, retry_on_timeout: bool = True, **kwargs):
username, password = self._check_auth_details(username, password)
self.elastic = elasticsearch.Elasticsearch(hosts=[{'host': host, 'port': port}],
http_auth=(username, password),
scheme=scheme,
verify_certs=False,
timeout=timeout,
max_retries=max_retries,
retry_on_timeout=retry_on_timeout,
**kwargs)
def _check_auth_details(self, username: Optional[str] = None, password: Optional[str] = None):
if username is None:
username = input("Username:")
if password is None:
password = getpass.getpass("Password:")
# TODO: Implement auth check, for now I assume all is fine
return username, password
def get_docs_generator(self, query: Dict, index: str, es_gen_size: int=800, request_timeout: int=840000, **kwargs):
docs_generator = elasticsearch.helpers.scan(self.elastic,
query=query,
index=index,
size=es_gen_size,
request_timeout=request_timeout,
**kwargs)
return docs_generator
def get_text_for_doc(self, doc_id, index='epr_documents', text_field='body_analysed'):
r = self.elastic.get(index=index, id=doc_id)
text = r['_source'][text_field]
return text
def show_all_ent_cntx(self, stream, cui: str, cntx_size: int = 100, index='epr_documents', text_field='body_analysed'):
for id in range(len(stream['entities'])):
if stream['entities'][id]['conceptId'] == cui:
print(stream['entities'][id]['name'])
print("Status: " + stream['entities'][id]['metaSubject'])
print("Presence: " + stream['entities'][id]['metaPresence'])
print("Time: " + stream['entities'][id]['metaTime'])
print("Date: " + str(datetime.fromtimestamp((stream['entities'][id]['timestamp']))))
self.show_ent_cntx(stream, id, cntx_size, index, text_field)
def show_ent_cntx(self, stream, id: int, cntx_size=100, index='epr_documents', text_field='body_analysed'):
doc_id = stream['entities'][id]['documentId']
text = self.get_text_for_doc(doc_id, index=index, text_field=text_field)
start = stream['entities'][id]['start']
c_start = max(0, start-cntx_size)
end = stream['entities'][id]['end']
c_end = min(len(text), end+cntx_size)
ent_cntx = text[c_start:start] + "<span style='background-color: #53f725'>" + text[start:end] + "</span>" + text[end:c_end]
        ent_cntx = ent_cntx.replace("\n", "<br />")
display(HTML(ent_cntx))
if len(text) < start:
print("Text of the clincal note corrupted: " + text[0:100])
def bulk_to_cogstack(self):
# TODO: look the code made for Nazli/Dan
pass | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/cogstack/cogstack_conn.py | cogstack_conn.py |
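# Hedged usage sketch (illustration only; requires a reachable CogStack/Elasticsearch
# deployment - host, index and field names below are hypothetical):
if __name__ == '__main__':
    conn = CogStackConn(host='cogstack.example.org', username='user', password='pass')
    query = {'query': {'match_all': {}}}
    for hit in conn.get_docs_generator(query=query, index='epr_documents'):
        print(hit['_id'], hit['_source'].get('body_analysed', '')[:80])
        break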
import logging
from typing import List, Optional
from spacy.tokens import Span, Token, Doc
from medcat.cdb import CDB
from medcat.config import Config
log = logging.getLogger(__name__)
def maybe_annotate_name(name: str, tkns: List[Token], doc: Doc, cdb: CDB, config: Config, label: str = 'concept') -> Optional[Span]:
r''' Given a name it will check should it be annotated based on config rules. If yes
the annotation will be added to the doc._.ents array.
Args:
name (`str`):
The name found in the text of the document.
tkns (`List[spacy.tokens.Token]`):
Tokens that belong to this name in the spacy document.
doc (`spacy.tokens.Doc`):
Spacy document to be annotated with named entities.
cdb (`medcat.cdb.CDB`):
Concept database.
config (`medcat.config.Config`):
Global config for medcat.
label (`str`):
Label for this name (usually `concept` if we are using a vocab based approach).
'''
log.debug("Maybe annotating name: %s", name)
# Check uppercase to distinguish uppercase and lowercase words that have a different meaning.
if config.ner.get('check_upper_case_names'):
# Check whether name is completely uppercase in CDB.
if cdb.name_isupper.get(name, False):
# Check whether tokens are also in uppercase. If tokens are not in uppercase, there is a mismatch.
if not all([x.is_upper for x in tkns]):
return None
if len(name) >= config.ner['min_name_len']:
# Check the upper case limit, last part checks is it one token and uppercase
if len(name) >= config.ner['upper_case_limit_len'] or (len(tkns) == 1 and tkns[0].is_upper):
# Everything is fine, mark name
entity = Span(doc, tkns[0].i, tkns[-1].i + 1, label=label)
# Only set this property when using a vocab approach and where this name
#fits a name in the cdb. All standard name entity recognition models will not set this.
entity._.detected_name = name
entity._.link_candidates = cdb.name2cuis[name]
entity._.id = len(doc._.ents)
entity._.confidence = -1 # This does not calculate confidence
# Append the entity to the document
doc._.ents.append(entity)
# Not necessary, but why not
log.debug("NER detected an entity." +
"\n\tDetected name: %s" +
"\n\tLink candidates: %s\n", entity._.detected_name, entity._.link_candidates)
return entity
return None
"""
def check_disambiguation_status(name, cuis, config):
if len(name) < config.linking['disamb_len_limit']:
return True
elif len(cuis) == 1:
if cdb.name2cui2status[name][cuis[0]] != 'N':
return True
else:
return False
else:
for cui in cuis:
if self.cdb.name2cui2status[name][cui] == 'P':
"""
"""
class (object):
def __init__(self, cdb, spacy_cat):
self.cdb = cdb
self._cat = spacy_cat
self.pref_names = set(cdb.cui2pref_name.values())
def CheckAnnotation(self, name, tkns, doc, to_disamb, doc_words):
# Do not add annotations here, but just return what needs to be done
# Is the detected name uppercase and only one token
# Must be one token, multiple is very dangerous
# First check length limit and uppercase limit
elif len(name) < config.length_limit:
# Disambiguate
return 'disambiguate'
elif self.cdb.name2status[name] == 'A': # Check the cdb
if len(self.cdb.name2cui[name]) == 1:
# Links to only one CUI
return 'annotate'
else:
# Links to multiple CUIs
return 'disambiguate'
elif self.cdb.name2status == 'N':
return 'disambiguate'
elif self.cdb.
return 'annotate'
### This is for annotation, should be moved
if is_train:
if len(name) < config.disamb_length_limit:
cuis = self.cdb.name2cuis[name]
if len(cuis) == 1:
if self.cdb.name2cui2status[name][cuis[0]] != 'N':
return cuis[0]
else:
for cui in cuis:
if self.cdb.name2cui2status[name][cui] == 'P':
# Means this name should be used for training as it nearly always links to
#the concept with this CUI
return cui # Break the loop, one name marked with 'P' linkes to max 1 concept
return None
else:
cuis = self.cdb.name2cuis[name]
if len(name) < config.disamb_length_limit:
return disambiguate()
elif len(cuis) == 1:
if self.cdb.name2cui2status[name][cuis[0]] == 'N':
return disambiguate()
else:
return cuis[0]
else:
# Returns None if below thrashold
return disambiguate(doc, ent, cuis, cdb, config)
# Disambiguate function should check the cut-offs based on min context similarity
#- Reward P, but punish N, leave 0 for A
#- Take 1000 most frequent words, set length limit and make them all 'N'
### End of annotation
if len(name) > 1 or one_tkn_upper:
if name in self.cdb.name_isunique:
# Is the number of tokens matching for short words
if len(name) >= 7 or len(tkns) in self.cdb.name2ntkns[name]:
if self.cdb.name_isunique[name]:
# Annotate
cui = list(self.cdb.name2cui[name])[0]
self._cat._add_ann(cui, doc, tkns, acc=1, name=name)
else:
to_disamb.append((list(tkns), name))
else:
# For now ignore if < 7 and tokens don't match
#to_disamb.append((list(tkns), name))
pass
else:
# Is the number of tokens matching for short words
if len(name) > 7 or len(tkns) in self.cdb.name2ntkns[name]:
if len(self.cdb.name2cui[name]) == 1 and len(name) > 2:
# There is only one concept linked to this name and has
#more than 2 characters
cui = list(self.cdb.name2cui[name])[0]
self._cat._add_ann(cui, doc, tkns, acc=1, name=name)
elif self._cat.train and name in self.pref_names and len(name) > 3:
# If training use prefered names as ground truth
cuis = self.cdb.name2cui[name]
for cui in cuis:
if name == self.cdb.cui2pref_name.get(cui, 'nan-nan'):
self._cat._add_ann(cui, doc, tkns, acc=1, name=name)
else:
to_disamb.append((list(tkns), name))
else:
# For now ignore
#to_disamb.append((list(tkns), name))
pass
""" | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/ner/vocab_based_annotator.py | vocab_based_annotator.py |
import logging
from spacy.tokens import Doc
from medcat.ner.vocab_based_annotator import maybe_annotate_name
from medcat.pipeline.pipe_runner import PipeRunner
from medcat.cdb import CDB
from medcat.config import Config
class NER(PipeRunner):
    r''' Dictionary based NER: detects the longest names from the concept database (CDB)
    in a document and adds them as candidate entities for the linker to resolve.
    '''
log = logging.getLogger(__name__)
# Custom pipeline component name
name = 'cat_ner'
# Override
def __init__(self, cdb: CDB, config: Config) -> None:
self.config = config
self.cdb = cdb
super().__init__(self.config.general['workers'])
# Override
def __call__(self, doc: Doc) -> Doc:
        r''' Detect candidates for concepts - the linker will then be able to do the rest. It adds `entities` to
        doc._.ents and each entity can have entity._.link_candidates - which the linker will resolve.
Args:
doc (`spacy.tokens.Doc`):
Spacy document to be annotated with named entities.
        Returns:
doc (`spacy.tokens.Doc`):
Spacy document with detected entities.
'''
# Just take the tokens we need
_doc = [tkn for tkn in doc if not tkn._.to_skip]
for i in range(len(_doc)):
tkn = _doc[i]
tkns = [tkn]
#name_versions = [tkn.lower_, tkn._.norm]
name_versions = [tkn._.norm, tkn.lower_]
name = ""
for name_version in name_versions:
if name_version in self.cdb.snames:
if name:
name = name + self.config.general['separator'] + name_version
else:
name = name_version
break
if name in self.cdb.name2cuis and not tkn.is_stop:
maybe_annotate_name(name, tkns, doc, self.cdb, self.config)
if name: # There has to be at least something appended to the name to go forward
for j in range(i+1, len(_doc)):
if _doc[j].i - _doc[j-1].i - 1 > self.config.ner['max_skip_tokens']:
# Do not allow to skip more than limit
break
tkn = _doc[j]
tkns.append(tkn)
name_versions = [tkn._.norm, tkn.lower_]
name_changed = False
name_reverse = None
for name_version in name_versions:
_name = name + self.config.general['separator'] + name_version
if _name in self.cdb.snames:
# Append the name and break
name = _name
name_changed = True
break
if self.config.ner.get('try_reverse_word_order', False):
_name_reverse = name_version + self.config.general['separator'] + name
if _name_reverse in self.cdb.snames:
# Append the name and break
name_reverse = _name_reverse
if name_changed:
if name in self.cdb.name2cuis:
maybe_annotate_name(name, tkns, doc, self.cdb, self.config)
elif name_reverse is not None:
if name_reverse in self.cdb.name2cuis:
maybe_annotate_name(name_reverse, tkns, doc, self.cdb, self.config)
else:
break
return doc | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/ner/vocab_based_ner.py | vocab_based_ner.py |
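# Hedged sketch (illustration only): the NER component above grows a candidate name token
# by token while the partial name is still a known sub-name (cdb.snames) and emits it when
# the full name exists in cdb.name2cuis. The toy longest-match below works on plain token
# lists and a hypothetical separator, ignoring normalisation and skip rules.
def longest_match(tokens, snames, names, separator='~'):
    matches = []
    i = 0
    while i < len(tokens):
        name, end = tokens[i], i
        if name in snames:
            for j in range(i + 1, len(tokens)):
                candidate = name + separator + tokens[j]
                if candidate not in snames:
                    break
                name, end = candidate, j
            if name in names:
                matches.append((i, end, name))
        i = end + 1
    return matches
if __name__ == '__main__':
    snames = {'chronic', 'chronic~kidney', 'chronic~kidney~disease'}
    names = {'chronic~kidney~disease'}
    print(longest_match(['chronic', 'kidney', 'disease', 'stage'], snames, names))
    # -> [(0, 2, 'chronic~kidney~disease')]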
import os
import json
import logging
from spacy.tokens import Doc
from datetime import datetime
from typing import Iterable, Iterator, Optional, Dict, List, cast, Union
from spacy.tokens import Span
from medcat.cdb import CDB
from medcat.utils.meta_cat.ml_utils import set_all_seeds
from medcat.datasets import transformers_ner
from medcat.utils.postprocessing import map_ents_to_groups, make_pretty_labels, create_main_ann, LabelStyle
from medcat.utils.hasher import Hasher
from medcat.config_transformers_ner import ConfigTransformersNER
#from medcat.utils.loggers import add_handlers
from medcat.tokenizers.transformers_ner import TransformersTokenizerNER
from medcat.utils.ner.metrics import metrics
from medcat.datasets.data_collator import CollateAndPadNER
from transformers import Trainer, AutoModelForTokenClassification, AutoTokenizer
from transformers import pipeline, TrainingArguments
import datasets
# It should be safe to do this always, as all other multiprocessing
#will be finished before data comes to meta_cat
os.environ["TOKENIZERS_PARALLELISM"] = "true"
os.environ['WANDB_DISABLED'] = 'true'
class TransformersNER(object):
    r''' NER component backed by a HuggingFace token-classification (transformers) model,
    with helpers to train, evaluate and run it inside the spaCy pipeline.
    '''
# Custom pipeline component name
name = 'transformers_ner'
    # Add file and console handlers. TODO: gets messed up because of transformer loggers
#log = add_handlers(logging.getLogger(__package__))
log = logging.getLogger(__package__)
def __init__(self, cdb, config: Optional[ConfigTransformersNER] = None,
training_arguments=None) -> None:
self.cdb = cdb
if config is None:
config = ConfigTransformersNER()
self.config = config
set_all_seeds(config.general['seed'])
self.model = AutoModelForTokenClassification.from_pretrained(config.general['model_name'])
# Get the tokenizer either create a new one or load existing
if os.path.exists(os.path.join(config.general['model_name'], 'tokenizer.dat')):
self.tokenizer = TransformersTokenizerNER.load(os.path.join(config.general['model_name'], 'tokenizer.dat'))
else:
hf_tokenizer = AutoTokenizer.from_pretrained(self.config.general['model_name'])
self.tokenizer = TransformersTokenizerNER(hf_tokenizer)
if training_arguments is None:
self.training_arguments = TrainingArguments(
output_dir='./results',
logging_dir='./logs', # directory for storing logs
num_train_epochs=10, # total number of training epochs
per_device_train_batch_size=1, # batch size per device during training
per_device_eval_batch_size=1, # batch size for evaluation
weight_decay=0.14, # strength of weight decay
warmup_ratio=0.01,
learning_rate=4.47e-05, # Should be smaller when finetuning an existing deid model
eval_accumulation_steps=1,
gradient_accumulation_steps=4, # We want to get to bs=4
do_eval=True,
evaluation_strategy='epoch', # type: ignore
logging_strategy='epoch', # type: ignore
save_strategy='epoch', # type: ignore
metric_for_best_model='eval_recall', # Can be changed if our preference is not recall but precision or f1
load_best_model_at_end=True,
remove_unused_columns=False)
else:
self.training_arguments = training_arguments
def create_eval_pipeline(self):
self.ner_pipe = pipeline(model=self.model, task="ner", tokenizer=self.tokenizer.hf_tokenizer)
self.ner_pipe.device = self.model.device
def get_hash(self):
r''' A partial hash trying to catch differences between models
'''
hasher = Hasher()
# Set last_train_on if None
if self.config.general['last_train_on'] is None:
self.config.general['last_train_on'] = datetime.now().timestamp()
hasher.update(self.config.get_hash())
return hasher.hexdigest()
def _prepare_dataset(self, json_path, ignore_extra_labels, meta_requirements, file_name='data.json'):
def merge_data_loaded(base, other):
if not base:
return other
elif other is None:
return base
else:
for p in other['projects']:
base['projects'].append(p)
return base
if isinstance(json_path, str):
json_path = [json_path]
# Merge data from all different data paths
data_loaded: Dict = {}
for path in json_path:
with open(path, 'r') as f:
data_loaded = merge_data_loaded(data_loaded, json.load(f))
# Remove labels that did not exist in old dataset
if ignore_extra_labels and self.tokenizer.label_map:
self.log.info("Ignoring extra labels from the data")
for p in data_loaded['projects']:
for d in p['documents']:
new_anns = []
for a in d['annotations']:
if a['cui'] in self.tokenizer.label_map:
new_anns.append(a)
d['annotations'] = new_anns
if meta_requirements is not None:
self.log.info("Removing anns that do not meet meta requirements")
for p in data_loaded['projects']:
for d in p['documents']:
new_anns = []
for a in d['annotations']:
if all([a['meta_anns'][name]['value'] == value for name, value in meta_requirements.items()]):
new_anns.append(a)
d['annotations'] = new_anns
# Here we have to save the data because of the data loader
os.makedirs('results', exist_ok=True)
out_path = os.path.join(os.getcwd(), 'results', file_name)
json.dump(data_loaded, open(out_path, 'w'))
return out_path
def train(self, json_path: Union[str, list, None]=None, ignore_extra_labels=False, dataset=None, meta_requirements=None):
        r''' Train or continue training a model given a json_path containing a MedCATtrainer export. It will
        continue training if an existing model is loaded or start new training if the model is blank/new.
        Args:
            json_path (`str` or `list`):
                Path/Paths to a MedCATtrainer export containing the annotations we want to train for.
            ignore_extra_labels:
                Only makes sense when an existing deid model was loaded; labels present in the new data but
                not in the old model will be ignored.
            dataset (optional, defaults to `None`):
                A pre-loaded HF dataset; if given, `json_path` is ignored.
            meta_requirements (optional, defaults to `None`):
                Only annotations whose meta annotations match these name/value pairs are kept.
'''
if dataset is None and json_path is not None:
# Load the medcattrainer export
json_path = self._prepare_dataset(json_path, ignore_extra_labels=ignore_extra_labels,
meta_requirements=meta_requirements, file_name='data_eval.json')
# Load dataset
dataset = datasets.load_dataset(os.path.abspath(transformers_ner.__file__),
data_files={'train': json_path}, # type: ignore
split='train',
cache_dir='/tmp/')
            # We split before encoding so the split is at document level, as encoding
            # does the document splitting into max_seq_len
dataset = dataset.train_test_split(test_size=self.config.general['test_size']) # type: ignore
# Update labelmap in case the current dataset has more labels than what we had before
self.tokenizer.calculate_label_map(dataset['train'])
self.tokenizer.calculate_label_map(dataset['test'])
if self.model.num_labels != len(self.tokenizer.label_map):
self.log.warning("The dataset contains labels we've not seen before, model is being reinitialized")
self.log.warning("Model: {} vs Dataset: {}".format(self.model.num_labels, len(self.tokenizer.label_map)))
self.model = AutoModelForTokenClassification.from_pretrained(self.config.general['model_name'], num_labels=len(self.tokenizer.label_map))
self.tokenizer.cui2name = {k:self.cdb.get_name(k) for k in self.tokenizer.label_map.keys()}
self.model.config.id2label = {v:k for k,v in self.tokenizer.label_map.items()}
self.model.config.label2id = self.tokenizer.label_map
# Encode dataset
encoded_dataset = dataset.map(
lambda examples: self.tokenizer.encode(examples, ignore_subwords=False),
batched=True,
remove_columns=['ent_cuis', 'ent_ends', 'ent_starts', 'text'])
data_collator = CollateAndPadNER(self.tokenizer.hf_tokenizer.pad_token_id) # type: ignore
trainer = Trainer(
model=self.model,
args=self.training_arguments,
train_dataset=encoded_dataset['train'],
eval_dataset=encoded_dataset['test'],
compute_metrics=lambda p: metrics(p, tokenizer=self.tokenizer, dataset=encoded_dataset['test'], verbose=self.config.general['verbose_metrics']),
data_collator=data_collator, # type: ignore
tokenizer=None)
trainer.train() # type: ignore
# Save the training time
self.config.general['last_train_on'] = datetime.now().timestamp() # type: ignore
# Save everything
self.save(save_dir_path=os.path.join(self.training_arguments.output_dir, 'final_model'))
# Run an eval step and return metrics
p = trainer.predict(encoded_dataset['test']) # type: ignore
df, examples = metrics(p, return_df=True, tokenizer=self.tokenizer, dataset=encoded_dataset['test'])
# Create the pipeline for eval
self.create_eval_pipeline()
return df, examples, dataset
def eval(self, json_path: Union[str, list, None] = None, dataset=None, ignore_extra_labels=False, meta_requirements=None):
if dataset is None:
json_path = self._prepare_dataset(json_path, ignore_extra_labels=ignore_extra_labels,
meta_requirements=meta_requirements, file_name='data_eval.json')
# Load dataset
dataset = datasets.load_dataset(os.path.abspath(transformers_ner.__file__),
data_files={'train': json_path}, # type: ignore
split='train',
cache_dir='/tmp/')
# Encode dataset
encoded_dataset = dataset.map(
lambda examples: self.tokenizer.encode(examples, ignore_subwords=False),
batched=True,
remove_columns=['ent_cuis', 'ent_ends', 'ent_starts', 'text'])
data_collator = CollateAndPadNER(self.tokenizer.hf_tokenizer.pad_token_id) # type: ignore
# TODO: switch from trainer to model prediction
trainer = Trainer(
model=self.model,
args=self.training_arguments,
train_dataset=None,
eval_dataset=encoded_dataset, # type: ignore
compute_metrics=None,
data_collator=data_collator, # type: ignore
tokenizer=None)
# Run an eval step and return metrics
p = trainer.predict(encoded_dataset) # type: ignore
df, examples = metrics(p, return_df=True, tokenizer=self.tokenizer, dataset=encoded_dataset)
return df, examples
def save(self, save_dir_path: str) -> None:
r''' Save all components of this class to a file
Args:
save_dir_path(`str`):
Path to the directory where everything will be saved.
'''
# Create dirs if they do not exist
os.makedirs(save_dir_path, exist_ok=True)
# Save tokenizer
self.tokenizer.save(os.path.join(save_dir_path, 'tokenizer.dat'))
# Save config
self.config.save(os.path.join(save_dir_path, 'cat_config.json'))
# Save the model
self.model.save_pretrained(save_dir_path)
# Save the cdb
self.cdb.save(os.path.join(save_dir_path, 'cdb.dat'))
# This is everything we need to save from the class; we do not
# save the class itself.
@classmethod
def load(cls, save_dir_path: str, config_dict: Optional[Dict] = None) -> "TransformersNER":
r''' Load a TransformersNER object.
Args:
save_dir_path (`str`):
The directory where everything was saved.
config_dict (`dict`):
This can be used to overwrite the saved parameters for this
instance, e.g. when models are deployed automatically.
Returns:
ner (`medcat.ner.transformers_ner.TransformersNER`):
The loaded NER model.
'''
# Load config
config = cast(ConfigTransformersNER, ConfigTransformersNER.load(os.path.join(save_dir_path, 'cat_config.json')))
config.general['model_name'] = save_dir_path
# Overwrite loaded parameters with something new
if config_dict is not None:
config.merge_config(config_dict)
# Load cdb
cdb = CDB.load(os.path.join(save_dir_path, 'cdb.dat'))
ner = cls(cdb=cdb, config=config)
ner.create_eval_pipeline()
return ner
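# Illustrative usage sketch (the path, `nlp` and the text are placeholders,
# not part of the original module):
#   ner = TransformersNER.load('/path/to/final_model')
#   doc = ner(nlp("Patient complains of chest pain"))
# load() rebuilds the config, CDB and Hugging Face model from the directory
# written by save() and creates the evaluation pipeline.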
@staticmethod
def batch_generator(stream: Iterable[Doc], batch_size_chars: int) -> Iterable[List[Doc]]:
docs = []
char_count = 0
for doc in stream:
char_count += len(doc.text)
docs.append(doc)
if char_count < batch_size_chars:
continue
yield docs
docs = []
char_count = 0
# If there is anything left, return that as well
if len(docs) > 0:
yield docs
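# Illustrative sketch (not part of the original module): batch_generator
# yields lists of docs whose combined text length reaches batch_size_chars.
# For example, with doc text lengths [40, 70, 30] and batch_size_chars=100
# it yields [doc1, doc2] (110 chars) and then the leftover [doc3] at the end.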
def pipe(self, stream: Iterable[Union[Doc, None]], *args, **kwargs) -> Iterator[Doc]:
r''' Process many documents at once.
Args:
stream (Iterable[spacy.tokens.Doc]):
List of spacy documents.
'''
# Just in case
if stream is None or not stream:
return stream
batch_size_chars = self.config.general['pipe_batch_size_in_chars']
yield from self._process(stream, batch_size_chars)
def _process(self,
stream: Iterable[Union[Doc, None]],
batch_size_chars: int) -> Iterator[Optional[Doc]]:
for docs in self.batch_generator(stream, batch_size_chars):
#all_text = [doc.text for doc in docs]
#all_text_processed = self.tokenizer.encode_eval(all_text)
# For now we will process the documents one by one; this should be improved in the future to use batching
for doc in docs:
try:
res = self.ner_pipe(doc.text, aggregation_strategy=self.config.general['ner_aggregation_strategy'])
doc.ents = []
for r in res:
inds = []
for ind, word in enumerate(doc):
end_char = word.idx + len(word.text)
if end_char <= r['end'] and end_char > r['start']:
inds.append(ind)
# Stop early so we do not loop through everything
if end_char > r['end']:
break
entity = Span(doc, min(inds), max(inds) + 1, label=r['entity_group'])
entity._.cui = r['entity_group']
entity._.context_similarity = r['score']
entity._.detected_name = r['word']
entity._.id = len(doc._.ents)
entity._.confidence = r['score']
doc._.ents.append(entity)
create_main_ann(self.cdb, doc)
if self.cdb.config.general['make_pretty_labels'] is not None:
make_pretty_labels(self.cdb, doc, LabelStyle[self.cdb.config.general['make_pretty_labels']])
if self.cdb.config.general['map_cui_to_group'] is not None and self.cdb.addl_info.get('cui2group', {}):
map_ents_to_groups(self.cdb, doc)
except Exception as e:
self.log.warning(e, exc_info=True)
yield from docs
# Override
def __call__(self, doc: Doc) -> Doc:
''' Process one document, used in the spacy pipeline for sequential
document processing.
Args:
doc (spacy.tokens.Doc):
A spacy document
'''
# Just call the pipe method
doc = next(self.pipe(iter([doc])))
return doc | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/ner/transformers_ner.py | transformers_ner.py |
import re
import os
import spacy
from typing import Any, List, Dict, cast, Iterable, Union, Pattern
from spacy.tokenizer import Tokenizer
from spacy.language import Language
from spacy.tokens import Doc
from tokenizers import ByteLevelBPETokenizer
from transformers.models.bert.tokenization_bert_fast import BertTokenizerFast
from medcat.config import Config
def spacy_extended(nlp: Language) -> Tokenizer:
infix_re_list = ('\\.\\.+',
'''(?<=[A-Za-z]{1})[\-_;\,\/~]+(?=[A-Za-z]{1})|(?<=[0-9]{1})[\-_;\,\/]+(?=[A-Za-z]{1})|(?<=[A-Za-z]{1})[\-_;\,\/]+(?=[0-9]{1})|\d{2,4}[\-\s_\*]\d{1,2}[\-\s_\*]\d{1,2}|\d{1,2}:\d{1,2}:\d{1,2}|\d{1,2}:\d{2}'''
'…',
'[\\p{So}]',
'(?<=[[[\\p{Ll}&&\\p{Latin}]||[ёа-я]||[әөүҗңһ]||[α-ωάέίόώήύ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]])\\.(?=[[[\\p{Lu}&&\\p{Latin}]||[ЁА-Я]||[ӘӨҮҖҢҺ]||[Α-ΩΆΈΊΌΏΉΎ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]])',
'(?<=[[[\\p{Lu}&&\\p{Latin}]||[ЁА-Я]||[ӘӨҮҖҢҺ]||[Α-ΩΆΈΊΌΏΉΎ]||[\\p{Ll}&&\\p{Latin}]||[ёа-я]||[әөүҗңһ]||[α-ωάέίόώήύ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]]),(?=[[[\\p{Lu}&&\\p{Latin}]||[ЁА-Я]||[ӘӨҮҖҢҺ]||[Α-ΩΆΈΊΌΏΉΎ]||[\\p{Ll}&&\\p{Latin}]||[ёа-я]||[әөүҗңһ]||[α-ωάέίόώήύ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]])',
'(?<=[[[\\p{Lu}&&\\p{Latin}]||[ЁА-Я]||[ӘӨҮҖҢҺ]||[Α-ΩΆΈΊΌΏΉΎ]||[\\p{Ll}&&\\p{Latin}]||[ёа-я]||[әөүҗңһ]||[α-ωάέίόώήύ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]])[?";:=,.]*(?:-|–|—|--|---|——|~)(?=[[[\\p{Lu}&&\\p{Latin}]||[ЁА-Я]||[ӘӨҮҖҢҺ]||[Α-ΩΆΈΊΌΏΉΎ]||[\\p{Ll}&&\\p{Latin}]||[ёа-я]||[әөүҗңһ]||[α-ωάέίόώήύ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]])',
'(?<=[[[\\p{Lu}&&\\p{Latin}]||[ЁА-Я]||[ӘӨҮҖҢҺ]||[Α-ΩΆΈΊΌΏΉΎ]||[\\p{Ll}&&\\p{Latin}]||[ёа-я]||[әөүҗңһ]||[α-ωάέίόώήύ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]"])[:<>=/](?=[[[\\p{Lu}&&\\p{Latin}]||[ЁА-Я]||[ӘӨҮҖҢҺ]||[Α-ΩΆΈΊΌΏΉΎ]||[\\p{Ll}&&\\p{Latin}]||[ёа-я]||[әөүҗңһ]||[α-ωάέίόώήύ]||[\\p{L}&&\\p{Bengali}]||[\\p{L}&&\\p{Hebrew}]||[\\p{L}&&\\p{Arabic}]||[\\p{L}&&\\p{Sinhala}]]])')
prefix_iter = cast(Iterable[Union[str, Pattern[Any]]], Language.Defaults.prefixes)
suffix_iter = cast(Iterable[Union[str, Pattern[Any]]], Language.Defaults.suffixes)
prefix_re = spacy.util.compile_prefix_regex(prefix_iter)
suffix_re = spacy.util.compile_suffix_regex(suffix_iter)
infix_re = spacy.util.compile_infix_regex(infix_re_list)
return Tokenizer(nlp.vocab,
rules={},
prefix_search=prefix_re.search,
suffix_search=suffix_re.search,
infix_finditer=infix_re.finditer
)
def spacy_split_all(nlp: Language, config: Config) -> Tokenizer:
token_characters = r'[^A-Za-z0-9\@]'
if config.general['diacritics']:
token_characters = r'[^A-Za-zÀ-ÖØ-öø-ÿ0-9\@]'
infix_re = re.compile(token_characters)
suffix_re = re.compile(token_characters + r'$')
prefix_re = re.compile(r'^' + token_characters)
return Tokenizer(nlp.vocab,
rules={},
token_match=None,
prefix_search=prefix_re.search,
suffix_search=suffix_re.search,
infix_finditer=infix_re.finditer
)
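# NOTE (editorial sketch): WordpieceTokenizer.tokenize below calls
# convert_to_unicode and whitespace_tokenize, which are neither defined nor
# imported in this module. The minimal stand-ins below follow the behaviour
# of the corresponding helpers in the original BERT tokenization code and
# are an assumption, not part of the upstream module.
def convert_to_unicode(text: Union[str, bytes]) -> str:
    # In Python 3 this only needs to make sure we are working with `str`.
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    return text


def whitespace_tokenize(text: str) -> List[str]:
    # Strip the text and split it on runs of whitespace.
    text = text.strip()
    if not text:
        return []
    return text.split()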
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab: Any, unk_token: str = "[UNK]", max_input_chars_per_word: int = 200) -> None:
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text: str) -> List:
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
# convert_to_unicode: minimal stand-in defined above in this module
text = convert_to_unicode(text)
output_tokens = []
# whitespace_tokenize: minimal stand-in defined above in this module
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
class SpacyHFTok(object):
def __init__(self, w2v: Any) -> None:
self.nlp = spacy.load('en_core_sci_md', disable=['ner', 'parser'])
self.emb_map = {}
self.embs: List = []
for key in w2v.wv.key_to_index.keys():
self.emb_map[key] = len(self.embs)
self.embs.append(w2v[key])
# Add pad
self.embs.append([0.0] * 300)
def encode(self, text: str) -> 'SpacyHFDoc':
doc = self.nlp(text)
return SpacyHFDoc(doc)
def token_to_id(self, tok: Any) -> Any:
return self.emb_map.get(tok, len(self.emb_map) - 1)
class SpacyHFDoc(object):
def __init__(self, doc: Doc) -> None:
self.doc = doc
self.tokens = [x.text for x in self.doc]
self.offsets = [(x.idx, x.idx+len(x.text)) for x in self.doc]
class TokenizerWrapperBPE(object):
'''Thin wrapper around a Hugging Face `ByteLevelBPETokenizer` that exposes
offsets, input ids and tokens in a common dict format.
'''
def __init__(self, hf_tokenizers: Any = None) -> None:
self.hf_tokenizers = hf_tokenizers
def __call__(self, text: str) -> Dict:
res = self.hf_tokenizers.encode(text)
return {'offset_mapping': res.offsets,
'input_ids': res.ids,
'tokens': res.tokens,
}
def save(self, dir_path, name='bbpe'):
self.hf_tokenizers.save_model(dir_path, prefix=name)
@classmethod
def load(cls, dir_path, name='bbpe', **kwargs):
tokenizer = cls()
vocab_file = os.path.join(dir_path, f'{name}-vocab.json')
merges_file = os.path.join(dir_path, f'{name}-merges.txt')
tokenizer.hf_tokenizers = ByteLevelBPETokenizer.from_file(vocab_filename=vocab_file,
merges_filename=merges_file,
**kwargs)
return tokenizer
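# Illustrative sketch (assumed example, not from the original code): both
# wrappers (this one and TokenizerWrapperBERT below) return the same dict
# layout from __call__, e.g.
#   wrapper = TokenizerWrapperBERT.load('/path/to/dir')
#   out = wrapper("chest pain")
#   # out == {'offset_mapping': [(0, 5), (6, 10)],
#   #         'input_ids': [...], 'tokens': ['chest', 'pain']}
# The exact ids, tokens and offsets depend on the underlying tokenizer.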
class TokenizerWrapperBERT(object):
'''Thin wrapper around a Hugging Face `BertTokenizerFast` that exposes
offsets, input ids and tokens in a common dict format.
'''
def __init__(self, hf_tokenizers=None):
self.hf_tokenizers = hf_tokenizers
def __call__(self, text: str) -> Dict:
res = self.hf_tokenizers.encode_plus(text,
return_offsets_mapping=True, add_special_tokens=False)
return {'offset_mapping': res['offset_mapping'],
'input_ids': res['input_ids'],
'tokens': self.hf_tokenizers.convert_ids_to_tokens(res['input_ids']),
}
def save(self, dir_path: str, name: str='bert') -> None:
path = os.path.join(dir_path, name)
self.hf_tokenizers.save_pretrained(path)
@classmethod
def load(cls, dir_path: str, name: str = 'bert', **kwargs) -> Any:
tokenizer = cls()
path = os.path.join(dir_path, name)
tokenizer.hf_tokenizers = BertTokenizerFast.from_pretrained(path, **kwargs)
return tokenizer | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/preprocessing/tokenizers.py | tokenizers.py |
import pandas
import re
from typing import List, Optional, Dict, Iterable, Any, Tuple
NUM = "NUMNUM"
FAST_SPLIT = re.compile("[^A-Za-z0-9]")
class EmbMimicCSV(object):
""" Iterate over MIMIC data in CSV format
csv_paths: paths to csv files containing the mimic data
"""
def __init__(self, csv_paths: List[str], tokenizer: Any, emb_dict: Optional[Dict] = None) -> None:
self.csv_paths = csv_paths
self.tokenizer = tokenizer
self.emb_dict = emb_dict
def __iter__(self) -> Iterable[List]:
chunksize = 10 ** 8
for csv_path in self.csv_paths:
for chunk in pandas.read_csv(csv_path, chunksize=chunksize):
for _, row in chunk.iterrows():
doc = self.tokenizer(row['text'])
data = []
for token in doc:
if not token._.is_punct and not token._.to_skip and len(token.lower_.strip()) > 1:
if token.is_digit:
data.append(NUM)
else:
if hasattr(token._, 'norm'):
tkn = token._.norm
else:
tkn = token.lower_
if self.emb_dict is not None:
if tkn in self.emb_dict:
data.append(tkn)
else:
data.append(tkn)
yield data
class BertEmbMimicCSV(object):
""" Iterate over MIMIC data in CSV format
csv_paths: paths to csv files containing the mimic data
"""
def __init__(self, csv_paths: List[str], tokenizer: 'BertTokenizer') -> None: # type: ignore # noqa
# Why is pytorch-pretrained-bert not among the dependencies?
# It looks like this needs to be migrated to transformers
from pytorch_pretrained_bert import BertTokenizer
self.csv_paths = csv_paths
self.tokenizer = tokenizer
self.bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def __iter__(self) -> Iterable[List]:
chunksize = 10 ** 8
for csv_path in self.csv_paths:
for chunk in pandas.read_csv(csv_path, chunksize=chunksize):
for _, row in chunk.iterrows():
doc = self.tokenizer(row['text'])
data = []
for token in doc:
tkn = token._.lower
for tkn in self.bert_tokenizer.tokenize(tkn):
data.append(tkn)
yield data
class BaseEmbMimicCSV(object):
""" Iterate over MIMIC data in CSV format
csv_paths: paths to csv files containing the mimic data
"""
def __init__(self, csv_paths: List[str], tokenizer: 'BertTokenizer') -> None: # type: ignore # noqa
# Why is pytorch-pretrained-bert not among the dependencies?
# It looks like this needs to be migrated to transformers
from pytorch_pretrained_bert import BertTokenizer
self.csv_paths = csv_paths
self.tokenizer = tokenizer
self.bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def __iter__(self) -> Iterable[Tuple]:
chunksize = 10 ** 8
for csv_path in self.csv_paths:
for chunk in pandas.read_csv(csv_path, chunksize=chunksize):
for _, row in chunk.iterrows():
text = row['text']
text = re.sub("\[\*[^\]]*\*\]", " ", text)
doc = self.tokenizer(text)
data1 = []
data2 = []
data3 = []
# Remove [] from text
for token in doc:
if not token._.to_skip and not token.is_digit or token._.norm == 'skipskip':
if len(token._.norm) > 1:
tkn1 = token._.norm
tkn2 = token._.lower
data1.append(tkn1)
data2.append(tkn2)
data3.append(token._.norm)
yield (data1, data2, data3)
class RawCSV(object):
""" Iterate over MIMIC data in CSV format
csv_paths: paths to csv files containing the mimic data
"""
def __init__(self, csv_paths: List[str]) -> None:
self.csv_paths = csv_paths
def __iter__(self) -> Iterable[str]:
chunksize = 10 ** 8
for csv_path in self.csv_paths:
for chunk in pandas.read_csv(csv_path, chunksize=chunksize):
for _, row in chunk.iterrows():
yield row['text']
class FastEmbMimicCSV(object):
""" Iterate over MIMIC data in CSV format
csv_paths: paths to csv files containing the mimic data
"""
def __init__(self, csv_paths: List[str]) -> None:
self.csv_paths = csv_paths
def __iter__(self) -> Iterable[List[str]]:
chunksize = 10 ** 8
for csv_path in self.csv_paths:
for chunk in pandas.read_csv(csv_path, chunksize=chunksize):
for _, row in chunk.iterrows():
doc = [x for x in FAST_SPLIT.split(row['text']) if len(x) > 0]
doc = [x.lower() if not x.isdigit() else NUM for x in doc]
yield doc
class SimpleIter(object):
def __init__(self, text_path: str) -> None:
self.text_path = text_path
def __iter__(self) -> Iterable[List[str]]:
data = open(self.text_path, encoding='utf-8')
for line in data:
yield str(line).strip().split(" ") | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/preprocessing/iterators.py | iterators.py |
from __future__ import absolute_import, division, print_function
import json
import logging
import datasets
_CITATION = """\
@misc{kraljevic2020multidomain,
title={Multi-domain Clinical Natural Language Processing with MedCAT: the Medical Concept Annotation Toolkit},
author={Zeljko Kraljevic and Thomas Searle and Anthony Shek and Lukasz Roguski and Kawsar Noor and Daniel Bean and Aurelie Mascio and Leilei Zhu and Amos A Folarin and Angus Roberts and Rebecca Bendayan and Mark P Richardson and Robert Stewart and Anoop D Shah and Wai Keong Wong and Zina Ibrahim and James T Teo and Richard JB Dobson},
year={2020},
eprint={2010.01165},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
Takes as input a json export from medcattrainer."""
class MedCATAnnotationsConfig(datasets.BuilderConfig):
""" BuilderConfig for MedCATNER.
Args:
**kwargs: keyword arguments forwarded to super.
"""
pass
class TransformersDatasetNER(datasets.GeneratorBasedBuilder):
"""MedCATNER: Output of MedCATtrainer"""
BUILDER_CONFIGS = [
MedCATAnnotationsConfig(
name="json",
version=datasets.Version("1.0.0", ""),
description="JSON output from MedCATtrainer",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"text": datasets.Value("string"),
"name": datasets.Value("string"),
"ent_starts": datasets.Sequence(datasets.Value("int32")),
"ent_ends": datasets.Sequence(datasets.Value("int32")),
"ent_cuis": datasets.Sequence(datasets.Value("string")),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
citation=_CITATION,
)
def _split_generators(self, dl_manager): # noqa
"""Returns SplitGenerators."""
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": self.config.data_files['train'],
},
),
]
def _generate_examples(self, filepaths):
cnt = 0
for filepath in filepaths:
logging.info("generating examples from = %s", filepath)
with open(filepath, 'r') as f:
projects = json.load(f)['projects']
for project in projects:
for doc in project['documents']:
starts = []
ends = []
cuis = []
for entity in doc['annotations']:
if (entity.get('correct', True) or
entity.get('manually_created', False) or
entity.get('alternative', False)) and not (
entity.get('deleted', False) or
entity.get('irrelevant', False) or
entity.get('killed', False)):
starts.append(entity['start'])
ends.append(entity['end'])
cuis.append(entity['cui'])
doc_id = doc.get('id', cnt)
cnt += 1
doc_name = doc.get('name', 'unknown')
yield "{}".format(doc_id), {
'id': int(doc_id),
'text': str(doc['text']),
'name': str(doc_name),
'ent_starts': starts,
'ent_ends': ends,
'ent_cuis': cuis,
} | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/datasets/transformers_ner.py | transformers_ner.py |
from __future__ import absolute_import, division, print_function
import pickle
import logging
import datasets
_CITATION = """\
@ARTICLE{Kraljevic2021-ln,
title="Multi-domain clinical natural language processing with {MedCAT}: The Medical Concept Annotation Toolkit",
author="Kraljevic, Zeljko and Searle, Thomas and Shek, Anthony and Roguski, Lukasz and Noor, Kawsar and Bean, Daniel and Mascio, Aurelie and Zhu, Leilei and Folarin, Amos A and Roberts, Angus and Bendayan, Rebecca and Richardson, Mark P and Stewart, Robert and Shah, Anoop D and Wong, Wai Keong and Ibrahim, Zina and Teo, James T and Dobson, Richard J B",
journal="Artif. Intell. Med.",
volume=117,
pages="102083",
month=jul,
year=2021,
issn="0933-3657",
doi="10.1016/j.artmed.2021.102083"
}
"""
_DESCRIPTION = """\
Takes as input a pickled dict of annotated documents from MedCAT. The format should be:
{'document_id': {'entities': <entities>, ...}, ...}
Where entities is the output from medcat.get_entities(<...>)['entities']
"""
class MedCATAnnotationsConfig(datasets.BuilderConfig):
""" BuilderConfig for MedCATAnnotations.
Args:
**kwargs: keyword arguments forwarded to super.
"""
pass
class MedCATAnnotations(datasets.GeneratorBasedBuilder):
"""MedCATAnnotations: Output of MedCAT"""
BUILDER_CONFIGS = [
MedCATAnnotationsConfig(
name="pickle",
version=datasets.Version("1.0.0", ""),
description="Pickled output from MedCAT",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"document_id": datasets.Value("string"),
"context_left": datasets.Value("string"),
"context_right": datasets.Value("string"),
"context_center": datasets.Value("string"),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
citation=_CITATION,
)
def _split_generators(self, dl_manager): # noqa
"""Returns SplitGenerators."""
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": self.config.data_files,
},
),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with open(filepath, 'rb') as f:
docs = pickle.load(f)
for doc_id in docs:
doc = docs[doc_id]
for entity_id, entity in doc['entities'].items():
yield "{}|{}".format(doc_id, entity['id']), {
'id': int(entity_id),
'document_id': str(doc_id),
'context_left': "".join(entity['context_left']),
'context_right': "".join(entity['context_right']),
'context_center': "".join(entity['context_center']),
} | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/datasets/medcat_annotations.py | medcat_annotations.py |
from __future__ import absolute_import, division, print_function
import pickle
import logging
import datasets
_CITATION = """\
@misc{kraljevic2020multidomain,
title={Multi-domain Clinical Natural Language Processing with MedCAT: the Medical Concept Annotation Toolkit},
author={Zeljko Kraljevic and Thomas Searle and Anthony Shek and Lukasz Roguski and Kawsar Noor and Daniel Bean and Aurelie Mascio and Leilei Zhu and Amos A Folarin and Angus Roberts and Rebecca Bendayan and Mark P Richardson and Robert Stewart and Anoop D Shah and Wai Keong Wong and Zina Ibrahim and James T Teo and Richard JB Dobson},
year={2020},
eprint={2010.01165},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
Takes as input a pickled dict of pt2stream. The format should be:
{'patient_id': (concept_cui, concept_count_for_patient, timestamp_of_first_occurrence_for_patient), ...}
"""
class PatientConceptStreamConfig(datasets.BuilderConfig):
""" BuilderConfig for PatientConceptStream.
Args:
**kwargs: keyword arguments forwarded to super.
"""
pass
class PatientConceptStream(datasets.GeneratorBasedBuilder):
"""PatientConceptStream: as input takes the patient to stream of concepts.
TODO: Move the preparations scripts out of notebooks
"""
BUILDER_CONFIGS = [
PatientConceptStreamConfig(
name="pickle",
version=datasets.Version("1.0.0", ""),
description="Pickled output from Temporal dataset preparation scripts",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"patient_id": datasets.Value("string"),
"stream": [datasets.Value('string')],
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
citation=_CITATION,
)
def _split_generators(self, dl_manager): # noqa
"""Returns SplitGenerators."""
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": self.config.data_files,
},
),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with open(filepath, 'rb') as f:
pt2stream = pickle.load(f)
for pt, stream in pt2stream.items():
out_stream = []
year = -1
for data in stream:
# 0 - CUI, 1 - CNT, 2 - TIME, 3 - Pt age in Years
if data[3] > year:
out_stream.append(str(data[3]))
year = data[3]
out_stream.append(data[0])
yield pt, {'patient_id': str(pt),
'stream': out_stream} | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/datasets/patient_concept_stream.py | patient_concept_stream.py |
import argparse
import logging
import yaml
from medcat.cdb_maker import CDBMaker
from medcat.utils.make_vocab import MakeVocab
from medcat.cat import CAT
from medcat.config import Config
from pathlib import Path
# Create Logger
logger = logging.getLogger(__package__)
logger.setLevel(logging.INFO)
def create_cdb(concept_csv_file, medcat_config):
"""Create concept database from csv.
Args:
concept_csv_file (pathlib.Path):
Path to CSV file containing all concepts and synonyms.
medcat_config (medcat.config.Config):
MedCAT configuration file.
Returns:
cdb (medcat.cdb.CDB):
MedCAT concept database containing list of entities and synonyms, without context embeddings.
"""
logger.info('Creating concept database from concept table')
cdb_maker = CDBMaker(config=medcat_config)
cdb = cdb_maker.prepare_csvs([str(concept_csv_file)], full_build=True)
return cdb
def create_vocab(cdb, training_data_list, medcat_config, output_dir, unigram_table_size):
"""Create vocabulary for word embeddings and spell check from list of training documents and CDB.
Args:
cdb (medcat.cdb.CDB):
MedCAT concept database containing list of entities and synonyms.
training_data_list (list):
List of example documents.
medcat_config (medcat.config.Config):
MedCAT configuration file.
output_dir (pathlib.Path):
Output directory to write vocabulary and data.txt (required to create vocabulary) to.
unigram_table_size (int):
Size of unigram table to be initialized before creating vocabulary.
Returns:
vocab (medcat.vocab.Vocab):
MedCAT vocabulary created from CDB and training documents.
"""
logger.info('Creating and saving vocabulary')
make_vocab = MakeVocab(cdb=cdb, config=medcat_config)
make_vocab.make(training_data_list, out_folder=str(output_dir))
make_vocab.add_vectors(in_path=str(output_dir/'data.txt'), unigram_table_size=unigram_table_size)
vocab = make_vocab.vocab
return vocab
def train_unsupervised(cdb, vocab, medcat_config, output_dir, training_data_list):
"""Perform unsupervised training and save updated CDB.
Although not returned explicitly in this function, the CDB will be updated with context embeddings.
Args:
cdb (medcat.cdb.CDB):
MedCAT concept database containing list of entities and synonyms.
vocab (medcat.vocab.Vocab):
MedCAT vocabulary created from CDB and training documents.
medcat_config (medcat.config.Config):
MedCAT configuration file.
output_dir (pathlib.Path):
Output directory to write updated CDB to.
training_data_list (list):
List of example documents.
Returns:
cdb (medcat.cdb.CDB):
MedCAT concept database containing list of entities and synonyms, as well as context embeddings.
"""
# Create MedCAT pipeline
cat = CAT(cdb=cdb, vocab=vocab, config=medcat_config)
# Perform unsupervised training and add model to concept database
logger.info('Performing unsupervised training')
cat.train(training_data_list)
# Save output
logger.info('Saving updated concept database')
cdb.save(str(output_dir / 'cdb.dat'))
return cdb
def create_models(config_file):
"""Create MedCAT CDB and Vocabulary models.
Args:
config_file (pathlib.Path):
Location of model creator configuration file to specify input, output and MedCAT configuration.
Returns:
cdb (medcat.cdb.CDB):
MedCAT concept database containing list of entities and synonyms, as well as context embeddings.
vocab (medcat.vocab.Vocab):
MedCAT vocabulary created from CDB and training documents.
"""
# Load model creator configuration
with open(config_file, 'r') as stream:
config = yaml.safe_load(stream)
# Load data for unsupervised training
with open(Path(config['unsupervised_training_data_file']), 'r', encoding='utf-8') as training_data:
training_data_list = [line.strip() for line in training_data]
# Load MedCAT configuration
medcat_config = Config()
if 'medcat_config_file' in config:
medcat_config.parse_config_file(Path(config['medcat_config_file']))
# Create output dir if it does not exist
output_dir = Path(config['output_dir'])
output_dir.mkdir(parents=True, exist_ok=True)
# Create models
cdb = create_cdb(Path(config['concept_csv_file']), medcat_config)
vocab = create_vocab(cdb, training_data_list, medcat_config, output_dir, config['unigram_table_size'])
cdb = train_unsupervised(cdb, vocab, medcat_config, output_dir, training_data_list)
return cdb, vocab
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('config_file', help='YAML formatted file containing the parameters for model creator. An '
'example can be found in `tests/model_creator/config_example.yml`')
args = parser.parse_args()
# Run pipeline
create_models(args.config_file) | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/model_creator.py | model_creator.py |
import json
import torch
import copy
import numpy as np
from sklearn.metrics import cohen_kappa_score
from typing import Dict, List, Optional, Union, Tuple, Any, Set
from spacy.tokens.doc import Doc
from spacy.tokens.span import Span
from medcat.cdb import CDB
from collections import defaultdict
import random
def set_all_seeds(seed: int) -> None:
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def count_annotations_project(project: Dict, cnt_per_cui=None) -> Tuple[int, Any]:
cnt = 0
if cnt_per_cui is None:
cnt_per_cui = defaultdict(int)
for doc in project['documents']:
for ann in doc['annotations']:
# Only validated
if ann.get('validated', True):
cnt += 1
cnt_per_cui[ann['cui']] += 1
return cnt, cnt_per_cui
def load_data(data_path: str, require_annotations: bool = True, order_by_num_ann: bool = True) -> Dict:
r''' Load a MedCATtrainer export from a json file.
Args:
require_annotations:
If True, keep only projects that contain at least one annotated
document (the check is at the project level, so not every document
in a kept project needs annotations).
order_by_num_ann:
If True, order the projects by their number of validated annotations,
descending.
'''
data_candidates = json.load(open(data_path))
if require_annotations:
data: Dict = {'projects': []}
# Keep only projects that have annotations
for project in data_candidates['projects']:
keep = False
for document in project['documents']:
if len(document['annotations']) > 0:
keep = True
break
if keep:
data['projects'].append(project)
else:
data = data_candidates
cnts = []
if order_by_num_ann:
for project in data['projects']:
cnt, _ = count_annotations_project(project)
cnts.append(cnt)
srt = np.argsort(-np.array(cnts))
data['projects'] = [data['projects'][i] for i in srt]
return data
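# Illustrative sketch of the MedCATtrainer export structure that load_data
# expects (field names inferred from the code in this module; values made up):
# {
#   "projects": [
#     {"id": 1, "name": "proj", "cuis": "", "documents": [
#         {"id": 10, "text": "...", "annotations": [
#             {"start": 0, "end": 5, "cui": "C000", "validated": True,
#              "correct": True, "deleted": False, "killed": False,
#              "alternative": False, "manually_created": False,
#              "meta_anns": {}}]}]}
#   ]
# }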
def count_annotations(data_path: str) -> Dict:
data = load_data(data_path, require_annotations=True)
g_cnt = 0
cnt_per_cui: Dict = defaultdict(int)
for project in data['projects']:
cnt, cnt_per_cui = count_annotations_project(project, cnt_per_cui)
g_cnt += cnt
print("Number of annotations in project '{}' is: {}".format(project['name'], cnt))
# Count annotations per entity
print("Total number of annotations is: {}".format(g_cnt))
# Annotations per CUI
cnt_per_cui = dict(cnt_per_cui)
print("Annotations per CUI: ")
for row in sorted(cnt_per_cui.items(), key=lambda x: x[1], reverse=True):
print(row)
return cnt_per_cui
def get_doc_from_project(project: Dict, doc_id: str) -> Optional[Dict]:
for document in project['documents']:
if document['id'] == doc_id:
return document
return None
def get_ann_from_doc(document: Dict, start: int, end: int) -> Optional[Dict]:
for ann in document['annotations']:
if ann['start'] == start and ann['end'] == end:
return ann
return None
def meta_ann_from_ann(ann: Dict, meta_name: Union[Dict, List]) -> Optional[Dict]:
meta_anns = ann['meta_anns']
# needed for old versions of the data
if type(meta_anns) == dict:
return meta_anns.get(meta_name, None)
else:
for meta_ann in meta_anns:
if meta_ann['name'] == meta_name:
return meta_ann
return None
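# Illustrative sketch (values made up): 'meta_anns' can be stored either as
# a dict keyed by the meta annotation name or as a list of dicts:
#   {'Status': {'name': 'Status', 'value': 'Affirmed'}}
#   [{'name': 'Status', 'value': 'Affirmed'}]
# meta_ann_from_ann(ann, 'Status') returns the inner dict in both cases,
# or None when the meta annotation is missing.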
def are_anns_same(ann: Dict, ann2: Dict, meta_names: List = [], require_double_inner: bool = True) -> bool:
if ann['cui'] == ann2['cui'] and \
ann['correct'] == ann2['correct'] and \
ann['deleted'] == ann2['deleted'] and \
ann['alternative'] == ann2['alternative'] and \
ann['killed'] == ann2['killed'] and \
ann['manually_created'] == ann2['manually_created'] and \
ann['validated'] == ann2['validated']:
# Check whether the meta anns are the same, if they exist
for meta_name in meta_names:
meta_ann = meta_ann_from_ann(ann, meta_name)
meta_ann2 = meta_ann_from_ann(ann2, meta_name)
if meta_ann is not None and meta_ann2 is not None:
if meta_ann['value'] != meta_ann2['value']:
return False
elif require_double_inner:
# Remove all annotations that do not have the required meta_anns; this
# will basically remove double annotations on the meta level that are
# marked as incorrect, which is what we want.
return False
else:
return False
return True
def get_same_anns(
document: Dict, document2: Dict, require_double_inner: bool = True, ann_stats: List = [], meta_names: List = []
) -> Dict:
new_document = copy.deepcopy(document)
new_document['annotations'] = []
if not ann_stats:
ann_stats.append([])
ann_stats.append([])
for meta_name in meta_names:
ann_stats.append([])
# Add some stats
for ann in document['annotations']:
# Take only validated anns
if ann['validated']:
ann2 = get_ann_from_doc(document2, ann['start'], ann['end'])
pair = [0, 0]
if ann['correct']:
pair[0] = 1
if ann2 is not None:
# Only do meta_anns if both anns exist
for id_meta, meta_name in enumerate(meta_names):
ann_stats[id_meta+2].append(['unk', 'unk'])
# For ann1
meta_ann = meta_ann_from_ann(ann, meta_name)
if meta_ann is not None:
ann_stats[id_meta+2][-1][0] = meta_ann['value']
# For ann2
meta_ann = meta_ann_from_ann(ann2, meta_name)
if meta_ann is not None:
ann_stats[id_meta+2][-1][1] = meta_ann['value']
if ann2['correct']:
pair[1] = 1
if not are_anns_same(ann, ann2, meta_names):
ann_stats[0].append((1, 0))
else:
ann_stats[0].append((1, 1))
new_document['annotations'].append(ann)
elif not require_double_inner:
ann_stats[0].append((1, 1))
new_document['annotations'].append(ann)
else:
ann_stats[0].append((1, 0))
# Append for NER+L stats
ann_stats[1].append(pair)
# Check the reverse direction too, but only for ann2 entries that are not in the first doc
for ann2 in document2['annotations']:
if ann2 is not None:
ann = get_ann_from_doc(document, ann2['start'], ann2['end'])
if ann is None and ann2 is not None and ann2['validated']:
# Add a negative example to the stats
ann_stats[1].append([0, 1])
if not require_double_inner:
ann_stats[0].append((1, 1))
# Also append this annotation to the document, because it is missing from it
new_document['annotations'].append(ann2)
else:
ann_stats[0].append((0, 1))
return new_document
def print_consolid_stats(ann_stats: List = [], meta_names: List = []) -> None:
if ann_stats:
_ann_stats = np.array(ann_stats[0])
t = 0
for i in range(len(_ann_stats)):
if _ann_stats[i, 0] == _ann_stats[i, 1]:
t += 1
print("Overall given the parameters (scores here can be strange, be sure to know what they mean)")
print(" In Agreement vs Total: {} / {}\n\n".format(t, len(_ann_stats)))
_ann_stats = np.array(ann_stats[1])
ck = cohen_kappa_score(_ann_stats[:, 0], _ann_stats[:, 1])
t = 0
for i in range(len(_ann_stats)):
if _ann_stats[i, 0] == _ann_stats[i, 1]:
t += 1
agr = t / len(_ann_stats)
print("NER + L")
print(" Kappa: {:.4f}; Agreement: {:.4f}".format(ck, agr))
print(" InAgreement vs Total: {} / {}".format(t, len(_ann_stats)))
for id_meta, meta_name in enumerate(meta_names):
if len(ann_stats) > id_meta + 2:
_ann_stats = np.array(ann_stats[id_meta+2])
ck = cohen_kappa_score(_ann_stats[:, 0], _ann_stats[:, 1])
t = 0
for i in range(len(_ann_stats)):
if _ann_stats[i, 0] == _ann_stats[i, 1]:
t += 1
agr = t / len(_ann_stats)
print("Stats for: {}".format(meta_name))
print(" Kappa: {:.4f}; Agreement: {:.4f}".format(ck, agr))
print(" InAgreement vs Total: {} / {}".format(t, len(_ann_stats)))
# Deprecated and removable?
def check_differences(data_path: str, cat: Any, cntx_size=30, min_acc=0.2, ignore_already_done=False, only_start=False, only_saved=False) -> None:
data = load_data(data_path, require_annotations=True)
for pid, project in enumerate(data['projects']):
print("Starting: {} / {}".format(pid, len(data['projects'])))
cui_filter = None
tui_filter = None
if 'cuis' in project and len(project['cuis'].strip()) > 0:
cui_filter = set([x.strip() for x in project['cuis'].split(",")])
if 'tuis' in project and len(project['tuis'].strip()) > 0:
tui_filter = set([x.strip().upper() for x in project['tuis'].split(",")])
cat.spacy_cat.TUI_FILTER = tui_filter
cat.spacy_cat.CUI_FILTER = cui_filter
cat.spacy_cat.MIN_ACC = -5
cat.spacy_cat.IS_TRAINER = True
cat.train = False
for did, doc in enumerate(project['documents']):
print("Starting: {} / {}".format(did, len(project['documents'])))
text = doc['text']
if not doc.get('_verified', False) or ignore_already_done or only_saved:
# Get annotations with medcat
s_doc = cat(text)
t_anns_norm = []
p_anns_norm = []
t_anns_start = []
p_anns_start = []
for ann in doc['annotations']:
t_anns_norm.append((ann['start'], ann['cui']))
t_anns_start.append(ann['start'])
for ann in s_doc.ents:
p_anns_norm.append((ann.start_char, ann._.cui))
p_anns_start.append(ann.start_char)
print("__________________")
print("T: ", t_anns_norm)
print()
print("P: ", p_anns_norm)
print("\n\nSTARTING MC TO GT")
for id_p_ann, p_ann in enumerate(p_anns_norm):
if (only_start and p_ann[0] not in t_anns_start) or (not only_start and p_ann not in t_anns_norm):
ann = s_doc.ents[id_p_ann]
if not only_saved:
print("\n\nThis does not exist in gt annotations")
start = ann.start_char
end = ann.end_char
cui = ann._.cui
b = text[max(0, start-cntx_size):start].replace("\n", " ").replace('\r', ' ')
m = text[start:end].replace("\n", " ").replace('\r', ' ')
e = text[end:min(len(text), end+cntx_size)].replace("\n", " ").replace('\r', ' ')
print("SNIPPET: {} <<<{}>>> {}".format(b, m, e))
print(cui, " | ", cat.cdb.get_name(cui), " | ", cat.cdb.cui2tui.get(cui, ''), " | ", ann.start_char)
print(ann._.acc)
d = str(input("###Add as (or empty for skip): 1-Correct, 2-Incorrect, s-save: "))
if d:
new_ann: Dict = {}
new_ann['id'] = 0 # ignore
new_ann['user'] = 'auto'
new_ann['validated'] = True
new_ann['last_modified'] = ''
new_ann['manually_created'] = False
new_ann['acc'] = ann._.acc
new_ann['start'] = ann.start_char
new_ann['end'] = ann.end_char
new_ann['cui'] = ann._.cui
new_ann['value'] = ann.text
new_ann['killed'] = False
new_ann['alternative'] = False
if d == '1':
new_ann['correct'] = True
new_ann['deleted'] = False
if d == '2':
new_ann['correct'] = False
new_ann['deleted'] = True
if d == 'x':
# Save annotations and return
json.dump(data, open(data_path, 'w'))
return
if d == 's':
# Save
new_ann['correct'] = False
new_ann['deleted'] = False
new_ann['_saved'] = True
doc['annotations'].append(new_ann)
print("\n\nSTARTING GT TO MC")
# Redo
t_anns_norm = []
for ann in doc['annotations']:
t_anns_norm.append((ann['start'], ann['cui']))
t_anns_start.append(ann['start'])
new_doc_anns = []
for id_t_ann, t_ann in enumerate(t_anns_norm):
add_ann = True
ann = doc['annotations'][id_t_ann]
if (not only_saved and not only_start and t_ann not in p_anns_norm) or \
(not only_saved and only_start and t_ann[0] not in p_anns_start) or \
(only_saved and ann.get("_saved", False)):
# Check is it correct
if not ann.get('_verified', False) or ignore_already_done or (only_saved and ann.get('_saved', False)):
print("\n\nThis does not exist in mc annotations or it is a saved item")
b = text[max(0, ann['start']-cntx_size):ann['start']].replace("\n", " ").replace('\r', ' ')
m = text[ann['start']:ann['end']].replace("\n", " ").replace('\r', ' ')
e = text[ann['end']:min(len(text), ann['end']+cntx_size)].replace("\n", " ").replace('\r', ' ')
print("SNIPPET: {} <<<{}>>> {}".format(b, m, e))
print(ann['cui'], " | ", cat.cdb.get_name(ann['cui']), " | ", ann['start'])
print("Current status")
print(" Correct: " + str(ann['correct']))
print(" Incorrect: " + str(ann['deleted']))
print(" Alternative: " + str(ann['alternative']))
print(" Killed: " + str(ann['killed']))
d = str(input("###Change to (or empty for skip): 1-Correct, 2-Incorrect, d-delete, s-save: "))
if d == '1':
ann['correct'] = True
ann['deleted'] = False
ann['killed'] = False
ann['alternative'] = False
elif d == '2':
ann['correct'] = False
ann['deleted'] = True
ann['killed'] = False
ann['alternative'] = False
elif d == 'd':
add_ann = False
elif d == 's':
# Save for later
ann['_saved'] = True
elif d == 'x':
# Save annotations and return
json.dump(data, open(data_path, 'w'))
return
ann['_verified'] = True
if only_saved and ann.get('_saved', False) and d in ['1', '2']:
# Remove if it was saved but now it is done
del ann['_saved']
if add_ann:
new_doc_anns.append(ann)
doc['annotations'] = new_doc_anns
doc['_verified'] = True
json.dump(data, open(data_path, 'w'))
def consolidate_double_annotations(data_path: str, out_path: str, require_double: bool = True, require_double_inner: bool = False, meta_anns_to_match: List = []) -> Dict:
""" Consolidated a dataset that was multi-annotated (same documents two times).
data_path:
Output from MedCATtrainer - projects containig the same documents must have the same name.
out_path:
The consolidated data will be saved here - usually only annotations where both annotators agre
out_path:
The consolidated data will be saved here - usually only annotations where both annotators agreee
require_double (boolean):
If True everything must be double annotated, meaning there have to be two projects of the same name for each name. Else, it will
also use projects that do not have double annotiations. If this is False, projects that do not have double anns will be
included as is, and projects that have will still be checked.
require_double_inner (boolean):
If False - this will allow some entities to be annotated by only one annotator and not the other, while still requiring
annotations to be the same if they exist.
meta_anns_to_match (boolean):
List of meta annotations that must match for two annotations to be the same. If empty only the mention
level will be checked.
"""
d_stats_proj: Dict = {}
data: Dict = load_data(data_path, require_annotations=True)
out_data: Dict = {'projects': []}
projects_done: Set = set()
ann_stats: List = [] # This will keep score for agreement
# Consolidate
for project in data['projects']:
id_project = project['id']
new_documents = []
ann_stats_project: List = []
new_project = None
if id_project not in projects_done:
projects_done.add(id_project)
name = project['name']
documents = project['documents']
if not require_double:
new_project = copy.deepcopy(project)
projects_done.add(id_project)
else:
# Means we need double annotations
has_double = False
for project2 in data['projects']:
id2 = project2['id']
name2 = project2['name']
if name == name2 and id_project != id2:
has_double = True
projects_done.add(id2)
break
if has_double:
for document in documents:
document2 = get_doc_from_project(project2, document['id'])
if document2 is not None:
new_document = get_same_anns(document, document2, require_double_inner=require_double_inner, ann_stats=ann_stats_project, meta_names=meta_anns_to_match)
new_documents.append(new_document)
elif not require_double_inner:
# Use the base document if we are allowing no double anns
new_documents.append(document)
new_project = copy.deepcopy(project)
new_project['documents'] = new_documents
if new_project is not None:
if not ann_stats:
for _ in ann_stats_project:
ann_stats.append([])
for irow, one in enumerate(ann_stats_project):
ann_stats[irow].extend(one)
out_data['projects'].append(new_project)
if ann_stats_project:
print("** Printing stats for project: {}".format(project['name']))
print_consolid_stats(ann_stats_project, meta_names=meta_anns_to_match)
d_stats_proj[project['name']] = ann_stats_project
print("\n\n")
else:
print("** Project '{}' did not have double annotations\n\n".format(project['name']))
# Save
json.dump(out_data, open(out_path, 'w'))
print("** Overall stats")
print_consolid_stats(ann_stats, meta_names=meta_anns_to_match)
return d_stats_proj
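# Illustrative usage sketch (paths and the meta annotation name are placeholders):
#   stats = consolidate_double_annotations(
#       data_path='double_annotated_export.json',
#       out_path='consolidated.json',
#       require_double=True,
#       meta_anns_to_match=['Status'])
# This writes the consolidated export to out_path and prints per-project and
# overall agreement statistics (Cohen's kappa and raw agreement).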
def validate_ner_data(data_path: str, cdb: CDB, cntx_size: int = 70, status_only: bool = False, ignore_if_already_done: bool = False) -> None:
""" Please just ignore this function, I'm afraid to even look at it
"""
data: Dict = json.load(open(data_path))
name2cui: Dict = {}
cui2status: Dict = {}
print("This will overwrite the original data, make sure you've a backup")
print("If something is completely wrong or you do not know what to do, chose the [s]kip option, you can also skip by leaving input blank.")
print("If you want to [q]uit write q your progress will be saved to the json and you can continue later")
for project in data['projects']:
for document in project['documents']:
for ann in document['annotations']:
name = str(ann['value']).lower()
cui = ann['cui']
status = None
if ann['correct']:
status = 'Correct'
else:
status = "Other"
if name in name2cui:
name2cui[name][cui] = name2cui[name].get(cui, 0) + 1
else:
name2cui[name] = {cui: 1}
if cui in cui2status:
if name in cui2status[cui]:
cui2status[cui][name][status] = cui2status[cui][name].get(status, 0) + 1
else:
cui2status[cui][name] = {status: 1}
else:
cui2status[cui] = {name: {status: 1}}
quit_ = False
if not status_only:
for project in data['projects']:
for document in project['documents']:
text = str(document['text'])
for ann in document['annotations']:
name = str(ann['value']).lower()
cui = ann['cui']
status = None
start = ann['start']
end = ann['end']
if 'manul_verification_mention' not in ann or ignore_if_already_done:
if ann['correct']:
status = 'Correct'
else:
status = "Other"
# First check name
if len(name2cui[name]) > 1:
cuis = list(name2cui[name].keys())
print("\n\nThis name was annotated with multiple CUIs\n")
b = text[max(0, start-cntx_size):start].replace("\n", " ")
m = text[start:end].replace("\n", " ")
e = text[end:min(len(text), end+cntx_size)].replace("\n", " ")
print("SNIPPET: {} <<<{}>>> {}".format(b, m, e))
print()
print("C | {:3} | {:20} | {:70} | {}".format("ID", "CUI", "Concept", "Number of annotations in the dataset"))
print("-"*110)
for id_cui, _cui in enumerate(cuis):
if _cui == cui:
c = "+"
else:
c = " "
print("{} | {:3} | {:20} | {:70} | {}".format(c, id_cui, _cui, cdb.get_name(_cui)[:69], name2cui[name][_cui]))
print()
d = str(input("###Change to ([s]kip/[q]uit/id): "))
if d == 'q':
quit_ = True
break
ann['manul_verification_mention'] = True
if d == 's':
continue
if d.isnumeric():
d_ = int(d)
ann['cui'] = cuis[d_]
if quit_:
break
if quit_:
break
if not quit_:
# Re-calculate
name2cui = {}
cui2status = {}
for project in data['projects']:
for document in project['documents']:
for ann in document['annotations']:
name = str(ann['value']).lower()
cui = ann['cui']
status = None
if ann['correct']:
status = 'Correct'
else:
status = "Other"
if name in name2cui:
name2cui[name][cui] = name2cui[name].get(cui, 0) + 1
else:
name2cui[name] = {cui: 1}
if cui in cui2status:
if name in cui2status[cui]:
cui2status[cui][name][status] = cui2status[cui][name].get(status, 0) + 1
else:
cui2status[cui][name] = {status: 1}
else:
cui2status[cui] = {name: {status: 1}}
for project in data['projects']:
for document in project['documents']:
text = str(document['text'])
for ann in document['annotations']:
name = str(ann['value']).lower()
cui = ann['cui']
status = None
start = ann['start']
end = ann['end']
if 'manual_verification_status' not in ann or ignore_if_already_done:
if ann['correct']:
status = 'correct'
elif ann['deleted']:
status = 'incorrect'
elif ann['killed']:
status = 'terminated'
elif ann['alternative']:
status = 'alternative'
else:
status = 'unk'
if len(cui2status[cui][name]) > 1:
print("\n\nThis name was annotated with different status\n")
b = text[max(0, start-cntx_size):start].replace("\n", " ")
m = text[start:end].replace("\n", " ")
e = text[end:min(len(text), end+cntx_size)].replace("\n", " ")
print("SNIPPET : {} <<<{}>>> {}".format(b, m, e))
print("CURRENT STATUS : {}".format(status))
print("CURRENT ANNOTATION: {} - {}".format(cui, cdb.get_name(cui)))
print("ANNS TOTAL :")
for k,v in cui2status[cui][name].items():
print(" {}: {}".format(str(k), str(v)))
print()
d = str(input("###Change to ([q]uit/[s]kip/[c]orrect/[i]ncorrect/[t]erminate): "))
if d == 'q':
quit_ = True
break
ann['manual_verification_status'] = True
if d == 's':
continue
elif d == 'c':
ann['correct'] = True
ann['killed'] = False
ann['deleted'] = False
ann['alternative'] = False
elif d == 'i':
ann['correct'] = False
ann['killed'] = False
ann['deleted'] = True
ann['alternative'] = False
elif d == 't':
ann['correct'] = False
ann['killed'] = True
ann['deleted'] = False
ann['alternative'] = False
print()
print()
if quit_:
break
if quit_:
break
json.dump(data, open(data_path, 'w'))
class MetaAnnotationDS(torch.utils.data.Dataset):
def __init__(self, data: Dict, category_map: Dict):
r'''
Args:
data:
Dictionary of data values
category_map:
Map from category name to id
'''
self.data = data
self.category_map = category_map
def __getitem__(self, idx: int) -> Dict:
item = {}
for key, value in self.data.items():
if key != 'labels':
item[key] = torch.tensor(value[idx])
else:
item[key] = torch.tensor(self.category_map[value[idx]])
return item
def __len__(self) -> int:
return len(self.data['input_ids'])
def prepare_from_json_hf(data_path: str,
cntx_left: int,
cntx_right: int,
tokenizer: Any,
cui_filter: Optional[Dict] = None,
replace_center: Optional[Dict] = None) -> Dict:
out: Dict = {}
data: Dict = json.load(open(data_path))
p_data = prepare_from_json_chars(data, cntx_left=cntx_left, cntx_right=cntx_right, tokenizer=tokenizer,
cui_filter=cui_filter, replace_center=replace_center)
for name in p_data.keys():
out[name] = {}
out[name]['labels'] = np.array([x[0] for x in p_data[name]])
out[name]['input_ids'] = [x[1] for x in p_data[name]]
out[name]['center_positions'] = np.array([x[2] for x in p_data[name]])
out[name]['token_type_ids'] = [[0] * len(x) for x in out[name]['input_ids']]
out[name]['attention_mask'] = [[1] * len(x) for x in out[name]['input_ids']]
return out
def prepare_from_json_chars(data: Dict,
cntx_left: int,
cntx_right: int,
tokenizer: Any,
cui_filter: Optional[Dict] = None,
replace_center: Optional[Dict] = None) -> Dict:
""" Convert the data from a json format into a CSV-like format for training.
data: json file from MedCAT
cntx_left: size of the context
cntx_right: size of the context
tokenizer: instance of the <FastTokenizer> class from huggingface
replace_center: if not None the center word (concept) will be replaced with whatever is set
return: {'category_name': [('category_value', 'tokens', 'center_token'), ...], ...}
"""
out_data: Dict = {}
for project in data['projects']:
for document in project['documents']:
text = str(document['text'])
if len(text) > 0:
for ann in document["annotations"]:
if cui_filter:
cui = ann['cui']
if cui_filter is None or not cui_filter or cui in cui_filter:
if ann.get('validated', True) and (not ann.get('deleted', False) and not ann.get('killed', False)):
start = ann['start']
end = ann['end']
_start = max(0, start - cntx_left)
_end = min(len(text), end + cntx_right)
t_left = tokenizer(text[_start:start])['input_ids']
t_right = tokenizer(text[end:_end])['input_ids']
if replace_center is None:
t_center = tokenizer(text[start:end])['input_ids']
else:
t_center = tokenizer(replace_center)['input_ids']
tkns = t_left + t_center + t_right
cpos = len(t_left)
# Backward compatibility if meta_anns is a list vs dict in the new approach
meta_anns: Union[List, Dict] = []
if 'meta_anns' in ann:
meta_anns = ann['meta_anns']
if type(meta_anns) == dict:
meta_anns = list(meta_anns.values())
# If the annotation is validated
for meta_ann in meta_anns:
name = meta_ann['name']
value = meta_ann['value']
sample = [value, tkns, cpos]
if name in out_data:
out_data[name].append(sample)
else:
out_data[name] = [sample]
return out_data
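# Illustrative sketch of the output shape of prepare_from_json_chars
# (token ids are made up):
#   {'Status': [['Affirmed', [101, 2345, 102], 1],
#               ['Negated',  [101, 9876, 102], 2]]}
# i.e. for every meta annotation name a list of
# [meta value, token ids (left + center + right context), index of the first
#  center token].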
def make_mc_train_test(data: Dict, cdb: CDB, test_size: float = 0.2) -> Tuple:
""" This is a disaster
"""
cnts: Dict = {}
total_anns = 0
# Count all CUIs
for project in data['projects']:
cui_filter = None
tui_filter = None
if 'cuis' in project and len(project['cuis'].strip()) > 0:
cui_filter = [x.strip() for x in project['cuis'].split(",")]
"""
if 'tuis' in project and len(project['tuis'].strip()) > 0:
tui_filter = [x.strip().upper() for x in project['tuis'].split(",")]
"""
for document in project['documents']:
if type(document['annotations']) == list:
doc_annotations = document['annotations']
elif type(document['annotations']) == dict:
doc_annotations = list(document['annotations'].values())
for ann in doc_annotations:
if (cui_filter is None and tui_filter is None) or (cui_filter is not None and ann['cui'] in cui_filter):
if ann['cui'] in cnts:
cnts[ann['cui']] += 1
else:
cnts[ann['cui']] = 1
total_anns += 1
test_cnts: Dict = {}
test_anns = 0
test_prob = 0.90
test_set: Dict = {'projects': []}
train_set: Dict = {'projects': []}
for i_project in np.random.permutation(np.arange(0, len(data['projects']))):
project = data['projects'][i_project]
cui_filter = None
tui_filter = None
test_project: Dict = {}
train_project: Dict = {}
for k, v in project.items():
if k == 'documents':
test_project['documents'] = []
train_project['documents'] = []
else:
test_project[k] = v
train_project[k] = v
if 'cuis' in project and len(project['cuis'].strip()) > 0:
cui_filter = [x.strip() for x in project['cuis'].split(",")]
"""
if 'tuis' in project and len(project['tuis'].strip()) > 0:
tui_filter = [x.strip().upper() for x in project['tuis'].split(",")]
"""
for i_document in np.random.permutation(np.arange(0, len(project['documents']))):
# Do we have enough documents in the test set
if test_anns / total_anns >= test_size:
test_prob = 0
document = project['documents'][i_document]
# Count CUIs for this document
_cnts: Dict = {}
if type(document['annotations']) == list:
doc_annotations = document['annotations']
elif type(document['annotations']) == dict:
doc_annotations = list(document['annotations'].values())
for ann in doc_annotations:
if (cui_filter is None and tui_filter is None) or (cui_filter is not None and ann['cui'] in cui_filter) or \
(tui_filter is not None and cdb.cui2tui.get(ann['cui'], 'unk') in tui_filter):
if ann['cui'] in _cnts:
_cnts[ann['cui']] += 1
else:
_cnts[ann['cui']] = 1
# Did we get more than 30% of concepts for any CUI with >=10 cnt
is_test = True
for cui, v in _cnts.items():
if (v + test_cnts.get(cui, 0)) / cnts[cui] > 0.3:
if cnts[cui] >= 10:
# We only care about concepts with count >= 10; the rest will be ignored
# during the test phase (for all metrics and similar)
is_test = False
break
# Add to test set
if is_test and np.random.rand() < test_prob:
test_project['documents'].append(document)
if type(document['annotations']) == list:
doc_annotations = document['annotations']
elif type(document['annotations']) == dict:
doc_annotations = list(document['annotations'].values())
for ann in doc_annotations:
if (cui_filter is None and tui_filter is None) or (cui_filter is not None and ann['cui'] in cui_filter) or \
(tui_filter is not None and cdb.cui2tui.get(ann['cui'], 'unk') in tui_filter):
test_anns += 1
if ann['cui'] in test_cnts:
test_cnts[ann['cui']] += 1
else:
test_cnts[ann['cui']] = 1
else:
train_project['documents'].append(document)
test_set['projects'].append(test_project)
train_set['projects'].append(train_project)
return train_set, test_set, test_anns, total_anns
def get_false_positives(doc: Dict, spacy_doc: Doc) -> List[Span]:
if type(doc['annotations']) == list:
truth = set([(ent['start'], ent['cui']) for ent in doc['annotations']])
elif type(doc['annotations']) == dict:
truth = set([(ent['start'], ent['cui']) for ent in doc['annotations'].values()])
fps = []
for ent in spacy_doc._.ents:
if (ent.start_char, ent._.cui) not in truth:
fps.append(ent)
return fps | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/data_utils.py | data_utils.py |
import logging

# Module-level logger; the functions below reference `log`
log = logging.getLogger(__name__)


def get_lr_linking(config, cui_count):
if config.linking['optim']['type'] == 'standard':
return config.linking['optim']['lr']
elif config.linking['optim']['type'] == 'linear':
lr = config.linking['optim']['base_lr']
cui_count += 1 # Increase by 1 just in case, to avoid division by zero
return max(lr / cui_count, config.linking['optim']['min_lr'])
else:
raise Exception("Optimizer not implemented")
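# Worked example (values made up) for the 'linear' option: with base_lr=1.0,
# min_lr=0.0005 and cui_count=199 the learning rate is
# max(1.0 / (199 + 1), 0.0005) = 0.005, so concepts that have already been
# seen many times get smaller updates, never below min_lr.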
def get_batch(ind, batch_size, x, y, cpos, device):
# Get the start/end index for this batch
start = ind * batch_size
end = (ind+1) * batch_size
# Get the batch
x_batch = x[start:end]
y_batch = y[start:end]
c_batch = cpos[start:end]
# Return and move the batches to the right device
return x_batch.to(device), y_batch.to(device), c_batch.to(device)
def load_hf_tokenizer(tokenizer_name):
hf_tokenizer = None  # returned as None if the tokenizer cannot be created
try:
from transformers import AutoTokenizer
hf_tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
except Exception:
log.exception("The Huggingface tokenizer could not be created")
return hf_tokenizer
def build_vocab_from_hf(model_name, hf_tokenizer, vocab):
rebuild = False
# Check is it necessary
for i in range(hf_tokenizer.vocab_size):
tkn = hf_tokenizer.ids_to_tokens[i]
if tkn not in vocab:
rebuild = True
if rebuild:
log.info("Rebuilding vocab")
try:
from transformers import AutoModel
model = AutoModel.from_pretrained(model_name)
if 'xlnet' in model_name.lower():
embs = model.get_input_embeddings().weight.cpu().detach().numpy()
else:
embs = model.embeddings.word_embeddings.weight.cpu().detach().numpy()
# Reset all vecs in current vocab
vocab.vec_index2word = {}
for ind in vocab.index2word.keys():
vocab.vocab[vocab.index2word[ind]]['vec'] = None
for i in range(hf_tokenizer.vocab_size):
tkn = hf_tokenizer.ids_to_tokens[i]
vec = embs[i]
vocab.add_word(word=tkn, vec=vec, replace=True)
# Crate the new unigram table
vocab.reset_counts()
vocab.make_unigram_table()
except Exception:
log.exception("The Huggingface model could not be loaded") # noqa | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/ml_utils.py | ml_utils.py |
import json
import pandas as pd
def prepare_umls_csv(mrconso_path, mrsty_path, sep="|", lng="ENG", output_path=None, **kwargs):
column_names = ["CUI", "TUI", "STN", "STY", "ATUI", "CVF", "unk"]
sty_df = pd.read_csv(mrsty_path, names=column_names, sep=sep, dtype=str, **kwargs)
cui2tui = {}
for cui, tui in sty_df[['CUI', 'TUI']].values:
if cui in cui2tui:
cui2tui[cui].append(tui)
else:
cui2tui[cui] = [tui]
# NOTE: the original function ends here without using mrconso_path; return the
# computed CUI -> TUI map so the work is not silently discarded (assumed intent).
return cui2tui
def mrconso_to_csv(mrconso_path, column_names=None, sep='|', lng='ENG', output_path=None, **kwargs):
if column_names is None:
column_names = ['CUI', 'LAT', 'TS', 'LUI', 'STT', 'SUI', 'ISPREF', 'AUI', 'SAUI', 'SCUI', 'SDUI', 'SAB', 'TTY', 'CODE', 'STR', 'SRL', 'SUPPRESS', 'CVF', 'unk']
df = pd.read_csv(mrconso_path, names=column_names, sep=sep, dtype=str, **kwargs)
df = df[df.LAT == lng]
df = df[['CUI', 'STR', 'TTY']]
# Change name status if required
df['TTY'] = ['P' if tty=='PN' else 'A' for tty in df['TTY'].values]
# Remove name duplicates (keep TTY also in the eq)
df = df.drop_duplicates(subset=['CUI', 'STR', 'TTY'])
# Rename columns
df.columns = ['cui', 'name', 'name_status']
if output_path is not None:
df.to_csv(output_path, index=False)
return df
def umls_to_snomed_name_extension(mrconso_path, snomed_codes, column_names=None, sep='|', lng='ENG', output_path=None, use_umls_primary_names=False, **kwargs):
r''' Prepare the MRCONSO.RRF to be used for extension of SNOMED. Will output a CSV that can
be used with cdb_maker (use the snomed_cdb.dat as the base one and extend with this).
Args:
mrconso_path (`str`):
Path to the MRCONSO.RRF file from UMLS.
snomed_codes (`Set[str]`):
SNOMED codes that you want to extend with UMLS names.
column_names (`str`, optional):
Column names in the UMLS MRCONSO file; leave blank and they will be autofilled.
sep (`str`, defaults to `|`):
Separator for the mrconso CSV (RRF is also a CSV)
lng (`str`, defaults to `ENG`):
What language to keep from the MRCONSO file
output_path (`str`, optional):
Where to save the built CSV - fullpath
kwargs
Will be forwarded to pandas.read_csv
use_umls_primary_names (`bool`, defaults to False):
If True the default names from UMLS will be used to inform medcat later once the CDB is built.
Return:
df (pandas.DataFrame):
Dataframe with UMLS names and SNOMED CUIs.
'''
if column_names is None:
column_names = ['CUI', 'LAT', 'TS', 'LUI', 'STT', 'SUI', 'ISPREF', 'AUI', 'SAUI', 'SCUI', 'SDUI', 'SAB', 'TTY', 'CODE', 'STR', 'SRL', 'SUPPRESS', 'CVF', 'unk']
df = pd.read_csv(mrconso_path, names=column_names, sep=sep, dtype=str, **kwargs)
df = df[df.LAT == lng]
umls2snomed = {}
# Get all SNOMED terms
df_snomed = df[df.SAB == 'SNOMEDCT_US']
# Keep only the SNOMED Codes that we need
df_snomed = df_snomed[[code in snomed_codes for code in df_snomed.CODE.values]]
# Remove all CUIs that map to more than one SNOMED term
cuis_to_remove = set()
for pair in df_snomed[['CUI', 'CODE']].values:
if pair[1] in snomed_codes: # Only if part of codes of interest
if pair[0] not in umls2snomed or pair[1] == umls2snomed[pair[0]]:
umls2snomed[pair[0]] = pair[1]
else:
cuis_to_remove.add(pair[0])
umls2snomed = {cui: snomed for cui, snomed in umls2snomed.items() if cui not in cuis_to_remove}
# Keep only cui and str
df = df[['CUI', 'STR', 'TTY']]
# Replace UMLS with SNOMED codes
df['CUI'] = [umls2snomed.get(cui, "unk-unk") for cui in df.CUI.values]
df = df[df.CUI != 'unk-unk']
# Change name status if required
if use_umls_primary_names:
df['TTY'] = ['P' if tty=='PN' else 'A' for tty in df['TTY'].values]
else:
df['TTY'] = ['A'] * len(df)
# Remove name duplicates (keep TTY also in the eq)
df = df.drop_duplicates(subset=['CUI', 'STR', 'TTY'])
# Rename columns
df.columns = ['cui', 'name', 'name_status']
if output_path is not None:
df.to_csv(output_path, index=False)
return df
def snomed_source_to_csv(snomed_term_paths=[], snomed_desc_paths=[], sep='\t', output_path=None, output_path_type_names=None, strip_fqn=True, na_filter=False, **kwargs):
r''' Given paths to the snomed files with concepts e.g. `sct2_Concept_Snapshot_INT_20180731.txt` this will
build a CSV required by the cdb_maker.py
Args:
snomed_term_paths (`List[str]`):
One or many paths to the different `sct2_Concept_Snapshot_*` files
snomed_desc_paths (`List[str]`):
One or many paths to the different `sct2_Description_Snapshot_*` files
sep (`str`, defaults to '\t'):
The separator used in the snomed files.
output_path (`str`, optional):
Where to save the built CSV - fullpath
output_path_type_names (`str`, optional):
Where to save the dictionary that maps from type_id to name
strip_fqn (bool, defaults to `True`):
If True all Fully Qualified Names will be stripped of the semantic type e.g. (disorder)
and that cleaned name will be appended as an additional row in the CSV.
na_filter (bool, defaults to `False`):
If True, Pandas will apply its default detection of "missing" values and replace them with nan.
This is usually undesirable because some SNOMED concepts match the patterns considered as missing (e.g. "N/A")
kwargs:
Will be forwarded to pandas.read_csv
Return:
Tuple[snomed_cdb_df (pandas.DataFrame), type_id2name (Dict)]:
- snomed_cdb_df - Dataframe with SNOMED concepts ready to be used with medcat.cdb_maker.
- type_id2name - map from type_id to name, can be used to extend a CDB.
'''
# Process terms
snomed_terms = [pd.read_csv(path, sep=sep, dtype=str, **kwargs) for path in snomed_term_paths]
snomed_terms = pd.concat(snomed_terms)
snomed_terms = snomed_terms[snomed_terms.active == '1']
# Process descriptions and keep only active ones (note this is not active concepts,
# but active descriptions).
snomed_descs = [pd.read_csv(path, sep=sep, dtype=str, na_filter=na_filter, **kwargs) for path in snomed_desc_paths]
snomed_descs = pd.concat(snomed_descs)
snomed_descs = snomed_descs[snomed_descs.active == '1']
# Keep only active terms in the snomed_descs
f = set(snomed_terms.id.values)
snomed_descs = snomed_descs[[id in f for id in snomed_descs.conceptId.values]]
# Remove everything that we do not need and rename columns
snomed_cdb_df = snomed_descs[['conceptId', 'term', 'typeId']]
snomed_cdb_df = snomed_cdb_df.rename(columns={"conceptId": "cui", "term": "name", 'typeId': 'name_status'})
# Ontology is always SNOMED
snomed_cdb_df['ontologies'] = ['SNOMED'] * len(snomed_cdb_df)
# Take primary names
snomed_cdb_df['name_status'] = ['P' if name_status == '900000000000003001' else 'A' for name_status in snomed_cdb_df.name_status.values]
# Get type names and IDs, there is no real way to do this, so I'll invent a type ID
cui2type_name = {cui:name[name.rfind("(")+1:name.rfind(")")] for cui, name in
snomed_cdb_df[snomed_cdb_df['name_status'] == 'P'][['cui', 'name']].values if name.endswith(")")}
# Create map from name2id
type_name2id = {type_name: 'T-{}'.format(id) for id, type_name in enumerate(sorted(set(cui2type_name.values())))}
# Add stripped FQNs if necessary, they will be appended at the end of the dataframe
if strip_fqn:
fqn_stripped = snomed_cdb_df[[name_status == 'P' and name.endswith(")")
for name, name_status in snomed_cdb_df[['name', 'name_status']].values]]
fqn_stripped['name'] = [name[0:name.rfind("(")].strip() for name in fqn_stripped['name'].values]
snomed_cdb_df = pd.concat([snomed_cdb_df, fqn_stripped])
# Add type_ids column to the output df
snomed_cdb_df['type_ids'] = [type_name2id.get(cui2type_name.get(cui, 'unk'), 'unk') for cui in snomed_cdb_df.cui]
if output_path is not None:
snomed_cdb_df.to_csv(output_path, index=False)
# Create reverse mapping of types
type_id2name = {v:k for k,v in type_name2id.items()}
if output_path_type_names is not None:
# Save the type_id -> name map
json.dump(type_id2name, open(output_path_type_names, 'w'))
return snomed_cdb_df, type_id2name | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/cdb_helpers.py | cdb_helpers.py |
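# --- Illustrative sketch (hypothetical file names) ---
# Building a cdb_maker-ready CSV from a SNOMED snapshot and saving the
# type_id -> type-name map next to it. The paths below are placeholders only.
def _demo_snomed_source_to_csv():
    snomed_cdb_df, type_id2name = snomed_source_to_csv(
        snomed_term_paths=['sct2_Concept_Snapshot_INT_20180731.txt'],
        snomed_desc_paths=['sct2_Description_Snapshot-en_INT_20180731.txt'],
        output_path='snomed_cdb.csv',
        output_path_type_names='type_id2name.json')
    print(snomed_cdb_df.shape, len(type_id2name))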
import re
import spacy
from medcat.pipeline.pipe_runner import PipeRunner
CONTAINS_NUMBER = re.compile('[0-9]+')
class BasicSpellChecker(object):
r''' A simple Norvig-style spell checker that ranks candidate corrections by
their frequency in the CDB vocabulary (optionally consulting a data vocabulary).
'''
def __init__(self, cdb_vocab, config, data_vocab=None):
self.vocab = cdb_vocab
self.config = config
self.data_vocab = data_vocab
def P(self, word):
"Probability of `word`."
# use inverse of rank as proxy
# returns 0 if the word isn't in the dictionary
cnt = self.vocab.get(word, 0)
if cnt != 0:
return -1 / cnt
else:
return 0
def __contains__(self, word):
if word in self.vocab:
return True
elif self.data_vocab is not None and word in self.data_vocab:
return False
else:
return False
def fix(self, word):
"Most probable spelling correction for word."
fix = max(self.candidates(word), key=self.P)
if fix != word:
return fix
else:
return None
def candidates(self, word):
"Generate possible spelling corrections for word."
if self.config.general['spell_check_deep']:
# This will check a two letter edit distance
return self.known([word]) or self.known(self.edits1(word)) or self.known(self.edits2(word)) or [word]
else:
# Will check only one letter edit distance
return self.known([word]) or self.known(self.edits1(word)) or [word]
def known(self, words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in self.vocab)
def edits1(self, word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
if self.config.general['diacritics']:
letters += 'àáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(self, word):
"All edits that are two edits away from `word`."
return (e2 for e1 in self.edits1(word) for e2 in self.edits1(e1))
def edits3(self, word):
"All edits that are two edits away from `word`."
# Do d3 edits
pass
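# --- Illustrative sketch (not part of the original module) ---
# Minimal demonstration of BasicSpellChecker with a tiny stand-in config and a
# word-count dict in place of a real CDB vocabulary (both are assumptions).
class _DemoConfig:
    general = {'spell_check_deep': False, 'diacritics': False}
def _demo_spell_checker():
    checker = BasicSpellChecker(cdb_vocab={'pain': 50, 'main': 10}, config=_DemoConfig())
    print(checker.fix('painn'))         # -> 'pain' (one deletion away, highest count)
    print(len(checker.edits1('pane')))  # number of unique strings one edit away from 'pane'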
class TokenNormalizer(PipeRunner):
r''' Will normalize all tokens in a spacy document.
Args:
config (medcat.config.Config):
Global medcat configuration.
spell_checker (BasicSpellChecker, optional):
Spell checker used to fix misspelled tokens; only needed when spell checking is enabled in the config.
'''
# Custom pipeline component name
name = 'token_normalizer'
# Override
def __init__(self, config, spell_checker=None):
self.config = config
self.spell_checker = spell_checker
self.nlp = spacy.load(config.general['spacy_model'], disable=config.general['spacy_disabled_components'])
super().__init__(self.config.general['workers'])
# Override
def __call__(self, doc):
for token in doc:
if len(token.lower_) < self.config.preprocessing['min_len_normalize']:
token._.norm = token.lower_
elif (self.config.preprocessing.get('do_not_normalize', set())) and token.tag_ is not None and \
token.tag_ in self.config.preprocessing.get('do_not_normalize'):
token._.norm = token.lower_
elif token.lemma_ == '-PRON-':
token._.norm = token.lemma_
token._.to_skip = True
else:
token._.norm = token.lemma_.lower()
if self.config.general['spell_check']:
# Fix the token if necessary
if len(token.text) >= self.config.general['spell_check_len_limit'] and not token._.is_punct \
and token.lower_ not in self.spell_checker and not CONTAINS_NUMBER.search(token.lower_):
fix = self.spell_checker.fix(token.lower_)
if fix is not None:
tmp = self.nlp(fix)[0]
if len(token.lower_) < self.config.preprocessing['min_len_normalize']:
token._.norm = tmp.lower_
else:
token._.norm = tmp.lemma_.lower()
return doc | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/normalizers.py | normalizers.py |
import json
import pandas as pd
def original2concept_csv(data_path, out_path):
f = open(data_path)
csv_data = [['cui', 'type_id', 'name', 'name_status']]
cui2names = {}
for row in f:
if row != '\n':
if '|t|' not in row[0:13] and '|a|' not in row[0:13]:
# Entity row
parts = row.split("\t")
cui = parts[5].strip()
type_id = "|".join(parts[4].split(","))
name = parts[3]
csv_data.append([cui, type_id, name, 'A'])
if cui in cui2names:
cui2names[cui].add(name)
else:
cui2names[cui] = {name}
df = pd.DataFrame(csv_data[1:], columns=csv_data[0])
df.to_csv(out_path, index=False)
return df
def original2pure_text(data_path, out_path):
f = open(data_path)
out = open(out_path, 'w')
for row in f:
if row != '\n':
if '|t|' in row[0:13]:
# It is title
parts = row.split("|t|")
title = parts[1].strip()
elif '|a|' in row[0:13]:
# Text row
parts = row.split("|a|")
text = parts[1].strip()
out.write(title + " " + text + " " + "\n")
out.close()
def original2json(data_path, out_path):
f = open(data_path)
data = {'projects': [{'name': 'medmentions', 'id': 0, 'documents': []}]}
documents = []
document = {}
for row in f:
if row != '\n':
if '|t|' in row[0:13]:
# It is title
parts = row.split("|t|")
title = parts[1].strip()
elif '|a|' in row[0:13]:
# Text row
parts = row.split("|a|")
text = parts[1].strip()
document['text'] = title + " " + text
document['annotations'] = []
else:
# Entity row
parts = row.split("\t")
start = int(parts[1])
end = int(parts[2])
cui = parts[5].strip()
type_id = "|".join(parts[4].split(","))
name = parts[3]
document['annotations'].append({
'start': start,
'end': end,
'cui': cui,
'type_id': type_id,
'value': name})
else:
documents.append(document)
document = {}
data['projects'][0]['documents'] = documents
json.dump(data, open(out_path, 'w'))
return data | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/medmentions.py | medmentions.py |
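# --- Illustrative sketch (hypothetical paths) ---
# Converting a raw MedMentions release (PubTator format, with the |t|/|a| lines
# parsed above) into a concept CSV and the MedCATtrainer-style JSON.
def _demo_convert_medmentions():
    original2concept_csv('corpus_pubtator.txt', 'medmentions_concepts.csv')
    data = original2json('corpus_pubtator.txt', 'medmentions.json')
    print(len(data['projects'][0]['documents']), 'documents converted')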
import os
import json
import re
import hashlib
import pandas as pd
def parse_file(filename, first_row_header=True, columns=None):
with open(filename, encoding='utf-8') as f:
entities = [[n.strip() for n in line.split('\t')] for line in f]
return pd.DataFrame(entities[1:], columns=entities[0] if first_row_header else columns)
class Snomed:
r"""
Pre-process SNOMED CT release files:
Args:
data_path:
Path to the unzipped SNOMED CT folder
uk_ext (bool):
Whether this is a SNOMED UK extension released after 2021, which uses a divergent release format.
"""
def __init__(self, data_path, uk_ext=False):
self.data_path = data_path
self.release = data_path[-16:-8]
self.uk_ext = uk_ext
def to_concept_df(self):
"""
Create a MedCAT-ready concept table from the release files.
Please remember to specify whether the release is a SNOMED UK extension published after 2021;
this can be done when constructing the object (uk_ext=True). It is not required for UK extension releases pre-2021.
:return: SNOMED CT concept DataFrame ready for MedCAT CDB creation
"""
snomed_releases = []
paths = []
if "Snapshot" in os.listdir(self.data_path):
paths.append(self.data_path)
snomed_releases.append(self.release)
else:
for folder in os.listdir(self.data_path):
if "SnomedCT" in folder:
paths.append(os.path.join(self.data_path, folder))
snomed_releases.append(folder[-16:-8])
if len(paths) == 0:
raise FileNotFoundError('Incorrect path to SNOMED CT directory')
df2merge = []
for i, snomed_release in enumerate(snomed_releases):
contents_path = os.path.join(paths[i], "Snapshot", "Terminology")
concept_snapshot = "sct2_Concept_Snapshot"
description_snapshot = "sct2_Description_Snapshot-en"
if self.uk_ext:
if "SnomedCT_UKClinicalRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_UKCLSnapshot"
description_snapshot = "sct2_Description_UKCLSnapshot-en"
elif "SnomedCT_UKEditionRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_UKEDSnapshot"
description_snapshot = "sct2_Description_UKEDSnapshot-en"
elif "SnomedCT_UKClinicalRefsetsRF2_PRODUCTION" in paths[i]:
continue
else:
pass
for f in os.listdir(contents_path):
m = re.search(f'{concept_snapshot}'+r'_(.*)_\d*.txt', f)
if m:
snomed_v = m.group(1)
int_terms = parse_file(f'{contents_path}/{concept_snapshot}_{snomed_v}_{snomed_release}.txt')
active_terms = int_terms[int_terms.active == '1']
del int_terms
int_desc = parse_file(f'{contents_path}/{description_snapshot}_{snomed_v}_{snomed_release}.txt')
active_descs = int_desc[int_desc.active == '1']
del int_desc
_ = pd.merge(active_terms, active_descs, left_on=['id'], right_on=['conceptId'], how='inner')
del active_terms
del active_descs
active_with_primary_desc = _[_['typeId'] == '900000000000003001'] # active description
active_with_synonym_desc = _[_['typeId'] == '900000000000013009'] # active synonym
del _
active_with_all_desc = pd.concat([active_with_primary_desc, active_with_synonym_desc])
active_snomed_df = active_with_all_desc[['id_x', 'term', 'typeId']]
del active_with_all_desc
active_snomed_df = active_snomed_df.rename(columns={'id_x': 'cui', 'term': 'name', 'typeId': 'name_status'})
active_snomed_df['ontologies'] = 'SNOMED-CT'
active_snomed_df['name_status'] = active_snomed_df['name_status'].replace(
['900000000000003001', '900000000000013009'],
['P', 'A'])
active_snomed_df = active_snomed_df.reset_index(drop=True)
temp_df = active_snomed_df[active_snomed_df['name_status'] == 'P'][['cui', 'name']]
temp_df['description_type_ids'] = temp_df['name'].str.extract(r"\((\w+\s?.?\s?\w+.?\w+.?\w+.?)\)$")
active_snomed_df = pd.merge(active_snomed_df, temp_df.loc[:, ['cui', 'description_type_ids']],
on='cui',
how='left')
del temp_df
# Hash semantic tag to get a 8 digit type_id code
active_snomed_df['type_ids'] = active_snomed_df['description_type_ids'].apply(
lambda x: int(hashlib.sha256(str(x).encode('utf-8')).hexdigest(), 16) % 10 ** 8)
df2merge.append(active_snomed_df)
return pd.concat(df2merge).reset_index(drop=True)
def list_all_relationships(self):
"""
SNOMED CT provides a rich set of inter-relationships between concepts.
:return: List of all SNOMED CT relationships
"""
snomed_releases = []
paths = []
if "Snapshot" in os.listdir(self.data_path):
paths.append(self.data_path)
snomed_releases.append(self.release)
else:
for folder in os.listdir(self.data_path):
if "SnomedCT" in folder:
paths.append(os.path.join(self.data_path, folder))
snomed_releases.append(folder[-16:-8])
if len(paths) == 0:
raise FileNotFoundError('Incorrect path to SNOMED CT directory')
all_rela = []
for i, snomed_release in enumerate(snomed_releases):
contents_path = os.path.join(paths[i], "Snapshot", "Terminology")
concept_snapshot = "sct2_Concept_Snapshot"
relationship_snapshot = "sct2_Relationship_Snapshot"
if self.uk_ext:
if "SnomedCT_InternationalRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_Snapshot"
relationship_snapshot = "sct2_Relationship_Snapshot"
elif "SnomedCT_UKClinicalRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_UKCLSnapshot"
relationship_snapshot = "sct2_Relationship_UKCLSnapshot"
elif "SnomedCT_UKEditionRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_UKEDSnapshot"
relationship_snapshot = "sct2_Relationship_UKEDSnapshot"
elif "SnomedCT_UKClinicalRefsetsRF2_PRODUCTION" in paths[i]:
continue
else:
pass
for f in os.listdir(contents_path):
m = re.search(f'{concept_snapshot}'+r'_(.*)_\d*.txt', f)
if m:
snomed_v = m.group(1)
int_relat = parse_file(f'{contents_path}/{relationship_snapshot}_{snomed_v}_{snomed_release}.txt')
active_relat = int_relat[int_relat.active == '1']
del int_relat
all_rela.extend([relationship for relationship in active_relat["typeId"].unique()])
return all_rela
def relationship2json(self, relationshipcode, output_jsonfile):
"""
:param relationshipcode: A single SCTID or unique concept identifier of the relationship type
:param output_jsonfile: Name of json file output. Tip: include SNOMED edition to avoid downstream confusions
:return: json file of relationship mapping
"""
snomed_releases = []
paths = []
if "Snapshot" in os.listdir(self.data_path):
paths.append(self.data_path)
snomed_releases.append(self.release)
else:
for folder in os.listdir(self.data_path):
if "SnomedCT" in folder:
paths.append(os.path.join(self.data_path, folder))
snomed_releases.append(folder[-16:-8])
if len(paths) == 0:
raise FileNotFoundError('Incorrect path to SNOMED CT directory')
output_dict = {}
for i, snomed_release in enumerate(snomed_releases):
contents_path = os.path.join(paths[i], "Snapshot", "Terminology")
concept_snapshot = "sct2_Concept_Snapshot"
relationship_snapshot = "sct2_Relationship_Snapshot"
if self.uk_ext:
if "SnomedCT_InternationalRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_Snapshot"
relationship_snapshot = "sct2_Relationship_Snapshot"
elif "SnomedCT_UKClinicalRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_UKCLSnapshot"
relationship_snapshot = "sct2_Relationship_UKCLSnapshot"
elif "SnomedCT_UKEditionRF2_PRODUCTION" in paths[i]:
concept_snapshot = "sct2_Concept_UKEDSnapshot"
relationship_snapshot = "sct2_Relationship_UKEDSnapshot"
elif "SnomedCT_UKClinicalRefsetsRF2_PRODUCTION" in paths[i]:
continue
else:
pass
for f in os.listdir(contents_path):
m = re.search(f'{concept_snapshot}'+r'_(.*)_\d*.txt', f)
if m:
snomed_v = m.group(1)
int_relat = parse_file(f'{contents_path}/{relationship_snapshot}_{snomed_v}_{snomed_release}.txt')
active_relat = int_relat[int_relat.active == '1']
del int_relat
relationship = dict([(key, []) for key in active_relat["destinationId"].unique()])
for _, v in active_relat.iterrows():
if v['typeId'] == str(relationshipcode):
_ = v['destinationId']
relationship[_].append(v['sourceId'])
else:
pass
output_dict.update(relationship)
with open(output_jsonfile, 'w') as json_file:
json.dump(output_dict, json_file)
return
def map_snomed2icd10(self):
"""
:return: SNOMED to ICD10 mapping DataFrame which includes all metadata
"""
snomed_releases = []
paths = []
if "Snapshot" in os.listdir(self.data_path):
paths.append(self.data_path)
snomed_releases.append(self.release)
else:
for folder in os.listdir(self.data_path):
if "SnomedCT" in folder:
paths.append(os.path.join(self.data_path, folder))
snomed_releases.append(folder[-16:-8])
if len(paths) == 0:
raise FileNotFoundError('Incorrect path to SNOMED CT directory')
df2merge = []
for i, snomed_release in enumerate(snomed_releases):
refset_terminology = f'{paths[i]}/Snapshot/Refset/Map'
icd10_ref_set = 'der2_iisssccRefset_ExtendedMapSnapshot'
if self.uk_ext:
if "SnomedCT_InternationalRF2_PRODUCTION" in paths[i]:
icd10_ref_set = "der2_iisssccRefset_ExtendedMapSnapshot"
elif "SnomedCT_UKClinicalRF2_PRODUCTION" in paths[i]:
icd10_ref_set = "der2_iisssccRefset_ExtendedMapUKCLSnapshot"
elif "SnomedCT_UKEditionRF2_PRODUCTION" in paths[i]:
icd10_ref_set = "der2_iisssccRefset_ExtendedMapUKEDSnapshot"
elif "SnomedCT_UKClinicalRefsetsRF2_PRODUCTION" in paths[i]:
continue
else:
pass
for f in os.listdir(refset_terminology):
m = re.search(f'{icd10_ref_set}'+r'_(.*)_\d*.txt', f)
if m:
snomed_v = m.group(1)
mappings = parse_file(f'{refset_terminology}/{icd10_ref_set}_{snomed_v}_{snomed_release}.txt')
mappings = mappings[mappings.active == '1']
icd_mappings = mappings.sort_values(by=['referencedComponentId', 'mapPriority', 'mapGroup']).reset_index(
drop=True)
df2merge.append(icd_mappings)
return pd.concat(df2merge)
def map_snomed2opcs4(self):
"""
:return: SNOMED to OPCS-4 mapping DataFrame which includes all metadata
"""
snomed_releases = []
paths = []
if "Snapshot" in os.listdir(self.data_path):
paths.append(self.data_path)
snomed_releases.append(self.release)
else:
for folder in os.listdir(self.data_path):
if "SnomedCT" in folder:
paths.append(os.path.join(self.data_path, folder))
snomed_releases.append(folder[-16:-8])
if len(paths) == 0:
raise FileNotFoundError('Incorrect path to SNOMED CT directory')
df2merge = []
for i, snomed_release in enumerate(snomed_releases):
refset_terminology = f'{paths[i]}/Snapshot/Refset/Map'
snomed_v = ''
opcs4_ref_set = 'der2_iisssccRefset_ExtendedMapSnapshot'
if self.uk_ext:
if "SnomedCT_InternationalRF2_PRODUCTION" in paths[i]:
continue
elif "SnomedCT_UKClinicalRF2_PRODUCTION" in paths[i]:
opcs4_ref_set = "der2_iisssciRefset_ExtendedMapUKCLSnapshot"
elif "SnomedCT_UKEditionRF2_PRODUCTION" in paths[i]:
opcs4_ref_set = "der2_iisssciRefset_ExtendedMapUKEDSnapshot"
elif "SnomedCT_UKClinicalRefsetsRF2_PRODUCTION" in paths[i]:
continue
else:
pass
for f in os.listdir(refset_terminology):
m = re.search(f'{opcs4_ref_set}'+r'_(.*)_\d*.txt', f)
if m:
snomed_v = m.group(1)
if snomed_v == '':
raise FileNotFoundError("This SNOMED release does not contain OPCS mapping files")
mappings = parse_file(f'{refset_terminology}/{opcs4_ref_set}_{snomed_v}_{snomed_release}.txt')
mappings = mappings[mappings.active == '1']
icd_mappings = mappings.sort_values(by=['referencedComponentId', 'mapPriority', 'mapGroup']).reset_index(
drop=True)
df2merge.append(icd_mappings)
return pd.concat(df2merge) | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/preprocess_snomed.py | preprocess_snomed.py |
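# --- Illustrative sketch (hypothetical folder name) ---
# Typical use of the Snomed pre-processor: build the concept table for
# cdb_maker and extract the ICD-10 mapping from an unzipped release folder.
def _demo_snomed_preprocessing():
    snomed = Snomed('SnomedCT_InternationalRF2_PRODUCTION_20220731T120000Z', uk_ext=False)
    concept_df = snomed.to_concept_df()
    concept_df.to_csv('snomed_cdb_csv.csv', index=False)
    icd10_df = snomed.map_snomed2icd10()
    print(len(concept_df), len(icd10_df))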
ethnicity_map = {'Algerian': 'Black',
'Any Other Group': 'Other',
'Asian and Chinese': 'Asian',
'Bangladeshi': 'Asian',
'Black African': 'Black',
'Black British': 'Black',
'British': 'White',
'Caribbean': 'Black',
'Chinese': 'Asian',
'Cypriot (Part nt st)': 'White',
'Ecuadorian': 'Other',
'English': 'White',
'Ethiopian': 'Black',
'Filipino': 'Asian',
'Ghanaian': 'Black',
'Greek Cypriot': 'White',
'Indian/British India': 'Asian',
'Iranian': 'Other',
'Italian': 'White',
'Mixed Black': 'Black',
'Mixed Caribbean': 'Black',
'Nigerian': 'Black',
'Not Given': 'Unknown',
'Not Specified': 'Unknown',
'Not Stated': 'Unknown',
'OTHER ASIAN BACKGROU': 'Asian',
'Other Asian Unspecif': 'Asian',
'OTHER BLACK BACKGROU': 'Black',
'Other Black Unspecif': 'Black',
'Other Ethnic Group': 'Other',
'Other Latin American': 'Other',
'OTHER WHITE BACK GRO': 'White',
'Other White Unspecif': 'White',
'Other White/Mixed Eu': 'White',
'Pakistani/British Pa': 'Asian',
'Portuguese': 'White',
'Somali': 'Black',
'Spanish': 'White',
'Sri Lankan': 'Asian',
'Sudanese': 'Black',
'Turkish': 'Other',
'Ugandan': 'Black',
'Vietnamese': 'Asian',
'White Irish': 'White',
'Former USSR Rep': 'White',
'POLISH': 'White',
'Iraqi': 'Other',
'Albanian': 'Other',
'Columbian': 'Other',
'Scottish': 'White',
'Not stated': 'Unknown',
'OTHER MIXED BACKGROU': 'Mixed',
'Welsh': 'White',
'British Asian': 'Asian',
'Caribbean Asian': 'Asian',
'Eritrean': 'Black',
'Turkish Cypriot': 'Other',
'Sinhalese': 'Asian',
'White and Asian': 'Asian',
'Other Mixed': 'Mixed',
'Mixed Asian': 'Asian',
'Greek': 'White',
'Arab': 'Other',
'MULTIPLE CODES': 'MULTIPLE CODES',
'Irish': 'White',
'Japanese': 'Asian',
'Middle East': 'Other',
'Croatian': 'White',
'Black and Asian': 'Mixed',
'Black and White': 'Mixed'}
# Mapped on bottom-level of 2001 NHS Data Dictionary; https://datadictionary.nhs.uk/data_elements/ethnic_category.html
ethnicity_map_detail = {'Algerian': 'Black or Black British - African',
'Any Other Group': 'Other Ethnic Groups - Any other ethnic group',
'Asian and Chinese': 'Other Ethnic Groups - Chinese',
'Bangladeshi': 'Asian or Asian British - Pakistani',
'Black African': 'Black or Black British - African',
'Black British': 'Black or Black British - Any Other Black background',
'British': 'White - British',
'Caribbean': 'Black or Black British - Caribbean',
'Chinese': 'Other Ethnic Groups - Chinese',
'Cypriot (Part nt st)': 'White - Any other White background',
'Ecuadorian': 'Other Ethnic Groups - Any other ethnic group',
'English': 'White - British',
'Ethiopian': 'Black or Black British - African',
'Filipino': 'Asian or Asian British - Any other Asian background',
'Ghanaian': 'Black or Black British - African',
'Greek Cypriot': 'White - Any other White background',
'Indian/British India': 'Asian or Asian British - Indian',
'Iranian': 'Other Ethnic Groups - Any other ethnic group',
'Italian': 'White - Any other White background',
'Mixed Black': 'Black or Black British - Any other Black background',
'Mixed Caribbean': 'Black or Black British - Caribbean',
'Nigerian': 'Black or Black British - African',
'Not Given': 'Not stated',
'Not Specified': 'Not stated',
'Not Stated': 'Not stated',
'OTHER ASIAN BACKGROU': 'Asian or Asian British - Any other Asian background',
'Other Asian Unspecif': 'Asian or Asian British - Any other Asian background',
'OTHER BLACK BACKGROU': 'Black or Black British - Any Other Black background',
'Other Black Unspecif': 'Black or Black British - Any Other Black background',
'Other Ethnic Group': 'Other Ethnic Groups - Any other ethnic group',
'Other Latin American': 'Other Ethnic Groups - Any other ethnic group',
'OTHER WHITE BACK GRO': 'White - Any other White background',
'Other White Unspecif': 'White - Any other White background',
'Other White/Mixed Eu': 'White - Any other White background',
'Pakistani/British Pa': 'Asian or Asian British - Pakistani',
'Portuguese': 'White - Any other White background',
'Somali': 'Black or Black British - African',
'Spanish': 'White - Any other White background',
'Sri Lankan': 'Asian or Asian British - Any other Asian background',
'Sudanese': 'Black or Black British - African',
'Turkish': 'Other Ethnic Groups - Any other ethnic group',
'Ugandan': 'Black or Black British - African',
'Vietnamese': 'Other Ethnic Groups - Any other ethnic group',
'White Irish': 'White - Irish',
'Former USSR Rep': 'White - Any other White background',
'POLISH': 'White - Any other White background',
'Iraqi': 'Other Ethnic Groups - Any other ethnic group',
'Albanian': 'White - Any other White background',
'Columbian': 'Other Ethnic Groups - Any other ethnic group',
'Scottish': 'White - British',
'Not stated': 'Not stated',
'OTHER MIXED BACKGROU': 'Mixed - Any other mixed background',
'Welsh': 'White - British',
'British Asian': 'Asian or Asian British - Any other Asian background',
'Caribbean Asian': 'Mixed - Any other mixed background',
'Eritrean': 'Black or Black British - African',
'Turkish Cypriot': 'Other Ethnic Groups - Any other ethnic group',
'Sinhalese': 'Asian or Asian British - Any other Asian background',
'White and Asian': 'Mixed - White and Asian',
'Other Mixed': 'Mixed - Any other mixed background',
'Mixed Asian': 'Mixed - Any other mixed background',
'Greek': 'White - Any other White background',
'Arab': 'Other Ethnic Groups - Any other ethnic group',
'MULTIPLE CODES': 'MULTIPLE CODES',
'Irish': 'White - Irish',
'Japanese': 'Other Ethnic Groups - Any other ethnic group',
'Middle East': 'Other Ethnic Groups - Any other ethnic group',
'Croatian': 'White - Any other White background',
'Black and Asian': 'Mixed - White and Asian',
'Black and White': 'Mixed - Any other mixed background'} | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/ethnicity_map.py | ethnicity_map.py |
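# --- Illustrative sketch ---
# Looking up a raw ethnicity string in both maps, with an explicit fallback for
# values that are not covered (the fallback strings here are an assumption).
def _demo_ethnicity_lookup(raw_value='Greek Cypriot'):
    broad = ethnicity_map.get(raw_value, 'Unknown')            # -> 'White'
    detail = ethnicity_map_detail.get(raw_value, 'Not stated')  # -> 'White - Any other White background'
    print(raw_value, '->', broad, '/', detail)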
import os
import logging
import time
from dataclasses import dataclass
from typing import List, Tuple, Optional, TypeVar, Type
from medcat.cdb import CDB
from medcat.utils.decorators import check_positive
T = TypeVar("T", bound="Checkpoint")
class Checkpoint(object):
r""" The base class of checkpoint objects
Args:
dir_path (str):
The path to the parent directory of checkpoint files
steps (int):
The number of processed sentences/documents before a checkpoint is saved
(N.B.: A small number could result in error "no space left on device")
max_to_keep (int):
The maximum number of checkpoints to keep
(N.B.: A large number could result in error "no space left on device")
"""
DEFAULT_STEP = 1000
DEFAULT_MAX_TO_KEEP = 1
log = logging.getLogger(__package__)
@check_positive
def __init__(self, dir_path: str, *, steps: int = DEFAULT_STEP, max_to_keep: int = DEFAULT_MAX_TO_KEEP) -> None:
self._dir_path = os.path.abspath(dir_path)
self._steps = steps
self._max_to_keep = max_to_keep
self._file_paths: List[str] = []
self._count = 0
os.makedirs(self._dir_path, exist_ok=True)
@property
def steps(self) -> int:
return self._steps
@steps.setter
def steps(self, value: int) -> None:
check_positive(lambda _: ...)(value) # [https://github.com/python/mypy/issues/1362]
self._steps = value
@property
def max_to_keep(self) -> int:
return self._max_to_keep
@max_to_keep.setter
def max_to_keep(self, value: int) -> None:
check_positive(lambda _: ...)(value) # [https://github.com/python/mypy/issues/1362]
self._max_to_keep = value
@property
def count(self) -> int:
return self._count
@property
def dir_path(self) -> str:
return self._dir_path
@classmethod
def from_latest(cls: Type[T], dir_path: str) -> T:
r'''
Retrieve the latest checkpoint from the parent directory.
Args:
dir_path (string):
The path to the directory containing checkpoint files
Returns:
A new checkpoint object
'''
if not os.path.isdir(dir_path):
raise Exception("Checkpoints not found. You need to train from scratch.")
ckpt_file_paths = cls._get_ckpt_file_paths(dir_path)
if not ckpt_file_paths:
raise Exception("Checkpoints not found. You need to train from scratch.")
latest_ckpt = ckpt_file_paths[-1]
steps, count = cls._get_steps_and_count(latest_ckpt)
checkpoint = cls(dir_path, steps=steps)
checkpoint._file_paths = ckpt_file_paths
checkpoint._count = count
cls.log.info(f"Checkpoint loaded from {latest_ckpt}")
return checkpoint
def save(self, cdb: CDB, count: int) -> None:
r'''
Save the CDB as the latest checkpoint.
Args:
cdb (medcat.CDB):
The MedCAT CDB object to be checkpointed
count (int):
The number of steps finished so far
'''
ckpt_file_path = os.path.join(os.path.abspath(self._dir_path), "checkpoint-%s-%s" % (self.steps, count))
while len(self._file_paths) >= self._max_to_keep:
to_remove = self._file_paths.pop(0)
os.remove(to_remove)
cdb.save(ckpt_file_path)
self.log.debug("Checkpoint saved: %s", ckpt_file_path)
self._file_paths.append(ckpt_file_path)
self._count = count
def restore_latest_cdb(self) -> CDB:
r'''
Restore the CDB from the latest checkpoint.
Returns:
cdb (medcat.CDB):
The MedCAT CDB object
'''
if not os.path.isdir(self._dir_path):
raise Exception("Checkpoints not found. You need to train from scratch.")
ckpt_file_paths = self._get_ckpt_file_paths(self._dir_path)
if not ckpt_file_paths:
raise Exception("Checkpoints not found. You need to train from scratch.")
latest_ckpt = ckpt_file_paths[-1]
_, count = self._get_steps_and_count(latest_ckpt)
self._file_paths = ckpt_file_paths
self._count = count
return CDB.load(self._file_paths[-1])
@staticmethod
def _get_ckpt_file_paths(dir_path: str) -> List[str]:
ckpt_file_paths = [os.path.abspath(os.path.join(dir_path, f)) for f in os.listdir(dir_path)]
ckpt_file_paths = [f for f in ckpt_file_paths if os.path.isfile(f) and "checkpoint-" in f]
if ckpt_file_paths:
ckpt_file_paths.sort(key=lambda f: Checkpoint._get_steps_and_count(f)[1])
return ckpt_file_paths
@staticmethod
def _get_steps_and_count(file_path) -> Tuple[int, int]:
file_name_parts = os.path.basename(file_path).split('-')
return int(file_name_parts[1]), int(file_name_parts[2])
@dataclass
class CheckpointConfig(object):
output_dir: str = "checkpoints"
steps: int = Checkpoint.DEFAULT_STEP
max_to_keep: int = Checkpoint.DEFAULT_MAX_TO_KEEP
class CheckpointManager(object):
r"""
The class for managing checkpoints of specific training type and their configuration
Args:
name (str):
The name of the checkpoint manager (also used as the checkpoint base directory name).
checkpoint_config (medcat.utils.checkpoint.CheckpointConfig):
The checkpoint config object.
"""
def __init__(self, name: str, checkpoint_config: CheckpointConfig) -> None:
self.name = name
self.checkpoint_config = checkpoint_config
def create_checkpoint(self, dir_path: Optional[str] = None) -> "Checkpoint":
r'''
Create a new checkpoint inside the checkpoint base directory.
Args:
dir_path (str):
The path to the checkpoint directory
Returns:
A checkpoint object
'''
dir_path = dir_path or os.path.join(os.path.abspath(os.getcwd()), self.checkpoint_config.output_dir, self.name, str(int(time.time())))
return Checkpoint(dir_path,
steps=self.checkpoint_config.steps,
max_to_keep=self.checkpoint_config.max_to_keep)
def get_latest_checkpoint(self, base_dir_path: Optional[str] = None) -> "Checkpoint":
r'''
Retrieve the latest checkpoint from the checkpoint base directory.
Args:
base_dir_path (string):
The path to the directory containing checkpoint files
Returns:
A checkpoint object
'''
base_dir_path = base_dir_path or os.path.join(os.path.abspath(os.getcwd()), self.checkpoint_config.output_dir, self.name)
ckpt_dir_path = self.get_latest_training_dir(base_dir_path=base_dir_path)
checkpoint = Checkpoint.from_latest(dir_path=ckpt_dir_path)
checkpoint.steps = self.checkpoint_config.steps
checkpoint.max_to_keep = self.checkpoint_config.max_to_keep
return checkpoint
@classmethod
def get_latest_training_dir(cls, base_dir_path: str) -> str:
r'''
Retrieve the latest training directory containing all checkpoints.
Args:
base_dir_path (string):
The path to the directory containing all checkpointed trainings
Returns:
The path to the latest training directory containing all checkpoints.
'''
if not os.path.isdir(base_dir_path):
raise ValueError(f"Checkpoint folder passed in does not exist: {base_dir_path}")
ckpt_dir_paths = os.listdir(base_dir_path)
if not ckpt_dir_paths:
raise ValueError("No existing training found")
ckpt_dir_paths.sort()
ckpt_dir_path = os.path.abspath(os.path.join(base_dir_path, ckpt_dir_paths[-1]))
return ckpt_dir_path | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/checkpoint.py | checkpoint.py |
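# --- Illustrative sketch (hypothetical directory and CDB) ---
# Checkpointing an unsupervised training run every 500 documents and resuming
# later from the newest snapshot. `cdb` is assumed to be a medcat.cdb.CDB.
def _demo_checkpointing(cdb):
    ckpt_config = CheckpointConfig(output_dir='checkpoints', steps=500, max_to_keep=2)
    ckpt_manager = CheckpointManager('cat_train', ckpt_config)
    checkpoint = ckpt_manager.create_checkpoint()
    for count in range(500, 2001, 500):   # stand-in for a real training loop
        checkpoint.save(cdb, count=count)
    latest = ckpt_manager.get_latest_checkpoint()
    return latest.restore_latest_cdb()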
import html
from medcat.cdb import CDB
from medcat.preprocessing.cleaners import clean_name
from medcat.utils.other import TPL_ENT, TPL_ENTS
def get_important_config_parameters(config):
cnf = {
"config.ner['min_name_len']": {
'value': config.ner['min_name_len'],
'description': "Minimum detection length (found terms/mentions shorter than this will not be detected)."
},
"config.ner['upper_case_limit_len']": {
'value': config.ner['upper_case_limit_len'],
'description': "All detected terms shorter than this value have to be uppercase, otherwise they will be ignored."
},
"config.linking['similarity_threshold']": {
'value': config.linking['similarity_threshold'],
'description': "If the confidence of the model is lower than this a detection will be ignore."
},
"config.general['spell_check']": {
'value': config.general['spell_check'],
'description': "Is spell checking enabled."
},
"config.general['spell_check_len_limit']": {
'value': config.general['spell_check_len_limit'],
'description': "Words shorter than this will not be spell checked."
},
}
return cnf
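# --- Illustrative sketch (not part of the original module) ---
# Printing the parameters above for a fresh default config; in practice the
# config would come from a loaded model pack.
def _demo_print_important_parameters():
    from medcat.config import Config
    for name, info in get_important_config_parameters(Config()).items():
        print(f"{name} = {info['value']}  # {info['description']}")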
def to_json_simple(docs, cdb):
"""
output: [{'text': <text>, 'entities': [<start,end,type>, ]}]
"""
d = []
for doc in docs:
d.append({'text': doc.text, 'entities': [(e.start_char, e.end_char, cdb.tui2name[cdb.cui2tui[e.label_]]) for e in doc._.ents]})
return d
def to_json_sumithra(docs, cdb):
"""
output: [
[ text, {'entities': [<start,end,type>, ]} ],
...]
"""
d = []
for doc in docs:
d.append([doc.text, {'entities': [(e.start_char, e.end_char, cdb.tui2name[cdb.cui2tui[e.label_]]) for e in doc._.ents]}])
return d
def doc2html(doc):
markup = ""
offset = 0
text = doc.text
for span in list(doc.ents):
start = span.start_char
end = span.end_char
fragments = text[offset:start].split("\n")
for i, fragment in enumerate(fragments):
markup += html.escape(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
ent = {'label': '', 'id': span._.id, 'bg': "rgb(74, 154, 239, {})".format(span._.context_similarity * span._.context_similarity + 0.12), 'text': html.escape(span.text)}
# Add the entity
markup += TPL_ENT.format(**ent)
offset = end
markup += html.escape(text[offset:])
out = TPL_ENTS.format(content=markup, dir='ltr')
return out
def json2html(doc):
markup = ""
offset = 0
text = doc['text']
for span in list(doc['entities']):
start = span['start']
end = span['end']
fragments = text[offset:start].split("\n")
for i, fragment in enumerate(fragments):
markup += html.escape(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
# Assuming the JSON entity dict carries a 'context_similarity' field (a plain dict has no spaCy `._.` extension attributes)
ent = {'label': '', 'id': span['id'], 'bg': "rgb(74, 154, 239, {})".format(span['context_similarity'] * span['context_similarity'] + 0.12), 'text': html.escape(span['str'])}
# Add the entity
markup += TPL_ENT.format(**ent)
offset = end
markup += html.escape(text[offset:])
out = TPL_ENTS.format(content=markup, dir='ltr')
return out
def prepare_name(cat, name, version='CLEAN'):
""" Cleans up the name
"""
name = clean_name(name)
if version.lower() == 'clean':
sc_name = cat(name)
tokens = [str(t.lemma_).lower() for t in sc_name if not t._.is_punct
and not t._.to_skip]
if version.lower() == 'raw':
sc_name = cat(name)
tokens = [t.lower_ for t in sc_name if not t._.is_punct
and not (t._.to_skip and not t.is_stop)]
if version.lower() == 'none':
sc_name = cat(name)
tokens = [t.lower_ for t in sc_name]
# Join everything and return name
name = "".join(tokens)
return name, tokens
def get_all_from_name(name, nlp, source_value, SEP="", version='clean'):
sc_name = nlp(source_value)
name, tokens = prepare_name(nlp, name=name, version=version)
tokens_vocab = [t.lower_ for t in sc_name if not t._.is_punct]
snames = []
sname = ""
for token in tokens:
sname = sname + token + SEP
snames.append(sname.strip())
return name, tokens, snames, tokens_vocab
def tkn_inds_from_doc(spacy_doc, text_inds=None, source_val=None):
tkn_inds = None
start = None
end = None
if text_inds is None and source_val in spacy_doc.text:
start = spacy_doc.text.index(source_val)
end = start + len(source_val)
elif text_inds is not None:
start = text_inds[0]
end = text_inds[1]
if start is not None:
tkn_inds = []
for tkn in spacy_doc:
if tkn.idx >= start and tkn.idx <= end:
tkn_inds.append(tkn.i)
return tkn_inds
def tkns_from_doc(spacy_doc, start, end):
tkns = []
for tkn in spacy_doc:
if tkn.idx >= start and tkn.idx <= end:
tkns.append(tkn)
return tkns
def filter_cdb_by_icd10(cdb: CDB) -> CDB:
"""
Filters an existing CDB to only contain concepts that have an associated ICD-10 code.
Can be used for SNOMED or UMLS CDBs.
:return: filtered CDB
"""
cuis_to_keep = [cui for cui in cdb.cui2names.keys() if 'icd10' in cdb.cui2info[cui]]
cdb.filter_by_cui(cuis_to_keep)
return cdb
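# --- Illustrative sketch (hypothetical path) ---
# Loading a CDB from disk and keeping only ICD-10-mapped concepts.
def _demo_filter_cdb(cdb_path='cdb.dat'):
    cdb = CDB.load(cdb_path)
    return filter_cdb_by_icd10(cdb)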
def umls_to_icd10cm(cdb, csv_path):
import pandas as pd
df = pd.read_csv(csv_path)
for _, row in df.iterrows():
try:
cuis = str(row['CUI']).split("|")
chapter = row['Class ID'].split('/')[-1]
name = row['Preferred Label']
for cui in cuis:
if cui is not None and cui in cdb.cui2names:
icd10 = {'chapter': chapter, 'name': name}
if 'icd10' in cdb.cui2info[cui]:
# Check if the chapter is already in
isin = False
for tmp in cdb.cui2info[cui]['icd10']:
if tmp['chapter'] == chapter:
isin = True
if not isin:
cdb.cui2info[cui]['icd10'].append(icd10)
else:
cdb.cui2info[cui]["icd10"] = [icd10]
except Exception:
print(row["CUI"])
def umls_to_icd10_over_snomed(cdb, pickle_path):
import pickle
u2i = pickle.load(open(pickle_path, 'rb'))
for cui in u2i.keys():
if cui in cdb.cui2names:
if cui not in cdb.cui2info:
cdb.cui2info[cui] = {}
for icd10 in u2i[cui]:
if 'icd10' in cdb.cui2info[cui]:
# If it exists skip it
pass
else:
print(cui, icd10)
cdb.cui2info[cui]['icd10'] = [icd10]
else:
pass
def umls_to_icd10_ext(cdb, pickle_path):
import pickle
u2i = pickle.load(open(pickle_path, 'rb'))
for cui in u2i.keys():
if cui in cdb.cui2names:
if cui in cdb.cui2info and 'icd10' not in cdb.cui2info[cui]:
icd10 = u2i[cui]
print(cui, icd10)
cdb.cui2info[cui]['icd10'] = [icd10]
def umls_to_icd10(cdb, csv_path):
import pandas as pd
df = pd.read_csv(csv_path)
for _, row in df.iterrows():
try:
cui = str(row['cui'])
chapter = row['chapter']
name = row['name']
if cui is not None and cui in cdb.cui2names:
icd10 = {'chapter': chapter, 'name': name}
if 'icd10' in cdb.cui2info[cui]:
# Check if the chapter is already in
isin = False
for tmp in cdb.cui2info[cui]['icd10']:
if tmp['chapter'] == chapter:
isin = True
if not isin:
cdb.cui2info[cui]['icd10'].append(icd10)
else:
cdb.cui2info[cui]["icd10"] = [icd10]
except Exception:
print(row["CUI"])
def umls_to_snomed(cdb, pickle_path):
""" Map UMLS CDB to SNOMED concepts
"""
import pickle
data = pickle.load(open(pickle_path, 'rb'))
for key in data.keys():
cui = str(key)
for snomed_cui in data[key]:
if "S-" in str(snomed_cui):
snomed_cui = str(snomed_cui)
else:
snomed_cui = "S-" + str(snomed_cui)
if key in cdb.cui2info:
if 'snomed' in cdb.cui2info[key]:
cdb.cui2info[cui]['snomed'].append(snomed_cui)
else:
cdb.cui2info[cui]['snomed'] = [snomed_cui]
def snomed_to_umls(cdb, pickle_path):
""" Map SNOMED CDB to UMLS concepts
"""
import pickle
data = pickle.load(open(pickle_path, 'rb'))
for key in data.keys():
for umls_cui in data[key]:
# Add S if it is not there
if "S-" in str(key):
cui = key
else:
cui = "S-" + str(key)
if cui in cdb.cui2info:
if 'umls' in cdb.cui2info[cui]:
cdb.cui2info[cui]['umls'].append(umls_cui)
else:
cdb.cui2info[cui]['umls'] = [umls_cui]
def snomed_to_icd10(cdb, csv_path):
""" Add map from cui to icd10 for concepts
"""
import pandas as pd
df = pd.read_csv(csv_path)
for _, row in df.iterrows():
icd = str(row['icd10'])
name = str(row['name'])
if "S-" in str(row['cui']):
cui = str(row['cui'])
else:
cui = "S-" + str(row['cui'])
if cui in cdb.cui2names and icd is not None and icd != 'nan' and len(icd) > 0:
icd = {'chapter': icd, 'name': name}
if 'icd10' in cdb.cui2info[cui]:
cdb.cui2info[cui]['icd10'].append(icd)
else:
cdb.cui2info[cui]['icd10'] = [icd]
def snomed_to_desc(cdb, csv_path):
""" Add descriptions to the concepts
"""
import pandas as pd
df = pd.read_csv(csv_path)
for _, row in df.iterrows():
desc = row['desc']
if "S-" in str(row['cui']):
cui = str(row['cui'])
else:
cui = "S-" + str(row['cui'])
# Check do we have this concept at all
if cui in cdb.cui2names:
# If yes add description
if cui not in cdb.cui2desc:
cdb.cui2desc[cui] = str(desc)
elif str(desc) not in str(cdb.cui2desc[cui]):
cdb.cui2desc[cui] = str(cdb.cui2desc[cui]) + "\n\n" + str(desc)
def filter_only_icd10(doc, cat):
ents = []
for ent in doc._.ents:
if 'icd10' in cat.cdb.cui2info.get(ent._.cui, {}):
ents.append(ent)
doc._.ents = ents
doc.ents = []
cat.spacy_cat._create_main_ann(doc)
def add_names_icd10(csv_path, cat):
import pandas as pd
df = pd.read_csv(csv_path)
for index, row in df.iterrows():
try:
cui = str(row['cui'])
name = row['name']
cat.add_name(cui, name, is_pref_name=False, only_new=True)
except Exception:
print(row["cui"])
if index % 1000 == 0:
print(index)
def add_names_icd10cm(cdb, csv_path, cat):
import pandas as pd
df = pd.read_csv(csv_path)
for index, row in df.iterrows():
try:
cuis = str(row['CUI']).split("|")
name = row['Preferred Label']
for cui in cuis:
bl = len(cdb.cui2names.get(cui, []))
cat.add_name(cui, name, is_pref_name=False, only_new=True)
if bl != len(cdb.cui2names.get(cui, [])):
print(name, cui)
except Exception as e:
print(e)
break
if index % 1000 == 0:
print(index)
def remove_icd10_ranges(cdb):
for cui in cdb.cui2info:
if 'icd10' in cdb.cui2info[cui]:
new_icd = []
for icd in list(cdb.cui2info[cui]['icd10']):
if '-' not in icd['chapter']:
new_icd.append(icd)
if len(new_icd) > 0:
cdb.cui2info[cui]['icd10'] = new_icd
else:
del cdb.cui2info[cui]['icd10']
def dep_check_scispacy():
# IGNORE FUNCTION
import spacy
import subprocess
import sys
try:
_ = spacy.load("en_core_sci_md")
except Exception:
print("Installing the missing models for scispacy\n")
pkg = 'https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_md-0.4.0.tar.gz'
subprocess.check_call([sys.executable, '-m', 'pip', 'install', pkg])
def run_cv(cdb_path, data_path, vocab_path, cv=100, nepochs=16, test_size=0.1, lr=1, groups=None, **kwargs):
from medcat.cat import CAT
from medcat.utils.vocab import Vocab
import json
use_groups = False
if groups is not None:
use_groups = True
f1s = {}
ps = {}
rs = {}
tps = {}
fns = {}
fps = {}
cui_counts = {}
examples = {}
for _ in range(cv):
cdb = CDB()
cdb.load_dict(cdb_path)
vocab = Vocab()
vocab.load_dict(path=vocab_path)
# This does not conform to the latest API which requires config
cat = CAT(cdb, vocab=vocab)
cat.train = False
cat.spacy_cat.MIN_ACC = 0.30
cat.spacy_cat.MIN_ACC_TH = 0.30
# Add groups if they exist
if groups is not None:
for cui in cdb.cui2info.keys():
if "group" in cdb.cui2info[cui]:
del cdb.cui2info[cui]['group']
groups = json.load(open("./groups.json"))
for k,v in groups.items():
for val in v:
cat.add_cui_to_group(val, k)
# cat.train_supervised does not accept lr
fp, fn, tp, p, r, f1, cui_counts, examples = cat.train_supervised(data_path=data_path,
lr=1, test_size=test_size, use_groups=use_groups, nepochs=nepochs, **kwargs)
for key in f1.keys():
if key in f1s:
f1s[key].append(f1[key])
else:
f1s[key] = [f1[key]]
if key in ps:
ps[key].append(p[key])
else:
ps[key] = [p[key]]
if key in rs:
rs[key].append(r[key])
else:
rs[key] = [r[key]]
if key in tps:
tps[key].append(tp.get(key, 0))
else:
tps[key] = [tp.get(key, 0)]
if key in fps:
fps[key].append(fp.get(key, 0))
else:
fps[key] = [fp.get(key, 0)]
if key in fns:
fns[key].append(fn.get(key, 0))
else:
fns[key] = [fn.get(key, 0)]
return fps, fns, tps, ps, rs, f1s, cui_counts, examples | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/helpers.py | helpers.py |
import logging
from pathlib import Path
from gensim.models import Word2Vec
from medcat.vocab import Vocab
from medcat.pipe import Pipe
from medcat.preprocessing.tokenizers import spacy_split_all
from medcat.preprocessing.iterators import SimpleIter
from medcat.preprocessing.taggers import tag_skip_and_punct
class MakeVocab(object):
r'''
Create a new vocab from a text file.
Args:
config (medcat.config.Config):
Global configuration for medcat
cdb (medcat.cdb.CDB):
The concept database whose vocabulary will be added on top of the Vocab built from the text file.
vocab (medcat.vocab.Vocab, optional):
Vocabulary to be extended, leave as None if you want to make a new Vocab. Default: None
word_tokenizer (<function>):
A custom tokenizer for word splitting - used if embeddings are BERT or similar.
Default: None
Examples:
To make a vocab and train word embeddings.
>>> cdb = <your existing cdb>
>>> maker = MakeVocab(cdb=cdb, config=config)
>>> maker.make(data_iterator, out_folder="./output/")
>>> maker.add_vectors(in_path="./output/data.txt")
'''
log = logging.getLogger(__name__)
def __init__(self, config, cdb=None, vocab=None, word_tokenizer=None):
self.cdb = cdb
self.config = config
self.w2v = None
if vocab is not None:
self.vocab = vocab
else:
self.vocab = Vocab()
# Build the required spacy pipeline
self.pipe = Pipe(tokenizer=spacy_split_all, config=config)
self.pipe.add_tagger(tagger=tag_skip_and_punct,
name='skip_and_punct',
additional_fields=['is_punct'])
# Get the tokenizer
if word_tokenizer is not None:
self.tokenizer = word_tokenizer
else:
self.tokenizer = self._tok
# Used for saving if the real path is not set
self.vocab_path = "./tmp_vocab.dat"
def _tok(self, text):
return [text]
def make(self, iter_data, out_folder, join_cdb=True, normalize_tokens=False):
r'''
Make a vocab - without vectors initially. This will create two files in the out_folder:
- vocab.dat -> The vocabulary without vectors
- data.txt -> The tokenized dataset prepared for training of word2vec or similar embeddings.
Args:
iter_data (Iterator):
An iterator over sentences or documents. Can also be a simple array of text documents/sentences.
out_folder (string):
A path to a folder where all the results will be saved
join_cdb (bool):
Should the words from the CDB be added to the Vocab. Default: True
normalize_tokens (bool, defaults to False):
If set, tokens will be lemmatized - tends to work better in some cases where the difference
between e.g. plural/singular should be ignored. But in general not so important if the dataset is big enough.
'''
# Save the preprocessed data, used for emb training
out_path = Path(out_folder) / "data.txt"
vocab_path = Path(out_folder) / "vocab.dat"
self.vocab_path = vocab_path
out = open(out_path, 'w', encoding='utf-8')
for ind, doc in enumerate(iter_data):
if ind % 10000 == 0:
self.log.info("Vocab builder at: %s", str(ind))
print(ind)
doc = self.pipe.spacy_nlp.tokenizer(doc)
line = ""
for token in doc:
if token.is_space or token.is_punct:
continue
if len(token.lower_) > 0:
if normalize_tokens:
self.vocab.inc_or_add(token._.norm)
else:
self.vocab.inc_or_add(token.lower_)
if normalize_tokens:
line = line + " " + "_".join(token._.norm.split(" "))
else:
line = line + " " + "_".join(token.lower_.split(" "))
out.write(line.strip())
out.write("\n")
out.close()
if join_cdb and self.cdb:
for word in self.cdb.vocab.keys():
if word not in self.vocab:
self.vocab.add_word(word)
else:
# Update the count with the counts from the new dataset
self.cdb.vocab[word] += self.vocab[word]
# Save the vocab also
self.vocab.save(path=self.vocab_path)
def add_vectors(self, in_path=None, w2v=None, overwrite=False, data_iter=None, workers=14, epochs=2, min_count=10, window=10, vector_size=300,
unigram_table_size=100000000):
r'''
Add vectors to an existing vocabulary and save changes to the vocab_path.
Args:
in_path (str):
Path to the data.txt that was created by the MakeVocab.make() function.
w2v (Word2Vec, optional):
An existing word2vec instance. Default: None
overwrite (bool):
If True it will overwrite existing vectors in the vocabulary. Default: False
data_iter (iterator):
If you want to provide a customer iterator over the data use this. If yes, then in_path is not needed.
**: Word2Vec arguments
Returns:
A trained word2vec model.
'''
if w2v is None:
if data_iter is None:
data = SimpleIter(in_path)
else:
data = data_iter
w2v = Word2Vec(data, window=window, min_count=min_count, workers=workers, vector_size=vector_size, epochs=epochs)
for word in w2v.wv.key_to_index.keys():
if word in self.vocab:
if overwrite:
self.vocab.add_vec(word, w2v.wv.get_vector(word))
else:
if self.vocab.vec(word) is None:
self.vocab.add_vec(word, w2v.wv.get_vector(word))
# Save the vocab again, now with vectors
self.vocab.make_unigram_table(table_size=unigram_table_size)
self.vocab.save(path=self.vocab_path)
return w2v
def destroy_pipe(self):
self.pipe.destroy() | zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/make_vocab.py | make_vocab.py |
import pandas as pd
from medcat.config import Config
from medcat.cat import CAT
from medcat.cdb_maker import CDBMaker
class RepairCDB(object):
def __init__(self, base_cdb, final_cdb, vocab):
self.base_cdb = base_cdb
self.vocab = vocab
self.final_cdb = final_cdb
self.final_cat = None
self.cdb = None
self.cat = None
self.base_cat = None
def prepare(self, cuis):
self.base_cdb.filter_by_cui(cuis)
csv = [['cui', 'name']]
names = set()
cui = 0
for base_cui in self.base_cdb.cui2names:
if self.base_cdb.cui2context_vectors.get(base_cui, {}):
for name in self.base_cdb.cui2names[base_cui]:
if name not in names and name in self.base_cdb.name2cuis:
csv.append([cui, name.replace("~", " ")])
cui += 1
names.add(name)
df_cdb = pd.DataFrame(csv[1:], columns=csv[0])
df_cdb.to_csv("/tmp/data.csv", index=False)
config = Config()
cdb_maker = CDBMaker(config=config)
cdb = cdb_maker.prepare_csvs(['/tmp/data.csv'])
# Remove ambiguous names (keep the CUI with the most names)
for name in cdb.name2cuis:
cuis = cdb.name2cuis[name]
if len(cuis) > 1:
cnts = []
for cui in cuis:
cnts.append([cui, len(cdb.cui2names[cui])])
cnts.sort(key=lambda x: x[1])
cdb.name2cuis[name] = [cnts[-1][0]]
self.cdb = cdb
self.base_cdb.reset_cui_count(n=10)
self.cat = CAT(cdb=self.cdb, config=self.cdb.config, vocab=self.vocab)
self.base_cat = CAT(cdb=self.base_cdb, config=self.base_cdb.config, vocab=self.vocab)
def train(self, data_iterator, n_docs=100000):
docs = []
for doc in data_iterator:
docs.append(doc)
if len(docs) >= n_docs:
break
self.cat.train(data_iterator=docs)
self.base_cat.train(data_iterator=docs)
def calculate_scores(self, count_limit=1000):
data = [['new_cui', 'base_cui', 'name', 'new_count', 'base_count', 'score', 'decision']]
for name, cuis2 in self.cdb.name2cuis.items():
cui2 = cuis2[0]
count2 = self.cdb.cui2count_train.get(cui2, 0)
if count2 > count_limit:
cuis = self.base_cdb.name2cuis.get(name, [])
for cui in cuis:
count = self.base_cdb.cui2count_train.get(cui, 0)
if self.base_cdb.cui2context_vectors.get(cui, {}):
score = count2 / count
data.append([cui2, cui, name, count2, count, score, ''])
self.scores_df = pd.DataFrame(data[1:], columns=data[0])
def unlink_names(self, sort='score', skip=0, cui_filter=None, apply_existing_decisions=0):
scores_df = self.scores_df.sort_values(sort, ascending=False)
self.final_cdb.config.general['full_unlink'] = False
if self.final_cat is None:
self.final_cat = CAT(cdb=self.final_cdb, config=self.final_cdb.config, vocab=self.vocab)
for ind, row in enumerate(scores_df.iterrows()):
row_ind, row = row
if ind < skip:
continue
name = row['name']
base_cui = row['base_cui']
new_cui = row['new_cui']
base_count = row['base_count']
new_count = row['new_count']
cui = row['base_cui']
if cui_filter is None or base_cui in cui_filter:  # a missing filter means consider everything
print("{:3} -- {:20} -> {:20}, base_count: {}, new_count: {}, cui: {}".format(
ind, str(name)[:20], str(self.final_cdb.get_name(base_cui))[:30], base_count, new_count, cui))
if apply_existing_decisions and apply_existing_decisions > ind:
decision = row['decision']
else:
decision = input("Decision (l/...): ")
if decision == 'l':
names = self.cdb.cui2names[new_cui]
print("Unlinking: " + str(names))
print("\n\n")
for name in names:
self.final_cat.unlink_concept_name(base_cui, name, preprocessed_name=True)
elif decision == 'f':
if cui_filter and base_cui in cui_filter:
print("Removing from filter: " + str(base_cui))
print("\n\n")
cui_filter.remove(base_cui)
else:
decision = 'k' # Means keep
                self.scores_df.iat[row_ind, 6] = decision
| zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/repair_cdb.py | repair_cdb.py |
import regex
import logging
def normalize_date(date, id_, start, end):
""" Normalizes different dates encountered in the clinical notes.
Current accepted formats:
        28 Feb 2013 04:50
Thu 28 Feb 2013 04:50
28-Feb-2013 04:50
Output:
28 Feb 2013 04:50
"""
if '-' in date:
date = date.replace("-", " ").strip()
elif date.strip()[0].isalpha():
date = date[date.index(' '):].strip()
elif date.strip()[0].isnumeric():
# all good
date = date.strip()
else:
logging.warning("Unsupported date format: %s for id: %s with start: %s, end: %s", date, id_, start, end)
return None
return date
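# Usage sketch: a minimal, hypothetical exercise of normalize_date on the
# formats listed in its docstring; the note id and start/end offsets are
# placeholders used only for log messages.
def _normalize_date_example():
    assert normalize_date('28-Feb-2013 04:50', 'note-1', 0, 17) == '28 Feb 2013 04:50'
    assert normalize_date('Thu 28 Feb 2013 04:50', 'note-1', 0, 21) == '28 Feb 2013 04:50'
    assert normalize_date('28 Feb 2013 04:50', 'note-1', 0, 17) == '28 Feb 2013 04:50'
    assert normalize_date('?? unknown ??', 'note-1', 0, 13) is None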
def split_one_note(id_, text):
""" Splits the text of one note by date.
Return:
split_note (List[Dict]):
            Returns a list of dictionaries in the format:
                {'start': <start char of the specific note in the big one>,
                 'end': <end char of the specific note in the big one>,
                 'text': <text of the specific note>,
                 'date': <date of the specific note>}
"""
r = r'\n\w{0,5}\s*\d{1,2}(\s|-)[a-zA-Z]{3,5}(\s|-)\d{4}\s+\d{2}\:\d{2}'
dates = regex.finditer(r, text)
start = 0
end = -1
split_note = []
previous_date = None
for date in dates:
if start == 0:
start = date.span()[0]
previous_date = date.captures()[0]
elif previous_date is None or date.captures()[0] != previous_date:
end = date.span()[0]
note_text = text[start:end]
if 'entered on -' in note_text.lower():
if len(regex.findall(r'entered on -', note_text)) > 1:
logging.warning("Possible problems for span with start: %s and end: %s for note with id: %s", start, end, id_)
split_note.append({'start': start, 'end': end, 'text': note_text, 'date': normalize_date(previous_date, id_, start, end)})
start = end
previous_date = date.captures()[0]
# Add the last note
if previous_date is not None and 'entered on -' in text[start:].lower():
split_note.append({'start': start, 'end': len(text), 'text': text[start:], 'date': normalize_date(previous_date, id_, start, len(text))})
else:
logging.warning("No date/entered-on detected for id: %s wth start: %s, end: %s and text:\n%s...", id_, start, end, text[0:300])
return split_note
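# Usage sketch: splits a small synthetic note; the note text, id and the
# 'entered on -' trailer are hypothetical but follow the date and trailer
# conventions the function expects.
def _split_one_note_example():
    text = ('\n28 Feb 2013 04:50\nPatient seen on the ward.\nentered on - CERNER\n'
            '\n01 Mar 2013 09:15\nReviewed overnight observations.\nentered on - CERNER\n')
    for part in split_one_note('note-1', text):
        print(part['date'], part['start'], part['end'])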
def split_clinical_notes(clinical_notes):
""" Splits clinical notes.
Args:
clinical_notes(dict):
Dictionary in the form {<clinical_note_id>: <text>, ...}
"""
split_notes = {}
for id_text, text in clinical_notes.items():
split_notes[id_text] = split_one_note(id_text, text)
    return split_notes
| zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/clinical_note_splitter.py | clinical_note_splitter.py |
from sklearn.metrics import classification_report
import numpy as np
import pandas as pd
from collections import defaultdict
from scipy.special import softmax
def metrics(p, return_df=False, plus_recall=0, tokenizer=None, dataset=None, merged_negative={0, 1, -100}, padding_label=-100, csize=15, subword_label=1,
verbose=False):
r''' TODO: This could be done better, for sure. But it works.
'''
predictions = np.array(p.predictions)
predictions = softmax(predictions, axis=2)
examples = None
if plus_recall > 0:
# Devalue 0 and 1
predictions[:, :, 0] = predictions[:, :, 0] - (predictions[:, :, 0] * plus_recall)
predictions[:, :, 1] = predictions[:, :, 1] - (predictions[:, :, 1] * plus_recall)
preds = np.argmax(predictions, axis=2)
# Ignore predictions where label == -100, padding
preds[np.where(p.label_ids == -100)] = -100
if dataset is not None and tokenizer is not None:
examples = {'fp': defaultdict(list), 'fn': defaultdict(list)}
ilbl = {v:k for k,v in tokenizer.label_map.items()}
for i in range(len(preds)):
st = None
for j in range(len(preds[i])):
_p = preds[i][j]
_l = p.label_ids[i][j]
if len(p.label_ids[i]) > (j+1) and p.label_ids[i][j+1] != padding_label:
_p2 = preds[i][j+1]
_l2 = p.label_ids[i][j+1]
else:
_p2 = None
_l2 = None
_d = dataset[i]['input_ids']
id = dataset[i]['id']
name = dataset[i]['name']
if _l not in {subword_label, padding_label}: # We ignore padding and subwords
if _l != _p:
if st is None:
st = max(0, j-csize)
_j = j
if not (_l2 is not None and _l2 == _l and _l2 != _p2 and _p2 == _p):
# We want to merge tokens if it is the same label and same prediction when recording the examples, that is why we have the if
t = tokenizer.hf_tokenizer.decode(_d[st:_j]) + "<<" + str(tokenizer.hf_tokenizer.decode(_d[_j:j+1])) + \
">>" + tokenizer.hf_tokenizer.decode(_d[j+1:j+csize])
value = str(tokenizer.hf_tokenizer.decode(_d[_j:j+1])).strip()
examples['fp'][ilbl[_p]].append(({'id': id, 'name': name, 'value': value,
'label': tokenizer.cui2name.get(ilbl[_l], ilbl[_l]), 'text': t}))
examples['fn'][ilbl[_l]].append(({'id': id, 'name': name, 'value': value,
'prediction': tokenizer.cui2name.get(ilbl[_p], ilbl[_p]), 'text': t}))
st = None
_labels = np.reshape(p.label_ids, -1)
_preds = np.reshape(preds, -1)
cr = classification_report(_labels, _preds, output_dict=True)
_cr = {}
ignore = [str(tokenizer.label_map['O']), str(tokenizer.label_map['X']), '-100']
ilabel_map = {str(v):k for k,v in tokenizer.label_map.items()}
for key in cr.keys():
if key not in ignore and key in ilabel_map:
_cr[key] = cr[key]
    # Get merged metrics, i.e. all PID labels are treated as one entity
tp = defaultdict(int)
fn = defaultdict(int)
tp_all = 0
fp_all = 0
for i, _label in enumerate(_labels):
_pred = _preds[i]
if _label in merged_negative:
if _pred in merged_negative:
tp[_label] += 1
else:
fn[_label] += 1
if _label == 0:
if _pred not in merged_negative:
fp_all += 1
else:
if _pred not in merged_negative:
tp[_label] += 1
tp_all += 1
else:
fn[_label] += 1
for key in _cr:
key = int(key)
if int(key) in tp:
_cr[str(key)]['r_merged'] = tp[key] / (tp[key] + fn.get(key, 0)) if tp[key] + fn.get(key, 0) > 0 else 0
else:
_cr[str(key)]['r_merged'] = None
data = [['cui', 'name', 'p', 'r', 'f1', 'support', 'r_merged', 'p_merged']]
for key in _cr:
cui = ilabel_map[key]
p_merged = tp_all / (tp_all + fp_all) if (tp_all + fp_all) > 0 else 0
data.append([cui, tokenizer.cui2name.get(cui, cui), _cr[key]['precision'],
_cr[key]['recall'], _cr[key]['f1-score'], _cr[key]['support'], _cr[key]['r_merged'], p_merged])
df = pd.DataFrame(data[1:], columns=data[0])
if verbose:
print(df)
if not return_df:
return {'recall': np.average(df.r.values), 'precision': np.average(df.p.values), 'f1': np.average(df.f1.values),
'recall_merged': np.average([x for x in df.r_merged.values if pd.notna(x)]),
'precison_merged': np.average([x for x in df.p_merged.values if pd.notna(x)])}
else:
        return df, examples
| zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/ner/metrics.py | metrics.py |
from typing import Dict, Optional, Tuple, Iterable, List
from medcat.tokenizers.meta_cat_tokenizers import TokenizerWrapperBase
def prepare_from_json(data: Dict,
cntx_left: int,
cntx_right: int,
tokenizer: TokenizerWrapperBase,
cui_filter: Optional[set] = None,
replace_center: Optional[str] = None,
prerequisites: Dict = {},
lowercase: bool = True) -> Dict:
""" Convert the data from a json format into a CSV-like format for training. This function is not very efficient (the one
working with spacy documents as part of the meta_cat.pipe method is much better). If your dataset is > 1M documents think
about rewriting this function - but would be strange to have more than 1M manually annotated documents.
Args:
data (`dict`):
            Loaded output of MedCATtrainer. If we have a `my_export.json` from MedCATtrainer, then data = json.load(<my_export>).
cntx_left (`int`):
Size of context to get from the left of the concept
cntx_right (`int`):
Size of context to get from the right of the concept
tokenizer (`medcat.tokenizers.meta_cat_tokenizers`):
Something to split text into tokens for the LSTM/BERT/whatever meta models.
replace_center (`str`, optional):
If not None the center word (concept) will be replaced with whatever this is.
prerequisites (`dict`, optional):
            A map of prerequisites, for example our data has two meta-annotations (experiencer, negation). Assume I want to create
a dataset for `negation` but only in those cases where `experiencer=patient`, my prerequisites would be:
{'Experiencer': 'Patient'} - Take care that the CASE has to match whatever is in the data
lowercase (`bool`, defaults to True):
Should the text be lowercased before tokenization
Returns:
out_data (`dict`):
Example: {'category_name': [('<category_value>', '<[tokens]>', '<center_token>'), ...], ...}
"""
out_data: Dict = {}
for project in data['projects']:
for document in project['documents']:
text = str(document['text'])
if lowercase:
text = text.lower()
if len(text) > 0:
doc_text = tokenizer(text)
                for ann in document.get('annotations', document.get('entities', {}).values()):  # A hack to support entities and annotations
cui = ann['cui']
skip = False
if 'meta_anns' in ann and prerequisites:
# It is possible to require certain meta_anns to exist and have a specific value
for meta_ann in prerequisites:
if meta_ann not in ann['meta_anns'] or ann['meta_anns'][meta_ann]['value'] != prerequisites[meta_ann]:
# Skip this annotation as the prerequisite is not met
skip = True
break
if not skip and (cui_filter is None or not cui_filter or cui in cui_filter):
if ann.get('validated', True) and (not ann.get('deleted', False) and not ann.get('killed', False)
and not ann.get('irrelevant', False)):
start = ann['start']
end = ann['end']
# Get the index of the center token
ind = 0
for ind, pair in enumerate(doc_text['offset_mapping']):
if start >= pair[0] and start < pair[1]:
break
_start = max(0, ind - cntx_left)
_end = min(len(doc_text['input_ids']), ind + 1 + cntx_right)
tkns = doc_text['input_ids'][_start:_end]
cpos = cntx_left + min(0, ind-cntx_left)
if replace_center is not None:
if lowercase:
replace_center = replace_center.lower()
for p_ind, pair in enumerate(doc_text['offset_mapping']):
if start >= pair[0] and start < pair[1]:
s_ind = p_ind
if end > pair[0] and end <= pair[1]:
e_ind = p_ind
ln = e_ind - s_ind
tkns = tkns[:cpos] + tokenizer(replace_center)['input_ids'] + tkns[cpos+ln+1:]
# Backward compatibility if meta_anns is a list vs dict in the new approach
meta_anns = []
if 'meta_anns' in ann:
meta_anns = ann['meta_anns'].values() if type(ann['meta_anns']) == dict else ann['meta_anns']
# If the annotation is validated
for meta_ann in meta_anns:
name = meta_ann['name']
value = meta_ann['value']
sample = [tkns, cpos, value]
if name in out_data:
out_data[name].append(sample)
else:
out_data[name] = [sample]
return out_data
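# Usage sketch: a hypothetical call on a MedCATtrainer export; the path
# 'my_export.json', the context sizes and the concrete TokenizerWrapperBase
# implementation (built elsewhere) are all assumptions.
def _prepare_from_json_example(tokenizer: TokenizerWrapperBase) -> Dict:
    import json
    with open('my_export.json') as f:
        data = json.load(f)
    return prepare_from_json(data, cntx_left=15, cntx_right=10,
                             tokenizer=tokenizer, lowercase=True)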
def encode_category_values(data: Dict, existing_category_value2id: Optional[Dict] = None) -> Tuple:
    r''' Converts the category values in the data output by `prepare_from_json`
    into integer values.
Args:
        data (`list`):
            Output of `prepare_from_json` for a single category, i.e. a list of
            [tokens, cpos, category_value] samples.
existing_category_value2id(`dict`, optional):
Map from category_value to id (old/existing)
Returns:
        data (`list`):
            New data with integers in place of strings for category values.
category_value2id (`dict`):
            Map from category value to ID for all categories in the data.
'''
data = list(data)
if existing_category_value2id is not None:
category_value2id = existing_category_value2id
else:
category_value2id = {}
category_values = set([x[2] for x in data])
for c in category_values:
if c not in category_value2id:
category_value2id[c] = len(category_value2id)
# Map values to numbers
for i in range(len(data)):
data[i][2] = category_value2id[data[i][2]]
return data, category_value2id
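# Usage sketch: encode one (hypothetical) category taken from the output of
# prepare_from_json; the category name 'Negation' is a placeholder.
def _encode_category_values_example(out_data: Dict) -> Tuple:
    samples = out_data['Negation']  # [[input_ids, cpos, value], ...]
    samples, value2id = encode_category_values(samples)
    # value2id now maps each category value string to an integer id
    return samples, value2id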
def json_to_fake_spacy(data: Dict, id2text: Dict) -> Iterable:
r''' Creates a generator of fake spacy documents, used for running
meta_cat pipe separately from main cat pipeline.
Args:
data(`dict`):
            Output from cat formatted as: {<id>: <output of get_entities>, ...}
        id2text(`dict`):
            Map from document id to the text of that document
Returns:
generator:
Generator of spacy like documents that can be feed into meta_cat.pipe
'''
for id_ in data.keys():
ents = data[id_]['entities'].values()
doc = Doc(text=id2text[id_], id_=id_)
doc.ents.extend([Span(ent['start'], ent['end'], ent['id']) for ent in ents])
yield doc
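# Usage sketch: wrap a toy get_entities-style result (hypothetical ids,
# offsets and text) into the fake spacy documents consumed by meta_cat.pipe.
def _json_to_fake_spacy_example():
    data = {'doc1': {'entities': {0: {'start': 0, 'end': 6, 'id': 0}}}}
    id2text = {'doc1': 'Kidney failure suspected.'}
    for doc in json_to_fake_spacy(data, id2text):
        print(doc.id, [(s.start_char, s.end_char) for s in doc.ents])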
class Empty(object):
def __init__(self) -> None:
pass
class Span(object):
def __init__(self, start_char: str, end_char: str, id_: str) -> None:
self._ = Empty()
self.start_char = start_char
self.end_char = end_char
self._.id = id_ # type: ignore
self._.meta_anns = None # type: ignore
class Doc(object):
def __init__(self, text: str, id_: str) -> None:
self._ = Empty()
self._.share_tokens = None # type: ignore
self.ents: List = []
        # We do not have overlaps at this stage
self._ents = self.ents
self.text = text
        self.id = id_
| zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/meta_cat/data_utils.py | data_utils.py |
import os
import random
import math
import torch
import numpy as np
import torch.optim as optim
from typing import List, Optional, Tuple, Any, Dict
from torch import nn
from scipy.special import softmax
from medcat.config_meta_cat import ConfigMetaCAT
from medcat.tokenizers.meta_cat_tokenizers import TokenizerWrapperBase
from sklearn.metrics import classification_report, precision_recall_fscore_support
def set_all_seeds(seed: int) -> None:
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def create_batch_piped_data(data: List, start_ind: int, end_ind: int, device: torch.device, pad_id: int) -> Tuple:
r''' Creates a batch given data and start/end that denote batch size, will also add
padding and move to the right device.
Args:
data (List[List[int], int, Optional[int]]):
Data in the format: [[<[input_ids]>, <cpos>, Optional[int]], ...], the third column is optional
and represents the output label
start_ind (`int`):
Start index of this batch
end_ind (`int`):
End index of this batch
device (`torch.device`):
Where to move the data
pad_id (`int`):
Padding index
Returns:
x ():
Same as data, but subsetted and as a tensor
        cpos ():
            Center positions for the data
        y ():
            Output labels as a tensor if present in the data, else None
'''
max_seq_len = max([len(x[0]) for x in data])
x = [x[0][0:max_seq_len] + [pad_id]*max(0, max_seq_len - len(x[0])) for x in data[start_ind:end_ind]]
cpos = [x[1] for x in data[start_ind:end_ind]]
y = None
if len(data[0]) == 3:
# Means we have the y column
y = torch.tensor([x[2] for x in data[start_ind:end_ind]], dtype=torch.long).to(device)
x = torch.tensor(x, dtype=torch.long).to(device)
cpos = torch.tensor(cpos, dtype=torch.long).to(device)
return x, cpos, y
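# Usage sketch: batch two toy rows of (input_ids, cpos, label); the pad id of
# 0 and the token ids are hypothetical values.
def _create_batch_piped_data_example():
    rows = [[[5, 6, 7], 1, 0], [[8, 9], 0, 1]]
    x, cpos, y = create_batch_piped_data(
        rows, start_ind=0, end_ind=2, device=torch.device('cpu'), pad_id=0)
    print(x.shape, cpos.tolist(), y.tolist())  # torch.Size([2, 3]) [1, 0] [0, 1]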
def predict(model: nn.Module, data: List, config: ConfigMetaCAT) -> Tuple:
r''' Predict on data used in the meta_cat.pipe
Args:
data (List[List[List[int], int]]):
Data in the format: [[<input_ids>, <cpos>], ...]
config (medcat.config_meta_cat.ConfigMetaCAT):
Configuration for this meta_cat instance.
Returns:
predictions (List[int]):
For each row of input data a prediction
confidence (List[float]):
For each prediction a confidence value
'''
pad_id = config.model['padding_idx']
batch_size = config.general['batch_size_eval']
device = config.general['device']
ignore_cpos = config.model['ignore_cpos']
model.eval()
model.to(device)
num_batches = math.ceil(len(data) / batch_size)
all_logits = []
with torch.no_grad():
for i in range(num_batches):
x, cpos, _ = create_batch_piped_data(data, i*batch_size, (i+1)*batch_size, device=device, pad_id=pad_id)
logits = model(x, cpos, ignore_cpos=ignore_cpos)
all_logits.append(logits.detach().cpu().numpy())
predictions = []
confidences = []
# Can be that there are not logits, data is empty
if all_logits:
logits = np.concatenate(all_logits, axis=0)
predictions = np.argmax(logits, axis=1)
confidences = np.max(softmax(logits, axis=1), axis=1)
return predictions, confidences
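# Usage sketch: run predict on two piped rows of (input_ids, cpos); the
# trained model and its ConfigMetaCAT are assumed to come from an existing
# MetaCAT instance, and the token ids are placeholders.
def _predict_example(model: nn.Module, config: ConfigMetaCAT) -> Tuple:
    rows = [[[5, 6, 7], 1], [[8, 9, 10, 11], 2]]
    predictions, confidences = predict(model, rows, config)
    return predictions, confidences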
def split_list_train_test(data: List, test_size: float, shuffle: bool = True) -> Tuple:
    r''' Shuffle and randomly split data
Args:
data
test_size
shuffle
'''
if shuffle:
random.shuffle(data)
test_ind = int(len(data) * test_size)
test_data = data[:test_ind]
train_data = data[test_ind:]
return train_data, test_data
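# Usage sketch: hold out 20% of some toy rows for testing (the data is
# shuffled in place); the rows themselves are placeholders.
def _split_list_train_test_example():
    rows = [[[1], 0, 0], [[2], 0, 1], [[3], 0, 0], [[4], 0, 1], [[5], 0, 0]]
    train, test = split_list_train_test(rows, test_size=0.2, shuffle=True)
    print(len(train), len(test))  # 4 1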
def print_report(epoch: int, running_loss: List, all_logits: List, y: Any, name: str = 'Train') -> None:
r''' Prints some basic stats during training
Args:
epoch
running_loss
all_logits
y
name
'''
if all_logits:
print(f'Epoch: {epoch} ' + "*"*50 + f" {name}")
print(classification_report(y, np.argmax(np.concatenate(all_logits, axis=0), axis=1)))
def train_model(model: nn.Module, data: List, config: ConfigMetaCAT, save_dir_path: Optional[str] = None) -> Dict:
    r''' Trains an LSTM model (for now) with autocheckpoints
Args:
data
config
save_dir_path
'''
# Get train/test from data
train_data, test_data = split_list_train_test(data, test_size=config.train['test_size'], shuffle=config.train['shuffle_data'])
device = torch.device(config.general['device']) # Create a torch device
class_weights = config.train['class_weights']
if class_weights is not None:
class_weights = torch.FloatTensor(class_weights).to(device)
criterion = nn.CrossEntropyLoss(weight=class_weights) # Set the criterion to Cross Entropy Loss
else:
criterion = nn.CrossEntropyLoss() # Set the criterion to Cross Entropy Loss
parameters = filter(lambda p: p.requires_grad, model.parameters())
optimizer = optim.Adam(parameters, lr=config.train['lr'])
model.to(device) # Move the model to device
batch_size = config.train['batch_size']
batch_size_eval = config.general['batch_size_eval']
pad_id = config.model['padding_idx']
nepochs = config.train['nepochs']
ignore_cpos = config.model['ignore_cpos']
num_batches = math.ceil(len(train_data) / batch_size)
num_batches_test = math.ceil(len(test_data) / batch_size_eval)
# Can be pre-calculated for the whole dataset
y_test = [x[2] for x in test_data]
y_train = [x[2] for x in train_data]
winner_report: Dict = {}
for epoch in range(nepochs):
running_loss = []
all_logits = []
model.train()
for i in range(num_batches):
            x, cpos, y = create_batch_piped_data(train_data, i*batch_size, (i+1)*batch_size, device=device, pad_id=pad_id)
            # Reset gradients accumulated from the previous batch
            optimizer.zero_grad()
logits = model(x, center_positions=cpos, ignore_cpos=ignore_cpos)
loss = criterion(logits, y)
loss.backward()
# Track loss and logits
running_loss.append(loss.item())
all_logits.append(logits.detach().cpu().numpy())
parameters = filter(lambda p: p.requires_grad, model.parameters())
nn.utils.clip_grad_norm_(parameters, 0.25)
optimizer.step()
all_logits_test = []
running_loss_test = []
model.eval()
with torch.no_grad():
for i in range(num_batches_test):
x, cpos, y = create_batch_piped_data(test_data, i*batch_size_eval, (i+1)*batch_size_eval, device=device, pad_id=pad_id)
                logits = model(x, cpos, ignore_cpos=ignore_cpos)
                loss = criterion(logits, y)
                # Track loss and logits
                running_loss_test.append(loss.item())
all_logits_test.append(logits.detach().cpu().numpy())
print_report(epoch, running_loss, all_logits, y=y_train, name='Train')
print_report(epoch, running_loss_test, all_logits_test, y=y_test, name='Test')
_report = classification_report(y_test, np.argmax(np.concatenate(all_logits_test, axis=0), axis=1), output_dict=True)
if not winner_report or _report[config.train['metric']['base']][config.train['metric']['score']] > \
winner_report['report'][config.train['metric']['base']][config.train['metric']['score']]:
report = classification_report(y_test, np.argmax(np.concatenate(all_logits_test, axis=0), axis=1), output_dict=True)
winner_report['report'] = report
winner_report['epoch'] = epoch
# Save if needed
if config.train['auto_save_model']:
if save_dir_path is None:
raise Exception(
"The `save_dir_path` argument is required if `aut_save_model` is `True` in the config")
else:
path = os.path.join(save_dir_path, 'model.dat')
torch.save(model.state_dict(), path)
print("\n##### Model saved to {} at epoch: {} and {}/{}: {} #####\n".format(path, epoch, config.train['metric']['base'],
config.train['metric']['score'], winner_report['report'][config.train['metric']['base']][config.train['metric']['score']]))
return winner_report
def eval_model(model: nn.Module, data: List, config: ConfigMetaCAT, tokenizer: TokenizerWrapperBase) -> Dict:
r''' Evaluate a trained model on the provided data
Args:
model
data
config
'''
device = torch.device(config.general['device']) # Create a torch device
batch_size_eval = config.general['batch_size_eval']
pad_id = config.model['padding_idx']
ignore_cpos = config.model['ignore_cpos']
class_weights = config.train['class_weights']
if class_weights is not None:
class_weights = torch.FloatTensor(class_weights).to(device)
criterion = nn.CrossEntropyLoss(weight=class_weights) # Set the criterion to Cross Entropy Loss
else:
criterion = nn.CrossEntropyLoss() # Set the criterion to Cross Entropy Loss
y_eval = [x[2] for x in data]
num_batches = math.ceil(len(data) / batch_size_eval)
running_loss = []
all_logits = []
model.to(device)
model.eval()
with torch.no_grad():
for i in range(num_batches):
x, cpos, y = create_batch_piped_data(data, i*batch_size_eval, (i+1)*batch_size_eval, device=device, pad_id=pad_id)
logits = model(x, cpos, ignore_cpos=ignore_cpos)
loss = criterion(logits, y)
# Track loss and logits
running_loss.append(loss.item())
all_logits.append(logits.detach().cpu().numpy())
print_report(0, running_loss, all_logits, y=y_eval, name='Eval')
score_average = config.train['score_average']
predictions = np.argmax(np.concatenate(all_logits, axis=0), axis=1)
precision, recall, f1, support = precision_recall_fscore_support(y_eval, predictions, average=score_average)
examples: Dict = {'FP': {}, 'FN': {}, 'TP': {}}
id2category_value = {v: k for k, v in config.general['category_value2id'].items()}
for i, p in enumerate(predictions):
y = id2category_value[y_eval[i]]
p = id2category_value[p]
c = data[i][1]
tkns = data[i][0]
assert tokenizer.hf_tokenizers is not None
text = tokenizer.hf_tokenizers.decode(tkns[0:c]) + " <<"+ tokenizer.hf_tokenizers.decode(tkns[c:c+1]).strip() + ">> " + \
tokenizer.hf_tokenizers.decode(tkns[c+1:])
info = "Predicted: {}, True: {}".format(p, y)
if p != y:
# We made a mistake
examples['FN'][y] = examples['FN'].get(y, []) + [(info, text)]
examples['FP'][p] = examples['FP'].get(p, []) + [(info, text)]
else:
examples['TP'][y] = examples['TP'].get(y, []) + [(info, text)]
    return {'precision': precision, 'recall': recall, 'f1': f1, 'examples': examples}
| zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/meta_cat/ml_utils.py | ml_utils.py |
import torch
from collections import OrderedDict
from typing import Optional, Any, List
from torch import nn, Tensor
from torch.nn import CrossEntropyLoss
from transformers import BertPreTrainedModel, BertModel, BertConfig
from transformers.modeling_outputs import TokenClassifierOutput
from medcat.meta_cat import ConfigMetaCAT
class LSTM(nn.Module):
def __init__(self, embeddings: Optional[Tensor], config: ConfigMetaCAT) -> None:
super(LSTM, self).__init__()
self.config = config
# Get the required sizes
vocab_size = config.general['vocab_size']
embedding_size = config.model['input_size']
# Initialize embeddings
self.embeddings = nn.Embedding(vocab_size, embedding_size, padding_idx=config.model['padding_idx'])
if embeddings is not None:
self.embeddings.load_state_dict(OrderedDict([('weight', embeddings)]))
        # Whether the embeddings are trainable is controlled by the config - IMPORTANT
        self.embeddings.weight.requires_grad = config.model['emb_grad']
        # Create the RNN cell - the hidden size is divided across the directions
self.rnn = nn.LSTM(input_size=config.model['input_size'],
hidden_size=config.model['hidden_size'] // config.model['num_directions'],
num_layers=config.model['num_layers'],
dropout=config.model['dropout'],
bidirectional=config.model['num_directions'] == 2)
self.fc1 = nn.Linear(config.model['hidden_size'], config.model['nclasses'])
self.d1 = nn.Dropout(config.model['dropout'])
def forward(self,
input_ids: torch.LongTensor,
center_positions: Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
ignore_cpos: bool = False) -> Tensor:
x = input_ids
# Get the mask from x
if attention_mask is None:
mask = x != self.config.model['padding_idx']
else:
mask = attention_mask
# Embed the input: from id -> vec
x = self.embeddings(x) # x.shape = batch_size x sequence_length x emb_size
# Tell RNN to ignore padding and set the batch_first to True
x = nn.utils.rnn.pack_padded_sequence(x, mask.sum(1).int().view(-1).cpu(), batch_first=True, enforce_sorted=False)
# Run 'x' through the RNN
x, hidden = self.rnn(x)
# Add the padding again
x, _ = torch.nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
# Get what we need
row_indices = torch.arange(0, x.size(0)).long()
# If this is True we will always take the last state and not CPOS
if ignore_cpos:
x = hidden[0]
x = x.view(self.config.model['num_layers'], self.config.model['num_directions'], -1,
self.config.model['hidden_size']//self.config.model['num_directions'])
x = x[-1, :, :, :].permute(1, 2, 0).reshape(-1, self.config.model['hidden_size'])
else:
x = x[row_indices, center_positions, :]
# Push x through the fc network and add dropout
x = self.d1(x)
x = self.fc1(x)
return x
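# Usage sketch: build the LSTM with randomly initialized embeddings and run
# one forward pass.  This assumes the default ConfigMetaCAT defines the
# config.general/config.model keys read above; the vocab size, padding id and
# token ids below are hypothetical.
def _lstm_example():
    config = ConfigMetaCAT()
    config.general['vocab_size'] = 30
    config.model['padding_idx'] = 0
    model = LSTM(embeddings=None, config=config)
    input_ids = torch.tensor([[5, 6, 7, 0, 0]], dtype=torch.long)
    center_positions = torch.tensor([1], dtype=torch.long)
    logits = model(input_ids, center_positions)
    print(logits.shape)  # (1, config.model['nclasses'])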
class BertForMetaAnnotation(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected: List[str] = [r"pooler"] # type: ignore
def __init__(self, config: BertConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights() # type: ignore
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
center_positions: Optional[Any] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> TokenClassifierOutput:
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict # type: ignore
outputs = self.bert( # type: ignore
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0] # (batch_size, sequence_length, hidden_size)
row_indices = torch.arange(0, sequence_output.size(0)).long()
sequence_output = sequence_output[row_indices, center_positions, :]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
        )
| zensols.medcat | /zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/meta_cat/models.py | models.py |
__author__ = 'Paul Landes'
from typing import List, Dict, Any, Tuple, Union
from dataclasses import dataclass, field
import logging
import re
import json
from json.decoder import JSONDecodeError
import requests
from lxml.html import fromstring
from lxml.etree import _Element as Element
from zensols.persist import Stash
from . import MedNLPError
logger = logging.getLogger(__name__)
class UTSError(MedNLPError):
"""An error thrown by wrapper of the UTS system.
"""
pass
class NoResultsError(UTSError):
"""Thrown when no results, usually for a CUI not found.
"""
pass
class AuthenticationError(UTSError):
"""Thrown when authentication fails."""
def __init__(self, api_key: str):
super().__init__(f'Authentication error using key: {api_key}')
self.api_key = api_key
@dataclass
class Authentication(object):
"""A utility class to manage the authentication with the UTS system.
"""
SERVICE = 'http://umlsks.nlm.nih.gov'
"""The service endpoint URL."""
AUTH_URI = 'https://utslogin.nlm.nih.gov'
"""The authetication service endpoint URL."""
api_key: str = field()
"""The API key used for the RESTful NIH service."""
auth_endpoint: str = field(default='/cas/v1/api-key')
"""The path of the authentication service endpoint."""
def gettgt(self):
params = {'apikey': self.api_key}
h = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain',
'User-Agent': 'python'}
r = requests.post(
self.AUTH_URI + self.auth_endpoint, data=params, headers=h)
if r.text[0] == '{':
try:
obj = json.loads(r.text)
if 'authentication_exceptions' in obj:
raise AuthenticationError(self.api_key)
except JSONDecodeError as e:
logger.warning('looks like JSON, but not decodable: ' +
f'<{r.text}>: {e}')
response: Element = fromstring(r.text)
if isinstance(response, Element):
err: str = None
if response.tag == 'p':
try:
content: str = json.loads(response.text)
if 'name' in content and 'message' in content:
name: str = content['name']
err = f"{name}: {content['message']}"
if name == 'UnauthorizedError':
raise AuthenticationError(err)
else:
raise UTSError(f'Unknown response: {content}')
except json.decoder.JSONDecodeError as e:
err = f'Appears to be an error, but can not parse: {e}'
raise UTSError(err) from e
if err is not None:
raise UTSError(err)
# extract the entire URL needed from the HTML form (action attribute)
# returned - looks similar to
# https://utslogin.nlm.nih.gov/cas/v1/tickets/TGT-36471-aYqNLN2rFIJPXKzxwdTNC5ZT7z3B3cTAKfSc5ndHQcUxeaDOLN-cas
# we make a POST call to this URL in the getst method
tgt = response.xpath('//form/@action')[0]
return tgt
def getst(self, tgt):
params = {'service': self.SERVICE}
h = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain',
'User-Agent': 'python'}
r = requests.post(tgt, data=params, headers=h)
st = r.text
return st
@dataclass
class UTSClient(object):
URI = 'https://uts-ws.nlm.nih.gov'
"""The service URL endpoint."""
REL_ID_REGEX = re.compile(r'.*CUI\/(.+)$')
"""Used to parse related CUIs in :meth:`get_related_cuis`."""
NO_RESULTS_ERR = 'No results containing all your search terms were found.'
"""Error message from UTS indicating a missing CUI."""
MISSING_VALUE = '<missing>'
"""Value to store in the stash when there is a missing CUI."""
api_key: str = field()
"""The API key used for the RESTful NIH service."""
version: str = field(default='2020AA')
"""The version of the UML we want."""
request_stash: Stash = field(default=None)
def _get_ticket(self) -> str:
"""Generate a new service ticket for each page if needed."""
if logger.isEnabledFor(logging.INFO):
logger.info(f'logging in to UTS with {self.api_key}')
auth_client = Authentication(self.api_key)
tgt = auth_client.gettgt()
return auth_client.getst(tgt)
def _parse_json(self, s: str) -> Union[Exception, Dict[str, Any]]:
try:
return json.loads(s)
except JSONDecodeError as e:
logger.debug(f'can not parse: <{s}>: {e}')
return e
def _request_remote(self, url: str, query: Dict[str, str],
expect: bool) -> Any:
query['ticket'] = self._get_ticket()
r = requests.get(url, params=query)
r.encoding = 'utf-8'
items = self._parse_json(r.text)
if isinstance(items, Exception):
raise UTSError(f'Could not parse: <{r.text}>, ' +
f'code: {r.status_code}') from items
err = items.get('error')
if err is not None and err == self.NO_RESULTS_ERR:
if not expect:
return None
raise NoResultsError(
f'Could not request {url}, query={query}: {err}')
if r.status_code != 200:
if err is None:
msg = f'response: <{r.text}>'
else:
msg = err
raise UTSError(f'Could not request {url}, query={query}: {msg}')
if err is not None:
raise UTSError(f'Could not request {url}, query={query}: {err}')
if 'result' not in items:
            raise UTSError(f'Unknown response: <{r.text}>')
return items['result']
def _request_cache(self, url: str, query: Dict[str, str],
expect: bool) -> Any:
q = '&'.join(map(lambda k: f'{k}={query[k]}', sorted(query.keys())))
key = url + '?' + q
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'key: {key}')
val = self.request_stash.load(key)
if val is None:
val = self._request_remote(url, query, expect)
if val is None:
val = self.MISSING_VALUE
self.request_stash.dump(key, val)
if val == self.MISSING_VALUE:
val = None
return val
def _request(self, url: str, query: Dict[str, str], expect: bool) -> Any:
if self.request_stash is None:
return self._request_remote(url, query, expect)
else:
return self._request_cache(url, query, expect)
def search_term(self, term: str, pages: int = 1) -> List[Dict[str, str]]:
"""Search for a string term in UMLS.
:param term: the string term to match against
:return: a list (one for each page), each with a dictionary of matching
terms that have the ``name`` of the term, the ``ui`` (CUI),
the ``uri`` of the term and the ``rootSource`` of the
                 originating system
"""
url = '{uri}/rest/search/{version}'.format(
**{'uri': self.URI, 'version': self.version})
res = []
for page_n in range(pages):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'fetching page {page_n}')
query = {'string': term,
'page_n': page_n}
res.extend(self._request(url, query, False)['results'])
return res
def get_atoms(self, cui: str, preferred: bool = True,
expect: bool = True) -> \
Union[Dict[str, str], List[Dict[str, str]]]:
"""Get the UMLS atoms of a CUI from UTS.
:param cui: the concept ID used to query
:param preferred: if ``True`` only return preferred atoms
        :return: a list of atom entries in dictionary form, or a single dict
                 if ``preferred`` is ``True``
"""
pat = '{uri}/rest/content/{version}/CUI/{cui}/atoms/'
if preferred:
pat += 'preferred/'
url = pat.format(
**{'uri': self.URI, 'version': self.version, 'cui': cui})
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'fetching atom {cui}')
return self._request(url, {}, expect)
def get_relations(self, cui: str, expect: bool = True) -> \
List[Dict[str, Any]]:
"""Get the UMLS related concepts connected to a concept by ID.
:param cui: the concept ID used to get related concepts
:return: a list of relation entries in dictionary form in the order
returned by UTS
"""
url = '{uri}/rest/content/{version}/CUI/{cui}/relations/'.format(
**{'uri': self.URI, 'version': self.version, 'cui': cui})
try:
return self._request(url, {}, expect)
except NoResultsError as e:
if expect:
raise e
def get_related_cuis(self, cui: str, expect: bool = True) -> \
List[Tuple[str, Dict[str, Any]]]:
"""Get the UMLS related concept IDs connected to a concept by ID.
:param cui: the concept ID used to get related concepts
:return: a list of tuples, each the related CUIs and the relation
entry, in the order returned by UTS
"""
rel_ids = []
relations = self.get_relations(cui, expect)
if relations is None:
if logger.isEnabledFor(logging.INFO):
logger.info(f'no relations for cui {cui}')
else:
for rel in relations:
rel_url = rel['relatedId']
m = self.REL_ID_REGEX.match(rel_url)
if m is None:
raise UTSError(
f'Could not parse relation ID from {rel_url}')
rel_ids.append((m.group(1), rel))
        return rel_ids
| zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/uts.py | uts.py |
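# Usage sketch for zensols.mednlp.uts (the module above): query UTS with a
# placeholder API key; network access to the NIH services is assumed and the
# key below is not a real credential.
def _uts_client_example():
    client = UTSClient(api_key='<your-umls-api-key>')
    print(client.search_term('lung cancer', pages=1))
    print(client.get_atoms('C0242379', preferred=True))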
__author__ = 'Paul Landes'
from typing import Dict, Any, Set
from dataclasses import dataclass, field, InitVar
import logging
from pathlib import Path
import pandas as pd
from frozendict import frozendict
from medcat.config import Config, MixingConfig
from medcat.vocab import Vocab
from medcat.cdb import CDB
from medcat.cat import CAT
from medcat.meta_cat import MetaCAT
from zensols.util import APIError
from zensols.install import Resource, Installer
from zensols.persist import persisted, PersistedWork
logger = logging.getLogger(__name__)
@dataclass
class MedCatResource(object):
"""A factory class that creates MedCAT resources.
"""
installer: Installer = field()
"""Installs and provides paths to the model files."""
vocab_resource: Resource = field()
"""The path to the ``vocab.dat`` file."""
cdb_resource: Resource = field()
"""The ``cdb-medmen-v1.dat`` file.
"""
mc_status_resource: Resource = field()
"""The the ``mc_status`` directory.
"""
umls_tuis: Resource = field()
"""The UMLS TUIs (types) mapping resource that maps from TUIs to descriptions.
:see: `Semantic Types <https://lhncbc.nlm.nih.gov/ii/tools/MetaMap/documentation/SemanticTypesAndGroups.html>`_
"""
umls_groups: Resource = field()
"""Like :obj:`umls_tuis` but groups TUIs in gropus."""
filter_tuis: Set[str] = field(default=None)
"""Types used to filter linked CUIs (i.e. ``{'T047', 'T048'}``).
"""
filter_groups: Set[str] = field(default=None)
"""Just like :obj:`filter_tuis` but each element is treated as a group used to
generate a list of CUIs from those mapped from ``name`` to ``tui` in
:obj:`groups`.
"""
spacy_enable_components: Set[str] = field(
default_factory=lambda: set('sentencizer parser'.split()))
"""By default, MedCAT disables several pipeline components. Some of these are
needed for sentence chunking and other downstream tasks.
:see: `MedCAT Config <https://github.com/CogStack/MedCAT/blob/master/medcat/config.py>`_
"""
cat_config: Dict[str, Dict[str, Any]] = field(default=None)
"""If provieded, set the CDB configuration. Keys are ``general``,
``preprocessing`` and all other attributes documented in the `MedCAT Config
<https://github.com/CogStack/MedCAT/blob/master/medcat/config.py>`_
"""
cache_global: InitVar[bool] = field(default=True)
"""Whether or not to globally cache resources, which saves load time.
"""
requirements_dir: Path = field(default=None)
"""The directory with the pip requirements files."""
def __post_init__(self, cache_global: bool):
self._tuis = PersistedWork('_tuis', self, cache_global=cache_global)
self._cat = PersistedWork('_cat', self, cache_global=cache_global)
self._installed = False
@staticmethod
def _filter_medcat_logger():
class NoCdbExportFilter(logging.Filter):
def filter(self, record):
s = 'The CDB was exported by an unknown version of MedCAT.'
return not record.getMessage() == s
logging.getLogger('medcat.cdb').addFilter(NoCdbExportFilter())
def _assert_installed(self):
if not self._installed:
self.installer()
self._installed = True
def _override_config(self, targ: Config, src: Dict[str, Dict[str, Any]]):
src_top: str
        src_conf: Dict[str, Any]
for src_top, src_conf in src.items():
targ_any: Any = getattr(targ, src_top)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"updating dict '{src_top}' ({type(targ_any)}): " +
f"<{targ_any}> with <{src_conf}>")
if isinstance(targ_any, dict):
targ_any.update(src_conf)
elif isinstance(targ_any, MixingConfig):
targ_any.merge_config(src_conf)
else:
setattr(targ, src_top, src_conf)
def _add_filters(self, config: Config, cdb: CDB):
filter_tuis = set()
if self.filter_tuis is not None:
filter_tuis.update(self.filter_tuis)
if self.filter_groups is not None:
df: pd.DataFrame = self.groups
reg = '.*(' + '|'.join(self.filter_groups) + ')'
df = df[df['name'].str.match(reg)]
filter_tuis.update(df['tui'].tolist())
if logger.isEnabledFor(logging.INFO):
logger.info(f'filtering on tuis: {", ".join(filter_tuis)}')
if len(filter_tuis) > 0:
cui_filters = set()
for tui in filter_tuis:
cui_filters.update(cdb.addl_info['type_id2cuis'][tui])
config.linking['filters']['cuis'] = cui_filters
@property
@persisted('_tuis')
def tuis(self) -> Dict[str, str]:
"""A mapping of type identifiers (TUIs) to their descriptions."""
self._assert_installed()
path: Path = self.installer[self.umls_tuis]
df = pd.read_csv(path, delimiter='|', header=None)
df.columns = 'abbrev tui desc'.split()
df_tups = df[['tui', 'desc']].itertuples(name=None, index=False)
return frozendict(df_tups)
@property
@persisted('_groups')
def groups(self) -> pd.DataFrame:
"""A dataframe of TUIs, their abbreviations, descriptions and a group name
associated with each.
"""
self._assert_installed()
path: Path = self.installer[self.umls_groups]
df = pd.read_csv(path, delimiter='|', header=None)
df.columns = 'abbrev name tui desc'.split()
return df
@property
@persisted('_cat')
def cat(self) -> CAT:
"""The MedCAT NER tagger instance.
When this property is accessed, all models are downloaded first, then
loaded, if not already.
"""
self._assert_installed()
# Load the vocab model you downloaded
vocab = Vocab.load(self.installer[self.vocab_resource])
# Load the cdb model you downloaded
cdb = CDB.load(self.installer[self.cdb_resource])
# mc status model
mc_status = MetaCAT.load(self.installer[self.mc_status_resource])
# enable sentence boundary annotation
for name in self.spacy_enable_components:
cdb.config.general['spacy_disabled_components'].remove(name)
# override configuration
if self.cat_config is not None:
self._override_config(cdb.config, self.cat_config)
#
self._add_filters(cdb.config, cdb)
# create cat - each cdb comes with a config that was used to train it;
# you can change that config in any way you want, before or after
# creating cat
try:
cat = CAT(cdb=cdb, config=cdb.config, vocab=vocab,
meta_cats=[mc_status])
except OSError as e:
msg: str = str(e)
if msg.find("Can't find model") == -1:
raise e
else:
self._install_model()
cat = CAT(cdb=cdb, config=cdb.config, vocab=vocab,
meta_cats=[mc_status])
return cat
def _install_model(self):
if self.requirements_dir is None:
raise APIError('model not installed and no requirements found')
else:
import pip
logger.info('no scispacy model found--attempting to install')
req_file: Path
for req_file in self.requirements_dir.iterdir():
pip.main(['install', '--use-deprecated=legacy-resolver',
'-r', str(req_file), '--no-deps'])
def clear(self):
self._tuis.clear()
self._cat.clear()
MedCatResource._filter_medcat_logger()
| zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/resource.py | resource.py |
__author__ = 'Paul Landes'
from typing import Optional
from dataclasses import dataclass, field
from enum import Enum, auto
import sys
import logging
from io import TextIOBase
import re
from pprint import pprint
from pathlib import Path
import pandas as pd
from zensols.config import Dictable, ConfigFactory
from zensols.cli import ApplicationError
from zensols.nlp import FeatureDocumentParser, FeatureDocument
from zensols.nlp.dataframe import FeatureDataFrameFactory
from . import MedCatResource, MedicalLibrary
logger = logging.getLogger(__name__)
class GroupInfo(Enum):
"""Used to group TUI information in :meth:`.Application.group`
"""
csv = auto()
byname = auto()
@dataclass
class Application(Dictable):
"""A natural language medical domain parsing library.
"""
CLI_META = {'mnemonic_excludes': {'write'},
'mnemonic_overrides': {'show_config': 'conf'},
'option_excludes': {'doc_parser', 'config_factory', 'library'},
'option_overrides': {'input_dir': {'long_name': 'input',
'short_name': 'i',
'metavar': 'DIR'}}}
config_factory: ConfigFactory = field()
"""Used to create a cTAKES stash."""
doc_parser: FeatureDocumentParser = field()
"""Parses and NER tags medical terms."""
library: MedicalLibrary = field()
"""Medical resource library that contains UMLS access, cui2vec etc.."""
def _get_text(self, text_or_file: str) -> str:
"""Return the text from a file or the text passed based on if ``text_or_file``
is a file on the file system.
"""
path = Path(text_or_file)
if path.is_file():
with open(path) as f:
text_or_file = f.read()
return text_or_file
def _write_doc(self, doc: FeatureDocument, only_medical: bool,
depth: int = 0, writer: TextIOBase = sys.stdout):
for sent in doc.sents:
if len(sent.text.strip()) == 0:
continue
self._write_line(sent.text, depth, writer)
for tok in sent:
if not only_medical or tok.is_ent:
self._write_line(f'{tok.norm}:', depth + 1, writer)
tok.write_attributes(
depth + 2, writer,
feature_ids=self.doc_parser.token_feature_ids
)
self._write_line('entities:', depth, writer)
for ents in sent.entities:
self._write_line(
' '.join(map(lambda e: e.norm, ents)), depth + 1, writer)
def show(self, text_or_file: str, only_medical: bool = False):
"""Parse and output medical entities.
:param text_or_file: natural language to be processed
:param only_medical: only provide medical linked tokens
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsing: <{text_or_file}>...')
text: str = self._get_text(text_or_file)
doc: FeatureDocument = self.doc_parser.parse(text)
self._write_doc(doc, only_medical)
def _output_dataframe(self, df: pd.DataFrame, out: Optional[Path] = None):
"""Output the dataframe generated by other actions of the app.
        :param df: the dataframe to output
        :param out: the output path, or ``None`` for standard out
"""
if out is None:
out = sys.stdout
df.to_csv(out, index=False)
row_s = 's' if len(df) != 1 else ''
if out != sys.stdout:
logger.info(f'wrote {len(df)} row{row_s} to {out}')
def features(self, text_or_file: str, out: Path = None, ids: str = None,
only_medical: bool = False):
"""Dump features as CSV output.
:param text_or_file: natural language to be processed
:param out: the path to output the CSV file or stdout if missing
:param ids: the comma separate feature IDs to output
:param only_medical: only provide medical linked tokens
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsing: <{text_or_file}>...')
params = {}
if ids is None:
ids = self.doc_parser.token_feature_ids
else:
ids = set(re.split(r'\W+', ids))
needs = 'norm cui_ is_concept'.split()
missing = set(filter(lambda i: i not in ids, needs))
ids |= missing
params['token_feature_ids'] = ids
params['priority_feature_ids'] = needs
df_fac = FeatureDataFrameFactory(**params)
self.doc_parser.token_feature_ids = ids
text: str = self._get_text(text_or_file)
doc: FeatureDocument = self.doc_parser.parse(text)
df: pd.DataFrame = df_fac(doc)
if only_medical:
df = df[df['is_concept'] == True]
self._output_dataframe(df, out)
def search(self, term: str):
"""Search the UMLS database using UTS and show results.
:param term: the term to search for (eg 'lung cancer')
"""
pprint(self.library.uts_client.search_term(term))
def atom(self, cui: str):
"""Search the UMLS database using UTS and show results.
:param cui: the concept ID to search for (eg 'C0242379')
"""
pprint(self.library.uts_client.get_atoms(cui))
def define(self, cui: str):
"""Look up an entity by CUI. This takes a long time.
:param cui: the concept ID to search for (eg 'C0242379')
"""
entity = self.library.get_linked_entity(cui)
print(entity)
def group(self, info: GroupInfo, query: str = None):
"""Get TUI group information.
:param info: the type of information to return
:param query: comma delimited name list used to subset the output data
"""
res: MedCatResource = self.library.medcat_resource
df: pd.DataFrame = res.groups
if info == GroupInfo.csv:
path = Path('tui-groups.csv')
df.to_csv(path)
logger.info(f'wrote TUI groups to {path}')
elif info == GroupInfo.byname:
if query is None:
raise ApplicationError('Missing query string for grouping')
reg = '.*(' + '|'.join(query.split(',')) + ')'
df = df[df['name'].str.match(reg)]
print(','.join(df['tui'].tolist()))
else:
raise ApplicationError(f'Unknown query info type: {info}')
def ctakes(self, text_or_file: str, only_medical: bool = False):
"""Invoke cTAKES on a directory with text files.
:param text_or_file: natural language to be processed
:param only_medical: only provide medical linked tokens
"""
text: str = self._get_text(text_or_file)
stash: CTakesParserStash = self.library.get_new_ctakes_parser_stash()
stash.set_documents([text])
print(stash['0'].to_string())
def similarity(self, term: str):
"""Get the cosine similarity between two CUIs.
"""
for sim in self.library.similarity_by_term(term):
print(sim.cui)
            sim.write(1)
| zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/app.py | app.py |
__author__ = 'Paul Landes'
from typing import List, Dict, Any, ClassVar
from dataclasses import dataclass, field, InitVar
import logging
from scispacy.linking import EntityLinker
from scispacy.linking_utils import Entity as SciSpacyEntity
from zensols.persist import persisted, PersistedWork
from zensols.config import Dictable
from zensols.nlp import FeatureToken, FeatureTokenDecorator
from . import MedicalLibrary
logger = logging.getLogger(__name__)
@dataclass
class Entity(Dictable):
"""A convenience container class that Wraps a SciSpacy entity.
"""
_DICTABLE_ATTRIBUTES: ClassVar[List[str]] = 'cui name definition'.split()
sci_spacy_entity: SciSpacyEntity = field(repr=False)
"""The entity identified by :mod:`scispacy.linking_utils`."""
@property
def name(self) -> str:
"""The canonical name of the entity."""
return self.sci_spacy_entity.canonical_name
@property
def definition(self) -> str:
"""The human readable description of the entity."""
return self.sci_spacy_entity.definition
@property
def cui(self) -> str:
"""The unique concept identifier."""
return self.sci_spacy_entity.concept_id
def __str__(self) -> str:
return f'{self.name} ({self.cui})'
def __repr__(self):
return self.cui
@dataclass
class EntitySimilarity(Entity):
"""A similarity measure of a medical concept in cui2vec.
:see: :meth:`.MedicalFeatureDocumentParser.similarity_by_term`
"""
similiarty: float = field()
@dataclass
class EntityLinkerResource(object):
"""Provides a way resolve :class:`scispacy.linking_utils.Entity` instances from
CUIs.
:see: :meth:`.get_linked_entity`
"""
params: Dict[str, Any] = field(
default_factory=lambda: {'resolve_abbreviations': True,
'linker_name': 'umls'})
"""Parameters given to the scispaCy entity linker."""
cache_global: InitVar[bool] = field(default=True)
"""Whether or not to globally cache resources, which saves load time.
"""
def __post_init__(self, cache_global: bool):
self._linker = PersistedWork(
'_linker', self, cache_global=cache_global)
@property
@persisted('_linker')
def linker(self) -> EntityLinker:
"""The ScispaCy entity linker."""
self._silence_scispacy_warn()
return EntityLinker(**self.params)
@staticmethod
def _silence_scispacy_warn():
"""This warning has should have no bearing on this application as we're simply
doing a CUI looking.
"""
import warnings
s = '.*Trying to unpickle estimator Tfidf(?:Transformer|Vectorizer) from version.*'
warnings.filterwarnings('ignore', message=s)
s = 'Please use `csr_matrix` from the `scipy.sparse` namespace.*'
warnings.filterwarnings('ignore', message=s)
def get_linked_entity(self, cui: str) -> Entity:
"""Get a scispaCy linked entity.
:param cui: the unique concept ID
"""
linker: EntityLinker = self.linker
se: SciSpacyEntity = linker.kb.cui_to_entity.get(cui)
if se is not None:
return Entity(se)
@dataclass
class LinkFeatureTokenDecorator(FeatureTokenDecorator):
"""Adds linked SciSpacy definitions to tokens using the
:class:`.MedicalLibrary`.
"""
lib: MedicalLibrary = field(default=None)
"""The medical library used for linking entities."""
def decorate(self, token: FeatureToken):
e: SciSpacyEntity = self.lib.get_linked_entity(token.cui_)
if e is not None:
            token._definition = e.definition
| zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/entlink.py | entlink.py |
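# Usage sketch for zensols.mednlp.entlink (the module above): resolve a CUI to
# a scispaCy linked entity; loading the UMLS linker the first time is slow and
# downloads data, and the CUI is taken from the CLI examples in this package.
def _entity_linker_example():
    res = EntityLinkerResource()
    ent = res.get_linked_entity('C0242379')
    if ent is not None:
        print(ent.cui, ent.name)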
__author__ = 'Paul Landes'
from typing import Union, Dict, Optional, Tuple
import logging
from functools import reduce
from frozendict import frozendict
from spacy.tokens.token import Token
from spacy.tokens.span import Span
from medcat.cdb import CDB
from zensols.nlp import FeatureToken, SpacyFeatureToken
from . import MedCatResource
from .domain import _MedicalEntity
logger = logging.getLogger(__name__)
class MedicalFeatureToken(SpacyFeatureToken):
"""A set of token features that optionally contains a medical concept.
"""
FEATURE_IDS_BY_TYPE = frozendict({
'str': frozenset(('cui_ pref_name_ detected_name_ tuis_ ' +
'definition_ tui_descs_').split()),
'bool': frozenset('is_concept'.split()),
'float': frozenset('context_similarity'.split()),
'int': frozenset('cui'.split()),
'list': frozenset('tuis sub_names'.split())})
FEATURE_IDS = frozenset(
reduce(lambda res, x: res | x, FEATURE_IDS_BY_TYPE.values()))
WRITABLE_FEATURE_IDS = tuple(list(FeatureToken.WRITABLE_FEATURE_IDS) +
'cui_'.split())
CONCEPT_ENTITY_LABEL = 'concept'
_NONE_SET = frozenset()
def __init__(self, spacy_token: Union[Token, Span], norm: str,
res: MedCatResource, ix2ent: Dict[int, _MedicalEntity]):
super().__init__(spacy_token, norm)
self._definition: str = self.NONE
self._cdb: CDB = res.cat.cdb
self._res = res
med_ent: Optional[_MedicalEntity] = ix2ent.get(self.idx)
if med_ent is None:
med_ent = _MedicalEntity()
self.med_ent = med_ent
self.is_ent = med_ent.is_ent
@property
def ent(self) -> str:
return self.med_ent.concept_span.label if self.is_concept else super().ent
@property
def ent_(self) -> str:
return self.med_ent.concept_span.label_ if self.is_concept else super().ent_
@property
def is_concept(self) -> bool:
"""``True`` if this has a CUI and identifies a medical concept."""
return self.is_ent
@property
def cui_(self) -> str:
"""The unique UMLS concept ID."""
return self.med_ent.cui_ if self.is_concept else self.NONE
@property
def cui(self) -> int:
"""Returns the numeric part of the concept ID."""
return -1 if not self.is_concept else int(self.cui_[1:])
@property
def pref_name_(self) -> str:
"""The preferred name of the concept."""
if self.is_concept:
return self._cdb.cui2preferred_name.get(self.cui_)
else:
return self.NONE
@property
def detected_name_(self) -> str:
"""The detected name of the concept."""
if self.is_concept:
return self.med_ent.concept_span._.detected_name
else:
return self.NONE
@property
def sub_names(self) -> Tuple[str]:
"""Return other names for the concept."""
if self.is_concept:
return tuple(sorted(self._cdb.cui2names[self.cui_]))
else:
            return ()
@property
def context_similarity(self) -> float:
"""The similiarity of the concept."""
if self.is_concept:
return self.med_ent.concept_span._.context_similarity
else:
return -1
@property
def definition_(self) -> str:
"""The definition if the concept."""
return self._definition or FeatureToken.NONE
@property
def tuis(self) -> Tuple[str]:
"""The the CUI type of the concept."""
if self.is_concept:
cui: str = self.cui_
return tuple(sorted(self._cdb.cui2type_ids.get(cui)))
else:
return self._NONE_SET
@property
def tuis_(self) -> str:
"""All CUI TUIs (types) of the concept sorted as a comma delimited list.
"""
return ','.join(sorted(self.tuis))
@property
def tui_descs_(self) -> str:
"""Descriptions of :obj:`tuis_`."""
def map_tui(k: str) -> str:
v = self._res.tuis.get(k)
if v is None:
v = f'? ({k})'
return v
return ', '.join(map(map_tui, sorted(self.tuis)))
def __str__(self):
cui_str = f' ({self.cui_})' if self.is_concept else ''
        return self.norm + cui_str
| zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/tok.py | tok.py |
__author__ = 'Paul Landes'
from typing import Iterable
from dataclasses import dataclass, field
import logging
import os
from pathlib import Path
import pandas as pd
from zensols.config import Dictable
from zensols.persist import Stash, DirectoryStash, ReadOnlyStash, Primeable
from zensols.util.executor import Executor
import ctakes_parser.ctakes_parser as ctparser
from . import MedNLPError
logger = logging.getLogger(__name__)
ctakes_logger = logging.getLogger(__name__ + '.ctakes')
@dataclass
class _TextDirectoryStash(DirectoryStash):
pattern: str = field(default='{name}.txt')
def _load_file(self, path: Path) -> str:
with open(path, 'r') as f:
return f.read()
def _dump_file(self, inst: str, path: Path):
with open(path, 'w') as f:
f.write(inst)
@dataclass
class CTakesParserStash(ReadOnlyStash, Primeable, Dictable):
"""Runs the cTAKES CUI entity linker on a directory of medical notes. For each
medical text file, it generates an ``xmi`` file, which is then parsed by
    the :mod:`ctakes_parser` library.
This straightforward wrapper around the ``ctparser`` library automates the
file system orchestration that needs to happen. Configure an instance of
this class as an application configuration and use a
:class:`~zensols.config.ImportConfigFactory` to create the objects. See
the ``examples/ctakes`` directory for a quick start guide on how to use
this class.
"""
entry_point_bin: Path = field()
"""Entry point script in to the cTAKES parser."""
entry_point_cmd: str = field()
"""Command line arguments passed to cTAKES."""
home: Path = field()
"""The directory where cTAKES is installed."""
source_dir: Path = field()
"""Contains a path to the source directory where the text documents live.
"""
output_dir: Path = field(default=None)
"""The directory where to output the xmi files."""
def __post_init__(self):
super().__post_init__()
self.strict = True
        self._pattern: str = '{name}.txt.xmi'
if self.output_dir is None:
self.output_dir = self.source_dir.parent / 'output'
for attr in 'entry_point_bin source_dir output_dir'.split():
setattr(self, attr, getattr(self, attr).absolute())
self._source_stash = _TextDirectoryStash(self.source_dir)
self._out_stash = _TextDirectoryStash(
path=self.output_dir,
pattern=self._source_stash.pattern + '.xmi')
@property
def source_stash(self) -> Stash:
"""The stash that tracks the text documents that are to be parsed by cTAKES.
"""
return self._source_stash
def set_documents(self, docs: Iterable[str]):
"""Set the document to be parsed by cTAKES.
:param docs: an iterable of string text documents to persist to the
file system, and then be parsed by cTAKES.
"""
self.clear()
for i, doc in enumerate(docs):
self._source_stash.dump(str(i), doc)
def _run(self):
"""Run cTAKES (see class docs)."""
if logger.isEnabledFor(logging.INFO):
logger.info(f'running ctakes parser on {self.source_dir}')
os.environ['CTAKES_HOME'] = str(self.home.absolute())
cmd = self.entry_point_cmd.format(**self.asdict())
if logger.isEnabledFor(logging.INFO):
logger.info(f'executing {cmd}')
exc = Executor(ctakes_logger)
exc.run(cmd)
def prime(self):
super().prime()
if not self.source_dir.is_dir():
raise MedNLPError('cTAKES temporary path is not an existing ' +
f'directory: {self.source_dir}')
if len(self._source_stash) == 0:
raise MedNLPError(
f'Source directory contains no data: {self.source_dir}')
if len(self._out_stash) == 0:
self.output_dir.mkdir(parents=True, exist_ok=True)
self._run()
def load(self, name: str) -> pd.DataFrame:
self.prime()
path: Path = self._out_stash.key_to_path(name)
return ctparser.parse_file(file_path=str(path))
def keys(self) -> Iterable[str]:
self.prime()
return self._out_stash.keys()
def exists(self, name: str) -> bool:
self.prime()
return self._out_stash.exists(name)
def clear(self):
self._out_stash.clear()
self._source_stash.clear() | zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/ctakes.py | ctakes.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Any, List, Dict, Tuple
from dataclasses import dataclass, field
from zensols.config import ConfigFactory
from . import MedCatResource, UTSClient
@dataclass
class MedicalLibrary(object):
"""A utility class that provides access to medical APIs.
"""
config_factory: ConfigFactory = field(default=None)
"""The configuration factory used to create cTAKES and cui2vec instances.
"""
medcat_resource: MedCatResource = field(default=None)
"""The MedCAT factory resource."""
entity_linker_resource: 'EntityLinkerResource' = field(default=None)
"""The entity linker resource."""
uts_client: UTSClient = field(default=None)
"""Queries UMLS data."""
def get_entities(self, text: str) -> Dict[str, Any]:
"""Return the all concept entity data.
:return: concepts as a multi-tiered dict
"""
return self.medcat_resource.cat.get_entities(text)
def get_linked_entity(self, cui: str) -> 'Entity':
"""Get a scispaCy linked entity.
:param cui: the unique concept ID
"""
from scispacy.linking_utils import Entity as SciSpacyEntity
from .entlink import Entity
ent: Entity = self.entity_linker_resource.get_linked_entity(cui)
return ent
def get_atom(self, cui: str) -> Dict[str, str]:
"""Get the UMLS atoms of a CUI from UTS.
:param cui: the concept ID used to query
:param preferred: if ``True`` only return preferred atoms
:return: a list of atom entries in dictionary form
"""
return self.uts_client.get_atoms(cui, preferred=True)
def get_relations(self, cui: str) -> List[Dict[str, Any]]:
"""Get the UMLS related concepts connected to a concept by ID.
:param cui: the concept ID used to get related concepts
:return: a list of relation entries in dictionary form in the order
returned by UTS
"""
return self.uts_client.get_relations(cui)
def get_new_ctakes_parser_stash(self) -> CTakesParserStash:
"""Return a new instance of a ctakes parser stash.
"""
return self.config_factory.new_instance('ctakes_parser_stash')
@property
def cui2vec_embedding(self) -> Cui2VecEmbedModel:
"""The cui2vec embedding model.
"""
return self.config_factory('cui2vec_500_embedding')
def similarity_by_term(self, term: str, topn: int = 5) -> \
List['EntitySimilarity']:
"""Return similaries of a medical term.
:param term: the medical term (i.e. ``heart disease``)
:param topn: the top N count similarities to return
"""
from .entlink import Entity, EntitySimilarity
embedding: Cui2VecEmbedModel = self.cui2vec_embedding
kv: KeyedVectors = embedding.keyed_vectors
res: List[Dict[str, str]] = self.uts_client.search_term(term)
cui: str = res[0]['ui']
sims_by_word: List[Tuple[str, float]] = kv.similar_by_word(cui, topn)
sims: List[EntitySimilarity] = []
for rel_cui, proba in sims_by_word:
entity: Entity = self.get_linked_entity(rel_cui)
# name: str = entity.canonical_name.lower()
# defn: str = entity.definition
sim: float = embedding.keyed_vectors.similarity(cui, rel_cui)
sims.append(EntitySimilarity(entity.sci_spacy_entity, sim))
return sims | zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/lib.py | lib.py |
__author__ = 'Paul Landes'
from typing import Type, Iterable, Dict, Set
from dataclasses import dataclass, field
import logging
import collections
from spacy.tokens.doc import Doc
from spacy.language import Language
from zensols.nlp import (
FeatureToken, SpacyFeatureDocumentParser, FeatureDocumentParser,
)
from . import MedNLPError, MedCatResource, MedicalFeatureToken
from .domain import _MedicalEntity
logger = logging.getLogger(__name__)
@dataclass
class MedicalFeatureDocumentParser(SpacyFeatureDocumentParser):
"""A medical based language resources that parses concepts.
"""
TOKEN_FEATURE_IDS = frozenset(FeatureDocumentParser.TOKEN_FEATURE_IDS |
MedicalFeatureToken.FEATURE_IDS)
"""Default token feature ID set for the medical parser.
"""
token_feature_ids: Set[str] = field(default=TOKEN_FEATURE_IDS)
"""The features to keep from spaCy tokens.
:see: :obj:`TOKEN_FEATURE_IDS`
"""
token_class: Type[FeatureToken] = field(default=MedicalFeatureToken)
"""The class to use for instances created by :meth:`features`."""
medcat_resource: MedCatResource = field(default=None)
"""The MedCAT factory resource."""
def __post_init__(self):
if self.medcat_resource is None:
raise MedNLPError('No medcat resource set')
super().__post_init__()
def _create_model_key(self) -> str:
return f'name-{self.name}'
def _create_model(self) -> Language:
return self.medcat_resource.cat.pipe.spacy_nlp
def _normalize_tokens(self, doc: Doc) -> Iterable[FeatureToken]:
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsing: {doc}')
# load/create model resources
res: MedCatResource = self.medcat_resource
ix2ent: Dict[int, _MedicalEntity] = collections.defaultdict(_MedicalEntity)
# add entities
for ent in doc.ents:
for i in range(ent.start, ent.end):
tok = doc[i]
ix2ent[tok.idx].concept_span = ent
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'normalizing with: {self.token_normalizer}')
return super()._normalize_tokens(doc, res=res, ix2ent=ix2ent) | zensols.mednlp | /zensols.mednlp-1.4.0-py3-none-any.whl/zensols/mednlp/parser.py | parser.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
Dict, Iterable, Set, Tuple, List, Any, Optional, ClassVar, Sequence
)
from dataclasses import dataclass, field, fields
from abc import ABCMeta, abstractmethod
from enum import Enum, auto
import logging
import sys
import re
import collections
import copy
import itertools as it
from itertools import chain
from io import TextIOBase
from frozendict import frozendict
import pandas as pd
from zensols.config import Dictable, ConfigFactory
from zensols.persist import PersistableContainer, persisted, Primeable
from zensols.nlp import LexicalSpan, FeatureToken, FeatureDocument
from zensols.nlp.dataframe import FeatureDataFrameFactory
from . import NoteEvent
logger = logging.getLogger(__name__)
class NoteFormat(Enum):
"""Used in :meth:`.Note.format` for a parameterized method to write a note.
"""
text = auto()
raw = auto()
verbose = auto()
summary = auto()
json = auto()
yaml = auto()
markdown = auto()
@property
def ext(self) -> str:
return {
self.text: 'txt',
self.raw: 'txt',
self.verbose: 'txt',
self.summary: 'txt',
self.json: 'json',
self.yaml: 'yaml',
self.markdown: 'md'
}[self]
class SectionAnnotatorType(Enum):
"""The type of :class:`.Section` annotator for :class:`.Note` instances.
    The `MedSecId`_ project adds the :obj:`HUMAN` and :obj:`MODEL` types.
:see: `MedSecId <https://github.com/plandes/mimicsid>`_
"""
NONE = auto()
"""Default for those without section identifiers."""
REGULAR_EXPRESSION = auto()
"""Sections are automatically assigned by regular expressions."""
HUMAN = auto()
"""A `MedSecId`_ human annotator."""
MODEL = auto()
"""Predictions are provided by a `MedSecId`_ model."""
@dataclass
class ParagraphFactory(object):
"""Splits a document in to constituent paragraphs.
"""
_PARA_REGEX: ClassVar[re.Pattern] = re.compile(r'\n[\s.]*\n')
def __call__(self, sec: Section) -> List[FeatureDocument]:
par_spans: List[LexicalSpan] = []
paras: List[FeatureDocument] = []
bspan: LexicalSpan = sec.body_span
bdoc: LexicalSpan = sec.body_doc
marks: List[int] = [bspan.begin]
for i in self._PARA_REGEX.finditer(sec.body):
marks.extend((i.start() + bspan.begin, i.end() + bspan.begin))
marks.append(bspan.end)
mi = iter(marks)
for beg in mi:
par_spans.append(LexicalSpan(beg, next(mi)))
ps: LexicalSpan
for ps in par_spans:
para: FeatureDocument = bdoc.get_overlapping_document(ps)
para.text = ' '.join(map(lambda s: s.text.strip(), para))
paras.append(para)
return paras
@dataclass
class Section(PersistableContainer, Dictable):
"""A section segment with an identifier and represents a section of a
:class:`.Note`, one for each section. An example of a section is the
*history of present illness* in a discharge note.
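    A usage sketch given a parsed ``note`` (a :class:`.Note` instance); the
    section name is illustrative::

        sec: Section = note.sections_by_name['history-of-present-illness'][0]
        print(sec.headers)     # the header text(s) of the section
        print(sec.body)        # the raw section body text
        doc = sec.body_doc     # the parsed body as a feature document
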
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
_PERSITABLE_TRANSIENT_ATTRIBUTES: ClassVar[Set[str]] = {
'container', '_doc_stash', '_paragraph_factory'}
_SENT_FILTER_REGEX: ClassVar[re.Pattern] = re.compile(r'^\s*\d+\.\s*')
"""Remove enumerated lists (<number> .) as separate sentences. Example is
hadm=119960, cat=Discharge summary, section=Discharge Medications:
``1. Vancomycin 125 mg``.
"""
id: int = field()
"""The unique ID of the section."""
name: Optional[str] = field()
"""The name of the section (i.e. ``hospital-course``). This field is what's
called the ``type`` in the paper, which is not used since ``type`` is a
keyword in Python.
"""
container: SectionContainer = field(repr=False)
"""The container that has this section."""
header_spans: Tuple[LexicalSpan] = field()
"""The character offsets of the section headers. The first is usually the
    :obj:`name` of the section. If there are no headers, this is a 0-length
tuple.
"""
body_span: LexicalSpan = field()
"""Like :obj:`header_spans` but for the section body. The body and name do
not intersect.
"""
def __post_init__(self):
super().__init__()
if self.name is None:
if len(self.headers) == 0:
self.name = 'unknown'
else:
header = ' '.join(self.headers)
self.name = re.sub(r'[_/ ]+', '-', header).lower()
@property
def note_text(self) -> str:
"""The entire parent note's text."""
return self.container.text
@property
@persisted('_headers', transient=True)
def headers(self) -> Tuple[str]:
"""The section text."""
text = self.note_text
return tuple(map(lambda s: text[s.begin:s.end], self.header_spans))
@property
def body(self) -> str:
"""The section text."""
return self.note_text[self.body_span.begin:self.body_span.end]
def _get_doc(self) -> FeatureDocument:
return self.container._get_doc()
@property
def header_tokens(self) -> Iterable[FeatureToken]:
doc: FeatureDocument = self._get_doc()
spans = doc.map_overlapping_tokens(self.header_spans)
return chain.from_iterable(spans)
@property
def body_tokens(self) -> Iterable[FeatureToken]:
doc: FeatureDocument = self._get_doc()
return doc.get_overlapping_tokens(self.body_span)
@property
@persisted('_doc', transient=True)
def doc(self) -> FeatureDocument:
"""A feature document of the section's body text."""
return self._narrow_doc(self._get_doc(), self.lexspan, False)
@property
@persisted('_body_doc', transient=True)
def body_doc(self) -> FeatureDocument:
"""A feature document of the body of this section's body text."""
return self._narrow_doc(self._get_doc(), self.body_span)
def _narrow_doc(self, doc: FeatureDocument, span: LexicalSpan,
filter_sent: bool = True) -> \
FeatureDocument:
# using inclusive=true will very often leave newlines, but keep the last
# sentence character when the sentence chunker gets confused
doc = doc.get_overlapping_document(span, inclusive=True)
if filter_sent:
sreg: re.Pattern = self._SENT_FILTER_REGEX
doc.sents = tuple(filter(lambda s: sreg.match(s.text) is None,
doc.sents))
return doc
@property
@persisted('_lexspan')
def lexspan(self) -> LexicalSpan:
"""The widest lexical extent of the sections, including headers."""
return LexicalSpan.widen(
chain.from_iterable(((self.body_span,), self.header_spans)))
@property
def text(self) -> str:
"""Get the entire text of the section, which includes the headers."""
span: LexicalSpan = self.lexspan
ntext: str = self.note_text
return ntext[span.begin:span.end]
@property
@persisted('_paragraphs', transient=True)
def paragraphs(self) -> Tuple[FeatureDocument]:
"""The list of paragraphs, each as as a feature document, of this
section's body text.
"""
return tuple(self._paragraph_factory(self))
@property
def is_empty(self) -> bool:
"""Whether the content of the section is empty."""
return len(self.body) == 0
def _copy_resources(self, target: Section):
for attr in self._PERSITABLE_TRANSIENT_ATTRIBUTES:
setattr(target, attr, getattr(self, attr))
target._row_id = self._row_id
def clone(self) -> Section:
clone = copy.copy(self)
self._copy_resources(clone)
return clone
def write_sentences(self, depth: int = 0, writer: TextIOBase = sys.stdout,
container: FeatureDocument = None, limit: int = 0):
"""Write all parsed sentences of the section with respective entities.
"""
def map_ent(tp: Tuple[FeatureToken]):
"""Map a feature token to a readable string."""
if tp[0].ent_ == 'concept':
desc = f' ({tp[0].cui_})'
else:
desc = f' ({tp[0].ent_})'
return ' '.join(map(lambda t: t.norm, tp)) + desc
container = self.body_doc if container is None else container
for sent in it.islice(container, limit):
self._write_divider(depth, writer)
self._write_line(sent.norm, depth, writer)
mtoks = tuple(map(lambda tk: f'{tk.text} ({tk.norm})',
filter(lambda t: t.mimic_ != FeatureToken.NONE,
sent.token_iter())))
if len(mtoks) > 0:
self._write_line(f"mimic: {', '.join(mtoks)}", depth, writer)
if len(sent.entities) > 0:
ents = ', '.join(map(map_ent, sent.entities))
self._write_line(f'entities: {ents}', depth, writer)
def write_as_item(self, depth: int = 0, writer: TextIOBase = sys.stdout):
"""A terse output designed for list iteration."""
self._write_line(f'id: {self.id}', depth, writer)
self.write(depth + 1, writer, body_line_limit=0, norm_line_limit=0,
include_header_spans=True, include_body_span=True,
include_id_name=False)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
body_line_limit: int = sys.maxsize,
norm_line_limit: int = sys.maxsize,
par_limit: int = 0, sent_limit: int = 0,
include_header: bool = True, include_id_name: bool = True,
include_header_spans: bool = False,
include_body_span: bool = False):
"""Write a note section's name, original body, normalized body and
sentences with respective sentence entities.
        :param body_line_limit: the number of lines of the section's body to
                                output
        :param norm_line_limit: the number of lines of the section's
                                normalized (parsed) body to output
:param par_limit: the number of paragraphs to output
:param sent_limit: the number of sentences to output
:param include_header: whether to include the header
:param include_id_name: whether to write the section ID and name
"""
header = ' '.join(self.headers)
if include_id_name:
self._write_line(f'id: {self.id}', depth, writer)
self._write_line(f'name: {self.name}', depth, writer)
if include_header:
self._write_line(f'header: {header}', depth, writer)
if include_header_spans:
self._write_line(f'header spans: {self.header_spans}',
depth, writer)
if include_body_span:
self._write_line(f'body span: {self.body_span}', depth, writer)
if not self.is_empty:
if body_line_limit > 0:
self._write_line('body:', depth, writer)
self._write_block(self.body, depth + 1, writer,
limit=body_line_limit)
if norm_line_limit > 0:
self._write_line('normalized:', depth, writer)
self._write_block(self.body_doc.norm, depth + 1, writer,
limit=norm_line_limit)
if par_limit > 0 and sent_limit > 0:
for par in self.paragraphs:
self._write_line('paragraph:', depth, writer)
self.write_sentences(depth + 1, writer, par, sent_limit)
def __len__(self) -> int:
return len(self.body_span) + sum(map(len, self.header_spans))
def __str__(self):
return f'{self.name} ({self.id}): body_len={len(self.body)}'
@dataclass
class SectionContainer(Dictable, metaclass=ABCMeta):
"""A *note like* container base class that has sections. Note based classes
extend this base class. Sections in order of their position in the document
are produced when using this class as an iterable.
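    A usage sketch; ``container`` is assumed to be an instance of a subclass
    such as :class:`.Note`::

        for sec in container:
            # sections are yielded in the order they appear in the document
            print(sec.id, sec.name, len(sec.body))
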
"""
_DICTABLE_ATTRIBUTES: ClassVar[Set[str]] = {'sections'}
DEFAULT_SECTION_NAME: ClassVar[str] = 'default'
"""The name of the singleton section when none the note is not sectioned."""
@abstractmethod
def _get_doc(self) -> FeatureDocument:
"""Return the parsed document that represents the text in this
container."""
pass
@abstractmethod
def _get_sections(self) -> Iterable[Section]:
"""Generate the sections cached and returned in the :obj:`sections`
property.
"""
pass
@property
@persisted('_sections')
def sections(self) -> Dict[int, Section]:
"""A map from the unique section identifier to a note section.
"""
secs: Iterable[Section] = self._get_sections()
return frozendict({sec.id: sec for sec in secs})
@property
@persisted('_sections_ordered', transient=True)
def sections_ordered(self) -> Tuple[Section]:
"""Sections returned in order as they appear in the note."""
return tuple(map(lambda t: t[1], sorted(
self.sections.items(), key=lambda t: t[0])))
@property
@persisted('_by_name', transient=True)
def sections_by_name(self) -> Dict[str, Tuple[Section]]:
"""A map from the name of a section (i.e. *history of present illness*
    in discharge notes) to the sections having that name.
"""
by_name = collections.defaultdict(list)
for s in self.sections.values():
by_name[s.name].append(s)
return frozendict(map(lambda s: (s[0], tuple(s[1])), by_name.items()))
@property
def section_dataframe(self) -> pd.DataFrame:
"""A Pandas dataframe containing the section's name, header and body
offset spans.
"""
rows = []
cols = 'name body headers body_begin body_end'.split()
sec: Section
for sec in self.sections.values():
rows.append((sec.name, sec.body, sec.header_spans,
sec.body_span.begin, sec.body_span.end))
return pd.DataFrame(rows, columns=cols)
@property
def feature_dataframe(self) -> pd.DataFrame:
"""Return a dataframe useful for feature craft."""
def map_df(sec: Section):
df = dataframe_factory(sec.body_doc)
df['section'] = sec.name
return df
dataframe_factory: FeatureDataFrameFactory = \
self._trans_context['dataframe_factory']
dfs = map(map_df, self.sections.values())
return pd.concat(dfs, ignore_index=True, copy=False)
def write_fields(self, depth: int = 0, writer: TextIOBase = sys.stdout):
"""Write note header fields such as the ``row_id`` and ``category``.
"""
pass
def write_human(self, depth: int = 0, writer: TextIOBase = sys.stdout,
normalize: bool = False):
"""Generates a human readable version of the annotation. This calls the
following methods in order: :meth:`write_fields` and
:meth:`write_sections`.
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
:param normalize: whether to use the paragraphs' normalized
                          (:obj:`~zensols.nlp.TokenContainer.norm`) or text
"""
self.write_fields(depth, writer)
self.write_sections(depth, writer, normalize=normalize)
def write_sections(self, depth: int = 0, writer: TextIOBase = sys.stdout,
normalize: bool = False):
"""Writes the sections of the container.
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
:param normalize: whether to use the paragraphs' normalized
                          (:obj:`~zensols.nlp.TokenContainer.norm`) or text
"""
for sec in self:
header = ' '.join(sec.headers)
div_text: str = f'{sec.id}:{sec.name}'
if len(header) > 0:
div_text += f' ({header})'
self._write_divider(depth, writer, header=div_text)
if normalize:
for i, para in enumerate(sec.paragraphs):
if i > 0:
self._write_empty(writer)
self._write_wrap(para.norm, depth, writer)
elif len(sec.body) > 0:
self._write_block(sec.body, depth, writer)
def write_markdown(self, depth: int = 0, writer: TextIOBase = sys.stdout,
normalize: bool = False):
"""Generates markdown version of the annotation.
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
:param normalize: whether to use the paragraphs' normalized
                          (:obj:`~zensols.nlp.TokenContainer.norm`) or text
"""
self._write_line(f'# {self.category} ({self.row_id})', depth, writer)
for sec in self.sections.values():
header = ' '.join(sec.headers)
self._write_empty(writer)
self._write_empty(writer)
self._write_line(f'## {header}', depth, writer)
self._write_empty(writer)
if normalize:
for i, para in enumerate(sec.paragraphs):
if i > 0:
self._write_empty(writer)
self._write_wrap(para.norm, depth, writer)
elif len(sec.body) > 0:
self._write_block(sec.body, depth, writer)
def write_full(self, depth: int = 0, writer: TextIOBase = sys.stdout,
note_line_limit: int = sys.maxsize,
section_line_limit: int = sys.maxsize,
section_sent_limit: int = sys.maxsize,
include_section_header: bool = True,
sections: Set[str] = None,
include_fields: bool = True,
include_note_divider: bool = True,
include_section_divider: bool = True):
"""Write the custom parts of the note.
:param note_line_limit: the number of lines to write from the note text
        :param section_line_limit: the number of lines of the section's body
                                   and number of sentences to output
:param sections: the sections, by name, to write
:param include_section_header: whether to include the header
:param include_fields: whether to write the note fields
:param include_note_divider: whether to write dividers between notes
:param include_section_divider: whether to write dividers between
sections
"""
secs: Sequence[Section] = self.sections.values()
if sections is not None:
secs = tuple(filter(lambda s: s.name in sections, secs))
if len(secs) > 0:
self._write_line('sections:', depth + 1, writer)
sec: Section
for sec in secs:
aft: str = ''
if section_line_limit == 0 and include_section_header:
aft = ':'
self._write_line(f'{sec.name}{aft}', depth + 2, writer)
sec.write(depth + 3, writer,
include_id_name=False,
body_line_limit=section_line_limit,
norm_line_limit=section_line_limit,
sent_limit=section_sent_limit,
include_header=include_section_header)
if include_section_divider:
self._write_divider(depth + 3, writer)
if include_note_divider:
self._write_divider(depth, writer, '=')
def write_by_format(self, depth: int = 0, writer: TextIOBase = sys.stdout,
                        note_format: NoteFormat = NoteFormat.text):
"""Write the note in the specified format.
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
:param note_format: the format to use for the output
"""
        def summary_format(writer: TextIOBase):
            for s in self.sections.values():
                print(s, s.header_spans, len(s), file=writer)
{NoteFormat.text: lambda: self.write_human(depth, writer),
NoteFormat.verbose: lambda: self.write_full(depth, writer),
NoteFormat.raw: lambda: writer.write(self.text),
NoteFormat.json: lambda: self.asjson(writer=writer, indent=4),
NoteFormat.yaml: lambda: self.asyaml(writer=writer, indent=4),
NoteFormat.markdown: lambda: self.write_markdown(depth, writer),
         NoteFormat.summary: lambda: summary_format(writer),
}[note_format]()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self.write_human(depth, writer)
def __getitem__(self, id: int) -> Section:
return self.sections[id]
def __iter__(self) -> Iterable[Section]:
return iter(sorted(self.sections.values(), key=lambda s: s.lexspan))
@dataclass
class GapSectionContainer(SectionContainer):
"""A container that fills in missing sections of text from a note with
additional sections.
"""
delegate: Note = field()
"""The note with the sections to be filled."""
def _get_doc(self) -> FeatureDocument:
return self.delegate._get_doc()
def _get_sections(self) -> Iterable[Section]:
sections: List[Section] = list(
map(lambda s: s.clone(), self.delegate.sections.values()))
if len(sections) > 0:
note_text: str = self.delegate.text
gaps: Sequence[LexicalSpan] = LexicalSpan.gaps(
spans=map(lambda s: s.lexspan, sections),
end=len(note_text))
ref_sec: Section = sections[0]
sec_cont: SectionContainer = ref_sec.container
gap_secs: List[Section] = []
for gs in gaps:
gsec = Section(
id=-1,
name=None,
container=sec_cont,
header_spans=(),
body_span=gs)
ref_sec._copy_resources(gsec)
gap_secs.append(gsec)
sections.extend(gap_secs)
sections.sort(key=lambda s: s.lexspan)
sec: Section
for sid, sec in enumerate(sections):
sec.original_id = sec.id
sec.id = sid
return sections
@dataclass(repr=False)
class Note(NoteEvent, SectionContainer):
"""A container class of :class:`.Section` for each section for the
text in the note events given by the property :obj:`sections`.
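    A usage sketch given an admission ``adm`` obtained from the corpus (see
    :class:`.Corpus`)::

        note: Note = next(iter(adm.notes))
        # sections keyed by their normalized name
        print(tuple(note.sections_by_name.keys()))
        # write the note in a parameterized format
        note.write_by_format(note_format=NoteFormat.markdown)
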
"""
_PERSITABLE_PROPERTIES: ClassVar[Set[str]] = {'sections'} | \
NoteEvent._PERSITABLE_PROPERTIES
_DICTABLE_WRITE_EXCLUDES: ClassVar[Set[str]] = \
NoteEvent._DICTABLE_WRITE_EXCLUDES | {'sections'}
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
def _get_sections(self) -> Iterable[Section]:
sec = Section(0, self.DEFAULT_SECTION_NAME, self, (),
LexicalSpan(0, len(self.text)))
sec._row_id = self.row_id
return [sec]
@property
def section_annotator_type(self) -> SectionAnnotatorType:
"""A human readable string describing who or what annotated the note."""
return self._get_section_annotator_type()
def _get_section_annotator_type(self) -> SectionAnnotatorType:
return SectionAnnotatorType.NONE
def _trans_context_update(self, trans_context: Dict[str, Any]):
for sec in self.sections.values():
sec.container = self
sec._row_id = self.row_id
sec._doc_stash = trans_context['doc_stash']
sec._paragraph_factory = trans_context['paragraph_factory']
def write_fields(self, depth: int = 0, writer: TextIOBase = sys.stdout):
sat: SectionAnnotatorType = self.section_annotator_type
self._write_line(f'row_id: {self.row_id}', depth, writer)
self._write_line(f'category: {self.category}', depth, writer)
self._write_line(f'description: {self.description}', depth, writer)
self._write_line(f'annotator: {sat.name.lower()}', depth, writer)
def write_full(self, depth: int = 0, writer: TextIOBase = sys.stdout,
note_line_limit: int = sys.maxsize,
section_line_limit: int = sys.maxsize,
section_sent_limit: int = sys.maxsize,
include_section_header: bool = True,
sections: Set[str] = None,
include_fields: bool = True,
include_note_divider: bool = True,
include_section_divider: bool = True):
super().write(depth, writer,
line_limit=note_line_limit,
include_fields=include_fields)
super().write_full(
depth, writer,
note_line_limit=note_line_limit,
section_line_limit=section_line_limit,
section_sent_limit=section_sent_limit,
include_section_header=include_section_header,
sections=sections,
include_fields=include_fields,
include_note_divider=include_note_divider,
include_section_divider=include_section_divider)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
SectionContainer.write(self, depth, writer)
@dataclass
class NoteFactory(Primeable):
"""Creates an instance of :class:`.Note` from :class:`.NoteEvent`.
"""
config_factory: ConfigFactory = field()
"""The factory used to create notes.
"""
category_to_note: Dict[str, str] = field()
"""A mapping between notes' category to section name for :class:.Note`
configuration.
"""
mimic_default_note_section: str = field()
"""The section name holding the configuration of the class to create when there
is no mapping in :obj:`category_to_note`.
"""
def _event_to_note(self, note_event: NoteEvent, section: str,
params: Dict[str, Any] = None) -> Note:
"""Create a note from the application configuration
:param section: the configuration section that details the class
:param params: used to initialize the new instance
"""
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'event to note (section={section}): {note_event}')
ne_params = {f.name: getattr(note_event, f.name)
for f in fields(note_event)}
if params is not None:
ne_params.update(params)
return self.config_factory.new_instance(section, **ne_params)
def _create_from_note_event(self, note_event: NoteEvent,
section: str = None) -> Note:
"""Because subclasses override :meth:`create`, we need a method that
specifically creates from :class:`.NoteEvent` for subclasses that
recover from errors (such as MedSecId prediction) when they cannot
create notes themselves. This method provides a way to create them
directly using the default regular expressions (:mod:`regexnote`).
**Important**: do not override this method.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'create note from event: {note_event}')
if section is None:
section = self.category_to_note.get(note_event.category)
if section is None:
section = self.mimic_default_note_section
return self._event_to_note(note_event, section)
def create(self, note_event: NoteEvent, section: str = None) -> Note:
"""Create a new factory based instance of a :class:`.Note` from a
:class:`.NoteEvent`.
:param note_event: the source data
:param section: the configuration section to use to create the new note,
which is one of the regular expression based sections or
:obj:`mimic_default_note_section` for a :class:`.Note`
"""
return self._create_from_note_event(note_event, section)
def prime(self):
"""The MedSecId project primes by installing the model files."""
if logger.isEnabledFor(logging.INFO):
logger.info('priming...')
def __call__(self, note_event: NoteEvent, section: str = None) -> Note:
"""See :meth:`.create`."""
return self.create(note_event, section) | zensols.mimic | /zensols.mimic-1.4.2-py3-none-any.whl/zensols/mimic/note.py | note.py |
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
import logging
import sys
from pathlib import Path
from io import TextIOBase
from zensols.config import Dictable, ConfigFactory
from . import (
RecordNotFoundError, HospitalAdmission, HospitalAdmissionDbStash,
PatientPersister, AdmissionPersister, DiagnosisPersister,
NoteEventPersister, Note,
)
logger = logging.getLogger(__name__)
@dataclass
class Corpus(Dictable):
"""A container class provided access to the MIMIC-III dataset using a
relational database (by default Postgress per the resource library
configuration). It also has methods to dump corpus statistics.
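    A minimal usage sketch; ``fac`` is assumed to be a configured
    :class:`~zensols.config.ImportConfigFactory`, the ``mimic_corpus`` section
    name is illustrative, and ``hadm_id``/``row_id`` are placeholders for real
    identifiers::

        corpus: Corpus = fac('mimic_corpus')
        corpus.write()                                 # corpus statistics
        adm = corpus.get_hospital_adm_by_id(hadm_id)   # an admission by ID
        note = corpus.get_note_by_id(row_id)           # a note by its row ID
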
:see: `Resource Libraries <https://plandes.github.io/util/doc/config.html#resource-libraries>`_
"""
config_factory: ConfigFactory = field()
"""Used to clear the note event cache."""
patient_persister: PatientPersister = field()
"""The persister for the ``patients`` table."""
admission_persister: AdmissionPersister = field()
"""The persister for the ``admissions`` table."""
diagnosis_persister: DiagnosisPersister = field()
"""The persister for the ``diagnosis`` table."""
note_event_persister: NoteEventPersister = field()
"""The persister for the ``noteevents`` table."""
hospital_adm_stash: HospitalAdmissionDbStash = field()
"""Creates hospital admission instances. Note that this might be a caching
stash instance, but method calls are delegated through to the instance of
:class:`.HospitalAdmissionDbStash`.
"""
temporary_results_dir: Path = field()
"""The path to create the output results. This is not used, but needs to
stay until the next :mod:`zensols.mimicsid` is retrained."""
def __post_init__(self):
# allow pass through method delegation from any configured cache
# stashes on to the HospitalAdmissionDbStash such as `process_keys`
self.hospital_adm_stash.delegate_attr = True
def clear(self, include_notes: bool = False):
"""Clear the all cached admission and note parses.
:param include_notes: whether to also clear the parsed notes cache
"""
self.hospital_adm_stash.clear()
if include_notes:
# the note event cache stash used by :meth:`clear` to remove cached
# parsed files
self.config_factory('mimic_note_event_persister_stash').clear()
self.config_factory('mimic_hospital_adm_factory_stash').clear()
def get_hospital_adm_by_id(self, hadm_id: int) -> HospitalAdmission:
"""Return a hospital admission by its unique identifier."""
return self.hospital_adm_stash[str(hadm_id)]
def get_hospital_adm_for_note(self, row_id: int) -> HospitalAdmission:
"""Return an admission that has note ``row_id``.
:raise: RecordNotFoundError if ``row_id`` is not found in the database
"""
hadm_id: int = self.note_event_persister.get_hadm_id(row_id)
if hadm_id is None:
            raise RecordNotFoundError(self, 'row_id', row_id)
return self.hospital_adm_stash[str(hadm_id)]
def get_note_by_id(self, row_id: int) -> Note:
"""Return the note (via the hospital admission) for ``row_id``.
:raise: RecordNotFoundError if ``row_id`` is not found in the database
"""
return self.get_hospital_adm_for_note(row_id)[row_id]
def write_note_event_counts(self, subject_id: int, depth: int = 0,
writer: TextIOBase = sys.stdout):
"""Print a list of hospital admissions by count of related notes in
descending order.
:see: :meth:`.NoteEventPersister.get_note_counts_by_subject_id`
"""
np: NoteEventPersister = self.note_event_persister
for hadm_id, count in np.get_note_counts_by_subject_id(subject_id):
self._write_line(f'{hadm_id}: {count}', depth, writer)
def write_hosptial_count_admission(self, depth: int = 0,
writer: TextIOBase = sys.stdout,
limit: int = sys.maxsize):
"""Write the counts for each hospital admission.
        :param limit: the limit on the returned admission counts
:see: :meth:`.AdmissionPersister.get_admission_admission_counts`
"""
for i in self.admission_persister.get_admission_admission_counts(limit):
self._write_line(str(i), depth, writer)
def write_hospital_admission(self, hadm_id: int, depth: int = 0,
writer: TextIOBase = sys.stdout,
note_line_limit: int = sys.maxsize):
"""Write the hospital admission identified by ``hadm_id``.
"""
fac: HospitalAdmissionDbStash = self.hospital_adm_stash
hadm: HospitalAdmission = fac.get(hadm_id)
hadm.write(depth, writer, note_line_limit=note_line_limit)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
n_notes: int = self.note_event_persister.get_count()
n_adms: int = self.admission_persister.get_count()
n_patients: int = self.patient_persister.get_count()
self._write_line(f'patients: {n_patients:,}', depth, writer)
self._write_line(f'admissions: {n_adms:,}', depth, writer)
self._write_line(f'notes: {n_notes:,}', depth, writer) | zensols.mimic | /zensols.mimic-1.4.2-py3-none-any.whl/zensols/mimic/corpus.py | corpus.py |
__author__ = 'Paul Landes'
from typing import Tuple, Optional
from dataclasses import dataclass, field
import logging
from pathlib import Path
from zensols.util import stdout
from zensols.config import ConfigFactory
from zensols.cli import ApplicationError
from zensols.nlp import FeatureDocumentParser, FeatureDocument, FeatureToken
from . import (
NoteEvent, NoteEventPersister, NoteFormat, Note,
HospitalAdmission, HospitalAdmissionDbStash, Corpus,
NoteDocumentPreemptiveStash,
)
logger = logging.getLogger(__name__)
@dataclass
class Application(object):
"""A utility library for parsing the MIMIC-III corpus
"""
config_factory: ConfigFactory = field()
"""Used to get temporary resources"""
doc_parser: FeatureDocumentParser = field()
"""Used to parse command line documents."""
corpus: Corpus = field()
"""The contains assets to access the MIMIC-III corpus via database."""
preempt_stash: NoteDocumentPreemptiveStash = field()
"""A multi-processing stash used to preemptively parse notes."""
def write_features(self, sent: str, out_file: Path = None):
"""Parse a sentence as MIMIC data and write features to CSV.
:param sent: the sentence to parse and generate features
:param out_file: the file to write
"""
import pandas as pd
doc: FeatureDocument = self.doc_parser(sent)
df = pd.DataFrame(map(lambda t: t.asdict(), doc.tokens))
out_file = Path('feature.csv') if out_file is None else out_file
df.to_csv(out_file)
logger.info(f'wrote: {out_file}')
def show(self, sent: str):
"""Parse a sentence and print all features for each token.
:param sent: the sentence to parse and generate features
"""
fids = set(FeatureToken.WRITABLE_FEATURE_IDS) | \
{'ent_', 'cui_', 'mimic_'}
fids = fids - set('dep i_sent sent_i tag children is_wh'.split())
        # parse the text into a hierarchical language data structure
doc: FeatureDocument = self.doc_parser(sent)
print('tokens:')
for tok in doc.token_iter():
print(f'{tok.norm}:')
tok.write_attributes(1, include_type=False,
feature_ids=fids, inline=True)
print('-' * 80)
        # named entities are also stored as contiguous tokens at the document
# level
print('named entities:')
for e in doc.entities:
print(f'{e}: cui={e[0].cui_}/{e[0].ent_}')
def corpus_stats(self):
"""Print corpus statistics."""
self.corpus.write()
def uniform_sample_hadm_ids(self, limit: int = 1):
"""Print a uniform random sample of admission hadm_ids.x
:param limit: the number to fetch
"""
for i in self.corpus.admission_persister.uniform_sample_hadm_ids(limit):
print(i)
def _get_adm(self, hadm_id: str) -> HospitalAdmission:
stash: HospitalAdmissionDbStash = self.corpus.hospital_adm_stash
if hadm_id == '-':
adm = next(iter(stash.values()))
else:
adm = stash[str(hadm_id)]
return adm
def write_admission_summary(self, hadm_id: str):
"""Write an admission note categories and section names.
:param hadm_id: the hospital admission ID or ``-`` for a random ID
"""
adm: HospitalAdmission = self._get_adm(hadm_id)
adm.write()
def write_discharge_reports(self, limit: int = 1,
out_dir: Path = Path('.')):
"""Write discharge reports (as apposed to addendums).
:param limit: the number to fetch
:param out_dir: the output directory
"""
np: NoteEventPersister = self.corpus.note_event_persister
out_dir.mkdir(parents=True, exist_ok=True)
notes: Tuple[NoteEvent] = np.get_discharge_reports(limit)
for note in notes:
path = out_dir / f'{note.hadm_id}.txt'
with open(path, 'w') as f:
note.write(writer=f)
if logger.isEnabledFor(logging.INFO):
logger.info(f'wrote {len(notes)} notes to {out_dir}')
def _write_note(self, note: NoteEvent, out_file: Path,
output_format: NoteFormat):
if out_file is None:
out_file = Path(stdout.STANDARD_OUT_PATH)
with stdout(out_file) as f:
note.write_by_format(writer=f, note_format=output_format)
if out_file.name != stdout.STANDARD_OUT_PATH:
logger.info(f'wrote note to {out_file}')
def write_note(self, row_id: int, out_file: Path = None,
output_format: NoteFormat = NoteFormat.text):
"""Write a note.
:param row_id: the unique note identifier in the NOTEEVENTS table
:param output_format: the output format of the note
:param out_file: the file to write
"""
note: NoteEvent = self.corpus.get_note_by_id(row_id)
self._write_note(note, out_file, output_format)
def write_admission(self, hadm_id: str, out_dir: Path = Path('.'),
output_format: NoteFormat = NoteFormat.text):
"""Write all the notes of an admission.
:param hadm_id: the hospital admission ID or ``-`` for a random ID
:param out_dir: the output directory
:param output_format: the output format of the note
"""
if hadm_id == '-':
stash: HospitalAdmissionDbStash = self.corpus.hospital_adm_stash
hadm_id = next(iter(stash.keys()))
adm: HospitalAdmission = self._get_adm(hadm_id)
out_dir = out_dir / 'adm' / hadm_id
out_dir.mkdir(parents=True, exist_ok=True)
note: Note
for note in adm.notes:
path: Path = out_dir / f'{note.normal_name}.{output_format.ext}'
self._write_note(note, path, output_format)
def write_hadm_id_for_note(self, row_id: int) -> int:
"""Get the hospital admission ID (``hadm_id``) that has note ``row_id``.
:param row_id: the unique note identifier in the NOTEEVENTS table
"""
np: NoteEventPersister = self.corpus.note_event_persister
hadm_id: Optional[int] = np.get_hadm_id(row_id)
if hadm_id is None:
raise ApplicationError(f'No note found: {row_id}')
print(hadm_id)
return hadm_id
def preempt_notes(self, input_file: Path, workers: int = None):
"""Preemptively document parse notes across multiple threads.
:param input_file: a file of notes' unique ``row_id`` IDs
:param workers: the number of processes to use to parse notes
"""
if logger.isEnabledFor(logging.INFO):
if input_file is None:
                from_str: str = 'all note annotations'
else:
from_str: str = str(input_file)
logger.info(f'preempting notes from {from_str} ' +
f'for {workers} workers')
try:
with open(input_file) as f:
row_ids = tuple(map(str.strip, f.readlines()))
except OSError as e:
raise ApplicationError(
f'Could not preempt notes from file {input_file}: {e}') from e
self.preempt_stash.process_keys(row_ids, workers)
def _get_temporary_results_dir(self) -> Path:
return Path(self.config_factory.config.get_option(
'temporary_results_dir', 'mimic_default'))
def clear(self):
"""Clear the all cached admission and note parses."""
self.corpus.clear()
def _unmatched_tokens(self, hadm_id: str, no_ents: bool = False):
"""Find all unmatched tokens for an admission.
:param hadm_id: the hospital admission ID or ``-`` for a random ID
:param no_ents: do not include unmatched entities
"""
adm: HospitalAdmission = self._get_adm(hadm_id)
for note in adm.notes:
print(note)
norm = note.doc.norm
found_unmatch_tok = norm.find('**') > -1
found_unmatch_ent = norm.find('<UNKNOWN>') > -1
if found_unmatch_tok or (not no_ents and found_unmatch_ent):
print('original:')
print(note.doc.text)
print('norm:')
print(norm)
print('_' * 120) | zensols.mimic | /zensols.mimic-1.4.2-py3-none-any.whl/zensols/mimic/app.py | app.py |
__author__ = 'Paul Landes'
from typing import Tuple, Union, Optional, ClassVar, List
from dataclasses import dataclass, field
import logging
import re
from frozendict import frozendict
from spacy.language import Language
from spacy.lang.char_classes import ALPHA
from spacy.util import compile_infix_regex
from zensols.nlp import Component, FeatureTokenDecorator, FeatureToken
logger = logging.getLogger(__name__)
@dataclass
class MimicTokenizerComponent(Component):
"""Modifies the spacCy tokenizer to split on colons (``:``) to capture more
MIMIC-III mask tokens.
"""
def init(self, model: Language):
inf = list(model.Defaults.infixes)
SCHARS = ',:;/=@#%+.-'
# split on newlines; handle newline as an infix token
inf.insert(0, r'\n')
# split on special characters before
inf.insert(1, r"(?<=\*\*\])(?:[{s}])(?=[{a}0-9])".format(
a=ALPHA, s=SCHARS))
inf.insert(2, r"(?<=\*\*\])(?=[{a}0-9])".format(a=ALPHA))
# split on special characters after
inf.insert(3, r"(?<=[{a}0-9])(?:[{s}])(?=\[\*\*)".format(
a=ALPHA, s=SCHARS))
inf.insert(4, r"(?<=[{a}0-9])(?=\[\*\*)".format(a=ALPHA))
# split on what look to be ranges or hospital1-hospital2
inf.insert(3, r"(?<=\*\*\])(?:[{s}])(?=\[\*\*)".format(s=SCHARS))
infix_re = compile_infix_regex(inf)
model.tokenizer.infix_finditer = infix_re.finditer
def __hash__(self) -> int:
return super().__hash__()
@dataclass
class MimicTokenDecorator(FeatureTokenDecorator):
"""Contains the MIMIC-III regular expressions and other patterns to annotate
    and normalize feature tokens. The class finds mask tokens and
separators (such as a long string of dashes or asterisks).
Attribute :obj:`onto_mapping` is a mapping from the MIMIC symbol in
:obj:`token_entities` (2nd value in tuple) to Onto Notes 5, which is used as
the NER symbol in spaCy.
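    A behavior sketch; ``doc_parser`` is assumed to be a MIMIC configured
    :class:`~zensols.nlp.FeatureDocumentParser` that registers this
    decorator::

        doc = doc_parser('Seen by [**First Name**] on [**2154-4-12**].')
        for tok in doc.token_iter():
            # mask tokens get mimic_='mask' and a replaced norm (i.e. FIRSTNAME)
            print(tok.norm, tok.mimic_, tok.onto_)
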
"""
TOKEN_FEATURE_ID: ClassVar[str] = 'mimic_'
"""The feature ID to use for MIMIC-III tokens."""
ONTO_FEATURE_ID: ClassVar[str] = 'onto_'
"""The feature ID to use for the Onto Notes 5 (:obj:`onto_mapping`)."""
MASK_REGEX: ClassVar[re.Pattern] = re.compile(r'\[\*\*([^\*]+)\*\*\]')
"""Matches mask tokens."""
MASK_TOKEN_FEATURE: ClassVar[str] = 'mask'
"""The value given from entity :obj:`TOKEN_FEATURE_ID` for mask tokens
(i.e. ``[**First Name**]``).
"""
SEPARATOR_TOKEN_FEATURE: ClassVar[str] = 'separator'
"""The value name of separators defined by :obj:`SEP_REGEX`.
"""
SEP_REGEX: ClassVar[re.Pattern] = re.compile(r'(_{5,}|[*]{5,}|[-]{5,})')
"""Matches text based separators such as a long string of dashes."""
UNKNOWN_ENTITY: ClassVar[str] = '<UNKNOWN>'
"""The mask nromalized token form for unknown MIMIC entity text
(i.e. First Name).
"""
_REGEXES: ClassVar[List] = [[MASK_REGEX, MASK_TOKEN_FEATURE],
[SEP_REGEX, SEPARATOR_TOKEN_FEATURE]]
token_entities: Tuple[Tuple[Union[re.Pattern, str]], str, Optional[str]] = \
field(default=(
(re.compile(r'^First Name'), 'FIRSTNAME', 'PERSON'),
(re.compile(r'^Last Name'), 'LASTNAME', 'PERSON'),
(re.compile(r'^21\d{2}-\d{1,2}-\d{1,2}$'), 'DATE', 'DATE')))
"""A list of psuedo token patterns and a string to replace with the
respective match.
"""
token_replacements: Tuple[Tuple[Union[re.Pattern, str], str]] = field(
default=())
"""A list of token text to replaced as the normalized token text."""
def __post_init__(self):
self.onto_mapping = {}
self._compile_regexes('token_entities')
self._compile_regexes('token_replacements')
self.onto_mapping = frozendict(self.onto_mapping)
def _compile_regexes(self, attr: str):
repls = []
ent: str
pat: Union[re.Pattern, str]
for pat, ent, onto_name in getattr(self, attr):
if isinstance(pat, str):
pat = re.compile(pat)
repls.append((pat, ent))
if onto_name is not None:
self.onto_mapping[ent] = onto_name
setattr(self, attr, tuple(repls))
def decorate(self, token: FeatureToken):
pat: re.Pattern
ent: str
oid: str = FeatureToken.NONE
matched: bool = False
for pat, ent in self._REGEXES:
m: re.Match = pat.match(token.norm)
if m is not None:
matched = True
setattr(token, self.TOKEN_FEATURE_ID, ent)
if ent == self.MASK_TOKEN_FEATURE:
token.norm: str = self.UNKNOWN_ENTITY
mask_val: str = m.group(1)
for regex, repl in self.token_entities:
if regex.match(mask_val) is not None:
oid = self.onto_mapping.get(repl, FeatureToken.NONE)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dec: {self.TOKEN_FEATURE_ID} ' +
f' -> {ent}, norm -> {mask_val}')
token.norm = repl
break
break
if not matched:
setattr(token, self.TOKEN_FEATURE_ID,
FeatureToken.NONE)
repl: str
for pat, repl in self.token_replacements:
m: re.Match = pat.match(token.norm)
if m is not None:
matched = True
token.norm = repl
break
setattr(token, self.ONTO_FEATURE_ID, oid) | zensols.mimic | /zensols.mimic-1.4.2-py3-none-any.whl/zensols/mimic/tokenizer.py | tokenizer.py |
__author__ = 'Paul Landes'
from typing import Dict, Any, Type, ClassVar, Set, Callable
from dataclasses import dataclass, field, InitVar
import sys
import logging
from datetime import datetime
from io import TextIOBase
from zensols.util import APIError
from zensols.config import Dictable, Settings
from zensols.persist import PersistableContainer, persisted, Stash, FileTextUtil
from zensols.nlp import FeatureDocument
logger = logging.getLogger(__name__)
class MimicError(APIError):
"""Raised for any application level error."""
pass
class RecordNotFoundError(MimicError):
"""Raised on any domain/container class error."""
def __init__(self, actor: Type, key_type: str, key: int):
actor = actor.__class__.__name__
super().__init__(f'Actor {actor} could not find {key_type} ID {key}')
class MimicParseError(MimicError):
"""Raised for MIMIC note parsing errors."""
def __init__(self, text: str):
self.text = text
trunc = 50
if len(text) > trunc:
text = text[0:trunc] + '...'
super().__init__(f'Could not parse: <{text}>')
@dataclass(repr=False)
class MimicContainer(PersistableContainer, Dictable):
"""Abstract base class for data containers, which are plain old Python
objects that are CRUD'd from DAO persisters.
"""
row_id: int = field()
"""Unique row identifier."""
def __post_init__(self):
super().__init__()
if self.row_id is None:
raise RecordNotFoundError(self, 'row id', self.row_id)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
dct: Dict[str, Any] = None):
if dct is None:
dct = self.asdict()
del dct['row_id']
self._write_line(f'row_id: {self.row_id}', depth, writer)
self._write_object(dct, depth + 1, writer)
def __str__(self) -> str:
return f'id: {self.row_id}'
def __repr__(self) -> str:
return self.__str__()
@dataclass(repr=False)
class Admission(MimicContainer):
"""The ADMISSIONS table gives information regarding a patient’s admission to
the hospital. Since each unique hospital visit for a patient is assigned a
unique HADM_ID, the ADMISSIONS table can be considered as a definition
table for HADM_ID. Information available includes timing information for
admission and discharge, demographic information, the source of the
admission, and so on.
Table source: Hospital database.
Table purpose: Define a patient’s hospital admission, HADM_ID.
Number of rows: 58976
Links to:
* PATIENTS on SUBJECT_ID
:see: `Dictionary <https://mimic.physionet.org/mimictables/admissions/>`_
"""
subject_id: int = field()
"""Foreign key. Identifies the patient."""
hadm_id: int = field()
"""Primary key. Identifies the hospital admission."""
admittime: datetime = field()
"""Time of admission to the hospital."""
dischtime: datetime = field()
"""Time of discharge from the hospital."""
deathtime: datetime = field()
"""Time of death."""
admission_type: str = field()
"""Type of admission, for example emergency or elective."""
admission_location: str = field()
"""Admission location."""
discharge_location: str = field()
"""Discharge location"""
insurance: str = field()
"""The INSURANCE, LANGUAGE, RELIGION, MARITAL_STATUS, ETHNICITY columns
describe patient demographics. These columns occur in the ADMISSIONS table
as they are originally sourced from the admission, discharge, and transfers
(ADT) data from the hospital database. The values occasionally change
between hospital admissions (HADM_ID) for a single patient
(SUBJECT_ID). This is reasonable for some fields (e.g. MARITAL_STATUS,
RELIGION), but less reasonable for others (e.g. ETHNICITY).
"""
language: str = field()
"""See :obj:`insurance`."""
religion: str = field()
"""See :obj:`insurance`."""
marital_status: str = field()
"""See :obj:`insurance`."""
ethnicity: str = field()
"""See :obj:`insurance`."""
edregtime: datetime = field()
"""Time that the patient was registered and discharged from the emergency
department.
"""
edouttime: datetime = field()
"""See :obj:`edregtime`."""
diagnosis: str = field()
"""The DIAGNOSIS column provides a preliminary, free text diagnosis for the
patient on hospital admission. The diagnosis is usually assigned by the
admitting clinician and does not use a systematic ontology. As of MIMIC-III
v1.0 there were 15,693 distinct diagnoses for 58,976 admissions. The
diagnoses can be very informative (e.g. chronic kidney failure) or quite
vague (e.g. weakness). Final diagnoses for a patient’s hospital stay are
coded on discharge and can be found in the DIAGNOSES_ICD table. While this
field can provide information about the status of a patient on hospital
admission, it is not recommended to use it to stratify patients.
"""
hospital_expire_flag: int = field()
"""This indicates whether the patient died within the given
hospitalization. 1 indicates death in the hospital, and 0 indicates survival
to hospital discharge.
"""
has_chartevents_data: int = field()
"""Hospital admission has at least one observation in the CHARTEVENTS table.
"""
@dataclass(repr=False)
class Patient(MimicContainer):
"""Table source: CareVue and Metavision ICU databases.
Table purpose: Defines each SUBJECT_ID in the database, i.e. defines a
single patient.
Number of rows: 46,520
Links to:
ADMISSIONS on SUBJECT_ID
ICUSTAYS on SUBJECT_ID
"""
row_id: int = field()
"""Unique row identifier."""
subject_id: int = field()
"""Primary key. Identifies the patient."""
gender: str = field()
"""Gender (one character: ``M``/``F``)."""
dob: datetime = field()
"""Date of birth."""
dod: datetime = field()
"""Date of death. Null if the patient was alive at least 90 days post
hospital discharge.
"""
dod_hosp: datetime = field()
"""Date of death recorded in the hospital records."""
dod_ssn: datetime = field()
"""Date of death recorded in the social security records."""
expire_flag: int = field()
"""Flag indicating that the patient has died."""
@dataclass(repr=False)
class HospitalAdmissionContainer(MimicContainer):
"""Any data container that has a unique identifier with an (inpatient)
non-null identifier.
"""
hadm_id: int = field()
"""Primary key. Identifies the hospital admission."""
@dataclass(repr=False)
class ICD9Container(MimicContainer):
"""A data container that has ICD-9 codes.
"""
icd9_code: str = field()
"""ICD9 code for the diagnosis or procedure."""
short_title: str = field()
"""Short title associated with the code."""
long_title: str = field()
"""Long title associated with the code."""
@dataclass(repr=False)
class Diagnosis(ICD9Container):
"""Table source: Hospital database.
Table purpose: Contains ICD diagnoses for patients, most notably ICD-9
diagnoses.
Number of rows: 651,047
Links to:
PATIENTS on SUBJECT_ID
ADMISSIONS on HADM_ID
D_ICD_DIAGNOSES on ICD9_CODE
"""
pass
@dataclass(repr=False)
class Procedure(ICD9Container):
"""Table source: Hospital database.
Table purpose: Contains ICD procedures for patients, most notably ICD-9
procedures.
Number of rows: 240,095
Links to:
PATIENTS on SUBJECT_ID
ADMISSIONS on HADM_ID
D_ICD_PROCEDURES on ICD9_CODE
"""
pass
@dataclass(repr=False)
class NoteEvent(MimicContainer):
"""Table source: Hospital database.
Table purpose: Contains all notes for patients.
Number of rows: 2,083,180
Links to:
* PATIENTS on SUBJECT_ID
* ADMISSIONS on HADM_ID
* CAREGIVERS on CGID
:see: `Dictionary <https://mimic.physionet.org/mimictables/noteevents/>`_
"""
_DICTABLE_WRITE_EXCLUDES: ClassVar[Set[str]] = {'hadm_id', 'text'}
_PERSITABLE_PROPERTIES: ClassVar[Set[str]] = set()
_PERSITABLE_TRANSIENT_ATTRIBUTES: ClassVar[Set[str]] = {
'_trans_context_var'}
subject_id: int = field()
"""Foreign key. Identifies the patient.
Identifiers which specify the patient: SUBJECT_ID is unique to a patient
and HADM_ID is unique to a patient hospital stay.
:see :obj:`hadm_id`
"""
hadm_id: int = field()
"""Foreign key. Identifies the hospital admission."""
chartdate: datetime = field()
"""Date when the note was charted.
CHARTDATE records the date at which the note was charted. CHARTDATE will
always have a time value of 00:00:00.
CHARTTIME records the date and time at which the note was charted. If both
CHARTDATE and CHARTTIME exist, then the date portions will be
identical. All records have a CHARTDATE. A subset are missing
CHARTTIME. More specifically, notes with a CATEGORY value of ‘Discharge
Summary’, ‘ECG’, and ‘Echo’ never have a CHARTTIME, only CHARTDATE. Other
categories almost always have both CHARTTIME and CHARTDATE, but there is a
small amount of missing data for CHARTTIME (usually less than 0.5% of the
total number of notes for that category).
STORETIME records the date and time at which a note was saved into the
system. Notes with a CATEGORY value of ‘Discharge Summary’, ‘ECG’,
‘Radiology’, and ‘Echo’ never have a STORETIME. All other notes have a
STORETIME.
"""
charttime: datetime = field()
"""Date and time when the note was charted. Note that some notes
(e.g. discharge summaries) do not have a time associated with them: these
notes have NULL in this column.
:see: :obj:`chartdate`
"""
storetime: datetime = field()
"""See :obj:`chartdate`."""
category: str = field()
"""Category of the note, e.g. Discharge summary.
CATEGORY and DESCRIPTION define the type of note recorded. For example, a
CATEGORY of ‘Discharge summary’ indicates that the note is a discharge
summary, and the DESCRIPTION of ‘Report’ indicates a full report while a
DESCRIPTION of ‘Addendum’ indicates an addendum (additional text to be
added to the previous report).
"""
description: str = field()
"""A more detailed categorization for the note, sometimes entered by
free-text."""
cgid: int = field()
"""Foreign key. Identifies the caregiver."""
iserror: bool = field()
"""Flag to highlight an error with the note."""
text: str = field()
"""Content of the note."""
context: InitVar[Settings] = field()
"""Contains resources needed by new and re-hydrated notes, such as the
document stash.
"""
def __post_init__(self, context: Settings):
super().__post_init__()
if self.hadm_id is None:
raise MimicError('NoteEvent is missing hadm_id')
self.category = self.category.strip()
self.text = self.text.rstrip()
self._trans_context = context.asdict()
@property
def _trans_context(self) -> Dict[str, Any]:
return self._trans_context_var
@_trans_context.setter
def _trans_context(self, trans_context: Dict[str, Any]):
if hasattr(self, '_trans_context_var') and \
self._trans_context_var is not None:
self._trans_context_var.update(trans_context)
else:
self._trans_context_var = dict(trans_context)
self._trans_context_update(self._trans_context)
def _trans_context_update(self, trans_context: Dict[str, Any]):
pass
@property
def _doc_stash(self) -> Stash:
return self._trans_context['doc_stash']
@property
@persisted('_truncated_text', transient=True)
def truncted_text(self) -> str:
"""A beginning substring of the note's text useful for debugging."""
return self._trunc(self.text.strip(), 70).replace('\n', ' ').strip()
@property
@persisted('_doc', transient=True)
def doc(self) -> FeatureDocument:
"""The parsed document of the :obj:`name` of the section."""
return self._get_doc()
@property
@persisted('_id')
def id(self) -> str:
"""The unique identifier of this note event."""
return FileTextUtil.normalize_text(self.category).lower()
def get_normal_name(self, include_desc: bool = True) -> str:
"""A normalized name of the note useful as a file name (sans extension).
:param include_desc: whether or not to add the note's desc field, which
adds an extra dash (``-``) for any subsequent file
name parsing
"""
nfn: Callable = FileTextUtil.normalize_text
if include_desc:
return (f'{self.row_id}--{nfn(self.category)}--' +
nfn(self.description))
else:
return nfn(f'{self.row_id}-{nfn(self.category)}')
@property
def normal_name(self) -> str:
"""A normalized name of the note useful as a file name (sans extension).
"""
return self.get_normal_name()
def _get_doc(self) -> FeatureDocument:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'getting doc for {self.row_id} ' +
f'from {type(self._doc_stash)}')
return self._doc_stash[str(self.row_id)]
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
line_limit: int = sys.maxsize, write_divider: bool = True,
indent_fields: bool = True, note_indent: int = 1,
include_fields: bool = True):
"""Write the note event.
:param line_limit: the number of lines to write from the note text
:param write_divider: whether to write a divider before the note text
:param indent_fields: whether to indent the fields of the note
:param note_indent: the number of indentation levels for the note fields
:param include_fields: whether to write the note's metadata fields
"""
if include_fields:
dct = self._writable_dict()
if indent_fields:
super().write(depth, writer, dct)
else:
self._write_object(dct, depth, writer)
if line_limit is not None and line_limit > 0:
text = '\n'.join(
filter(lambda s: len(s.strip()) > 0, self.text.split('\n')))
if write_divider:
self._write_divider(depth + note_indent, writer, char='_')
self._write_block(text, depth + note_indent, writer,
limit=line_limit)
if write_divider:
self._write_divider(depth + note_indent, writer, char='_')
def __str__(self):
return f'{self.row_id}: ({self.category}): {self.truncted_text}'
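# Usage sketch (not part of the library; the variable name is an assumption):
# given a ``NoteEvent`` instance ``event`` retrieved from a
# ``NoteEventPersister``, the note can be rendered with a limited number of
# text lines and its parsed document accessed lazily:
#
#     event.write(line_limit=5, note_indent=2)
#     print(event.truncted_text)   # short debugging preview of the text
#     doc = event.doc              # parsed on first access via the doc stash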
__author__ = 'Paul Landes'
from typing import Iterable, ClassVar
from dataclasses import dataclass
from abc import ABCMeta, abstractmethod
import re
from zensols.nlp import LexicalSpan
from . import Section, SectionAnnotatorType, Note
@dataclass(repr=False)
class RegexNote(Note, metaclass=ABCMeta):
"""Base class used to collect subclass regular expressions captures and
create sections from them.
"""
@abstractmethod
def _get_matches(self, text: str) -> Iterable[re.Match]:
pass
def _get_section_annotator_type(self) -> SectionAnnotatorType:
return SectionAnnotatorType.REGULAR_EXPRESSION
def _get_sections(self) -> Iterable[Section]:
# append newlines so regexes that expect two newlines between sections also match the final section
ext_text = self.text + '\n\n'
matches: Iterable[re.Match] = self._get_matches(ext_text)
matches = filter(lambda m: (m.end() - m.start() > 0), matches)
secs = []
sid = 0
try:
while matches:
m: re.Match = next(matches)
name, sec_text = m.groups()
sec = Section(
id=sid,
name=None,
container=self,
header_spans=(LexicalSpan(m.start(1), m.end(1)),),
body_span=LexicalSpan(m.start(2), m.end(2)))
secs.append(sec)
sid += 1
except StopIteration:
pass
if len(secs) == 0:
secs = super()._get_sections()
return secs
@dataclass(repr=False)
class DischargeSummaryNote(RegexNote):
"""Contains sections for the discharge summary. There should be only one of
these per hospital admission.
"""
CATEGORY: ClassVar[str] = 'Discharge summary'
_SECTION_REGEX: ClassVar[re.Pattern] = {
'header': re.compile(r'([a-zA-Z ]+):\n+(.+?)\n{2,}', re.DOTALL),
'para': re.compile(r'([A-Z ]+):[ ]{2,}(.+?)\n{2,}', re.DOTALL),
}
def _get_matches(self, text: str) -> Iterable[re.Match]:
regex: re.Pattern
if text.find('HISTORY OF PRESENT ILLNESS:') > -1:
regex = self._SECTION_REGEX['para']
else:
regex = self._SECTION_REGEX['header']
return re.finditer(regex, text)
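# Illustrative sketch (not part of the library): the 'para' regular expression
# above captures an upper case header followed by at least two spaces, then the
# body up to a blank line.  With a hypothetical discharge summary fragment:
#
#     import re
#     text = 'HISTORY OF PRESENT ILLNESS:  87 yo male with chest pain.\n\n'
#     m = re.search(r'([A-Z ]+):[ ]{2,}(.+?)\n{2,}', text, re.DOTALL)
#     assert m.groups() == ('HISTORY OF PRESENT ILLNESS',
#                           '87 yo male with chest pain.')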
@dataclass(repr=False)
class NursingOtherNote(RegexNote):
CATEGORY: ClassVar[str] = 'Nursing/other'
_SECTION_REGEX: ClassVar[re.Pattern] = {
'para': re.compile(r'([a-zA-Z ]+):[ ](.+?)\n{2,}', re.DOTALL),
}
def _get_matches(self, text: str) -> Iterable[re.Match]:
regex: re.Pattern = self._SECTION_REGEX['para']
return re.finditer(regex, text)
@dataclass(repr=False)
class EchoNote(RegexNote):
CATEGORY: ClassVar[str] = 'Echo'
_SECTION_REGEX: ClassVar[re.Pattern] = {
'para': re.compile(
'(' +
'|'.join('conclusions findings impression indication'.split() +
['patient/test information', 'clinical implications']) +
r'):[\n ]+(.+?)\n{2,}', re.DOTALL | re.IGNORECASE),
}
def _get_matches(self, text: str) -> Iterable[re.Match]:
regex: re.Pattern = self._SECTION_REGEX['para']
return re.finditer(regex, text)
@dataclass(repr=False)
class PhysicianNote(RegexNote):
CATEGORY: ClassVar[str] = 'Physician'
_SECTION_REGEX: ClassVar[re.Pattern] = {
'header': re.compile(
r'[ ]{3}(' +
'HPI|Current medications|24 Hour Events|Last dose of Antibiotics|Flowsheet Data|physical examination|labs / radiology|assessment and plan|code status|disposition' +
r'):?\n(.+?)\n[ ]{3}[a-zA-Z0-9/ ]+:', re.DOTALL | re.IGNORECASE),
}
def _get_matches(self, text: str) -> Iterable[re.Match]:
regex: re.Pattern = self._SECTION_REGEX['header']
return re.finditer(regex, text)
@dataclass(repr=False)
class RadiologyNote(RegexNote):
CATEGORY: ClassVar[str] = 'Radiology'
_SECTION_REGEX: ClassVar[re.Pattern] = {
'para': re.compile(r'\s*([A-Z ]+):[\n ]{2,}(.+?)\n{2,}', re.DOTALL),
}
def _get_matches(self, text: str) -> Iterable[re.Match]:
regex: re.Pattern = self._SECTION_REGEX['para']
return re.finditer(regex, text)
@dataclass(repr=False)
class ConsultNote(RegexNote):
"""Contains sections for the discharge summary. There should be only one of
these per hospital admission.
"""
CATEGORY: ClassVar[str] = 'Consult'
_SECTION_REGEX: ClassVar[re.Pattern] = {
'header': re.compile(r'\s*([a-zA-Z/ ]+):\n+(.+?)(?:[\n]{2,}|\s+\.\n)',
re.DOTALL),
}
def _get_matches(self, text: str) -> Iterable[re.Match]:
regex: re.Pattern = self._SECTION_REGEX['header']
return re.finditer(regex, text)
__author__ = 'Paul Landes'
from typing import Tuple, Iterable, Optional, List
from dataclasses import dataclass, field
import logging
import sys
from itertools import chain
from zensols.config import Settings
from zensols.persist import persisted, ReadOnlyStash
from zensols.db import DbPersister, DataClassDbPersister
from zensols.nlp import FeatureDocument, FeatureDocumentParser
from . import (
MimicError, RecordNotFoundError,
Admission, Patient, Diagnosis, Procedure, NoteEvent
)
logger = logging.getLogger(__name__)
@dataclass
class AdmissionPersister(DataClassDbPersister):
"""Manages instances of :class:`.Admission`.
"""
def __post_init__(self):
self.bean_class = Admission
super().__post_init__()
def get_by_hadm_id(self, hadm_id: int) -> Admission:
"""Return the admission by it's hospital admission ID."""
adm = self.execute_by_name(
'select_admission_by_hadm_id', params=(hadm_id,))
if len(adm) == 0:
raise RecordNotFoundError(self, 'hadm', hadm_id)
if len(adm) > 1:
raise MimicError(f'Found {len(adm)}>1 record(s) for hadm {hadm_id}')
return adm[0]
def get_hadm_ids(self, subject_id: int) -> Iterable[int]:
"""Get all hospital admission IDs (``hadm_id``) for a patient."""
ids = self.execute_by_name(
'select_hadm_for_subject_id', params=(subject_id,),
row_factory='tuple')
return map(lambda x: x[0], ids)
def get_by_subject_id(self, subject_id: int) -> Tuple[Admission]:
"""Get an admissions by patient ID."""
return self.execute_by_name(
'select_admission_by_subject_id', params=(subject_id,))
def get_admission_counts(self, limit: int = sys.maxsize) -> \
Tuple[Tuple[int, int]]:
"""Return the counts of subjects for each hospital admission.
:param limit: the limit on the return admission counts
:return: a list of tuples, each in the form (``subject_id``, ``count``)
"""
return self.execute_by_name(
'select_admission_counts', params=(limit,),
row_factory='tuple')
def uniform_sample_hadm_ids(self, limit: int) -> Iterable[int]:
"""Return a sample from the uniform distribution of admission IDs.
"""
return self.execute_by_name(
'random_hadm', params=(limit,), row_factory=lambda x: x)
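# Usage sketch (hypothetical IDs, not part of this module): given a configured
# ``AdmissionPersister`` instance ``adm_persister``:
#
#     adm = adm_persister.get_by_hadm_id(100001)       # a single Admission
#     stays = tuple(adm_persister.get_hadm_ids(42))    # all stays for subject 42
#
# ``get_by_hadm_id`` raises ``RecordNotFoundError`` when the ID is not found.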
@dataclass
class PatientPersister(DataClassDbPersister):
"""Manages instances of :class:`.Patient`.
"""
def __post_init__(self):
self.bean_class = Patient
super().__post_init__()
def get_by_subject_id(self, subject_id: int) -> Patient:
pat = self.execute_by_name(
'select_patient_by_subject_id', params=(subject_id,))
assert len(pat) == 1
return pat[0]
@dataclass
class DiagnosisPersister(DataClassDbPersister):
"""Manages instances of :class:`.Diagnosis`.
"""
def __post_init__(self):
self.bean_class = Diagnosis
super().__post_init__()
def get_by_hadm_id(self, hadm_id: int) -> Diagnosis:
"""Get ICD-9 diagnoses codes by hospital admission IDs.
"""
return self.execute_by_name(
'select_diagnosis_by_hadm_id', params=(hadm_id,))
def get_heart_failure_hadm_ids(self) -> Tuple[int]:
"""Return hospital admission IDs that are heart failure related.
"""
return tuple(map(lambda r: r[0],
self.execute_by_name('select_heart_failure_hadm_id',
row_factory='tuple')))
@dataclass
class ProcedurePersister(DataClassDbPersister):
"""Manages instances of :class:`.Procedure`.
"""
def __post_init__(self):
self.bean_class = Procedure
super().__post_init__()
def get_by_hadm_id(self, hadm_id: int) -> Procedure:
return self.execute_by_name(
'select_procedure_by_hadm_id', params=(hadm_id,))
@dataclass
class NoteDocumentStash(ReadOnlyStash):
"""Reads ``noteevents`` from the database and returns parsed documents.
"""
doc_parser: FeatureDocumentParser = field(default=None)
"""NER+L medical domain natural langauge parser."""
note_db_persister: DbPersister = field(default=None)
"""Fetches the note text by key from the DB."""
def load(self, row_id: str) -> FeatureDocument:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading row ID {row_id}')
text = self.note_db_persister.execute_by_name(
'select_note_text_by_id', params=(row_id,), row_factory='tuple')
# many notes have trailing newlines, which may cause issues with spaCy
# or downstream prediction tasks
return self.doc_parser(text[0][0].strip())
def keys(self) -> Iterable[str]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('returning all note DB keys')
return map(lambda x: str(x[0]),
self.note_db_persister.execute_by_name(
'select_keys', row_factory='tuple'))
def exists(self, name: str) -> bool:
res = self.note_db_persister.execute_by_name(
'select_hadm_id_by_row_id', params=(name,), row_factory='tuple')
return len(res) > 0
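# Usage sketch (the row ID is hypothetical): ``NoteDocumentStash`` acts as a
# read-only mapping from note ``row_id`` strings to parsed feature documents:
#
#     if doc_stash.exists('1234'):
#         doc = doc_stash.load('1234')   # parses the note text with doc_parser
#         print(sum(1 for _ in doc.token_iter()))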
@dataclass
class NoteEventPersister(DataClassDbPersister):
"""Manages instances of :class:`.NoteEvent`.
"""
mimic_note_context: Settings = field(default=None)
"""Contains resources needed by new and re-hydrated notes, such as the
document stash.
"""
def __post_init__(self):
self.bean_class = NoteEvent
super().__post_init__()
self.row_factory = self._create_bean
def _create_bean(self, *args):
return NoteEvent(*args, context=self.mimic_note_context)
@property
@persisted('_categories', cache_global=True)
def categories(self) -> Tuple[str]:
"""All unique categories."""
cats = self.execute_by_name('categories', row_factory='tuple')
return tuple(map(lambda x: x[0], cats))
def get_note_count(self, hadm_id: int) -> int:
"""Return the count of notes for a hospital admission.
:param hadm_id: the hospital admission ID
"""
return self.execute_by_name(
'select_note_count', params=(hadm_id,), row_factory='tuple')[0][0]
def get_note_counts_by_subject_id(self, subject_id: int) -> \
Tuple[Tuple[int, int]]:
"""Get counts of notes related to a subject.
:param subject_id: the patient's ID
:return: tuple of (``hadm_id``, ``count``) pairs for a subject
"""
return self.execute_by_name(
'select_note_count_by_subject_id', params=(subject_id,),
row_factory='tuple')
def get_row_ids_by_hadm_id(self, hadm_id: int) -> Tuple[int]:
"""Return all note row IDs for a admission ID."""
return tuple(chain.from_iterable(
self.execute_by_name(
'select_row_ids_by_hadm_id', params=(hadm_id,),
row_factory='identity')))
def get_notes_by_hadm_id(self, hadm_id: int) -> Tuple[NoteEvent]:
"""Return notes by hospital admission ID.
:param hadm_id: the hospital admission ID
"""
return self.execute_by_name(
'select_notes_by_hadm_id', params=(hadm_id,))
def get_hadm_id(self, row_id: int) -> Optional[int]:
"""Return the hospital admission for a note.
:param row_id: the unique ID of the note event
:return: the hospital admission unique ID ``hadm_id`` if ``row_id`` is
in the database
"""
maybe_row: List[int] = self.execute_by_name(
'select_hadm_id_by_row_id', params=(row_id,),
row_factory=lambda x: x)
if len(maybe_row) > 0:
return maybe_row[0]
def get_hadm_ids(self, row_ids: Iterable[int]) -> Iterable[int]:
"""Return the hospital admission for a set of note.
:param row_id: the unique IDs of the note events
:return: the hospital admission admissions unique ID ``hadm_id``
"""
return map(self.get_hadm_id, row_ids)
def get_notes_by_category(self, category: str,
limit: int = sys.maxsize) -> Tuple[NoteEvent]:
"""Return notes by what the category to which they belong.
:param category: the category of the note (i.e. ``Radiology``)
:param limit: the limit of notes to return
"""
return self.execute_by_name(
'select_notes_by_category', params=(category, limit))
def get_discharge_reports(self, limit: int = sys.maxsize) -> \
Tuple[NoteEvent]:
"""Return discharge reports (as apposed to addendums).
:param limit: the limit of notes to return
"""
return self.execute_by_name('select_discharge_reports', params=[limit])
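# Usage sketch (hypothetical IDs): with a configured ``NoteEventPersister``
# instance ``np``:
#
#     cats = np.categories                          # all unique note categories
#     n_notes = np.get_note_count(100001)           # note count for an admission
#     radiology = np.get_notes_by_category('Radiology', limit=10)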
__author__ = 'Paul Landes'
from typing import Tuple, Dict, Iterable, List, Set, Callable, ClassVar, Any
from dataclasses import dataclass, field
import sys
import os
import logging
from functools import reduce
import collections
import itertools as it
from frozendict import frozendict
from io import TextIOBase
import pandas as pd
from zensols.persist import (
PersistableContainer, persisted, Primeable, Stash,
ReadOnlyStash, FactoryStash, KeySubsetStash,
)
from zensols.config import Dictable, ConfigFactory, Settings
from zensols.multi import MultiProcessDefaultStash
from zensols.db import BeanStash
from . import (
RecordNotFoundError, Admission, Patient, Diagnosis, Procedure, NoteEvent,
DiagnosisPersister, ProcedurePersister, PatientPersister,
NoteEventPersister, AdmissionPersister, Note, NoteFactory,
)
logger = logging.getLogger(__name__)
@dataclass
class HospitalAdmission(PersistableContainer, Dictable):
"""Represents data collected by a patient over the course of their hospital
admission. Note: this object keys notes using their ``row_id`` IDs used in
the MIMIC dataset as integers and not strings like some note stashes.
"""
_DICTABLE_ATTRIBUTES: ClassVar[List[str]] = 'hadm_id'.split()
_PERSITABLE_TRANSIENT_ATTRIBUTES: ClassVar[Set[str]] = {'_note_stash'}
admission: Admission = field()
"""The admission of the admission."""
patient: Patient = field()
"""The patient/subject."""
diagnoses: Tuple[Diagnosis] = field()
"""The ICD-9 diagnoses of the hospital admission."""
procedures: Tuple[Procedure] = field()
"""The ICD-9 procedures of the hospital admission."""
def __post_init__(self):
super().__init__()
def _init(self, note_stash: Stash):
self._note_stash = note_stash
@property
def hadm_id(self) -> int:
"""The hospital admission unique identifier."""
return self.admission.hadm_id
@property
def notes(self) -> Iterable[Note]:
"""The notes by the care givers."""
return iter(self._note_stash.values())
@property
@persisted('_by_category', transient=True)
def notes_by_category(self) -> Dict[str, Tuple[Note]]:
"""All notes by :obj:`.Note.category` as keys with the list of
resepctive notes as a list as values.
"""
notes = collections.defaultdict(list)
for note in self.notes:
notes[note.category].append(note)
return frozendict({k: tuple(notes[k]) for k in notes.keys()})
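# Usage sketch (``adm`` is an assumed HospitalAdmission instance): iterate the
# notes grouped by category:
#
#     for category, notes in adm.notes_by_category.items():
#         print(category, len(notes))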
def get_duplicate_notes(self, text_start: int = None) -> Tuple[Set[str]]:
"""Notes with the same note text, each in their respective set.
:param text_start: the number of first N characters used to compare
notes, or the entire note text if ``None``
:return: the sets of duplicate note ``row_id``s, or an empty tuple if there
are no duplicates
"""
dups = collections.defaultdict(set)
note: Note
for note in self.notes:
key = note.text
if text_start is not None:
key = key[:text_start]
dups[key].add(note.row_id)
return tuple(map(lambda x: x[1], filter(
lambda x: len(x[1]) > 1, dups.items())))
def get_non_duplicate_notes(self, dup_sets: Tuple[Set[str]],
filter_fn: Callable = None) -> \
Tuple[Tuple[Note, bool]]:
"""Return non-duplicated notes.
:param dup_sets: the duplicate sets generated from
:meth:`get_duplicate_notes`
:param filter_fn: if provided, it is used to filter duplicates; if
everything is filtered, a note from the respective
duplicate set is chosen at random
:return: a tuple of ``(<note>, <is duplicate>)`` pairs
:see: :obj:`duplicate_notes`
"""
def filter_ans(n: Note) -> bool:
if n.row_id in ds:
if filter_fn is not None:
return filter_fn(n)
return True
else:
return False
notes: Tuple[Note] = self.notes
nid: Dict[int, Note] = self.notes_by_id
dups: Set[str] = reduce(lambda x, y: x | y, dup_sets)
# initialize with the notes not in any duplicate group, which are
# non-duplicates
non_dups: List[Note] = list(
map(lambda x: (x, False),
filter(lambda n: n.row_id not in dups, notes)))
ds: Set[str]
for ds in dup_sets:
note: Note
maybe_an: Note = tuple(filter(filter_ans, notes))
if len(maybe_an) > 0:
# if filter_fn is used, it returns preferred notes to use
note = maybe_an[0]
else:
# if there is no preference (all filtered) pick a random
note = nid[next(iter(ds))]
non_dups.append((note, True))
return tuple(non_dups)
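# De-duplication sketch (``adm`` is an assumed HospitalAdmission instance):
#
#     dup_sets = adm.get_duplicate_notes(text_start=100)
#     if dup_sets:   # get_non_duplicate_notes expects at least one duplicate set
#         for note, is_dup in adm.get_non_duplicate_notes(dup_sets):
#             if not is_dup:
#                 print(note.row_id)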
@property
def feature_dataframe(self) -> pd.DataFrame:
"""The feature dataframe for the hospital admission as the constituent
note feature dataframes.
"""
dfs: List[pd.DataFrame] = []
by_cat = self.notes_by_category
for note_key in sorted(by_cat.keys()):
for note in by_cat[note_key]:
df = note.feature_dataframe
df = df[df['ent_type_'] == 'mc']
df['hadm_id'] = self.hadm_id
first = 'hadm_id section'.split()
new_cols = list(filter(lambda c: c not in first, df.columns))
new_cols = first + new_cols
dfs.append(df[new_cols])
return pd.concat(dfs)
def write_notes(self, depth: int = 0, writer: TextIOBase = sys.stdout,
note_limit: int = sys.maxsize,
categories: Set[str] = None,
include_note_id: bool = False,
**note_kwargs):
"""Write the notes of the admission.
:param note_limit: the number of notes to write
:param include_note_id: whether to include the note identification info
:param categories: the note categories to write
:param note_kwargs: the keyword arguments given to
:meth:`.Note.write_full`
"""
notes = self.notes
if categories is not None:
notes = filter(lambda c: c.category in categories, notes)
note: Note
for note in it.islice(notes, note_limit):
if include_note_id:
self._write_line(f'row_id: {note.row_id} ({note.category})',
depth, writer)
note.write_full(depth, writer, **note_kwargs)
else:
note.write_full(depth, writer, **note_kwargs)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_admission: bool = False,
include_patient: bool = False,
include_diagnoses: bool = False,
include_procedures: bool = False,
**note_kwargs):
"""Write the admission and the notes of the admission.
:param note_kwargs: the keyword arguments given to
:meth:`.Note.write_full`
"""
nkwargs = dict(note_line_limit=0,
section_line_limit=0,
include_fields=False,
include_section_divider=False,
include_note_divider=False,
include_section_header=False,
include_note_id=True)
nkwargs.update(note_kwargs)
self._write_line(f'hadm_id: {self.admission.hadm_id}', depth, writer)
if include_admission:
self._write_line('admission:', depth + 1, writer)
self._write_object(self.admission, depth + 2, writer)
if include_patient:
self._write_line('patient:', depth + 1, writer)
self._write_object(self.patient, depth + 2, writer)
if include_diagnoses:
self._write_line('diagnoses:', depth + 1, writer)
self._write_object(self.diagnoses, depth + 2, writer)
if include_procedures:
self._write_line('procedures:', depth + 1, writer)
self._write_object(self.procedures, depth + 2, writer)
if 'note_limit' not in nkwargs or nkwargs['note_limit'] > 0:
self._write_line('notes:', depth + 1, writer)
self.write_notes(depth + 2, writer, **nkwargs)
def write_full(self, depth: int = 0, writer: TextIOBase = sys.stdout,
**kwargs):
"""Write a verbose output of the admission.
:param kwargs: the keyword arguments given to :meth:`write`
"""
wkwargs = dict(note_line_limit=sys.maxsize,
section_line_limit=sys.maxsize,
include_fields=True,
include_section_divider=True,
include_note_divider=True,
include_section_header=True,
include_note_id=False,
include_admission=True,
include_patient=True,
include_diagnoses=True,
include_procedures=True)
wkwargs.update(kwargs)
self.write(depth, writer, **wkwargs)
def keys(self) -> Iterable[int]:
return map(int, self._note_stash.keys())
def __getitem__(self, row_id: int):
return self._note_stash[str(row_id)]
def __contains__(self, row_id: int):
return str(row_id) in self.notes_by_id
def __iter__(self) -> Iterable[Note]:
return iter(self._note_stash.values())
def __len__(self) -> int:
return len(self._note_stash)
def __str__(self):
return (f'subject: {self.admission.subject_id}, ' +
f'hadm: {self.admission.hadm_id}, ' +
f'num notes: {len(self)}')
@dataclass
class _NoteBeanStash(BeanStash):
"""Adapts the :class:`.NoteEventPersister` to a
:class:`~zensols.persist.domain.Stash`.
"""
mimic_note_factory: NoteFactory = field()
"""The factory that creates :class:`.Note` for hopsital admissions."""
def load(self, row_id: str) -> Note:
note_event: NoteEvent = super().load(row_id)
if note_event is not None:
logger.debug(f'creating note from {note_event}')
return self.mimic_note_factory.create(note_event)
@dataclass
class _NoteFactoryStash(FactoryStash):
"""Creates instances of :class:`.Note`.
"""
mimic_note_context: Settings = field(default=None)
"""Contains resources needed by new and re-hydrated notes, such as the
document stash.
"""
def load(self, row_id: str) -> Note:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading note: {row_id}')
note: Note = super().load(row_id)
if note is not None:
logger.debug(f'setting note context on {row_id}')
note._trans_context = self.mimic_note_context
return note
@dataclass
class HospitalAdmissionDbStash(ReadOnlyStash, Primeable):
"""A stash that creates :class:`.HospitalAdmission` instances. This
instance is used by caching stashes per the default resource library
configuration for this package.
"""
config_factory: ConfigFactory = field()
"""The factory used to create domain objects (ie hospital admission).
"""
mimic_note_factory: NoteFactory = field()
"""The factory that creates :class:`.Note` for hopsital admissions."""
admission_persister: AdmissionPersister = field()
"""The persister for the ``admissions`` table."""
diagnosis_persister: DiagnosisPersister = field()
"""The persister for the ``diagnosis`` table."""
patient_persister: PatientPersister = field()
"""The persister for the ``patients`` table."""
procedure_persister: ProcedurePersister = field()
"""The persister for the ``procedure`` table."""
note_event_persister: NoteEventPersister = field()
"""The persister for the ``noteevents`` table."""
note_stash: Stash = field()
"""Creates cached instances of :class:`.Note`."""
hospital_adm_name: str = field()
"""The configuration section name of the :class:`.HospitalAdmission` used to
load instances.
"""
def __post_init__(self):
super().__post_init__()
self.strict = True
def _create_note_stash(self, adm: Admission):
np: NoteEventPersister = self.note_event_persister
row_ids: Tuple[int] = np.get_row_ids_by_hadm_id(adm.hadm_id)
return KeySubsetStash(
delegate=self.note_stash,
key_subset=set(map(str, row_ids)),
dynamic_subset=False)
def load(self, hadm_id: str) -> HospitalAdmission:
"""Create a *complete picture* of a hospital stay with admission,
patient and notes data.
:param hadm_id: the ID that specifies the hospital admission to create
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading hospital admission: {hadm_id}')
hadm_id = int(hadm_id)
dp: DiagnosisPersister = self.diagnosis_persister
pp: ProcedurePersister = self.procedure_persister
adm: Admission = self.admission_persister.get_by_hadm_id(hadm_id)
pat: Patient = self.patient_persister.get_by_subject_id(adm.subject_id)
diag: Tuple[Diagnosis] = dp.get_by_hadm_id(hadm_id)
procds: Tuple[Procedure] = pp.get_by_hadm_id(hadm_id)
note_stash: Stash = self._create_note_stash(adm)
adm: HospitalAdmission = self.config_factory.new_instance(
self.hospital_adm_name, adm, pat, diag, procds)
adm._init(note_stash)
return adm
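# Usage sketch: assuming this stash is available as ``adm_stash`` from the
# application context, a complete admission is loaded by its ``hadm_id`` key
# (the ID below is hypothetical):
#
#     adm = adm_stash['100001']
#     print(adm.patient, len(adm))   # patient data and the number of notes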
@persisted('_keys', cache_global=True)
def keys(self) -> Iterable[str]:
return tuple(self.admission_persister.get_keys())
def exists(self, hadm_id: str) -> bool:
return self.admission_persister.exists(int(hadm_id))
def prime(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'priming {type(self)}...')
self.mimic_note_factory.prime()
super().prime()
@dataclass
class HospitalAdmissionDbFactoryStash(FactoryStash, Primeable):
"""A factory stash that configures :class:`.NoteEvent` instances so they can
parse the MIMIC-III English text as :class:`.FeatureDocument` instances.
"""
doc_stash: Stash = field(default=None)
"""Contains the document that map to :obj:`row_id`."""
mimic_note_context: Settings = field(default=None)
"""Contains resources needed by new and re-hydrated notes, such as the
document stash.
"""
def load(self, hadm_id: str) -> HospitalAdmission:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading hospital admission: {hadm_id}')
adm: HospitalAdmission = super().load(hadm_id)
db_stash: HospitalAdmissionDbStash = self.factory
adm._init(db_stash._create_note_stash(adm))
return adm
def clear(self):
# admission cached (i.e. data/adm)
super().clear()
# parsed docs (i.e. data/note-doc)
self.doc_stash.clear()
# note containers with sections (i.e. data/note-cont)
self.factory.note_stash.delegate.clear()
def prime(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'priming {type(self)}...')
self.factory.prime()
super().prime()
@dataclass
class NoteDocumentPreemptiveStash(MultiProcessDefaultStash):
"""Contains the stash that preemptively creates :class:`.Admission`,
:class:`.Note` and :class:`~zensols.nlp.container.FeatureDocument` cache
files. This class is not useful for returning any data (see
:class:`.HospitalAdmissionDbFactoryStash`).
"""
note_event_persister: NoteEventPersister = field(default=None)
"""The persister for the ``noteevents`` table."""
adm_factory_stash: HospitalAdmissionDbFactoryStash = field(default=None)
"""The factory to create the admission instances."""
def __post_init__(self):
super().__post_init__()
self._row_ids: Tuple[str] = None
def _create_data(self) -> Iterable[HospitalAdmission]:
keys: Set[str] = self._row_ids
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'keys to process: {len(keys)}')
return keys
def _process(self, chunk: List[Any]) -> Iterable[Tuple[str, Any]]:
np: NoteEventPersister = self.note_event_persister
# for each row ID, get the note through the admission so sections are
# created per the implementation specified in the configuration
row_id: str
for row_id in chunk:
if logger.isEnabledFor(logging.DEBUG):
pid = os.getpid()
self._debug(f'processing key {row_id} in {pid}')
hadm_id: int = np.get_hadm_id(int(row_id))
adm: HospitalAdmission = self.adm_factory_stash[hadm_id]
note: Note = adm[row_id]
# force document parse
note.doc
# it doesn't matter what we return because it won't be used, so
# return the note's debugging string
yield (row_id, str(note))
def _get_existing_note_row_ids(self) -> Set[str]:
"""Return the note row_ids that both have container and feature doc
cached ID files.
"""
existing_note_cont_ids: Set[str] = set(
self.adm_factory_stash.factory.note_stash.delegate.keys())
existing_doc_ids: Set[str] = set(
self.adm_factory_stash.doc_stash.delegate.keys())
if logger.isEnabledFor(logging.INFO):
logger.info(f'already cached: doc={len(existing_doc_ids)}, ' +
f'container={len(existing_note_cont_ids)}')
return existing_note_cont_ids & existing_doc_ids
def prime(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'priming {type(self)}...')
# this leads to priming the stash that installs the MedSecId in the
# mimicsid package
self.adm_factory_stash.prime()
np: NoteEventPersister = self.note_event_persister
# get the IDs of the notes we have already created previously
existing_row_ids: Set[str] = self._get_existing_note_row_ids()
# create a list of those row IDs we still need to create
to_create_row_ids: Set[str] = self._row_ids - existing_row_ids
# populate admissions that have at least one missing note
hadm_ids: Set[int] = set()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'need: {self._row_ids}, ' +
f'existing: {existing_row_ids}, ' +
f'create: {to_create_row_ids}')
row_id: str
for row_id in to_create_row_ids:
hadm_id: int = np.get_hadm_id(row_id)
if hadm_id is None:
raise RecordNotFoundError(self, 'row_id', row_id)
hadm_ids.add(hadm_id)
# first create the admissions so processes do not overwrite each other's
# work; only then can notes be derived from admissions and written across
# child processes
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating {len(hadm_ids)} cached admissions')
hadm_id: int
for hadm_id in hadm_ids:
adm: HospitalAdmission = self.adm_factory_stash[hadm_id]
assert isinstance(adm, HospitalAdmission)
# don't fork processes only to find the work is already complete
if len(hadm_ids) == 0:
if logger.isEnabledFor(logging.INFO):
logger.info('no note docs to create')
else:
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating {len(to_create_row_ids)} note docs')
super().prime()
def process_keys(self, row_ids: Iterable[str], workers: int = None,
chunk_size: int = None):
"""Invoke the multi-processing system to preemptively parse and store
note events for the IDs provided.
:param row_ids: the note ``row_id``s to parse and cache
:param workers: the number of processes spawned to accomplish the work
:param chunk_size: the size of each group of data sent to the child
process to be handled
:see: :class:`~zensols.persist.multi.stash.MultiProcessStash`
"""
if workers is not None:
self.workers = workers
if chunk_size is not None:
self.chunk_size = chunk_size
self._row_ids = set(row_ids)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'processing {len(self._row_ids)} notes')
self.prime()
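# Usage sketch (IDs and worker counts are assumptions): preemptively parse and
# cache a set of notes across multiple child processes:
#
#     stash: NoteDocumentPreemptiveStash = ...   # from the application context
#     stash.process_keys(['1234', '5678'], workers=4, chunk_size=10)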
__author__ = 'Paul Landes'
from typing import Tuple, List, Dict, Any, Sequence
from dataclasses import dataclass, field
from enum import Enum, auto
import sys
import logging
from io import StringIO, TextIOBase
from pathlib import Path
import pandas as pd
from zensols.util import loglevel, stdout
from zensols.persist import Stash
from zensols.config import ConfigFactory
from zensols.cli import ApplicationError
from zensols.deeplearn.cli import FacadeApplication
from zensols.mimic import (
NoteFormat, Note, Corpus, HospitalAdmission,
NoteDocumentPreemptiveStash,
)
from . import AnnotatedNote, AnnotationResource, NoteStash, PredictedNote
from .pred import SectionPredictor
logger = logging.getLogger(__name__)
@dataclass
class Application(FacadeApplication):
"""Use the MedSecId section annotations with MIMIC-III corpus parsing.
"""
config_factory: ConfigFactory = field(default=None)
"""The config used to create facade instances."""
corpus: Corpus = field(default=None)
"""A container class for the resources that access the MIMIC-III corpus."""
anon_resource: AnnotationResource = field(default=None)
"""Contains resources to acces the MIMIC-III MedSecId annotations."""
note_stash: NoteStash = field(default=None)
"""A stash that returns :class:`~zensols.mimic.Note` instances by thier
unique ``row_id`` keys.
"""
preempt_stash: NoteDocumentPreemptiveStash = field(default=None)
"""A multi-processing stash used to preemptively parse notes."""
def dump_ontology(self, out_file: Path = None):
"""Writes the ontology.
:param out_file: the output path
"""
out_file = Path('ontology.csv') if out_file is None else out_file
self.anon_resource.ontology.to_csv(out_file)
logger.info(f'wrote: {out_file}')
def _write_note(self, note: Note, out_file: Path = None,
output_format: NoteFormat = NoteFormat.text):
with stdout(out_file) as f:
note.write_by_format(writer=f, note_format=output_format)
if out_file.name != stdout.STANDARD_OUT_PATH:
logger.info(f'wrote to {out_file}')
def write_note(self, row_id: int, out_file: Path = None,
output_format: NoteFormat = NoteFormat.text):
"""Write an admission, note or section.
:param row_id: the row ID of the note to write
:param out_file: the output path
:param output_format: the output format of the note
"""
def summary_format(writer: TextIOBase):
for s in note.sections.values():
print(s, s.header_spans, len(s))
if out_file is None:
out_file = Path(stdout.STANDARD_OUT_PATH)
note: Note = self.corpus.get_note_by_id(row_id)
self._write_note(note, out_file, output_format)
def write_admission(self, hadm_id: str, out_dir: Path = Path('.'),
output_format: NoteFormat = NoteFormat.text):
"""Write all the notes of an admission.
:param hadm_id: the admission ID
:param out_dir: the output directory
:param output_format: the output format of the note
"""
adm: HospitalAdmission = self.corpus.get_hospital_adm_by_id(hadm_id)
out_dir = out_dir / 'adm' / hadm_id
out_dir.mkdir(parents=True, exist_ok=True)
note: Note
for note in adm.notes:
path: Path = out_dir / f'{note.normal_name}.{output_format.ext}'
self._write_note(note, path, output_format)
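# Usage sketch (IDs are hypothetical): programmatic use of the application,
# which is normally driven from the command line:
#
#     app.write_note(1234, output_format=NoteFormat.text)
#     app.write_admission('100001', out_dir=Path('out'))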
def admission_notes(self, hadm_id: str, out_file: Path = None,
keeps: str = None) -> pd.DataFrame:
"""Create a CSV of note information by admission.
:param hadm_id: the admission ID
:param out_file: the output path
:param keeps: a comma-delimited list of columns to keep in the output;
defaults to all columns
"""
if out_file is None:
out_file: Path = Path(f'notes-{hadm_id}.csv')
adm: HospitalAdmission = self.corpus.hospital_adm_stash.get(hadm_id)
rows: List[Dict[str, Any]] = []
note: Note
for note in adm.notes:
is_anon: bool = isinstance(note, AnnotatedNote)
dct: Dict[str, Any] = note.asdict()
for k in 'text sections'.split():
del dct[k]
dct['is_anon'] = is_anon
if is_anon:
dct['age_type'] = note.age_type.name
rows.append(dct)
df = pd.DataFrame(rows)
if keeps is not None:
df = df[keeps.split(',')]
df.to_csv(out_file)
logger.info(f'wrote: {out_file}')
return df
def note_counts_by_admission(self, out_file: Path = None) -> pd.DataFrame:
"""Write the counts of each category and row IDs for each admission.
:param out_file: the output path
"""
out_file = Path('admissions.csv') if out_file is None else out_file
df: pd.DataFrame = self.anon_resource.note_counts_by_admission
df.to_csv(out_file, index=False)
logger.info(f'wrote: {out_file}')
return df
def preempt_notes(self, input_file: Path = None, workers: int = None):
"""Preemptively document parse notes across multiple threads.
:param input_file: a file of notes' unique ``row_id`` IDs
:param workers: the number of processes to use to parse notes
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'preempting admissions from {input_file} ' +
f'for {workers} workers')
row_ids: Sequence[str]
if input_file is None:
df: pd.DataFrame = self.anon_resource.note_ids
row_ids = df['row_id'].to_list()
else:
try:
with open(input_file) as f:
row_ids = tuple(map(str.strip, f.readlines()))
except OSError as e:
raise ApplicationError(
f'Could not preempt notes from file {input_file}: {e}') \
from e
self.preempt_stash.process_keys(row_ids, workers)
def clear(self):
"""Remove all admission, note and section cached (parsed) data.
"""
stash: Stash = self.corpus.hospital_adm_stash
logger.info('clearing admission cache')
with loglevel('zensols'):
stash.clear()
class PredOutputType(Enum):
"""The types of prediction output formats."""
text = auto()
json = auto()
@dataclass
class PredictionApplication(object):
"""An application that predicts sections in file(s) on the file system, then
dumps them back to the file system (or standard out).
"""
config_factory: ConfigFactory = field(default=None)
"""The config factory used to help find the packed model."""
note_stash: NoteStash = field(default=None)
"""A stash that returns :class:`~zensols.mimic.Note` instances by thier
unique ``row_id`` keys.
"""
section_predictor: SectionPredictor = field(default=None)
"""The section name that contains the name of the :class:`.SectionPredictor`
to create from the ``config_factory``.
"""
def predict_sections(self, input_path: Path,
output_path: Path = Path('preds'),
out_type: PredOutputType = PredOutputType.text,
file_limit: int = None):
"""Predict the section IDs of a medical notes by file name or all files
in a directory.
:param input_path: the path to the medical note(s) to annotate
:param output_path: where to write the prediction(s) or - for standard
out
:param out_type: the prediction output format
:param file_limit: the max number of document to predict when the input
path is a directory
"""
file_limit = sys.maxsize if file_limit is None else file_limit
if input_path.is_dir():
paths = list(input_path.iterdir())
paths = paths[:file_limit]
output_path.mkdir(parents=True, exist_ok=True)
else:
paths = [input_path]
docs: List[str] = []
if not input_path.exists():
raise ApplicationError(f'Input path does not exist: {input_path}')
for path in paths:
with open(path) as f:
docs.append(f.read())
ext = 'txt' if out_type == PredOutputType.text else 'json'
notes: Tuple[PredictedNote] = self.section_predictor.predict(docs)
for path, note in zip(paths, notes):
path = path.parent / f'{path.stem}.{ext}'
sio = StringIO()
if out_type == PredOutputType.text:
note.write_human(writer=sio)
else:
note.asjson(writer=sio, indent=4)
if output_path.name == '-':
print(sio.getvalue())
else:
fpath = output_path / f'{path.stem}-pred.{ext}'
fpath.parent.mkdir(parents=True, exist_ok=True)
with open(fpath, 'w') as f:
f.write(sio.getvalue())
logger.info(f'wrote: {fpath}')
return notes
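# Usage sketch (``pred_app`` is an assumed, configured PredictionApplication):
#
#     notes = pred_app.predict_sections(
#         Path('notes/'), output_path=Path('preds'),
#         out_type=PredOutputType.json)
#     for note in notes:
#         print(sorted(note.sections.keys()))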
def repredict(self, row_id: int,
output_path: Path = Path('preds'),
out_type: PredOutputType = PredOutputType.text):
"""Predict the section IDs of an existing MIMIC III note.
:param row_id: the row ID of the note to write
:param output_path: where to write the prediction(s) or - for standard
out
:param out_type: the prediction output format
"""
out_path: Path = output_path / f'{row_id}.txt'
out_path.parent.mkdir(parents=True, exist_ok=True)
note = self.note_stash[row_id]
if isinstance(note, AnnotatedNote):
fmt_path = out_path.parent / f'{row_id}-formatted.txt'
with open(fmt_path, 'w') as f:
note.write_human(writer=f)
logger.info(f'wrote: {fmt_path}')
with open(out_path, 'w') as f:
f.write(note.text)
return self.predict_sections(out_path)
__author__ = 'Paul Landes'
from typing import Dict, Any, List, ClassVar, Set, Iterable
from dataclasses import dataclass, field, InitVar
from enum import Enum, auto
import sys
from io import TextIOBase
import re
from zensols.persist import persisted, PersistableContainer
from zensols.nlp import LexicalSpan, FeatureDocument
from zensols.mimic import (
MimicError, Note, Section, SectionContainer, SectionAnnotatorType
)
class MimicSectionError(MimicError):
pass
class MimicSectionAssertError(MimicSectionError):
def __init__(self, a, b):
super().__init__(f'Assertion error: {a} != {b}')
class AgeType(Enum):
"""An enumeration of all possible ages identified by the physicians per note
in the annotation set.
"""
adult = auto()
newborn = auto()
pediatric = auto()
@dataclass
class AnnotatedSection(Section):
"""A section that uses the MedSecId annotations for section demarcation
(:obj:`header_span`, :obj:`header_spans` and :obj:`body_span`) and
identification (:obj:`id`).
Many of the header identifiers are found in multiple locations in the body
of the text. In other cases there are no header spans at all. The
:obj:`header_spans` field has all of them, and if there is at least one,
the :obj:`header_span` is set to the first.
See the MedSecId paper for details.
"""
annotation: Dict[str, Any] = field(default=None, repr=False)
"""The raw annotation data parsed from the zip file containing the JSON."""
@dataclass
class AnnotatedNote(Note):
"""An annotated note that contains instances of :class:`.AnnotationSection`.
It also contains the ``age type`` taken from the annotations.
"""
_DICTABLE_ATTRIBUTES = Note._DICTABLE_ATTRIBUTES | {'age_type'}
_POST_HEADER_REGEX = re.compile(r'^[:\s\n]+(.*)$', re.DOTALL)
annotation: Dict[str, Any] = field(default=None, repr=False)
"""The annotation (JSON) parsed from the annotations zip file."""
@property
def age_type(self) -> AgeType:
"""The age type of the discharge note as annotated by the physicians.
"""
atstr = self.annotation['age_type']
return AgeType[atstr]
def _get_section_annotator_type(self) -> SectionAnnotatorType:
return SectionAnnotatorType.HUMAN
def _create_sec(self, sid: int, anon: Dict[str, Any]) -> Section:
body_span = LexicalSpan(**anon['body_span'])
header_spans: List[LexicalSpan] = []
header_span: LexicalSpan = None
for hspan in anon.get('header_spans', ()):
header_spans.append(LexicalSpan(**hspan))
if len(header_spans) > 0:
header_span = header_spans[-1]
header_end = header_span.end
body = self.text[header_end:body_span.end]
m: re.Match = self._POST_HEADER_REGEX.match(body)
if m is not None:
header_end += m.start(1)
body_span = LexicalSpan(header_end, body_span.end)
return AnnotatedSection(
id=sid,
name=anon['id'],
container=self,
body_span=body_span,
header_spans=header_spans,
annotation=anon)
def _get_sections(self) -> Iterable[Section]:
an: Dict[str, Any] = self.annotation
hadm_id: str = str(an['hadm_id'])
row_id: str = str(an['row_id'])
if self.hadm_id != hadm_id:
raise MimicSectionAssertError(self.hadm_id, hadm_id)
if self.row_id != row_id:
raise MimicSectionAssertError(self.row_id, row_id)
if self.category != an['category']:
raise MimicSectionAssertError(self.category, an['category'])
secs: List[Section] = []
sec_anon: Dict[str, Any]
for sid, sec_anon in enumerate(an['sections']):
sec = self._create_sec(sid, sec_anon)
sec._row_id = self.row_id
secs.append(sec)
return secs
def write_fields(self, depth: int = 0, writer: TextIOBase = sys.stdout):
super().write_fields(depth, writer)
self._write_line(f'age: {self.age_type.name}', depth, writer)
@dataclass
class PredictedNote(PersistableContainer, SectionContainer):
"""A note with predicted sections.
"""
_PERSITABLE_PROPERTIES: ClassVar[Set[str]] = {'sections'}
predicted_sections: List[Section] = field(repr=False)
"""The sections predicted by the model.
"""
doc: InitVar[FeatureDocument] = field(repr=False)
"""The used document that was parsed for prediction."""
def __post_init__(self, doc: FeatureDocument):
self._doc = doc
super().__init__()
if self.text != doc.text:
raise MimicSectionAssertError({self.text}, {doc.text})
@property
def _predicted_sections(self) -> List[Section]:
return self._predicted_sections_val
@_predicted_sections.setter
def _predicted_sections(self, sections: List[Section]):
self._predicted_sections_val = sections
if hasattr(self, '_sections'):
self._sections.clear()
@property
def text(self) -> str:
""""The entire note text."""
return self._get_doc().text
@property
@persisted('_truncated_text', transient=True)
def truncted_text(self) -> str:
return self._trunc(self.text, 70).replace('\n', ' ').strip()
def _get_sections(self) -> Iterable[Section]:
return self.predicted_sections
def _get_doc(self) -> FeatureDocument:
return self._doc
def __setstate__(self, state: Dict[str, Any]):
super().__setstate__(state)
for sec in self.predicted_sections:
sec.container = self
def __str__(self):
text = self.truncted_text
if hasattr(self, 'row_id') and hasattr(self, 'category'):
return f'{self.row_id}: ({self.category}): {text}'
else:
return text
PredictedNote.predicted_sections = PredictedNote._predicted_sections
@dataclass(init=False)
class MimicPredictedNote(Note):
"""A note that comes from the MIMIC-III corpus with predicted sections.
This takes an instance of :class:`.PredictedNote` created by the model
during inference. It creates :class:`~zensols.mimic.note.Section`
instances, and then discards the predicted note on pickling.
This method avoids having to serialize the
:class:`~zensols.nlp.container.FeatureDocument` (:obj:`.PredictedNote.doc`)
twice.
"""
_PERSITABLE_TRANSIENT_ATTRIBUTES: ClassVar[Set[str]] = \
Note._PERSITABLE_TRANSIENT_ATTRIBUTES | {'_pred_note'}
def __init__(self, *args, predicted_note: PredictedNote, **kwargs):
self._pred_note = predicted_note
super().__init__(*args, **kwargs)
if predicted_note.text != self.text:
raise MimicSectionAssertError(predicted_note.text, self.text)
def _get_section_annotator_type(self) -> SectionAnnotatorType:
return SectionAnnotatorType.MODEL
def _get_sections(self) -> Iterable[Section]:
def map_sec(ps: Section) -> Section:
sec = Section(
id=ps.id,
name=ps.name,
container=self,
header_spans=ps.header_spans,
body_span=ps.body_span)
return sec
for es, ns in zip(self._pred_note._doc.sents, self.doc.sents):
if es.lexspan != ns.lexspan:
raise MimicSectionAssertError(es.lexspan, ns.lexspan)
return map(map_sec, self._pred_note.predicted_sections)
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import List, Tuple, Optional, Union, Set
from dataclasses import dataclass, field, InitVar
import logging
from pathlib import Path
from zensols.config import ConfigFactory, Configurable
from zensols.persist import (
PersistableContainer, persisted, PersistedWork, Primeable
)
from zensols.nlp import LexicalSpan, FeatureDocument, FeatureDocumentParser
from zensols.deeplearn.model import ModelPacker, ModelFacade
from zensols.deeplearn.cli import FacadeApplication
from zensols.mimic import Section, NoteEvent, Note
from . import PredictedNote, AnnotationNoteFactory, MimicPredictedNote
from .model import PredictionError, EmptyPredictionError, SectionFacade
logger = logging.getLogger(__name__)
@dataclass
class SectionPredictor(PersistableContainer, Primeable):
"""Creates a complete prediction by collating the predictions of both the
section ID (type) and header token models. If :obj:`header_model_packer` is
not set, then only section identifiers (types) and body spans are predicted.
In this case, all header spans are left empty.
Implementation note: when :obj:`auto_deallocate` is ``False`` you must wrap
creations of this instance in :func:`~zensols.persist.dealloc` as this
instance contains resources
(:class:`~zensols.deeplearn.cli.app.FacadeApplication`) that need
deallocation. Their deallocation logic is invoked with this instance and
deallocated by :class:`~zensols.persist.annotation.PersistableContainer`.
"""
name: str = field()
"""The name of this object instance definition in the configuration."""
config_factory: ConfigFactory = field()
"""The config factory used to help find the packed model."""
section_id_model_packer: ModelPacker = field(default=None)
"""The packer used to create the section identifier model."""
header_model_packer: Optional[ModelPacker] = field(default=None)
"""The packer used to create the header token identifier model."""
model_config: Configurable = field(default=None)
"""Configuration that overwrites the packaged model configuration."""
doc_parser: FeatureDocumentParser = field(default=None)
"""Used for parsing documents for predicton. Default to using model's
configured document parser.
"""
min_section_body_len: int = field(default=1)
"""The minimum length of the body needed to make a section."""
auto_deallocate: bool = field(default=True)
"""Whether or not to deallocate resources after every call to
:meth:`predict`. See class docs.
"""
def __post_init__(self):
self._section_id_app = PersistedWork('_section_id_app', self)
self._header_app = PersistedWork('_header_app', self)
@persisted('_section_id_app')
def _get_section_id_app(self) -> SectionFacade:
model_path: Path = self.section_id_model_packer.install_model()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'section ID model path: {model_path}')
return FacadeApplication(
config=self.config_factory.config,
model_path=model_path,
cache_global_facade=False,
model_config_overwrites=self.model_config)
@persisted('_header_app')
def _get_header_app(self) -> SectionFacade:
if self.header_model_packer is not None:
model_path: Path = self.header_model_packer.install_model()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'header model path: {model_path}')
return FacadeApplication(
config=self.config_factory.config,
model_path=model_path,
cache_global_facade=False,
model_config_overwrites=self.model_config)
def _merge_note(self, sn: PredictedNote, hn: PredictedNote):
"""Merge header tokens from ``hn`` to ``sn``."""
def ff_chars(s: str, start: int, end: int, avoid: Set[str]) -> \
Optional[int]:
p: int = None
for p in range(start, end + 1):
c: str = s[p]
if c not in avoid:
break
return p
avoid: Set[str] = set(': \n\t\r')
ssec: Section
for ssec in sn.sections.values():
sspan: LexicalSpan = ssec.body_span
hspans: List[LexicalSpan] = []
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sec span: {ssec}: {ssec.body_doc}, ' +
f'lex: {ssec.lexspan}/{sspan}')
hsec: Section
for hsec in hn.sections.values():
hspan: LexicalSpan = hsec.body_span
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'header span: {hsec}: {hsec.body_doc}, ' +
f'overlap: {hspan.overlaps_with(sspan)}, ' +
f'begin: {hspan.begin == sspan.begin}')
if hspan.overlaps_with(sspan) and hspan.begin == sspan.begin:
# skip over the colon and space after
if len(hspan) > 1 and \
sn.text[hspan.end - 1:hspan.end] == ':':
hspan = LexicalSpan(hspan.begin, hspan.end - 1)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding header span: {hspan}')
hspans.append(hspan)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found {len(hspans)} overlapping header spans')
if len(hspans) > 0:
# skip leading space/colon for a tighter begin section boundary
p: int = ff_chars(sn.text, hspans[-1].end, sspan.end, avoid)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'char skip: {p}')
if p is None:
ssec.body_span = LexicalSpan(sspan.end, sspan.end)
else:
ssec.body_span = LexicalSpan(p, sspan.end)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding {hspans} to {ssec}')
ssec.header_spans = tuple(hspans)
def _merge_notes(self, snotes: List[PredictedNote],
hnotes: List[PredictedNote]):
"""Merge header tokens from ``hnotes`` to ``snotes``."""
sn: PredictedNote
hn: PredictedNote
for sn, hn in zip(snotes, hnotes):
self._merge_note(sn, hn)
@persisted('__validate_version', transient=True)
def _validate_version(self, packer_name: str, facade: ModelFacade) -> bool:
packer: ModelPacker = getattr(self, packer_name)
model_pred: SectionPredictor = facade.config_factory(self.name)
model_packer = getattr(model_pred, packer_name)
if packer.version != model_packer.version:
model_name: str = facade.model_settings.model_name
logger.warning(
f'API {model_name} version ({packer.version}) does not ' +
f'match the trained model version ({model_packer.version})')
return False
return True
def _trim_notes(self, notes: List[PredictedNote]):
def filter_sec(sec: Section) -> bool:
return len(sec.body_span) > self.min_section_body_len
note: Note
for note in notes:
note.predicted_sections = list(
filter(filter_sec, note.predicted_sections))
def _predict_from_docs(self, docs: Tuple[FeatureDocument],
sid_fac: SectionFacade) -> List[PredictedNote]:
head_app: FacadeApplication = self._get_header_app()
snotes: List[PredictedNote] = sid_fac.predict(docs)
if head_app is not None:
head_fac: SectionFacade = head_app.get_cached_facade()
self._validate_version('header_model_packer', head_fac)
hnotes: List[PredictedNote] = head_fac.predict(docs)
self._merge_notes(snotes, hnotes)
self._trim_notes(snotes)
return snotes
def predict_from_docs(self, docs: Tuple[FeatureDocument]) -> \
List[PredictedNote]:
sid_fac: SectionFacade = self._get_section_id_app().get_cached_facade()
self._validate_version('section_id_model_packer', sid_fac)
return self._predict_from_docs(docs, sid_fac)
def _predict(self, doc_texts: List[str]) -> List[PredictedNote]:
sid_fac: SectionFacade = self._get_section_id_app().get_cached_facade()
self._validate_version('section_id_model_packer', sid_fac)
doc_parser: FeatureDocumentParser = \
sid_fac.doc_parser if self.doc_parser is None else self.doc_parser
docs: Tuple[FeatureDocument] = tuple(map(doc_parser, doc_texts))
return self._predict_from_docs(docs, sid_fac)
def predict(self, doc_texts: List[str]) -> List[PredictedNote]:
"""Collate the predictions of both the section ID (type) and header
token models.
:param doc_texts: the text of the medical note to segment
:return: a list of the predictions as notes for each respective
``doc_texts``
"""
if self.auto_deallocate:
try:
return self._predict(doc_texts)
finally:
self.deallocate()
else:
return self._predict(doc_texts)
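# Usage sketch: ``fac`` is an assumed ConfigFactory and the application context
# section name below is an assumption, not defined in this module:
#
#     predictor: SectionPredictor = fac('mimicsid_section_predictor')
#     note = predictor.predict(['HISTORY OF PRESENT ILLNESS: chest pain'])[0]
#     for sec in note.sections.values():
#         print(sec.name, sec.body_span)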
def prime(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'priming {type(self)}...')
self.section_id_model_packer.install_model()
self.header_model_packer.install_model()
super().prime()
def deallocate(self):
super().deallocate()
self._section_id_app.clear()
self._header_app.clear()
def __call__(self, docs: List[Union[str, FeatureDocument]]) -> \
List[PredictedNote]:
"""See :meth:`predict` and :meth:`predict_from_docs`."""
if len(docs) > 0 and isinstance(docs[0], FeatureDocument):
return self.predict_from_docs(docs)
else:
return self.predict(docs)
@dataclass
class PredictionNoteFactory(AnnotationNoteFactory):
"""A note factory that predicts so that
:class:`~zensols.mimic.adm.HospitalAdmissionDbStash` predicts missing
sections.
**Implementation note:** The :obj:`section_predictor_name` is used with the
application context factory :obj:`config_factory` since declaring it in the
configuration creates an instance cycle.
"""
config_factory: ConfigFactory = field()
"""The factory to get the section predictor."""
mimic_pred_note_section: str = field(default=None)
"""The section name holding the configuration of the
:class:`.MimicPredictedNote` class.
"""
section_predictor_name: InitVar[str] = field(default=None)
"""The name of the section predictor as an app config section name. See
class docs.
"""
def __post_init__(self, section_predictor_name: str):
self._section_predictor_name = section_predictor_name
@property
@persisted('_section_predictor')
def section_predictor(self) -> SectionPredictor:
"""The section predictor (see class docs)."""
sp: SectionPredictor = self.config_factory(self._section_predictor_name)
# turn off deallocation to keep the same facade for all prediction calls
sp.auto_deallocate = False
return sp
def prime(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'priming {type(self)}...')
self.section_predictor.prime()
super().prime()
def _create_missing_anon_note(self, note_event: NoteEvent,
section: str) -> Note:
sp: SectionPredictor = self.section_predictor
note: Note = None
try:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'predicting note: {note_event}')
pred_note: PredictedNote = sp.predict_from_docs([note_event.doc])[0]
if len(pred_note.sections) == 0:
note = None
else:
note: MimicPredictedNote = self._event_to_note(
note_event,
section=self.mimic_pred_note_section,
params={'predicted_note': pred_note})
except EmptyPredictionError as e:
msg = f'nothing predicted: {note_event}: {e}--using regexs'
# skip the stack trace since the model fairly often classifies no tokens
logger.error(msg)
except PredictionError as e:
raise e
except Exception as e:
msg = f'could not predict note: {note_event}: {e}--using regexs'
logger.error(msg, exc_info=True)
if note is None:
# the model does not perform well on short nursing notes because it
# was not trained on them
if logger.isEnabledFor(logging.INFO):
logger.info(f'no sections predicted for {note_event.row_id}, ' +
'using regular expression prediction')
try:
note = self._create_from_note_event(note_event)
except Exception as e:
raise PredictionError('Failed twice to predict section: ' +
f'{note_event}: {e}') from e
return note | zensols.mimicsid | /zensols.mimicsid-1.4.3-py3-none-any.whl/zensols/mimicsid/pred.py | pred.py |
__author__ = 'Paul Landes'
from typing import Tuple, Type, Any, List, Dict, Optional, ClassVar, Union
from dataclasses import dataclass, field
from enum import Enum, auto
import logging
import pandas as pd
from zensols.persist import persisted
from zensols.nlp import FeatureToken, FeatureDocument, LexicalSpan
from zensols.mednlp import MedicalFeatureToken
from zensols.mimic import MimicTokenDecorator
from zensols.deeplearn.batch import DataPoint
from zensols.deeplearn.result import ResultsContainer
from zensols.deepnlp.classify import (
ClassificationPredictionMapper, TokenClassifyModelFacade
)
from zensols.mimic import Section
from . import MimicSectionError, AnnotatedNote, AnnotatedSection, PredictedNote
logger = logging.getLogger(__name__)
class PredictionError(MimicSectionError):
"""Raised for any issue predicting sections.
"""
pass
class EmptyPredictionError(PredictionError):
"""Raised when the model classifies all tokens as having no section.
"""
def __init__(self):
super().__init__('Model classified all tokens as having no section')
class TokenType(Enum):
"""A custom token type feature that identifies specifies whether the token
is::
* a separator
* a space
* a colon character (``:``)
* if its upper, lower case or capitalized
* if its punctuation (if not a colon)
* all digits
* anything else is ``MIX``
"""
SEP = auto()
SPACE = auto()
COLON = auto()
NEWLINE = auto()
UPCASE = auto()
DOWNCASE = auto()
CAPITAL = auto()
PUNCTUATION = auto()
DIGIT = auto()
MIX = auto()
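# Illustrative sketch (not used by the model): a bare-string version of the
# classification rules listed in the ``TokenType`` docstring.  It ignores the
# MIMIC separator and masked-entity features that ``SectionDataPoint`` also
# uses, and omits the punctuation check done there.
def _token_type_of(norm: str) -> TokenType:
    if norm == ':':
        return TokenType.COLON
    if norm.isspace():
        return TokenType.NEWLINE if norm[0] in '\n\r' else TokenType.SPACE
    if norm.isupper():
        return TokenType.UPCASE
    if norm.islower():
        return TokenType.DOWNCASE
    if norm[0].isupper():
        return TokenType.CAPITAL
    if norm.isdigit():
        return TokenType.DIGIT
    return TokenType.MIX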
@dataclass
class SectionDataPoint(DataPoint):
"""A data point for the section ID model.
"""
TOKEN_TYPES: ClassVar[Tuple[str]] = tuple(
map(lambda t: str(t.name), TokenType))
"""The list of types used as enumerated nominal values in labeled encoder
vectorizer components.
"""
note: AnnotatedNote = field(repr=False)
"""The note contained by this data point."""
pred_doc: FeatureDocument = field(default=None)
"""The parsed document used for prediction when using this data point for
prediction.
"""
def __post_init__(self):
if self.note is not None:
assert isinstance(self.note, AnnotatedNote)
@property
def is_pred(self) -> bool:
"""Whether this data point is used for prediction."""
return self.note is None
@property
def doc(self) -> FeatureDocument:
"""The document from where this data point originates."""
return self.pred_doc if self.is_pred else self.note.doc
@property
@persisted('_features', transient=True)
def feature_dataframe(self) -> pd.DataFrame:
"""A dataframe used to create some of the features of this data point.
"""
rows: List[Tuple[Any]] = []
tok2sec: Dict[FeatureToken, Section] = None
if self.note is not None:
tok2sec = {}
sec: AnnotatedSection
for sec in self.note.sections.values():
for tok in sec.header_tokens:
tok2sec[tok] = (sec, True)
for tok in sec.body_tokens:
tok2sec[tok] = (sec, False)
tok: FeatureToken
for tok in self.doc.token_iter():
entry: Optional[Tuple[Section, bool]] = None
if tok2sec is not None:
entry = tok2sec.get(tok)
if entry is None:
sec_name, is_header = FeatureToken.NONE, False
else:
sec_name, is_header = entry[0].name, entry[1]
tt: TokenType
norm: str = tok.norm
ent: str = FeatureToken.NONE
cui: Optional[str] = tok.cui_ if hasattr(tok, 'cui_') else None
header_lab: str = 'y' if is_header else 'n'
if cui == FeatureToken.NONE:
cui = None
if tok.ent_ != MedicalFeatureToken.CONCEPT_ENTITY_LABEL and \
tok.ent_ != FeatureToken.NONE:
ent = tok.ent_
elif tok.mimic_ == MimicTokenDecorator.MASK_TOKEN_FEATURE:
ent = tok.onto_
if tok.mimic_ == MimicTokenDecorator.SEPARATOR_TOKEN_FEATURE:
tt = TokenType.SEP
elif norm == ':':
tt = TokenType.COLON
elif tok.is_punctuation:
tt = TokenType.PUNCTUATION
elif tok.is_space:
tt = {' ': TokenType.SPACE,
'\t': TokenType.SPACE,
'\n': TokenType.NEWLINE,
'\r': TokenType.NEWLINE,
}[norm[0]]
else:
if norm.isupper():
tt = TokenType.UPCASE
elif norm.islower():
tt = TokenType.DOWNCASE
elif norm[0].isupper():
tt = TokenType.CAPITAL
elif norm.isdigit():
tt = TokenType.DIGIT
else:
tt = TokenType.MIX
ts = tt.name
rows.append((tok.norm, sec_name, header_lab, tok.idx, ts, ent, cui))
return pd.DataFrame(
rows, columns='norm sec_name is_header idx ttype ent cui'.split())
@property
def section_names(self) -> Tuple[str]:
"""The section names label (section types per the paper)."""
return tuple(self.feature_dataframe['sec_name'])
@property
def headers(self) -> Tuple[str]:
"""The header label (section types per the paper)."""
return tuple(self.feature_dataframe['is_header'])
@property
def idxs(self) -> Tuple[int]:
"""The index feature."""
return tuple(self.feature_dataframe['idx'])
@property
def ttypes(self) -> Tuple[str]:
"""The token type feature, which is the string value of
:class:`.TokenType`.
"""
return tuple(self.feature_dataframe['ttype'])
@property
def ents(self) -> Tuple[str]:
"""The named entity feature."""
return tuple(self.feature_dataframe['ent'])
@property
def cuis(self) -> Tuple[Optional[str]]:
"""The CUI feature."""
return tuple(self.feature_dataframe['cui'])
def __len__(self):
return self.doc.token_len
@dataclass
class SectionPredictionMapper(ClassificationPredictionMapper):
"""Predict sections from a :class:`~zensols.nlp.FeatureDocument` as a list
of :class:`.PredictedNote` instances. It does this by creating data points
of type :class:`.SectionDataPoint` that are used by the model.
"""
def _create_tok_list(self, doc: FeatureDocument, labels: Tuple[str],
tok_lists: List[Tuple[str, List[FeatureToken]]]):
"""Create token lists for each document. This coallates a section label
with the respective list of tokens from which they were predicted.
:param doc: the document used for prediction
:param labels: the predicted labels for ``doc``
:tok_lists: the coallated label/token list to populate
"""
def add_tok_list(lab: str, tok_list: List[FeatureToken]):
"""Strip front and back newlines."""
for beg, tok in enumerate(tok_list):
if tok.norm != '\n':
break
for end, tok in enumerate(reversed(tok_list)):
if tok.norm != '\n':
break
end = len(tok_list) - end
tok_lists.append((lab, tok_list[beg:end]))
tok_list: List[FeatureToken] = None
last_lab: str = None
label: str
tok: FeatureToken
for label, tok in zip(labels, doc.token_iter()):
if tok.is_space:
continue
if last_lab != label:
if tok_list is not None:
add_tok_list(last_lab, tok_list)
tok_list = [tok]
else:
tok_list.append(tok)
last_lab = label
if tok_list is not None and len(tok_list) > 0:
add_tok_list(last_lab, tok_list)
def _create_sections(self, tok_lists: Tuple[str, List[FeatureToken]],
doc: FeatureDocument, secs: List[AnnotatedSection]):
"""Create sections from token lists.
:param tok_lists: the token lists created in :meth:`_create_tok_list`
:param doc: the document used for prediction
        :param secs: the list to populate with created sections
"""
# remove token lists with no classified section
tok_lists = tuple(
filter(lambda x: x[0] != FeatureToken.NONE, tok_lists))
for sid, (label, toks) in enumerate(tok_lists):
span: LexicalSpan = None
if len(toks) == 0:
                # bail out of the deep learning framework; this is handled
                # higher up the stack
raise EmptyPredictionError()
elif len(toks) == 1:
span = toks[0].lexspan
else:
begin = toks[0].lexspan.begin
end = toks[-1].lexspan.end
span = LexicalSpan(begin, end)
assert span is not None
secs.append(Section(
id=sid,
name=label,
container=None,
header_spans=(),
body_span=span))
def _collate(self, docs: Tuple[FeatureDocument],
classes: Tuple[Tuple[str]]) -> List[PredictedNote]:
"""Collate predictions with feature tokens.
        :param docs: the documents used for prediction
:param classes: the predicted classes
"""
notes: List[PredictedNote] = []
doc_tok_lists: List[Tuple[str, List[FeatureToken]]] = []
# create token lists that have the section label with respective tokens
labels: List[str]
doc: FeatureDocument
for labels, doc in zip(classes, docs):
tok_lists: List[Tuple[str, List[FeatureToken]]] = []
doc_tok_lists.append(tok_lists)
self._create_tok_list(doc, labels, tok_lists)
# create predicted notes
tok_lists: Tuple[str, List[FeatureToken]]
doc: FeatureDocument
for doc, tok_lists in zip(docs, doc_tok_lists):
secs: List[AnnotatedSection] = []
self._create_sections(tok_lists, doc, secs)
pn = PredictedNote(
predicted_sections=secs,
doc=doc)
sec: Section
for sec in secs:
sec.container = pn
notes.append(pn)
return notes
def _create_features(self, data: Union[FeatureDocument, str]) -> \
Tuple[FeatureDocument]:
if isinstance(data, FeatureDocument):
self._docs.append(data)
return [data]
else:
return super()._create_features(data)
def _create_data_point(self, cls: Type[DataPoint],
feature: Any) -> DataPoint:
return cls(None, self.batch_stash, note=None, pred_doc=feature)
def map_results(self, result: ResultsContainer) -> List[PredictedNote]:
docs: Tuple[FeatureDocument] = tuple(self._docs)
classes: Tuple[Tuple[str]] = tuple(self._map_classes(result))
return self._collate(docs, classes)
@dataclass
class SectionFacade(TokenClassifyModelFacade):
"""The application model facade. This only adds the ``zensols.install``
package to the CLI output logging.
"""
def _configure_cli_logging(self, info_loggers: List[str],
debug_loggers: List[str]):
super()._configure_cli_logging(info_loggers, debug_loggers)
if not self.progress_bar:
info_loggers.append('zensols.install') | zensols.mimicsid | /zensols.mimicsid-1.4.3-py3-none-any.whl/zensols/mimicsid/model.py | model.py |
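# Illustrative sketch (standalone, not part of the module above): the grouping
# idea behind ``SectionPredictionMapper._create_tok_list``--contiguous runs of
# the same predicted label are collated into (label, tokens) pairs.  The
# labels and tokens below are plain strings only for demonstration.
def _group_label_runs(labels, tokens):
    runs, cur_lab, cur_toks = [], None, []
    for lab, tok in zip(labels, tokens):
        if lab != cur_lab and cur_toks:
            # a new label starts a new run
            runs.append((cur_lab, cur_toks))
            cur_toks = []
        cur_lab = lab
        cur_toks.append(tok)
    if cur_toks:
        runs.append((cur_lab, cur_toks))
    return runs

# for example: _group_label_runs('a a b b b'.split(), 't1 t2 t3 t4 t5'.split())
# yields [('a', ['t1', 't2']), ('b', ['t3', 't4', 't5'])]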
__author__ = 'Paul Landes'
from typing import Dict, Any, ClassVar, Iterable, Set, List, Tuple, Union
from dataclasses import dataclass, field
import logging
import json
import re
from pathlib import Path
from io import BytesIO
from frozendict import frozendict
import pandas as pd
from zensols.util import time
from zensols.config import Dictable
from zensols.install import Installer
from zensols.persist import (
persisted, PersistedWork,
Stash, ReadOnlyStash, ZipStash, PrimeableStash, DelegateStash
)
from zensols.mimic import (
Note, NoteFactory, NoteEvent, HospitalAdmission,
NoteEventPersister, Corpus,
)
from . import AnnotatedNote
logger = logging.getLogger(__name__)
@dataclass
class AnnotationResource(Dictable):
"""This class providess access to the ``.zip`` file that contains the JSON
section identification annotations. It also has the ontology provided as a
Pandas dataframe.
"""
    _DICTABLE_ATTRIBUTES: ClassVar[Set[str]] = {'corpus_path'}
_ROOT_ZIP_DIR: ClassVar[str] = 'section-id-annotations'
_ANN_ENTRY: ClassVar[str] = 'annotations'
_ONTOLOGY_ENTRY: ClassVar[str] = 'ontology.csv'
    _KEY_REGEX: ClassVar[re.Pattern] = re.compile(_ANN_ENTRY + r'/(\d+)-(\d+)-([^.]+)')
installer: Installer = field(repr=False)
"""Used to download the annotation set as a zip file and provide the
location to the downloaded file.
"""
@property
def corpus_path(self) -> Path:
"""The path to the annotations ``.zip`` file (see class docs)."""
return self.installer.get_singleton_path()
@persisted('_stash')
def _get_stash(self) -> ZipStash:
""""Return the stash containing the section annotations."""
self.installer()
path: Path = self.corpus_path
return ZipStash(path, root=self._ROOT_ZIP_DIR)
@property
@persisted('_ontology')
def ontology(self) -> pd.DataFrame:
"""A dataframe representing the note to section ontology. It contains
the relation from notes to sections along with their respective
descriptions.
"""
csv_data: bytearray = self._get_stash().get(self._ONTOLOGY_ENTRY)
return pd.read_csv(BytesIO(csv_data))
@property
@persisted('_note_ids')
def note_ids(self) -> pd.DataFrame:
"""Return a dataframe of hospital admission and corresponding note IDs.
"""
rows = []
for k in self._get_stash().keys():
m: re.Match = self._KEY_REGEX.match(k)
if m is not None:
rows.append(m.groups())
return pd.DataFrame(rows, columns='hadm_id row_id category'.split())
@staticmethod
def category_to_id(name: str) -> str:
"""Return the ID form for the category name."""
return name.replace(' ', '-').replace('/', '-').lower()
def get_annotation(self, note_event: NoteEvent) -> Dict[str, Any]:
"""Get the raw annotation as Python dict of dics for a
:class:`~zensols.mimic.NoteEvent`.
"""
ne = note_event
cat = self.category_to_id(ne.category)
path = f'{self._ANN_ENTRY}/{ne.hadm_id}-{ne.row_id}-{cat}.json'
item: bytearray = self._get_stash().get(path)
if item is not None:
return json.load(BytesIO(item))
@property
@persisted('_note_counts_by_admission')
def note_counts_by_admission(self) -> pd.DataFrame:
"""The counts of each category and row IDs for each admission.
"""
df: pd.DataFrame = self.note_ids
cats: List[str] = sorted(df['category'].drop_duplicates().tolist())
cols: List[str] = ['hadm_id'] + cats + ['total', 'row_ids']
rows: List[Tuple[str, int]] = []
for hadm_id, dfg in df.groupby('hadm_id'):
cnts = dfg.groupby('category').size()
row: List[Union[str, int]] = [hadm_id]
row.extend(map(lambda c: cnts[c] if c in cnts else 0, cats))
row.append(cnts.sum())
            row.append(','.join(dfg['row_id']))
            rows.append(row)
df = pd.DataFrame(rows, columns=cols)
return df.sort_values('total', ascending=False)
@dataclass
class AnnotationNoteFactory(NoteFactory):
"""Override to replace section with MedSecId annotations if they exist.
"""
anon_resource: AnnotationResource = field(default=None)
"""Contains the annotations and ontolgy/metadata note to section data."""
annotated_note_section: str = field(default=None)
"""The section to use for creating new annotated section, for those that
found in the annotation set.
"""
def _create_missing_anon_note(self, note_event: NoteEvent,
section: str) -> Note:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'create missing anon note from: {note_event}')
return super().create(note_event, section)
def _create_note(self, note_event: NoteEvent, section: str,
anon: Dict[str, Any]) -> Note:
if anon is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating from annotation')
note = self._event_to_note(
note_event,
section=self.annotated_note_section,
params={'annotation': anon})
else:
if logger.isEnabledFor(logging.DEBUG):
                logger.debug('annotation not found, creating')
note = self._create_missing_anon_note(note_event, section)
return note
def create(self, note_event: NoteEvent, section: str = None) -> Note:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating from note event: {note_event}, ' +
f'section: {section}')
anon: Dict[str, Any] = self.anon_resource.get_annotation(note_event)
return self._create_note(note_event, section, anon)
@dataclass
class AnnotatedNoteStash(ReadOnlyStash, PrimeableStash):
"""A stash that returns :class:`~zensols.mimic.Note` instances by thier
unique ``row_id`` keys.
"""
corpus: Corpus = field()
"""A container class for the resources that access the MIMIC-III corpus."""
anon_resource: AnnotationResource = field()
"""Contains the annotations and ontolgy/metadata note to section data."""
row_hadm_map_path: Path = field()
"""The path to the note to admission ID mapping cached file."""
def __post_init__(self):
super().__post_init__()
self._row_hadm_map = PersistedWork(
self.row_hadm_map_path, self, mkdir=True, recover_empty=True)
@property
@persisted('_row_hadm_map')
def row_to_hadm_ids(self) -> Dict[str, str]:
"""A mapping of row to hospital admission IDs."""
with time('calc key diff'):
df: pd.DataFrame = self.anon_resource.note_ids
rows: Dict[str, str] = dict(
df['row_id hadm_id'.split()].itertuples(index=False))
return frozendict(rows)
def prime(self):
stash: Stash = self.corpus.hospital_adm_stash
df: pd.DataFrame = self.anon_resource.note_ids
hadm_ids: Set[str] = set(df['hadm_id'].drop_duplicates())
remaining: Set[str] = hadm_ids - set(stash.keys())
if len(remaining) > 0:
if logger.isEnabledFor(logging.INFO):
logger.info(f'priming {len(remaining)} admissions')
with time(f'wrote {len(remaining)} admissions'):
for hadm_id in remaining:
stash[hadm_id]
def clear(self):
self._row_hadm_map.clear()
def load(self, row_id: str) -> AnnotatedNote:
row_to_hadm: Dict[str, str] = self.row_to_hadm_ids
stash: Stash = self.corpus.hospital_adm_stash
hadm_id: str = row_to_hadm.get(row_id)
if hadm_id is not None:
adm: HospitalAdmission = stash[hadm_id]
note: AnnotatedNote = adm[int(row_id)]
if isinstance(note, AnnotatedNote):
return note
else:
logger.warning('No annotation found for hadm_id: ' +
f'{hadm_id}, row_id: {row_id}')
def keys(self) -> Iterable[str]:
return self.anon_resource.note_ids['row_id'].tolist()
def exists(self, row_id: str) -> bool:
return any(self.anon_resource.note_ids['row_id'] == row_id)
def __len__(self) -> int:
return len(self.anon_resource.note_ids)
@dataclass
class NoteStash(DelegateStash):
"""Creates notes of type :class:`~zensols.mimic.note.Note` or
:class:`.AnnotatedNote` depending on if the note was annotated.
"""
corpus: Corpus = field()
"""A container class for the resources that access the MIMIC-III corpus."""
def load(self, row_id: str) -> Note:
note: Note = self.delegate.load(row_id)
if note is None:
np: NoteEventPersister = self.corpus.note_event_persister
hadm_id: int = np.get_hadm_id(str(row_id))
if hadm_id is not None:
adm: HospitalAdmission = self.corpus.hospital_adm_stash[hadm_id]
note = adm[int(row_id)]
return note
def get(self, name: str, default: Any = None) -> Any:
return Stash.get(self, name, default) | zensols.mimicsid | /zensols.mimicsid-1.4.3-py3-none-any.whl/zensols/mimicsid/anon.py | anon.py |
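# Illustrative usage sketch (assumes the annotation zip has been installed by
# the configured ``Installer`` and that the package's application context is
# available through ``zensols.mimicsid.cli.ApplicationFactory``):
def _example_annotation_access():
    from zensols.mimicsid.cli import ApplicationFactory
    anon_res = ApplicationFactory.annotation_resource()
    # note category names map to the key components used in the zip entries
    print(AnnotationResource.category_to_id('Discharge summary'))
    # hospital admission, row ID and category of every annotated note
    print(anon_res.note_ids.head())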
__author__ = 'Paul Landes'
from typing import List, Any, Dict
import sys
from zensols.config import ConfigFactory
from zensols.mimic import Corpus
from zensols.cli import ActionResult, CliHarness
from zensols.cli import ApplicationFactory as CliApplicationFactory
from . import SectionPredictor, NoteStash, AnnotationResource
class ApplicationFactory(CliApplicationFactory):
"""The application factory for section identification.
"""
def __init__(self, *args, **kwargs):
kwargs['package_resource'] = 'zensols.mimicsid'
super().__init__(*args, **kwargs)
@classmethod
def instance(cls, name: str) -> ConfigFactory:
"""Return the section predictor using the app context."""
harness: CliHarness = cls.create_harness()
fac: ConfigFactory = harness.get_config_factory()
return fac(name)
@classmethod
def corpus(cls) -> Corpus:
"""Return the section predictor using the app context."""
return cls.instance('mimic_corpus')
@classmethod
def section_predictor(cls) -> SectionPredictor:
"""Return the section predictor using the app context."""
return cls.instance('mimicsid_section_predictor')
@classmethod
def annotation_resource(cls) -> AnnotationResource:
"""Contains resources to acces the MIMIC-III MedSecId annotations."""
return cls.instance('mimicsid_anon_resource')
@classmethod
def note_stash(cls, host: str, port: str, db_name: str,
user: str, password: str) -> NoteStash:
"""Return the note stash using the app context, which is populated with
the Postgres DB login provided as the parameters.
"""
harness: CliHarness = cls.create_harness(
app_config_context={
'mimic_postgres_conn_manager':
dict(host=host, port=port, db_name=db_name,
user=user, password=password)})
return harness.get_instance('note').note_stash
def main(args: List[str] = sys.argv, **kwargs: Dict[str, Any]) -> ActionResult:
harness: CliHarness = ApplicationFactory.create_harness(relocate=False)
    return harness.invoke(args, **kwargs) | zensols.mimicsid | /zensols.mimicsid-1.4.3-py3-none-any.whl/zensols/mimicsid/cli.py | cli.py
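# Illustrative sketch: obtaining a ``NoteStash`` keyed by MIMIC-III ``row_id``
# values; the Postgres credentials and the row ID below are placeholders.
def _example_note_stash():
    from zensols.mimicsid.cli import ApplicationFactory
    stash = ApplicationFactory.note_stash(
        host='localhost', port='5432', db_name='mimic3',
        user='mimicuser', password='changeme')
    # a hypothetical note row_id; annotated notes come back as AnnotatedNote
    note = stash['14793']
    print(type(note).__name__)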
__author__ = 'plandes'
import logging
import sys
from time import sleep
import itertools as it
import csv
from multiprocessing import Pool
from zensols.actioncli.time import time
from zensols.actioncli import (
persisted,
StashFactory,
)
from zensols.db import DbPersisterFactory
from zensols.ngramdb import Downloader, AppConfig
logger = logging.getLogger(__name__)
class PersisterContainer(object):
SECTION = 'data'
@property
@persisted('_persister')
def persister(self):
fac = DbPersisterFactory(self.config)
return fac.instance('ngram')
class Inserter(PersisterContainer):
def __init__(self, paths, config, year_limit, batches):
self.paths = paths
self.config = config
self.year_limit = year_limit
self.batches = batches
self.chunk_size = self.config.get_option_int('chunk_size', self.SECTION)
def insert_files(self):
n_files = 0
for path in self.paths:
logger.info(f'importing {path}')
with open(path) as f:
with time('imported {row_count} rows'):
row_count = self.insert_rows(f, self.batches)
n_files += 1
logger.info(f'imported {n_files} files from {path}')
return n_files
def insert_rows(self, f, batches):
row_count = 0
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
reader = iter(reader)
for i in range(batches):
logger.info(f'loading batch {i} with {self.chunk_size} rows')
rows = []
for row in it.islice(reader, self.chunk_size):
yr = int(row[1])
if self.year_limit is None or yr >= self.year_limit:
row[1] = yr
rows.append(row)
if len(rows) == 0:
break
else:
self.persister.insert_rows(rows, errors='ignore')
row_count += len(rows)
logger.info(f'added {len(rows)} rows')
rows.clear()
return row_count
class CreateDatabase(PersisterContainer):
def __init__(self, config, year_limit=None):
self.config = config
self.n_workers = self.config.get_option_int(
'n_workers', section=self.SECTION)
if year_limit is None:
self.year_limit = None
else:
self.year_limit = int(year_limit)
@property
def downloaded_files(self):
return Downloader(self.config).get_downloaded()
@classmethod
def insert_file(cls, payload):
inserter = Inserter(**payload)
return inserter.insert_files()
def load(self, clear=True, batches=sys.maxsize):
logging.getLogger('zensols.dbpg').setLevel(level=logging.DEBUG)
if clear:
self.persister.conn_manager.drop()
self.persister.conn_manager.create()
# apparently postgres lets go of the connection before the DB is
# dropped
sleep(2)
csv.field_size_limit(10 ** 9)
n = self.n_workers
paths = tuple(self.downloaded_files)
path_sets = []
for i in range(0, len(paths), n):
payload = {'paths': paths[i:i + n],
'config': self.config,
'year_limit': self.year_limit,
'batches': batches}
path_sets.append(payload)
pool = Pool(self.n_workers)
logger.info(f'starting {len(path_sets)} groups importing ' +
f'on/after {self.year_limit}')
totals = pool.map(CreateDatabase.insert_file, path_sets)
logger.info(f'completed inserting {sum(totals)} files')
class Query(object):
def __init__(self, config, year_limit=None, stash_name='ngram'):
self.config = config
self.stash_name = stash_name
self.year_limit = year_limit
@property
@persisted('_stash', cache_global=True)
def stash(self):
fac = StashFactory(self.config)
if self.year_limit is None:
inst = fac.instance(self.stash_name)
else:
inst = fac.instance(self.stash_name, year_limit=self.year_limit)
return inst
def probability(self, ngrams):
stash = self.stash
cnt = len(stash)
return stash[ngrams] / cnt
def __call__(self, ngrams):
stash = self.stash
return stash[ngrams] | zensols.ngramdb | /zensols.ngramdb-0.0.2-py3-none-any.whl/zensols/ngramdb/app.py | app.py |
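# Illustrative usage sketch: querying aggregate n-gram counts with the
# ``Query`` class above; the configuration is assumed to point at an already
# loaded database and the grams are arbitrary.
def _example_query(config):
    from zensols.ngramdb import Query
    query = Query(config, year_limit=1990)
    print(query('the dog'))              # aggregate match count
    print(query.probability('the dog'))  # count normalized by the total match count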
__author__ = 'plandes'
import os
from pathlib import Path
from zensols.actioncli import (
Config,
ExtendedInterpolationEnvConfig,
)
class AppConfig(ExtendedInterpolationEnvConfig):
NGRAM_SECTION = 'ngram_db'
# NGRAM_PERSISTER_SECTION = 'ngram_db_persister'
# NGRAM_STASH = 'ngram'
# NGRAM_STASH_SECTION = NGRAM_STASH + '_stash'
def __init__(self, *args, **kwargs):
if 'default_expect' not in kwargs:
kwargs['default_expect'] = True
if 'remove_vars' not in kwargs:
kwargs['remove_vars'] = 'LANG USER user'.split()
super(AppConfig, self).__init__(*args, **kwargs)
@property
def n_gram(self):
        return self.get_option('n_gram', self.NGRAM_SECTION)
@n_gram.setter
def n_gram(self, n_gram):
self.set_option('n_gram', n_gram, self.NGRAM_SECTION)
@property
def lang(self):
        return self.get_option('lang', self.NGRAM_SECTION)
@lang.setter
def lang(self, lang):
self.set_option('lang', lang, self.NGRAM_SECTION)
@classmethod
def instance(cls, db_type: str):
return cls(db_type, Path('~/.ngramdbrc').expanduser(),
default_vars=os.environ)
@property
def app_config(self) -> Config:
return self.get_app_config(self)
@classmethod
def get_app_config(cls, config: Config) -> Config:
self = config
db_sec = cls.NGRAM_SECTION
db_type = self.get_option('db_type', db_sec)
path = self.resource_filename(f'resources/ngramdb-{db_type}.conf')
new_conf = self.derive_from_resource(
str(path.absolute()),
copy_sections=(db_sec, self.NGRAM_SECTION, 'default',))
rc_dir = str(self.resource_filename('resources').absolute())
new_conf.set_option('rc_dir', rc_dir, db_sec)
return new_conf
@classmethod
def add_config(cls, config: Config):
secs = 'ngram_db_persister ngram_agg_db_persister ngram_stash ngram_agg_stash'.split()
nconf = cls(config.config_file)
nconf = nconf.app_config
nconf.copy_sections(config, secs) | zensols.ngramdb | /zensols.ngramdb-0.0.2-py3-none-any.whl/zensols/ngramdb/config.py | config.py |
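# Illustrative sketch: deriving the database-specific application
# configuration used by the stashes and persisters; the option lookup mirrors
# ``get_app_config`` above and assumes an already constructed ``AppConfig``.
def _example_app_config(config):
    from zensols.ngramdb import AppConfig
    app_config = config.app_config
    print(app_config.get_option('db_type', AppConfig.NGRAM_SECTION))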
__author__ = 'plandes'
import logging
from pathlib import Path
from zensols.actioncli import (
persisted,
PersistedWork,
StashFactory,
DelegateStash,
)
from zensols.db import (
Bean,
BeanDbPersister,
DbPersisterFactory,
)
import zensols.dbpg
from zensols.ngramdb import AppConfig
logger = logging.getLogger(__name__)
class NgramBean(Bean):
def __init__(self, grams: str, yr: int, match_count: int,
page_count: int, volume_count: int, id: int):
self.grams = grams
self.yr = yr
self.match_count = match_count
self.page_count = page_count
self.volume_count = volume_count
self.id = id
def get_attr_names(self):
return 'grams yr match_count page_count volume_count id'.split()
class NgramPersister(BeanDbPersister):
def __init__(self, lang: str, n_gram: str, prefix: str, *args, **kwargs):
super(NgramPersister, self).__init__(
*args, row_factory=NgramBean, **kwargs)
self.lang = lang
self.n_gram = n_gram
self.prefix = prefix
def get_by_grams(self, grams) -> list:
return self.execute_by_name(
self.prefix + '_select_by_ngram',
params=(grams,),
row_factory=self.row_factory)
def get_match_count(self, grams=None, year_limit=None) -> int:
entry_name = self.prefix + '_select_aggregate_match_count'
params = []
if grams is not None:
entry_name = entry_name + '_by_grams'
params.append(grams)
if year_limit is not None:
entry_name = entry_name + '_by_year'
params.append(year_limit)
res = self.execute_singleton_by_name(entry_name, params=params)
if res is not None:
return res[0]
def get_match_pattern_count(self, grams) -> int:
row = self.execute_singleton_by_name(
'ngram_agg_select_aggregate_match_count_by_grams',
params=(grams,))
if row is not None:
return row[0]
def get_top_nth_count(self, n) -> int:
return self.execute_singleton_by_name(
'ngram_agg_top_n_count',
row_factory='dict',
params=(n,))
def grams_exist(self, grams) -> bool:
res = self.execute_singleton_by_name(
self.prefix + '_select_entry_exists_by_id',
params=(grams,))
if res is not None:
return res[0] > 0
def get_grams(self) -> list:
return self.execute_by_name(
self.prefix + '_select_distinct_grams',
map_fn=lambda x: x[0])
DbPersisterFactory.register(NgramPersister)
class NgramStash(DelegateStash):
def __init__(self, config: AppConfig, persister: str, year_limit=None):
super(NgramStash, self).__init__()
fac = DbPersisterFactory(config)
self.persister = fac.instance(persister)
self.year_limit = year_limit
data_dir = config.get_option(
'data_dir', section=AppConfig.NGRAM_SECTION, expect=False)
if data_dir is not None:
cnf = config.populate(section=AppConfig.NGRAM_SECTION)
prefix = self.persister.prefix
fname = f'len_{prefix}_{cnf.lang}_{cnf.n_gram}ngram.dat'
path = Path(data_dir, fname)
path.parent.mkdir(parents=True, exist_ok=True)
self._len = PersistedWork(path, self)
def load(self, name: str) -> int:
return self.persister.get_match_count(name, year_limit=self.year_limit)
def exists(self, name: str) -> bool:
return self.persister.grams_exist(name)
def keys(self) -> iter:
return self.persister.get_grams()
@persisted('_len')
def __len__(self):
return self.persister.get_match_count(year_limit=self.year_limit)
StashFactory.register(NgramStash) | zensols.ngramdb | /zensols.ngramdb-0.0.2-py3-none-any.whl/zensols/ngramdb/db.py | db.py |
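# Illustrative usage sketch: creating the registered ``ngram`` stash through
# the ``StashFactory`` and looking up a count; the grams and year limit are
# arbitrary and assume a populated database.
def _example_ngram_stash(config):
    from zensols.actioncli import StashFactory
    fac = StashFactory(config)
    stash = fac.instance('ngram', year_limit=1980)
    if stash.exists('the dog'):
        # aggregate match count on or after the year limit
        print(stash.load('the dog'))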
__author__ = 'plandes'
import logging
from zensols.actioncli import OneConfPerActionOptionsCliEnv
from zensols.ngramdb import (
AppConfig,
Downloader,
CreateDatabase,
Query,
)
#logging.basicConfig(level=logging.DEBUG)
class Cli(object):
def __init__(self, config, ngram=None, lang=None, grams=None, year=None):
self.config = config.app_config
if ngram is not None:
self.config.n_gram = ngram
if lang is not None:
self.config.lang = lang
self.grams = grams
self.year = year
def env(self):
self.config.default_vars['file_n'] = 0
for sec in 'data ngram_db'.split():
for k, v in self.config.get_options(sec).items():
print(f'{sec}.{k}={v}')
def download(self):
#logging.getLogger('zensols.ngramdb').setLevel(logging.INFO)
dl = Downloader(self.config)
dl.download()
def load(self):
#logging.getLogger('zensols.ngramdb').setLevel(logging.INFO)
cd = CreateDatabase(self.config, year_limit=self.year)
cd.load()
def query(self):
logging.getLogger('zensols.ngramdb').setLevel(logging.WARNING)
query = Query(self.config, year_limit=self.year)
n_occurs = query(self.grams)
print(n_occurs)
def probability(self):
logging.getLogger('zensols.ngramdb').setLevel(logging.WARNING)
query = Query(self.config, year_limit=self.year)
proba = query.probability(self.grams)
print(proba)
class ConfAppCommandLine(OneConfPerActionOptionsCliEnv):
def __init__(self):
ngram_op = ['-n', '--ngram', False, # require argument
{'dest': 'ngram', 'metavar': 'INTEGER',
'default': '1',
'help': 'ngram corpus to download or use'}]
lang_op = ['-l', '--lang', True, # require argument
{'dest': 'lang', 'metavar': 'INTEGER',
'default': 'eng',
'help': 'lang corpus to download or use'}]
grams_op = ['-g', '--grams', True, # require argument
{'dest': 'grams', 'metavar': 'STRING',
'help': 'the token(s) used for the n_gram lookup'}]
year_op = ['-y', '--year', False, # require argument
{'dest': 'year', 'metavar': 'INTEGER',
'help': 'the year to limit queries (aggregates up to value)'}]
cnf = {'executors':
[{'name': 'exporter',
'executor': lambda params: Cli(**params),
'actions': [{'name': 'env',
'doc': 'get environment configuration',
'opts': [ngram_op, lang_op]},
{'name': 'download',
'doc': 'download the corpus',
'opts': [ngram_op, lang_op]},
{'name': 'load',
'doc': 'load the corpus in to the database',
'opts': [year_op, ngram_op, lang_op]},
{'name': 'query',
                              'doc': 'query the database for occurrences',
'opts': [year_op, ngram_op, lang_op, grams_op]},
{'name': 'probability',
                              'doc': 'compute the probability of the occurrences',
'opts': [ngram_op, lang_op, grams_op]}]}],
'config_option': {'name': 'config',
'expect': True,
'opt': ['-c', '--config', False,
{'dest': 'config',
'metavar': 'FILE',
'help': 'configuration file'}]},
'whine': 1}
super(ConfAppCommandLine, self).__init__(
cnf, config_env_name='ngramdbrc', pkg_dist='zensols.ngramdb',
config_type=AppConfig)
# def _create_config(self, conf_file, default_vars):
# db_type = self.parsed_options.db_type
# del self.parsed_options.db_type
# return self.config_type(
# db_type, config_file=conf_file, default_vars=default_vars)
def main():
cl = ConfAppCommandLine()
cl.invoke() | zensols.ngramdb | /zensols.ngramdb-0.0.2-py3-none-any.whl/zensols/ngramdb/cli.py | cli.py |
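# Illustrative sketch: driving the command line above programmatically by
# setting ``sys.argv`` before calling ``main``.  The program name and grams
# are placeholders; the options are those declared in ``ConfAppCommandLine``.
def _example_cli():
    import sys
    from zensols.ngramdb.cli import main
    sys.argv = ['ngramdb', 'query', '-g', 'the dog', '-y', '1950']
    main()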
__author__ = 'Paul Landes'
from typing import Set, Iterable, Tuple
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
from io import StringIO
from . import ParseError, FeatureToken
class SpanNormalizer(metaclass=ABCMeta):
"""Subclasses normalize feature tokens on a per :class:`spacy.Language`.
All subclasses must be re-entrant.
"""
@abstractmethod
    def get_norm(self, tokens: Iterable[FeatureToken]) -> str:
        """Create a string that follows the language spacing rules."""
pass
@abstractmethod
def get_canonical(self, tokens: Iterable[FeatureToken]) -> str:
"""A canonical representation of the container, which are non-space
tokens separated by :obj:`CANONICAL_DELIMITER`.
"""
@dataclass(frozen=True)
class EnglishSpanNormalizer(SpanNormalizer):
"""An implementation of a span normalizer for the Enlish language.
"""
post_space_skip: Set[str] = field(default=frozenset("""`‘“[({<-"""))
"""Characters after which no space is added for span normalization."""
pre_space_skip: Set[str] = field(default=frozenset(
"'s n't 'll 'm 've 'd 're -".split()))
"""Characters before whcih no space is added for span normalization."""
canonical_delimiter: str = field(default='|')
"""The token delimiter used in :obj:`canonical`."""
def __post_init__(self):
# bypass frozen setattr guards
self.__dict__['_longest_pre_space_skip'] = \
max(map(len, self.pre_space_skip))
def get_norm(self, tokens: Iterable[FeatureToken]) -> str:
nsent: str
toks: Tuple[FeatureToken] = tuple(
filter(lambda t: t.text != '\n', tokens))
tlen: int = len(toks)
has_punc = tlen > 0 and hasattr(toks[0], 'is_punctuation')
if has_punc:
post_space_skip: Set[str] = self.post_space_skip
pre_space_skip: Set[str] = self.pre_space_skip
n_pre_space_skip: int = self._longest_pre_space_skip
sio = StringIO()
last_avoid = False
tix: int
tok: FeatureToken
for tix, tok in enumerate(toks):
norm: str = tok.norm
if norm is None:
raise ParseError(f'Token {tok.text} has no norm')
if tix > 0 and tix < tlen:
do_post_space_skip = False
nlen = len(norm)
if nlen == 1:
do_post_space_skip = norm in post_space_skip
if (not tok.is_punctuation or do_post_space_skip) and \
not last_avoid and \
not (nlen <= n_pre_space_skip and
norm in pre_space_skip):
sio.write(' ')
last_avoid = do_post_space_skip or tok.norm == '--'
sio.write(norm)
nsent = sio.getvalue()
else:
nsent = ' '.join(map(lambda t: t.norm, toks))
return nsent.strip()
def get_canonical(self, tokens: Iterable[FeatureToken]) -> str:
return self.canonical_delimiter.join(
map(lambda t: t.text,
filter(lambda t: not t.is_space, tokens)))
def __getstate__(self):
raise RuntimeError(f'Instances of {type(self)} are not picklable')
DEFAULT_FEATURE_TOKEN_NORMALIZER = EnglishSpanNormalizer() | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/spannorm.py | spannorm.py |
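# Illustrative usage sketch: normalizing the tokens of an already-parsed
# ``FeatureDocument`` (how the document is parsed is outside this module).
def _example_normalize(doc):
    from zensols.nlp.spannorm import DEFAULT_FEATURE_TOKEN_NORMALIZER
    normalizer = DEFAULT_FEATURE_TOKEN_NORMALIZER
    print(normalizer.get_norm(doc.token_iter()))       # language-aware spacing
    print(normalizer.get_canonical(doc.token_iter()))  # pipe-delimited tokens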