code (string, 114–1.05M chars) | path (string, 3–312 chars) | quality_prob (float64, 0.5–0.99) | learning_prob (float64, 0.2–1) | filename (string, 3–168 chars) | kind (1 class: pypi)
---|---|---|---|---|---
import logging
import operator
import math
import pathlib
from typing import Callable, List, Union
from detectron2.data.build import build_detection_train_loader
import numpy as np
from detectron2.engine import DefaultTrainer, HookBase
from detectron2.engine.hooks import EvalHook, BestCheckpointer
from detectron2.utils import comm, logger
from detectron2.utils.logger import log_first_n
from detectron2.config import CfgNode
from detectron2.evaluation import (
verify_results,
)
import detectron2.data.transforms as T_
import torch
from fvcore.common.checkpoint import Checkpointer
from numpy import logical_not
from sap_computer_vision.data.samplers import PKTripletSampler, TripletReservoirSampler, PredictReservoirHook, PKTripletStrategySwitcher
from sap_computer_vision.data.data_build import build_detection_test_loader_batched, DatasetMapperWithAdditionalAugmentaions
from sap_computer_vision.evaluators import ContrastiveEvaluator, ImageClassificationEvaluator, ObjectDetectionEvaluator
from sap_computer_vision.data import build_classification_train_loader, build_classification_test_loader_batched, DatasetMapperClassification
logger = logging.getLogger(__name__)
class LoadBest(HookBase):
"""This hook loads the best. Only usable in combination with BestCheckpointer hook."""
def __init__(self, best_checkpoint_hook: BestCheckpointer):
"""Create hook to load the best model at the end of the training.
Parameters
----------
best_checkpoint_hook: BestCheckpointer
Instance of the BestCheckpointer hook.
"""
self.checkpointer = best_checkpoint_hook._checkpointer
self.model_prefix = best_checkpoint_hook._file_prefix
def after_train(self):
if self.trainer.iter + 1 >= self.trainer.max_iter:
path = pathlib.Path(self.checkpointer.save_dir) / f'{self.model_prefix}.pth'
if path.is_file():
logger.info(f'Loading best model from checkpoint {path}')
self.checkpointer.load(str(path))
else:
logger.warning('Not loading any model, because no best model was found!')
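# Illustrative sketch (added for clarity, not part of the original module): LoadBest
# is meant to be registered together with a BestCheckpointer instance, e.g.
#     best = BestCheckpointer(eval_period=cfg.TEST.EVAL_PERIOD,
#                             checkpointer=trainer.checkpointer,
#                             val_metric=cfg.SOLVER.EARLY_STOPPING.METRIC_NAME,
#                             mode=cfg.SOLVER.EARLY_STOPPING.MODE)
#     hooks.extend([best, LoadBest(best)])
# The cfg keys mirror the ones used in EarlyStoppingTrainer.build_hooks further down.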
class EarlyStoppingHook(EvalHook):
"""Hook to perform early stopping. It uses the same eval condition as the EvalHook.
"""
def __init__(self,
eval_period: int,
patience: int,
delta: float,
get_last_eval_result_f: Callable,
metric_name: Union[None, str]=None,
mode: str='max'):
"""
Parameters
----------
eval_period: int
This class is a subclass of EvalHook and shares its way of
triggering an evaluation to check for an early stop. The evaluation is
simply triggered every 'eval_period' steps.
patience: int
Number of evaluations without an improvement before the training
is stopped.
delta: float
Minimal improvement.
get_last_eval_result_f: callable
Function returning a dict with the last evaluation results or
the value of a specific metric directly.
When combined with EvalHook it is probably advisable
to retrieve its results and not to redo the evaluation.
Check EarlyStoppingTrainer.build_hooks as an example.
metric_name: None or str, optional, default=None
Name of the metric used to decide on an early stop.
In cases 'get_last_eval_result_f' returns a value
directly the metric_name is ignored. If the function
returns a dict the metric_name is the key of the metric.
mode: str, optional, default='max'
'max' if the metric should be maximized.
'min' if the metric should be minimized.
"""
super().__init__(eval_period, None)
self.patience = patience
self.delta = delta
self.metric_name = metric_name
self.best_result = None
self.unimproved_evals = 0
if mode == 'max':
self.is_better = lambda x: (self.best_result + self.delta) <= x
elif mode == 'min':
self.is_better = lambda x: (self.best_result - self.delta) >= x
else:
raise ValueError(f'`mode` (cfg.SOLVER.EARLY_STOPPING.MODE) for EarlyStopping has to be either `max` or `min`. Got {mode}!')
self.get_last_result = get_last_eval_result_f
def _do_eval(self):
new_result = self.get_last_result()
if isinstance(new_result, dict):
if self.metric_name is None:
new_result = None
log_first_n(
logging.WARN,
"The provided result function returns a dict. Please specifiy"
" the name of the metric for early stopping!",
name=__name__)
else:
try:
new_result = new_result[self.metric_name]
except KeyError:
log_first_n(
logging.WARN,
f"Not metric {self.metric_name} found in the result.",
name=__name__)
new_result = None
if new_result is None:
return # No evaluation result found!
if self.best_result is None:
self.best_result = new_result
else:
if self.is_better(new_result):
was_stalling = self.unimproved_evals > 0
self.unimproved_evals = 0
self.best_result = new_result
if was_stalling:
logger.info('The model performance improved again. The early stopping counter was reset.')
else:
self.unimproved_evals += 1
logger.info(f'The performance has not improved in the last'
f' {self.unimproved_evals} evaluations (patience: {self.patience}).')
if self.unimproved_evals >= self.patience:
msg = 'No improvements of the model performance'
msg += (f" ({self.metric_name}) " if self.metric_name is None else ' ')
msg += f'in the last {self.patience} evaluations.'
logger.info(msg)
raise EarlyStop
def after_train(self):
"""Stopping after training is senseless. so the EvalHook
after_train func is overwritten."""
pass
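# Hedged illustration of the improvement rule implemented above: with mode='max'
# and delta=0.01 a new value only counts as an improvement if it reaches at least
# best_result + delta, e.g.
#     best, delta = 0.80, 0.01
#     (best + delta) <= 0.812   # True  -> counter reset, new best stored
#     (best + delta) <= 0.805   # False -> unimproved_evals += 1
# After `patience` consecutive non-improvements EarlyStop is raised and caught
# in EarlyStoppingTrainer.train.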
class AIFLogging(EvalHook):
"""This hooks tries to log the metric through the AICore Tracking SDK."""
nan_value = -100.
def __init__(self, eval_period: int, get_last_eval_result_f: Callable):
"""Create instance of the hook.
Parameters
----------
eval_period: int
This class is a subclass of EvalHook and shares its way of
triggering an evaluation. The metric logging is simply
triggered every 'eval_period' steps.
get_last_eval_result_f: callable
Function returning a dict with the last evaluation results or
the value of a specific metric directly.
When combined with EvalHook it is probably advisable
to retrieve its results and not to redo the evaluation.
Check EarlyStoppingTrainer.build_hooks as an example.
"""
super().__init__(eval_period, None)
logger = logging.getLogger(__name__)
try:
from ai_core_sdk.tracking import Tracking
except ImportError:
logger.warn("AI Core Tracking Module not found")
self.tracking_module = None
else:
self.tracking_module = Tracking()
self.get_last_result = get_last_eval_result_f
def _do_eval(self):
if self.tracking_module is None:
return
new_result = self.get_last_result()
if new_result is None:
return
metrics = self.format_metrics(new_result, self.trainer.iter + 1, [{'name': 'data_split', 'value': 'validation'}])
self.tracking_module.log_metrics(metrics=metrics)
@classmethod
def format_metrics(cls, results, step, labels=[]):
try:
from ai_core_sdk.models import Metric, MetricLabel, MetricTag, MetricCustomInfo
from datetime import datetime
except ImportError:
logger.warn("AI Core Models for Metrics, Tags and CustomInfo not found")
return []
else:
metrics = []
for key, value in results.items():
label_objs = []
for label in labels:
label_objs.append(MetricLabel(name=label['name'], value=label['value']))
# If the value for any metric is NAN, we add a `nans` label to it
if not np.isfinite(value):
value = cls.nan_value
label_objs.append(MetricLabel(name='nans', value=f'{value}=nan'))
metric = Metric(
name=key,
value=value,
step=step,
timestamp=datetime.utcnow(),
labels=label_objs
)
metrics.append(metric)
return metrics
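# Usage sketch (assumes the ai-core-sdk package is available; this mirrors the
# call made in the training pipelines of this package):
#     metrics = AIFLogging.format_metrics({'accuracy': 0.93}, step=1000,
#                                         labels=[{'name': 'data_split', 'value': 'test'}])
#     Tracking().log_metrics(metrics=metrics)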
class EarlyStop(Exception):
"""Exception used to trigger an early stop during the training."""
pass
class EarlyStoppingTrainer(DefaultTrainer):
"""Default trainer. This is base trainer and cannot be used for trainer.
Use ObjectDetectionTrainer, ImageClassificationTrainer and TripletDistanceTrainer
for the actual trianing
It includes multiple extentions in comparison to the detectron2.DefaultTrainer
- Includes early stopping
- AICore logging hook
- Load best hook
- Pop EvalHook from hook list when no test dataset was defined in the config
- More augmentations configurable through the cfg
"""
def build_hooks(self) -> List[HookBase]:
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns
-------
list[HookBase]
List of hooks.
"""
hooks = super().build_hooks()
got_eval_hook = True
if self.cfg.DATASETS.TEST is None or len(self.cfg.DATASETS.TEST) == 0:
got_eval_hook = False
eval_hook = None
for i, h in enumerate(hooks):
if type(h) is EvalHook:  # only the plain EvalHook added by DefaultTrainer, not the subclasses defined here
eval_hook = i
break
if eval_hook is not None:
del hooks[eval_hook]
if not hasattr(self.cfg.SOLVER, 'EARLY_STOPPING'):
raise ValueError('No `EARLY_STOPPING` node in config. Add the `Base-EarlyStopping` config to use the `EarlyStoppingTrainer`!')
if got_eval_hook:
if self.cfg.SOLVER.EARLY_STOPPING.LOAD_BEST:
best_checkpoint_hook = BestCheckpointer(eval_period=self.cfg.TEST.EVAL_PERIOD,
checkpointer=self.checkpointer,
val_metric=self.cfg.SOLVER.EARLY_STOPPING.METRIC_NAME,
mode=self.cfg.SOLVER.EARLY_STOPPING.MODE,
file_prefix=self.cfg.SOLVER.EARLY_STOPPING.BEST_MODEL_PREFIX)
hooks.append(best_checkpoint_hook)
if self.cfg.SOLVER.EARLY_STOPPING.ENABLED:
early_stopping_hook = EarlyStoppingHook(
eval_period=self.cfg.TEST.EVAL_PERIOD,
patience=self.cfg.SOLVER.EARLY_STOPPING.PATIENCE,
delta=self.cfg.SOLVER.EARLY_STOPPING.MIN_IMPROVEMENT,
mode=self.cfg.SOLVER.EARLY_STOPPING.MODE,
metric_name=self.cfg.SOLVER.EARLY_STOPPING.METRIC_NAME,
get_last_eval_result_f=lambda: getattr(self, '_last_eval_results', None))
hooks.append(early_stopping_hook)
if self.cfg.EVAL.LOG_METRICS:
hooks.append(AIFLogging(eval_period=self.cfg.TEST.EVAL_PERIOD,
get_last_eval_result_f=lambda: getattr(self, '_last_eval_results', None)))
return [h for h in hooks if isinstance(h, HookBase)]
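# Descriptive note: the hooks assembled above read TEST.EVAL_PERIOD, EVAL.LOG_METRICS
# and the SOLVER.EARLY_STOPPING keys (ENABLED, LOAD_BEST, PATIENCE, MIN_IMPROVEMENT,
# MODE, METRIC_NAME, BEST_MODEL_PREFIX) from the merged Base-EarlyStopping config.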
def train(self):
try:
return super().train()
except EarlyStop:
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
@classmethod
def build_train_loader(cls, cfg: CfgNode) -> 'torch.utils.data.DataLoader':
raise NotImplementedError
@classmethod
def build_additional_augmentations(cls, cfg: CfgNode) -> List['T_.Transform']:
"""Build additional augmentations.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
list of T_.Transform
List of augmentation
"""
augmentations = []
if cfg.INPUT.get('RANDOM_LIGHTING', {}).get('ENABLED', False):
augmentations.append(T_.RandomLighting(cfg.INPUT.RANDOM_LIGHTING.STRENGTH))
if cfg.INPUT.get('RANDOM_BRIGHTNESS', {}).get('ENABLED', False):
augmentations.append(T_.RandomBrightness(*cfg.INPUT.RANDOM_BRIGHTNESS.STRENGTH))
if cfg.INPUT.get('RANDOM_SATURATION', {}).get('ENABLED', False):
augmentations.append(T_.RandomSaturation(*cfg.INPUT.RANDOM_SATURATION.STRENGTH))
if cfg.INPUT.get('RANDOM_CONTRAST', {}).get('ENABLED', False):
augmentations.append(T_.RandomContrast(*cfg.INPUT.RANDOM_CONTRAST.STRENGTH))
return augmentations
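# Hedged example of the cfg layout this method expects (the STRENGTH values below
# are placeholders for illustration, not defaults shipped with the package):
#     cfg.INPUT.RANDOM_BRIGHTNESS.ENABLED = True
#     cfg.INPUT.RANDOM_BRIGHTNESS.STRENGTH = (0.9, 1.1)
#     cfg.INPUT.RANDOM_LIGHTING.ENABLED = True
#     cfg.INPUT.RANDOM_LIGHTING.STRENGTH = 0.5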
class ObjectDetectionTrainer(EarlyStoppingTrainer):
"""Object detection version of the EarlyStoppingTrainer.
Default config: Base-EarlyStopping.yaml
To make the trainer work for image classification the changer are:
- 'build_evaluator' creates instance of ObjectDetectionEvaluator
- 'build_train_loader' uses DatasetMapperWithAdditionalAugmentaions extending
- 'build_test_loader' extends to default detectron2 build_test_loader func use
bataching during evaluation.
"""
@classmethod
def build_evaluator(cls, cfg: CfgNode, _) -> 'ObjectDetectionEvaluator':
return ObjectDetectionEvaluator(cfg)
@classmethod
def build_train_loader(cls, cfg: CfgNode) -> 'torch.utils.data.DataLoader':
"""Build train loader.
The loder is the default loader from detectron2 but adds additional
augementations. See build_additional_augmentations for details.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
torch.utils.data.DataLoader
Return the DataLoader for train data.
"""
mapper = DatasetMapperWithAdditionalAugmentaions(cfg, is_train=True) # pylint: disable=E1121, E1123, E1124, E1125
return build_detection_train_loader(cfg, mapper=mapper) # pylint: disable=E1121, E1123, E1124, E1125
@classmethod
def build_test_loader(cls, cfg: CfgNode, dataset_name: str):
return build_detection_test_loader_batched(cfg, dataset_name) # pylint: disable=E1121, E1123, E1124, E1125
class ImageClassificationTrainer(EarlyStoppingTrainer):
"""Image classification version of the EarlyStoppingTrainer.
Default config: Base-EarlyStopping.yaml
To make the trainer work for image classification the changes are:
- 'build_evaluator' creates an instance of ImageClassificationEvaluator
- 'build_train_loader'/'build_test_loader' use DatasetMapperClassification
"""
@classmethod
def build_evaluator(cls, cfg: CfgNode, _) -> 'ImageClassificationEvaluator':
return ImageClassificationEvaluator(cfg)
@classmethod
def build_train_loader(cls, cfg: CfgNode) -> 'torch.utils.data.DataLoader':
mapper = DatasetMapperClassification(cfg, is_train=True) # pylint: disable=E1121, E1123, E1124, E1125
return build_classification_train_loader(cfg, mapper=mapper) # pylint: disable=E1121, E1123, E1124, E1125
@classmethod
def build_test_loader(cls, cfg: CfgNode, dataset_name: Union[str, List[str]]) -> 'torch.utils.data.DataLoader':
return build_classification_test_loader_batched(cfg, dataset_name) # pylint: disable=E1121, E1123, E1124, E1125
class TripletDistanceTrainer(ImageClassificationTrainer):
"""Triplet distance learning version of the EarlyStoppingTrainer.
Default config: Base-EarlyStopping.yaml
In builds on top of the ImageClassificationTrainer because both use the same
DataLoaders. Additional changes to make the trainer work for distance learning are:
- 'build_evaluator' creates instance of ContrastiveEvaluator
- 'build_hooks' it extends the EarlyStoppingTrainerHooks and adds PKTripletStrategySwitcher
and PredictReservoirHook if needed for the sampler.
"""
@classmethod
def build_evaluator(cls, cfg: CfgNode, _) -> 'ContrastiveEvaluator':
return ContrastiveEvaluator(cfg)
def build_hooks(self) -> List[HookBase]:
hooks = super().build_hooks()
try:
if isinstance(self.data_loader.batch_sampler, TripletReservoirSampler):
hooks.append(PredictReservoirHook(self.data_loader.batch_sampler))
except AttributeError:
pass
try:
batch_sampler = self.data_loader.batch_sampler
uses_triplet_strategy = False
if isinstance(batch_sampler, PKTripletSampler):
uses_triplet_strategy = True
target = self.model
strategies = self.cfg.DATALOADER.PK_SAMPLER.get('STRATEGY_SWITCHES', None)
elif isinstance(batch_sampler, TripletReservoirSampler):
uses_triplet_strategy = True
target = batch_sampler
strategies = self.cfg.DATALOADER.TRIPLET_RESERVOIR_SAMPLER.get('STRATEGY_SWITCHES', None)
except AttributeError:
pass
else:
if uses_triplet_strategy:
if strategies is not None:
hook = PKTripletStrategySwitcher(strategies, target)
if len(hook.strategies) > 0:
hooks.append(hook)
return hooks
| /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/engine/trainers.py | 0.905966 | 0.270525 | trainers.py | pypi |
```
! [ -d "GroceryStoreDataset-master" ] && echo "skipping" || (wget -nc --no-check-certificate https://github.com/marcusklasson/GroceryStoreDataset/archive/refs/heads/master.zip && unzip master.zip -d .)
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import pathlib, json
from sap_computer_vision.datasets import image_folder as imgf
dataset_folder = pathlib.Path('GroceryStoreDataset-master/').resolve()
images_train, class_names = imgf.register('grocery_train',
base_dir=dataset_folder / 'dataset' / 'train')
images_val, class_names = imgf.register('grocery_val',
base_dir=dataset_folder / 'dataset' / 'val',
class_names=class_names)
images_test, class_names = imgf.register('grocery_test',
base_dir=dataset_folder / 'dataset' / 'test',
class_names=class_names)
import numpy as np
from sap_computer_vision import setup_loggers, get_cfg, get_config_file
out_dir = 'distance_learning_grocery_higher_lr'
setup_loggers(out_dir)
cfg = get_cfg()
cfg.merge_from_file(get_config_file('Base-EarlyStopping'))
cfg.merge_from_file(get_config_file('Base-Evaluation'))
cfg.merge_from_file(get_config_file('TripletDistanceLearner/FPN-Resnet50'))
cfg.OUTPUT_DIR = out_dir
cfg.DATASETS.TRAIN = ('grocery_train', )
cfg.DATASETS.TEST = ('grocery_val', )
cfg.DATALOADER.PK_SAMPLER.P_CLASSES_PER_BATCH = 30
cfg.DATALOADER.PK_SAMPLER.K_EXAMPLES_PER_CLASS = 4
cfg.DATALOADER.NUM_WORKERS = 10
cfg.SOLVER.MAX_ITER = 5000
cfg.SOLVER.BASE_LR = 0.01
cfg.SOLVER.GAMMA = float(np.sqrt(0.1))
cfg.SOLVER.EARLY_STOPPING.ENABLED = False
cfg.SOLVER.WARMUP_ITERS = max(int(0.01 * cfg.SOLVER.MAX_ITER), 0)
cfg.SOLVER.STEPS = [cfg.SOLVER.MAX_ITER * p for p in (0.25, 0.375, 0.5, 0.75, 0.9)]
for aug in ['RANDOM_LIGHTING', 'RANDOM_BRIGHTNESS', 'RANDOM_SATURATION', 'RANDOM_CONTRAST', 'RANDOM_ROTATION', 'CROP', 'CUT_OUT']:
if cfg.INPUT.get(aug, None) is not None:
cfg.INPUT[aug].ENABLED = True
cfg.MODEL.TRIPLET_DISTANCE_LEARNER.MARGIN_LOSS.MARGIN = 0.5
cfg.MODEL.TRIPLET_DISTANCE_LEARNER.LOSS = 'MARGIN_LOSS'
cfg.MODEL.FEATURE_EXTRACTION.PROJECTION_SIZE = 512
cfg.MODEL.FEATURE_EXTRACTION.INTERMEDIATE_SIZE = None
cfg.DATALOADER.SAMPLER_TRAIN = 'PKSampler'
cfg.MODEL.TRIPLET_DISTANCE_LEARNER.TRIPLET_STRATEGY = ('*', '*')
delay = (cfg.SOLVER.MAX_ITER * 0.5)
strategies_pos = np.linspace(0.5, 0.8, 21)
strategies_neg = 1. - strategies_pos
strategies = [(float(p), float(n)) for p, n in zip(strategies_pos, strategies_neg)]
switch_steps = np.linspace(delay, cfg.SOLVER.MAX_ITER, len(strategies)+1)[:-1]
cfg.DATALOADER.PK_SAMPLER.STRATEGY_SWITCHES = [(int(step), strat) for (step, strat) in zip(switch_steps, strategies)]
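# Descriptive note (added for clarity): the block above builds the schedule for the
# PKTripletStrategySwitcher hook. Training starts with the fully random ('*', '*')
# strategy; from iteration 2500 (half of MAX_ITER) on, the strategy is switched 21
# times to the float pairs built from strategies_pos/strategies_neg (their exact
# semantics are defined by PKTripletSampler).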
cfg.TEST.EVAL_PERIOD = 250
out_dir = pathlib.Path(out_dir)
out_dir.mkdir(parents=True, exist_ok=True)
with (out_dir / 'used_config.yaml').open('w') as stream:
stream.write(cfg.dump())
from sap_computer_vision.engine import TripletDistanceTrainer
trainer = TripletDistanceTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
cfg.DATASETS.TEST = ('grocery_test', )
metrics = trainer.test(cfg, trainer.model)
print(json.dumps(metrics, indent=2))
```
| /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/examples/Triplet_Learning.ipynb | 0.446012 | 0.15876 | Triplet_Learning.ipynb | pypi |
```
! [ -d "PetImages" ] && echo "skipping" || (wget -nc --no-check-certificate https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip && unzip kagglecatsanddogs_3367a.zip "PetImages/*" -d . && rm kagglecatsanddogs_3367a.zip)
!rm -f "PetImages/Dog/11702.jpg" # Remove broken image
!rm -f "PetImages/Cat/666.jpg" # Remove broken image
import os
import shutil
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from sap_computer_vision.datasets import image_folder as imgf
import pathlib, json
dataset_folder = pathlib.Path('PetImages/')
datasets, class_names = imgf.split_and_register('dogs_vs_cats',
base_dir=dataset_folder,
splits={'test': 0.1, 'validation': 0.1, 'train': 0.8})
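# split_and_register registers one dataset per split, named '<name>_<split>'
# ('dogs_vs_cats_train', 'dogs_vs_cats_validation', 'dogs_vs_cats_test');
# the cfg.DATASETS values below rely on this naming convention.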
from sap_computer_vision import setup_loggers, get_cfg, get_config_file
import numpy as np
out_dir = 'image_classification_model'
if pathlib.Path(out_dir).exists():
raise RuntimeError('Result folder already exists. Please delete the folder or change the name of the output')
setup_loggers(out_dir)
cfg = get_cfg()
cfg.merge_from_file(get_config_file('Base-EarlyStopping'))
cfg.merge_from_file(get_config_file('Base-Evaluation'))
cfg.merge_from_file(get_config_file('ImageClassifier/Resnet50'))
cfg.OUTPUT_DIR = out_dir
cfg.DATASETS.TRAIN = ('dogs_vs_cats_train', )
cfg.DATASETS.TEST = ('dogs_vs_cats_validation', )
cfg.SOLVER.MAX_ITER = 2000
cfg.SOLVER.BASE_LR = 0.001
cfg.SOLVER.GAMMA = float(np.sqrt(0.1))
cfg.SOLVER.EARLY_STOPPING.ENABLED = False
cfg.SOLVER.WARMUP_ITERS = max(int(0.01 * cfg.SOLVER.MAX_ITER), 50)
cfg.SOLVER.STEPS = [cfg.SOLVER.MAX_ITER * p for p in (0.05, 0.25, 0.375, 0.5, 0.75, 0.9)]
for aug in ['RANDOM_LIGHTING', 'RANDOM_BRIGHTNESS', 'RANDOM_SATURATION', 'RANDOM_CONTRAST', 'CROP']:
if cfg.INPUT.get(aug, None) is not None:
cfg.INPUT[aug].ENABLED = True
cfg.SOLVER.IMS_PER_BATCH = 64
cfg.SOLVER.IMS_PER_BATCH_EVAL = 64
cfg.MODEL.IMAGE_CLASSIFIER.NUM_CLASSES = len(class_names)
cfg.TEST.EVAL_PERIOD = int(cfg.SOLVER.MAX_ITER * 0.25)
from sap_computer_vision.engine import ImageClassificationTrainer
trainer = ImageClassificationTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
cfg.DATASETS.TEST = ('dogs_vs_cats_test', )
metrics = trainer.test(cfg, trainer.model)
print(json.dumps(metrics, indent=2))
```
| /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/examples/Image_Classification.ipynb | 0.405331 | 0.171581 | Image_Classification.ipynb | pypi |
import pathlib
import json
import logging
from typing import List, Dict
from functools import partial
import torch
from metaflow import FlowSpec, step, argo, Parameter, JSONType
import numpy as np
from detectron2.config import CfgNode
from detectron2.structures import Instances
from detectron2.checkpoint import DetectionCheckpointer
from sap_computer_vision import setup_loggers, get_cfg
import sap_computer_vision.datasets.image_folder as imgf
from sap_computer_vision.utils.object_detection import torchvision_nms_on_model_output
#@argo_base(
# labels={'scenarios.ai.sap.com/id': 'scenario-id',
# 'ai.sap.com/version': 'scenario-version'},
# annotations={'scenarios.ai.sap.com/name': 'scenario-name',
# 'executables.ai.sap.com/name': 'executable-name',
# 'artifacts.ai.sap.com/datain.kind': 'dataset',
# 'artifacts.ai.sap.com/trainedmodel.kind': 'model'},
# image='mlf.docker.repositories.sapcdn.io/com.sap.ai/sap_cv_metaflow:0.0.25',
# imagePullSecrets=[{'name': 'your-image-pull-secret'}],
# envFrom=[{'secretRef': {'name': 'default-object-store-secret'}}],
# volumes=[{'name': 'dshm', 'emptyDir': {'medium': 'Memory'}}])
class BatchProcessing(FlowSpec):
"""Pipeline to batch processing data using a trained model.
"""
# Constant values used during processing
DATA_INPUT_DIR = pathlib.Path('/tmp/datain')
MODEL_INPUT_DIR = pathlib.Path('/tmp/model')
RESULT_OUTPUT_DIR = pathlib.Path('/tmp/results')
DATASET_NAME = "predict_dataset"
FALLBACK_TASK = 'IMAGE_CLASSIFICATION'
# Pipeline parameters
model_filename = Parameter("model_file",
help=f"File name for model to be used. Usually a .pth file",
default='')
batch_size = Parameter("batch_size",
help="Number of images per batch. Set to a value <= 0 to use the batch size used during training.",
default=0)
imgtypes = Parameter("image_types",
help="JSON-encoded list of expected image extensions",
type=JSONType,
default=json.dumps([".jpg", ".jpeg", ".png"]))
iou_threshold = Parameter("iou_threshold",
help="IOU threshold is used in Non-maximum suppression to filter out overlapping detections for object detection. " + \
"Only applied when the value is >= 0 and <= 1.",
type=float,
default=-1.0)
@argo(output_artifacts=[{'name': 'results',
'globalName': 'results',
'path': str(RESULT_OUTPUT_DIR),
'archive': {'none': {}}}],
input_artifacts=[{'name': 'datain',
'path': str(DATA_INPUT_DIR)},
{'name': 'modelin',
'path': str(MODEL_INPUT_DIR)}],
labels={"ai.sap.com/resourcePlan": "train.l"},
shared_memory=1000)
@step
def start(self):
"""In this step the model is trained.
"""
logger = logging.getLogger(__name__)
setup_loggers(str(self.RESULT_OUTPUT_DIR), color=False, additional_loggers=[__name__])
model_folder = pathlib.Path(self.MODEL_INPUT_DIR)
cfg_path = pathlib.Path(model_folder / 'used_config.yaml' )
with cfg_path.open() as stream:
cfg = CfgNode.load_cfg(stream)
model_file = [model_folder / f for f in [self.model_filename, 'model_best.pth', 'model_final.pth'] if (model_folder / f ).exists()]
if len(model_file) == 0:
raise RuntimeError('No model found!')
model_file = model_file[0]
cfg.MODEL.DEVICE = get_cfg().MODEL.DEVICE
cfg.MODEL.WEIGHTS = str(model_file)
cfg.INPUT.IMS_PER_BATCH_EVAL = self.batch_size if self.batch_size > 0 else cfg.INPUT.IMS_PER_BATCH_EVAL
images, _ = imgf.register(self.DATASET_NAME,
base_dir=self.DATA_INPUT_DIR,
extensions=self.imgtypes,
class_names=cfg.TRAINING_INFO.get('THING_CLASSES', None))
if len(images) == 0:
raise RuntimeError('No images found!')
result_output_path = pathlib.Path(self.RESULT_OUTPUT_DIR)
result_output_path.mkdir(parents=True, exist_ok=True)
with (result_output_path / 'used_config.yaml').open('w') as stream:
stream.write(cfg.dump())
self.task = cfg.TRAINING_INFO.get('TASK', 'UNKNOWN')
if self.task == 'UNKNOWN':
logger.warn(f"Unknown task: {self.task }. Try to process data using '{self.FALLBACK_TASK}' trainer as fallback.")
if self.task == 'OBJECT_DETECTION' or self.FALLBACK_TASK == 'OBJECT_DETECTION':
from sap_computer_vision.engine import ObjectDetectionTrainer as trainer
elif self.task == 'IMAGE_CLASSIFICATION' or self.FALLBACK_TASK == 'IMAGE_CLASSIFICATION':
from sap_computer_vision.engine import ImageClassificationTrainer as trainer
elif self.task == 'TRIPLET_DISTANCE_MERIC_LEARNING' or self.FALLBACK_TASK == 'TRIPLET_DISTANCE_MERIC_LEARNING':
from sap_computer_vision.engine import TripletDistanceTrainer as trainer
model = trainer.build_model(cfg)
checkpointer = DetectionCheckpointer(model)
checkpointer.load(cfg.MODEL.WEIGHTS)
dl_val = trainer.build_test_loader(cfg, self.DATASET_NAME)
if isinstance(self.iou_threshold, float) and self.iou_threshold >= 0.0 and self.iou_threshold <= 1.0:
nms_f = partial(torchvision_nms_on_model_output, device='cpu', iou_threshold=float(self.iou_threshold))
else:
nms_f = lambda batch: batch
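# Descriptive note: non-maximum suppression is only applied when a valid IOU
# threshold in [0, 1] was passed; otherwise the model output is forwarded unchanged.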
with JSONSaver(self.task, self.RESULT_OUTPUT_DIR, self.DATA_INPUT_DIR, self.combined, self.max_size) as saver:
with torch.no_grad():
model.eval()
for batch in dl_val:
output = model(batch)
if self.task == 'OBJECT_DETECTION':
output = nms_f(output)
saver.register_batch(batch, output)
self.next(self.end)
@step
def end(self):
"""Currently this step is empty, but it is added because
metaflow DAGs always need a \'start\' and \'end\' step.
"""
pass
class JSONSaver:
def __init__(self, task, target_dir, data_in_dir, combined=True, max_size=None):
self.task = task
self._counter = 0
self.combined = combined
self.target_dir = pathlib.Path(target_dir)
self.result_buffer = [] if combined else None
self._in_context = False
self.max_size = max_size if isinstance(max_size, int) and max_size > 0 else None
self.crop_filename = lambda f: str(f).replace(str(self.data_in_dir) + '/', '')
self.data_in_dir = data_in_dir
def __enter__(self):
self.target_dir.mkdir(parents=True, exist_ok=True)
self._in_context = True
return self
def __exit__(self, type, value, traceback):
self._in_context = False
if self.result_buffer and len(self.result_buffer) > 0:
self._clear_buffer()
def register_batch(self, model_input, model_output):
model_output = results_to_jsonable_result_list(self.task, model_output)
for in_, out_ in zip(model_input, model_output):
in_file = self.crop_filename(in_['file_name'])
out_['filename'] = in_file
self.register_result(in_file, out_)
def register_result(self, in_file, result):
self._counter += 1
if self.combined:
self.result_buffer.append(result)
if self.max_size:
if self._counter % self.max_size == 0:
self._clear_buffer()
else:
out_file = self.target_dir / pathlib.Path(in_file).with_suffix('.json')
with out_file.open('w') as stream:
json.dump(result, stream)
def _clear_buffer(self, base_name='results'):
s, e = self._counter - len(self.result_buffer), self._counter - 1
filename = base_name + (f'{s}-{e}' if self.max_size else '') + '.json'
with (self.target_dir / filename).open('w') as stream:
json.dump(self.result_buffer, stream)
self.result_buffer = []
def results_to_jsonable_result_list(task, res, thing_classes=None) -> List[Dict]:
result_list = []
if task == 'OBJECT_DETECTION':
for res_i in res:
res_i = res_i['instances'].to('cpu')
res_i = {**res_i.get_fields()}
res_i['scores'] = res_i['scores'].tolist()
res_i['pred_boxes'] = res_i['pred_boxes'].tensor.tolist()
res_i['pred_classes'] = res_i['pred_classes'].tolist()
result_list.append(res_i)
elif task == 'IMAGE_CLASSIFICATION':
probs = res.cpu().numpy()
idx_max = np.argmax(probs, axis=1)
if thing_classes is not None:
predicted_class = [thing_classes[i] for i in idx_max]
else:
predicted_class = [''] * len(idx_max)
for p, i, c in zip(probs, idx_max, predicted_class):
result_list.append({'probs': p.tolist(), 'idx_max': int(i), 'pred_class': c})  # cast numpy int for JSON serialization
elif task == 'TRIPLET_DISTANCE_MERIC_LEARNING':
embedding = res.squeeze().cpu().numpy()
result_list.extend([{'embedding': embedding_i.tolist()} for embedding_i in embedding])
return result_list
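# Illustrative shape of the records produced above (values are made up; the
# 'filename' key is added later in JSONSaver.register_batch):
#   OBJECT_DETECTION:                {'scores': [...], 'pred_boxes': [[x1, y1, x2, y2], ...], 'pred_classes': [...]}
#   IMAGE_CLASSIFICATION:            {'probs': [...], 'idx_max': 3, 'pred_class': 'cat'}
#   TRIPLET_DISTANCE_MERIC_LEARNING: {'embedding': [...]}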
if __name__ == '__main__':
BatchProcessing()
| /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/pipelines/batch_pipelines/batch_processing.py | 0.729423 | 0.228716 | batch_processing.py | pypi |
import pathlib
import json
import os
import zipfile
from functools import lru_cache
import logging
from fvcore.common.config import BASE_KEY
from metaflow import FlowSpec, step, argo, Parameter, JSONType
from detectron2.config import CfgNode
import sap_computer_vision.datasets.utils as utils
from sap_computer_vision.datasets import image_folder as imgf
from sap_computer_vision import get_cfg, get_config_file, setup_loggers
from sap_computer_vision.evaluators import ImageClassificationEvaluator
from sap_computer_vision.engine import ImageClassificationTrainer
MODEL_OUTPUT_DIR = str(pathlib.Path('/tmp/model'))
DATA_INPUT_DIR = pathlib.Path('/tmp/datain')
# TODO: Data loading process is not finished; try to match what we implemented for object detection
class Trainer(ImageClassificationTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name):
if cfg.TEST.EVAL_PERIOD <= 0:
raise NotImplementedError
else:
return ImageClassificationEvaluator(cfg) # pylint: disable=E1124,E1125
class ImageClassificationTrain(FlowSpec):
"""Pipeline to train a model for image classification.
"""
model_name = Parameter("model_name",
help="Name of the Model configuration file.",
default="ImageClassifier/FPN-Resnet50-no-pooling")
train_input = Parameter("train",
help=f"Name of the file within {DATA_INPUT_DIR} containing filenames of the train dataset.",
default='train.txt')
validation_input = Parameter("validation",
help=f"Name of the file within {DATA_INPUT_DIR} containing filenames of the validation dataset.",
default="val.txt")
test_input = Parameter("test",
help=f"Name of the file within {DATA_INPUT_DIR} containing filenames of the test dataset.",
default="val.txt")
class_names = Parameter("class_names",
help="JSON encoded list of class names. If it is empty or not a valid JSON, " + \
"classes will be extracted from the train/val/test datasets.",
default="")
batch_size = Parameter("batch_size",
help="Number of images per batch.",
default=40)
lr_decay = Parameter("learning_rate_decay",
help="Whether learning rate should be decreased over the training.",
default=True)
intermediate_layers = Parameter("projection_layers",
help="Size of layers between backbone and final output layer. If [] , no intermediate layer is used.",
type=JSONType,
default=json.dumps([]))
n_steps = Parameter("maximum_training_steps",
help="Maximum number of training steps. Actual training steps could be less if `early_stopping` is enabled.",
default=3000)
eval_freq = Parameter("evaluation_frequency",
help="Frequency of evaluation. If > 1 it is treated as every `evaluation_frequency` steps. " + \
"If < 1 an evaluation in done every `evaluation_frequency` * `maximum_training_steps` steps",
default=0.1)
base_lr = Parameter("base_learning_rate",
help="Base learning rate.",
default=0.00025)
imgtypes = Parameter("image_types",
help="JSON encoded list of expected file extensions for images",
type=JSONType,
default=json.dumps([".jpg", ".jpeg", ".png"]))
additional_augmentations = Parameter("additional_augmentations",
help="Whether as additional data augmentations like cropping, random saturation, " + \
"random lighting, random brightness and random contrast should be done.",
default=True)
log_metrics_aif = Parameter("aicore_tracking",
help="Whether the evaluator should log the metrics on AI Core, so you can track your pipeline execution on AI Core",
type=bool,
default=False)
seed = Parameter("seed",
help="Random seed.",
default=1337)
@argo(output_artifacts=[{'name': 'trainedmodel',
'globalName': 'trainedmodel',
'path': str(MODEL_OUTPUT_DIR),
'archive': {'none': {}}}],
input_artifacts=[{'name': 'datain',
'path': str(DATA_INPUT_DIR)}],
labels={"ai.sap.com/resourcePlan": "train.l"},
shared_memory=1000)
@step
def start(self):
logger = setup_loggers(str(MODEL_OUTPUT_DIR), color=False, additional_loggers=[__name__])
self.eval_frequency = float(self.eval_freq)
class_names = None if self.class_names == "" else json.loads(self.class_names)
img_extensions = utils.check_extensions(self.imgtypes)
self.datasets, self.class_names_used = self.prepare_input_data(DATA_INPUT_DIR,
train=self.train_input,
validation=self.validation_input,
test=self.test_input,
class_names=class_names,
img_extensions=img_extensions,
seed=self.seed)
cfg = self.get_train_cfg(self.datasets, self.class_names_used )
model_output_path = pathlib.Path(MODEL_OUTPUT_DIR)
model_output_path.mkdir(parents=True, exist_ok=True)
with (model_output_path / 'used_config.yaml').open('w') as stream:
stream.write(cfg.dump())
trainer = Trainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
test_name = f'test'
if test_name in self.datasets:
cfg.defrost()
cfg.DATASETS.TEST = (test_name,)
results_test = trainer.test(cfg, trainer.model) # # pylint: disable=E1101
with open(cfg.OUTPUT_DIR + '/metrics_test.json', 'w') as stream:
json.dump(results_test, stream, indent=2)
if self.log_metrics_aif:
try:
from ai_core_sdk.tracking import Tracking
except ImportError:
logger.warn("AI Core Tracking Module not found!")
else:
from sap_computer_vision.engine.trainers import AIFLogging
tracking_module = Tracking()
formatted_metrics = AIFLogging.format_metrics(results_test, step=trainer.iter, labels=[{'name': 'data_split', 'value': 'test'}])
tracking_module.log_metrics(metrics=formatted_metrics)
self.next(self.end)
@step
def end(self):
pass
@staticmethod
def prepare_input_data(base_dir, class_names=None, img_extensions=None, seed=None, **dataset_inputs):
f'''This function interprets the \'train\', \'test\', \'validation\' parameters.
To register a dataset in detectron a lightweight version (list of dicts) of the dataset has to be created.
The folder in which the images are located is used as the label for each image.
So there must be a subfolder in the input artifact for each class.
For the \'train\', \'test\', \'validation\' parameters in this pipeline different options are supported.
The input data is provided as an input artifact and is copied to {DATA_INPUT_DIR}. The parameters for the
different datasets can either be a subfolder within the input directory, a txt-file containing the filenames
or a float between zero and one.
Examples
---------
1.)
Structure of the input artifact:
input_artifact/
train/
class_1/
img1.jpg
img2.jpg
class_2/
...
...
val/
class_1/
img231.jpg
img2123.jpg
class_2/
...
...
test/
class_1/
img3213.jpg
img32231.jpg
class_2/
...
...
Parameter values:
\'train\'='train'
\'validation\'='val'
\'test\'='test'
In this example the values of the parameters are relative paths to subfolders
located in the input artifact.
The files in the subfolders are used for the corresponding dataset.
2.)
Structure of the input artifact:
input_artifact/
class_1/
img1.jpg
img2.jpg
class_2/
...
...
splits/
train.txt
val.txt
test.txt
Parameter values:
\'train\'='splits/train.txt'
\'validation\'='splits/val.txt'
\'test\'='splits/test.txt'
In this example the values of the parameters are paths to txt files within the input artifact.
The txt-files are expected to contain the file names (one file name per line, with or without file extension)
for the datasets.
3.)
Structure of the input artifact:
class_1/
img1.jpg
img2.jpg
class_2/
...
...
Parameter values:
\'train\'=0.8
\'validation\'=0.2
\'test\'=''
In this example the values of the parameters are float values and '' for the test parameter.
The numbers indicate which part of the files in the images/annotation folders should be used
for the dataset. In the result folder of the pipeline a txt-file for each split will be placed.
\'validation\' and \'test\' can be an empty string in all cases. If they are set
to an empty string no evaluation/test is performed during the training.
'''
if img_extensions is None:
img_extensions = ['*.jpg', '*.jpeg']
base_dir = pathlib.Path(base_dir)
logger = logging.getLogger(__name__)
logger.info(f"Preparing datsets:")
@lru_cache(None)
def _prepare_input_data(input):
input_data = None
if input != '' and input is not None:
input_data = base_dir / str(input)
if input_data.exists() and input_data.is_dir():
pass
elif input_data.exists() and input_data.is_file():
if input_data.suffix == '.zip':
with zipfile.ZipFile(input_data, 'r') as zip_ref:
input_data_extracted = input_data.with_suffix('')
zip_ref.extractall(input_data_extracted)
os.remove(input_data)
input_data = input_data_extracted
else:
try:
input_data = float(input)
except ValueError:
input_data = None
return input_data
for name, value in dataset_inputs.items():
prepared = _prepare_input_data(value)
dataset_inputs[name] = prepared
logger.info(f"Parameter '{name}'={value} -> {prepared}")
primary_parameter = next(iter(dataset_inputs.values()))
if isinstance(primary_parameter, pathlib.Path):
datasets = {}
for n, input_ in dataset_inputs.items():
if isinstance(input_, pathlib.Path):
datasets[n], class_names = imgf.register(n,
class_names=class_names,
extensions=img_extensions,
base_dir=(base_dir if input_.is_file() else input_),
filenames=(input_ if input_.is_file() else None))
elif isinstance(primary_parameter, float):
splits = {n: v for n, v in dataset_inputs.items() if isinstance(v, float)}
datasets, class_names = imgf.split_and_register('',
base_dir=base_dir,
splits=splits,
extensions=img_extensions,
rnd_gen=seed)
for n, file_ids in datasets.items():
with (pathlib.Path(MODEL_OUTPUT_DIR) / f'{n}.txt').open('w') as stream:
stream.write('\n'.join([str(f) for f in file_ids]))
else:
raise ValueError(f'Invalid input for train data! Input artifact content: {os.listdir(base_dir)}')
logger.info(f"Successfully registed datasets:")
for n, v in datasets.items():
logger.info(f"{n} with {len(v)} examples")
return datasets, class_names
def get_train_cfg(self, datasets, class_names):
cfg = get_cfg()
cfg.merge_from_file(get_config_file('Base-EarlyStopping'))
cfg.merge_from_file(get_config_file('Base-Evaluation'))
cfg.merge_from_file(get_config_file(self.model_name))
cfg.SEED = int(self.seed)
cfg.OUTPUT_DIR = MODEL_OUTPUT_DIR
cfg.DATASETS.TRAIN = ('train',)
if 'validation' in datasets.keys():
cfg.DATASETS.TEST = (f'validation',)
if self.eval_frequency > 1:
cfg.TEST.EVAL_PERIOD = int(self.eval_frequency)
else:
cfg.TEST.EVAL_PERIOD = int(cfg.SOLVER.MAX_ITER * float(self.eval_frequency))
else:
cfg.DATASETS.TEST = None
cfg.TEST.EVAL_PERIOD = -1
cfg.SOLVER.BASE_LR = float(self.base_lr)
cfg.SOLVER.MAX_ITER = int(self.n_steps)
cfg.SOLVER.IMS_PER_BATCH = int(self.batch_size)
cfg.SOLVER.WARMUP_ITERS = max(int(0.01 * cfg.SOLVER.MAX_ITER), 0)
if self.lr_decay:
cfg.SOLVER.STEPS = [cfg.SOLVER.MAX_ITER * p for p in (0.5, 0.75, 0.9)]
else:
cfg.SOLVER.STEPS = []
cfg.set_new_allowed(True)
cfg.TRAINING_INFO = CfgNode()
cfg.TRAINING_INFO.THING_CLASSES = class_names
cfg.TRAINING_INFO.TASK = 'IMAGE_CLASSIFICATION'
cfg.MODEL.FEATURE_EXTRACTION.INTERMEDIATE_SIZE = self.intermediate_layers
cfg.MODEL.IMAGE_CLASSIFIER.NUM_CLASSES = len(class_names)
cfg.EVAL.LOG_METRICS = bool(self.log_metrics_aif)
for aug in ['RANDOM_LIGHTING', 'RANDOM_BRIGHTNESS', 'RANDOM_SATURATION', 'RANDOM_CONTRAST', 'RANDOM_ROTATION', 'CROP']:
if cfg.INPUT.get(aug, None) is not None:
cfg.INPUT[aug].ENABLED = self.additional_augmentations
cfg.freeze()
return cfg
if __name__ == '__main__':
ImageClassificationTrain()
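# Hedged usage example (standard metaflow CLI; the parameter names follow the
# Parameters defined above, the data layout follows the argo artifact paths):
#   python image_classification_train.py run --batch_size 32 --maximum_training_steps 1000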
| /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/pipelines/batch_pipelines/image_classification_train.py | 0.512937 | 0.318551 | image_classification_train.py | pypi |
import pathlib
import json
import zipfile
import os
import logging
from functools import lru_cache
from collections.abc import Iterable
import numpy as np
from metaflow import FlowSpec, step, argo, Parameter, JSONType
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
import torch
from sap_computer_vision.datasets import image_folder as imgf
import sap_computer_vision.datasets.utils as utils
from sap_computer_vision import get_cfg, get_config_file, setup_loggers
from sap_computer_vision.evaluators.contrastive import ContrastiveEvaluator, get_metrics
from sap_computer_vision.engine import TripletDistanceTrainer
MODEL_OUTPUT_DIR = pathlib.Path('/tmp/model')
DATA_INPUT_DIR = pathlib.Path('/workdir/datain')
class Trainer(TripletDistanceTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name):
if cfg.TEST.EVAL_PERIOD <= 0:
raise NotImplementedError
else:
return ContrastiveEvaluator(cfg) # pylint: disable=E1124,E1125
class TripletDistanceMetricTrain(FlowSpec):
"""Pipeline to train a model for feature extraction using triplet loss.
"""
model_name = Parameter("model_name",
help="Name of the Model configuration file",
default="TripletDistanceLearner/FPN-Resnet50")
train_input = Parameter("train",
help=f"Name of the file or folder within {DATA_INPUT_DIR} containing filenames of the train dataset. If {DATA_INPUT_DIR} " + \
"contain an \'images\' and \'annotations\' folder it can also be a float. This float is the ratio of images included in the train dataset",
default='train.txt')
validation_input = Parameter("validation",
help=f"Name of the file or folder within {DATA_INPUT_DIR} containing filenames of the validation dataset. If {DATA_INPUT_DIR} " + \
"contain an \'images\' and \'annotations\' folder it can also be a float. This float is the ratio of images included in the train dataset. " + \
"For this to work \'train\' has to be a float as well.",
default="val.txt")
test_input = Parameter("test",
help=f"Name of the file or folder within {DATA_INPUT_DIR} containing filenames of the test dataset. If {DATA_INPUT_DIR} " + \
"contain an \'images\' and \'annotations\' folder it can also be a float. This float is the ratio of images included in the train dataset." ,
default="test.txt")
index_input = Parameter("index",
help=f"Name of the file or folder within {DATA_INPUT_DIR} containing filenames of the index dataset. If {DATA_INPUT_DIR} " + \
"contain an \'images\' and \'annotations\' folder it can also be a float. This float is the ratio of images included in the train dataset." ,
default="index.txt")
# [TODO] Check if these samplers can be used. Why does the pipeline raise NotImplementedError for samplers other than PKSampler?
sampler = Parameter("sampler",
help="Sampler used to create the triplets." + \
"Choose from the following samplers: `PKSampler`, `TripletSampler`, `TripletReservoirSampler`",
default="PKSampler")
loss = Parameter("loss",
help="Loss function to use for Triplet Distance Learner." + \
"Choose from the following options: `MARGIN_LOSS`, `NCA_LOSS`, `CIRCLE_LOSS`",
default="MARGIN_LOSS")
margin = Parameter("margin",
help="Value for the Margin term used if `loss` is `CIRCLE_LOSS` or `MARGIN_LOSS`",
default="0.5")
embedding_dim = Parameter("embedding_dimensions",
help="Dimensionality of the final layer. If 0, the last layer is nn.Identity",
default="512")
intermediate_layers = Parameter("projection_layers",
help="Size of layers between backbone and final output layer. If [] , no intermediate layer is used.",
type=JSONType,
default='[]')
lr_decay = Parameter("learning_rate_decay",
help="Whether learning rate should be decreased over the training.",
type=JSONType,
default=json.dumps(True))
early_stopping = Parameter("early_stopping",
help="Whether early stopping should be active.",
default=False)
additional_augmentations = Parameter("additional_augmentations",
help="Whether as additional data augmentations like cropping, random saturation, " + \
"random lighting, random brightness and random contrast should be done.",
default=True)
batch_size = Parameter("batch_size",
help="Number of images per batch.",
default=8)
p_classes_per_batch = Parameter("p_classes_per_batch",
help="Randomly sampled `P` classes per batch which is then used for randomly sampling `K` images of each class, " + \
"thus resulting in a batch of `PK` images.",
default=8)
k_examples_per_class = Parameter("k_examples_per_class",
help="Randomly sampled `K` images of each class (from the selected `p_classes_per_batch`), " + \
"thus resulting in a batch of `PK` images.",
default=8)
n_steps = Parameter("maximum_training_steps",
help="Maximum number of training steps. Actual training steps could be less if `early_stopping` is enabled.",
default=3000)
eval_freq = Parameter("evaluation_frequency",
help="Frequency of evaluation. If > 1 it is treated as every `evaluation_frequency` steps. " + \
"If < 1 an evaluation in done every `evaluation_frequency` * `maximum_training_steps` steps",
default="0.1")
base_lr = Parameter("base_learning_rate",
help="Base learning rate.",
default=0.00025)
seed = Parameter("seed",
help="Random seed.",
default=1337)
log_metrics_aif = Parameter("aicore_tracking",
help="Whether the evaluator should log the metrics on AI Core, so you can track your pipeline execution on AI Core",
type=bool,
default=False)
imgtypes = Parameter("image_types",
help="JSON encoded list of expected file extensions for images",
type=JSONType,
default='[".jpg", ".jpeg", ".png"]')
top_k = Parameter("evaluation_topk",
help="JSON encoded list of integers used as `k` values during evaluation",
type=JSONType,
default=json.dumps([1,3,5,10,30]))
freeze_backbone = Parameter("freeze_backbone",
help="Whether to freeze the weights of the backbone Network. \
Useful for applying the pretrained model as is without weight modifications to the backbone layers",
type=bool,
default=False)
# [TODO] : Check if this is even used?
workdirsize = Parameter("work_dir_size",
help="Size of the work directory.",
default=20)
@argo(output_artifacts=[{'name': 'trainedmodel',
'globalName': 'trainedmodel',
'path': str(MODEL_OUTPUT_DIR),
'archive': {'none': {}}}],
input_artifacts=[{'name': 'datain',
'path': str(DATA_INPUT_DIR)}],
labels={"ai.sap.com/resourcePlan": "train.l"},
shared_memory=1000)
@step
def start(self):
"""In this step the model is trained.
"""
setup_loggers(str(MODEL_OUTPUT_DIR), color=False, additional_loggers=[__name__])
self.eval_frequency = float(self.eval_freq)
img_extensions = utils.check_extensions(self.imgtypes)
self.datasets, self.class_names_used = self.prepare_input_data(DATA_INPUT_DIR,
train=self.train_input,
validation=self.validation_input,
test=self.test_input,
index=self.index_input,
img_extensions=img_extensions,
seed=int(self.seed))
cfg = self.get_train_cfg(self.datasets)
module_output_path = pathlib.Path(MODEL_OUTPUT_DIR)
module_output_path.mkdir(parents=True, exist_ok=True)
with (module_output_path / 'used_config.yaml').open('w') as stream:
stream.write(cfg.dump())
trainer = Trainer(cfg)
self.run_test(cfg, trainer, 'untrained')
trainer.resume_or_load(resume=False)
trainer.train()
self.run_test(cfg, trainer)
self.next(self.end)
@step
def end(self):
"""Currently this step is empty, but it is added because
metaflow DAGs always need a \'start\' and \'end\' step.
"""
pass
@staticmethod
def predict_dataset(cfg, dataset, trainer, class_names):
labels = []
file_names = []
outputs_val = []
with torch.no_grad():
trainer.model.eval()
dl_val = trainer.build_test_loader(cfg, dataset)
metadata = MetadataCatalog.get(dataset)
for batch in dl_val:
labels.extend([class_names[d['class_id']] if d.get('class_id', None) is not None else '' for d in batch])
file_names.extend([str(d['file_name']).replace(metadata.base_dir, '') for d in batch])
outputs_val.append(trainer.model(batch).cpu())
trainer.model.train()
return torch.cat(outputs_val).numpy(), np.array(labels), np.array(file_names) # pylint: disable=E1101
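# Descriptive note: predict_dataset returns numpy arrays of (embeddings, labels,
# file_names); run_test below uses it to embed the index split and, if present,
# to score the test split against that index with get_metrics.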
def run_test(self, cfg, trainer, suffix=None):
logger = logging.getLogger(__name__)
suffix = f"_{suffix}" if suffix is not None else ""
test_name = f'test'
index_name = f'index'
if test_name in self.datasets and index_name not in self.datasets:
cfg.defrost()
cfg.DATASETS.TEST = (test_name,)
results_test = trainer.test(cfg, trainer.model) # pylint: disable=E1101
elif index_name in self.datasets:
index_vectors, index_labels, index_file_names = self.predict_dataset(cfg, index_name, trainer, self.class_names_used)
np.savez_compressed(cfg.OUTPUT_DIR + f'/index{suffix}.npz',
vectors=index_vectors,
labels=index_labels,
file_names=index_file_names)
if test_name in self.datasets:
test_vectors, test_labels, _ = self.predict_dataset(cfg, test_name, trainer, self.class_names_used)
results_test = get_metrics(
embeddings=test_vectors,
labels=test_labels,
index_embeddings=index_vectors,
index_labels=index_labels,
ks=cfg.EVAL.CONTRASTIVE.TOP_KS)
else:
return
with open(cfg.OUTPUT_DIR + f'/metrics_test{suffix}.json', 'w') as stream:
json.dump(results_test, stream, indent=2)
if self.log_metrics_aif:
try:
from ai_core_sdk.tracking import Tracking
except ImportError:
logger.warn("AI Core Tracking Module not found!")
else:
from sap_computer_vision.engine.trainers import AIFLogging
tracking_module = Tracking()
formatted_metrics = AIFLogging.format_metrics(results_test, step=trainer.iter, labels=[{'name': 'data_split', 'value': 'test'}])
tracking_module.log_metrics(metrics=formatted_metrics)
@staticmethod
def prepare_input_data(base_dir, class_names=None, img_extensions=None, seed=None, **dataset_inputs):
f'''This function interprets the \'train\', \'test\', \'validation\' parameters.
To register a dataset in detectron a lightweight version (list of dicts) of the dataset has to be created.
The folder in which the images are located is used as the label for each image.
So there must be a subfolder in the input artifact for each class.
For the \'train\', \'test\', \'validation\' parameters in this pipeline different options are supported.
The input data is provided as an input artifact and is copied to {DATA_INPUT_DIR}. The parameters for the
different datasets can either be a subfolder within the input directory, a txt-file containing the filenames
or a float between zero and one.
Examples
---------
1.)
Structure of the input artifact:
input_artifact/
train/
class_1/
img1.jpg
img2.jpg
class_2/
...
...
val/
class_1/
img231.jpg
img2123.jpg
class_2/
...
...
test/
class_1/
img3213.jpg
img32231.jpg
class_2/
...
...
Parameter values:
\'train\'='train'
\'validation\'='val'
\'test\'='test'
In this example the values of the parameters are relative paths to subfolders
located in the input artifact.
The files in the subfolders are used for the corresponding dataset.
2.)
Structure of the input artifact:
input_artifact/
class_1/
img1.jpg
img2.jpg
class_2/
...
...
splits/
train.txt
val.txt
test.txt
Parameter values:
\'train\'='splits/train.txt'
\'validation\'='splits/val.txt'
\'test\'='splits/test.txt'
In this example the values of the parameters are paths to txt files within the input artifact.
The txt-files are expected to contain the file names (one file name per line, with or without file extension)
for the datasets.
3.)
Structure of the input artifact:
class_1/
img1.jpg
img2.jpg
class_2/
...
...
Parameter values:
\'train\'=0.8
\'validation\'=0.2
\'test\'=''
In this example the values of the parameters are float values and '' for the test parameter.
The numbers indicate which part of the files in the images/annotation folders should be used
for the dataset. In the result folder of the pipeline a txt-file for each split will be placed.
\'validation\' and \'test\' can be an empty string in all cases. If they are set
to an empty string no evaluation/test is performed during the training.
4.)
Structure of the input artifact:
input_artifact/
train.zip /
class_1/
img1.jpg
img2.jpg
class_2/
...
...
val.zip
class_1/
img231.jpg
img2123.jpg
class_2/
...
...
test.zip/
class_1/
img3213.jpg
img32231.jpg
class_2/
...
...
Parameter values:
\'train\'='train.zip'
\'validation\'='val.zip'
\'test\'='test.zip'
'''
if img_extensions is None:
img_extensions = ['*.jpg', '*.jpeg']
base_dir = pathlib.Path(base_dir)
logger = logging.getLogger(__name__)
logger.info(f"Preparing datsets:")
@lru_cache(None)
def _prepare_input_data(input):
input_data = None
if input != '' and input is not None:
input_data = base_dir / str(input)
if input_data.exists() and input_data.is_dir():
pass
elif input_data.exists() and input_data.is_file():
if input_data.suffix == '.zip':
with zipfile.ZipFile(input_data, 'r') as zip_ref:
input_data_extracted = input_data.with_suffix('')
zip_ref.extractall(input_data_extracted)
os.remove(input_data)
input_data = input_data_extracted
else:
try:
input_data = float(input)
except ValueError:
input_data = None
return input_data
for name, value in dataset_inputs.items():
prepared = _prepare_input_data(value)
dataset_inputs[name] = prepared
logger.info(f"Parameter '{name}'={value} -> {prepared}")
primary_parameter = next(iter(dataset_inputs.values()))
if isinstance(primary_parameter, pathlib.Path):
datasets = {}
for n, input_ in dataset_inputs.items():
if isinstance(input_, pathlib.Path):
datasets[n], class_names = imgf.register(n,
class_names=class_names,
extensions=img_extensions,
base_dir=(base_dir if input_.is_file() else input_),
filenames=(input_ if input_.is_file() else None))
elif isinstance(primary_parameter, float):
splits = {n: v for n, v in dataset_inputs.items() if isinstance(v, float)}
datasets, class_names = imgf.split_and_register('',
base_dir=base_dir,
splits=splits,
extensions=img_extensions,
rnd_gen=seed)
for n, file_ids in datasets.items():
with (pathlib.Path(MODEL_OUTPUT_DIR) / f'{n}.txt').open('w') as stream:
stream.write('\n'.join([str(f) for f in file_ids]))
else:
raise ValueError(f'Invalid input for train data! Input artifact content: {os.listdir(base_dir)}')
logger.info(f"Successfully registed datasets:")
for n, v in datasets.items():
logger.info(f"{n} with {len(v)} examples")
return datasets, class_names
def get_train_cfg(self, datasets):
''' This function prepares the training config.'''
cfg = get_cfg()
cfg.merge_from_file(get_config_file('Base-EarlyStopping'))
cfg.merge_from_file(get_config_file('Base-Evaluation'))
cfg.merge_from_file(get_config_file(self.model_name))
cfg.SEED = int(self.seed)
cfg.OUTPUT_DIR = str(MODEL_OUTPUT_DIR)
cfg.SOLVER.MAX_ITER = int(self.n_steps)
cfg.SOLVER.BASE_LR = float(self.base_lr)
cfg.SOLVER.GAMMA = float(np.sqrt(0.1))
cfg.SOLVER.IMS_PER_BATCH = int(self.batch_size)
cfg.SOLVER.WARMUP_ITERS = max(int(0.01 * cfg.SOLVER.MAX_ITER), 0)
if isinstance(self.lr_decay, Iterable):
cfg.SOLVER.STEPS = [cfg.SOLVER.MAX_ITER * p for p in self.lr_decay]
elif self.lr_decay:
cfg.SOLVER.STEPS = [cfg.SOLVER.MAX_ITER * p for p in (0.5, 0.75, 0.9)]
else:
cfg.SOLVER.STEPS = []
cfg.SOLVER.EARLY_STOPPING.ENABLED = bool(self.early_stopping)
cfg.EVAL.LOG_METRICS = bool(self.log_metrics_aif)
for aug in ['RANDOM_LIGHTING', 'RANDOM_BRIGHTNESS', 'RANDOM_SATURATION', 'RANDOM_CONTRAST', 'RANDOM_ROTATION', 'CROP', 'CUT_OUT']:
if cfg.INPUT.get(aug, None) is not None:
cfg.INPUT[aug].ENABLED = self.additional_augmentations
cfg.MODEL.TRIPLET_DISTANCE_LEARNER.MARGIN_LOSS.MARGIN = float(self.margin)
cfg.MODEL.TRIPLET_DISTANCE_LEARNER.CIRCLE_LOSS.MARGIN = float(self.margin)
cfg.MODEL.TRIPLET_DISTANCE_LEARNER.LOSS = self.loss
cfg.MODEL.FEATURE_EXTRACTION.PROJECTION_SIZE = None if int(self.embedding_dim) <= 0 else int(self.embedding_dim)
cfg.MODEL.FEATURE_EXTRACTION.INTERMEDIATE_SIZE = self.intermediate_layers
cfg.MODEL.FEATURE_EXTRACTION.FREEZE_BACKBONE = bool(self.freeze_backbone)
sampler = 'PKSampler' if self.loss.upper() == 'CIRCLE_LOSS' else self.sampler
if sampler.lower() == 'PKSampler'.lower():
cfg.DATALOADER.SAMPLER_TRAIN = 'PKSampler'
cfg.MODEL.TRIPLET_DISTANCE_LEARNER.TRIPLET_STRATEGY = ('*', '*')
delay = (cfg.SOLVER.MAX_ITER * 0.5)
strategies_pos = np.linspace(0.5, 0.8, 21)
strategies_neg = 1. - strategies_pos
strategies = [(float(p), float(n)) for p, n in zip(strategies_pos, strategies_neg)]
switch_steps = np.linspace(delay, cfg.SOLVER.MAX_ITER, len(strategies)+1)[:-1]
cfg.DATALOADER.PK_SAMPLER.STRATEGY_SWITCHES = [(int(step), strat) for (step, strat) in zip(switch_steps, strategies)]
cfg.DATALOADER.PK_SAMPLER.P_CLASSES_PER_BATCH = int(self.p_classes_per_batch)
cfg.DATALOADER.PK_SAMPLER.K_EXAMPLES_PER_CLASS = int(self.k_examples_per_class)
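# Note (derived from the parameter help above): with PKSampler each batch contains
# P * K images (p_classes_per_batch * k_examples_per_class), so these two values
# determine the effective batch size.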
else:
raise NotImplementedError
train_name = f'train'
validation_name = f'validation'
cfg.DATASETS.TRAIN = (train_name, )
if validation_name in datasets.keys():
cfg.DATASETS.TEST = (validation_name, )
if self.eval_frequency > 1:
cfg.TEST.EVAL_PERIOD = int(self.eval_frequency)
else:
cfg.TEST.EVAL_PERIOD = int(cfg.SOLVER.MAX_ITER * float(self.eval_frequency))
else:
cfg.DATASETS.TEST = None
cfg.TEST.EVAL_PERIOD = -1
cfg.EVAL.CONTRASTIVE.TOP_KS = [int(k) for k in self.top_k]
cfg.set_new_allowed(True)
cfg.TRAINING_INFO = CfgNode()
cfg.TRAINING_INFO.TASK = 'TRIPLET_DISTANCE_MERIC_LEARNING'
cfg.freeze()
return cfg
if __name__ == '__main__':
TripletDistanceMetricTrain()
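# --- Editor's note: hedged sketch, not part of the original module. ---------
# The PKSampler branch in `get_train_cfg` above builds a schedule that slowly
# hardens the triplet strategy during the second half of training. The helper
# below is a hypothetical, self-contained rewrite of that schedule logic so the
# resulting (step, (pos_strategy, neg_strategy)) pairs can be inspected alone.
def _sketch_strategy_switch_schedule(max_iter=3000):
    import numpy as np
    delay = max_iter * 0.5
    strategies_pos = np.linspace(0.5, 0.8, 21)       # positive strategy gets harder over time
    strategies_neg = 1. - strategies_pos             # negative strategy mirrors it
    strategies = [(float(p), float(n)) for p, n in zip(strategies_pos, strategies_neg)]
    switch_steps = np.linspace(delay, max_iter, len(strategies) + 1)[:-1]
    return [(int(step), strat) for step, strat in zip(switch_steps, strategies)]
# e.g. _sketch_strategy_switch_schedule(3000)[0] == (1500, (0.5, 0.5))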
# --- end of file: /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/pipelines/batch_pipelines/contrastive_learning_triplet_loss.py (source: pypi) ---
import pathlib
import json
import zipfile
import os
from functools import lru_cache
import logging
from metaflow import FlowSpec, step, argo, Parameter, JSONType
from detectron2.config import CfgNode
import sap_computer_vision.datasets.pascal_voc_style as pvs
import sap_computer_vision.datasets.utils as utils
from sap_computer_vision import get_cfg, get_config_file, setup_loggers
from sap_computer_vision.evaluators import ObjectDetectionEvaluator
from sap_computer_vision.engine import ObjectDetectionTrainer
MODEL_OUTPUT_DIR = pathlib.Path('/tmp/model')
DATA_INPUT_DIR = pathlib.Path('/tmp/datain')
class Trainer(ObjectDetectionTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name):
if cfg.TEST.EVAL_PERIOD <= 0:
raise NotImplementedError
else:
return ObjectDetectionEvaluator(cfg, dataset_names=(dataset_name, )) # pylint: disable=E1124,E1125
class ObjectDetectionTrain(FlowSpec):
"""Pipeline to train a model for object detection.
"""
model_name = Parameter("model_name",
help="Name of the Model configuration file",
default="COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
train_input = Parameter("train",
help=f"Name of the file or folder within {DATA_INPUT_DIR} containing filenames of the train dataset. If {DATA_INPUT_DIR} " + \
"contain an \'images\' and \'annotations\' folder it can also be a float. This float is the ratio of images included in the train dataset",
default='train.txt')
validation_input = Parameter("validation",
help=f"Name of the file or folder within {DATA_INPUT_DIR} containing filenames of the validation dataset. If {DATA_INPUT_DIR} " + \
"contain an \'images\' and \'annotations\' folder it can also be a float. This float is the ratio of images included in the train dataset. " + \
"For this to work \'train\' has to be a float as well.",
default="val.txt")
test_input = Parameter("test",
help=f"Name of the file or folder within {DATA_INPUT_DIR} containing filenames of the test dataset. If {DATA_INPUT_DIR} " + \
"contain an \'images\' and \'annotations\' folder it can also be a float. This float is the ratio of images included in the train dataset." ,
default="test.txt")
class_names = Parameter("class_names",
help="Json encoded list of class names. If empty or not valid json. Classes will " + \
"extracted from the train/val/test datasets.",
type=JSONType,
default=json.dumps([]))
box_mode = Parameter("box_mode",
help="Format of the boxes in the annotation files.",
default="XYXY_ABS")
batch_size = Parameter("batch_size",
help="Number of images per batch.",
default=12)
lr_decay = Parameter("learning_rate_decay",
help="Whether learning rate should be decreased over the training.",
default=True)
early_stopping = Parameter("early_stopping",
help="Whether early stopping should be active.",
default=True)
additional_augmentations = Parameter("additional_augmentations",
help="Whether as additional data augmentations like cropping, random saturation, " + \
"random lighting, random brightness and random contrast should be done.",
default=True)
n_steps = Parameter("maximum_training_steps",
help="Maximum number of training steps. Actual training steps could be less if `early_stopping` is enabled.",
default=3000)
eval_freq = Parameter("evaluation_frequency",
help="Frequency of evaluation. If > 1 it is treated as every `evaluation_frequency` steps. " + \
"If < 1 an evaluation in done every `evaluation_frequency` * `maximum_training_steps` steps",
default=0.1)
base_lr = Parameter("base_learning_rate",
help="Base learning rate.",
default=0.00025)
seed = Parameter("seed",
help="Random seed.",
default=1337)
log_metrics_aif = Parameter("aicore_tracking",
help="Whether the evaluator should log the metrics on AI Core, so you can track your pipeline execution on AI Core",
type=bool,
default=False)
imgtypes = Parameter("image_types",
help="JSON encoded list of expected file extensions for images",
type=JSONType,
default=json.dumps([".jpg", ".jpeg", ".png"]))
@argo(output_artifacts=[{'name': 'trainedmodel',
'globalName': 'trainedmodel',
'path': str(MODEL_OUTPUT_DIR),
'archive': {'none': {}}}],
input_artifacts=[{'name': 'datain',
'path': str(DATA_INPUT_DIR)}],
labels={"ai.sap.com/resourcePlan": "train.l"},
shared_memory=1000)
@step
def start(self):
"""In this step the model is trained.
"""
logger = setup_loggers(str(MODEL_OUTPUT_DIR), color=False, additional_loggers=[__name__])
self.eval_frequency = float(self.eval_freq)
class_names = None if len(self.class_names) == 0 else self.class_names
img_extensions = utils.check_extensions(self.imgtypes)
self.datasets, self.class_names_used = self.prepare_input_data(DATA_INPUT_DIR,
train=self.train_input,
validation=self.validation_input,
test=self.test_input,
class_names=class_names,
img_extensions=img_extensions,
seed=int(self.seed))
cfg = self.get_train_cfg(self.datasets, self.class_names_used)
module_output_path = pathlib.Path(MODEL_OUTPUT_DIR)
module_output_path.mkdir(parents=True, exist_ok=True)
with (module_output_path / 'used_config.yaml').open('w') as stream:
stream.write(cfg.dump())
trainer = Trainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
test_name = f'test'
if test_name in self.datasets:
cfg.defrost()
cfg.DATASETS.TEST = (test_name,)
results_test = trainer.test(cfg, trainer.model) # # pylint: disable=E1101
with open(cfg.OUTPUT_DIR + '/metrics_test.json', 'w') as stream:
json.dump(results_test, stream, indent=2)
if self.log_metrics_aif:
try:
from ai_core_sdk.tracking import Tracking
except ImportError:
logger.warn("AI Core Tracking Module not found!")
else:
from sap_computer_vision.engine.trainers import AIFLogging
tracking_module = Tracking()
formatted_metrics = AIFLogging.format_metrics(results_test, step=trainer.iter, labels=[{'name': 'data_split', 'value': 'test'}])
tracking_module.log_metrics(metrics=formatted_metrics)
self.next(self.end)
@step
def end(self):
"""Currently this step is empty, but it is added because
metaflow DAGs always need a \'start\' and \'end\' step.
"""
pass
@staticmethod
def prepare_input_data(base_dir, class_names=None, img_extensions=None, seed=None, **dataset_inputs):
f'''This function interprets the \'train\', \'test\', \'validation\' parameters.
To register a dataset in detectron a lightweight version (list of dicts) of the dataset has to be created.
Currently this pipeline supports only datasets in the "Pascal VOC" format. Data in the "Pascal VOC" format
        consists of two folders "images" and "annotations". The image folder contains the images and the annotations
        folder the XML files. The XMLs contain the bounding boxes and labels for each image. The name of the image and its
        corresponding XML file should be identical except for the file ending.
For the \'train\', \'test\', \'validation\' parameters in this pipeline different options are supported.
        The input data is provided as an input artifact and is copied to {DATA_INPUT_DIR}. The parameters for the
        different datasets can be either a subfolder within the input directory, a txt-file containing the filenames
or a float between zero and 1.
Examples
---------
1.)
Structure of the input artifact:
input_artifact/
train/
images/
                annotations/
val/
images/
                annotations/
test/
images/
                annotations/
Parameter values:
\'train\'='train'
\'validation\'='val'
\'test\'='test'
In this example the values of the parameters are paths to subfolders in the input artifact.
The files in the subfolders are used for the corresponding dataset.
2.)
Structure of the input artifact:
input_artifact/
images/
            annotations/
splits/
train.txt
val.txt
test.txt
Parameter values:
\'train\'='splits/train.txt'
\'validation\'='splits/val.txt'
\'test\'='splits/test.txt'
        In this example the values of the parameters are paths to txt files within the input artifact.
        The txt-files are expected to contain the file names (one file name per line, with or without file ending)
for the datasets.
3.)
Structure of the input artifact:
input_artifact/
images/
            annotations/
Parameter values:
\'train\'=0.8
\'validation\'=0.2
\'test\'=''
In this example the values of the parameters are float values and '' for the test parameter.
The numbers indicate which part of the files in the images/annotation folders should be used
for the dataset. In the result folder of the pipeline a txt-file for each split will be placed.
        \'validation\' and \'test\' can be an empty string in all cases. If they are set
to an empty string no evaluation/test is performed during the training.
'''
if img_extensions is None:
img_extensions = ['*.jpg', '*.jpeg']
base_dir = pathlib.Path(base_dir)
logger = logging.getLogger(__name__)
logger.info(f"Preparing datsets:")
@lru_cache(None)
def _prepare_input_data(input):
input_data = None
if input != '' and input is not None:
input_data = base_dir / str(input)
if input_data.exists() and input_data.is_dir():
pass
elif input_data.exists() and input_data.is_file():
if input_data.suffix == '.zip':
with zipfile.ZipFile(input_data, 'r') as zip_ref:
input_data_extracted = input_data.with_suffix('')
zip_ref.extractall(input_data_extracted)
os.remove(input_data)
input_data = input_data_extracted
else:
try:
input_data = float(input)
except ValueError:
input_data = None
return input_data
for name, value in dataset_inputs.items():
prepared = _prepare_input_data(value)
dataset_inputs[name] = prepared
logger.info(f"Parameter '{name}'={value} -> {prepared}")
primary_parameter = next(iter(dataset_inputs.values()))
if isinstance(primary_parameter, pathlib.Path):
datasets = {}
for n, input_ in dataset_inputs.items():
if isinstance(input_, pathlib.Path):
datasets[n], class_names = pvs.register(n,
*(pvs.find_folders(input_.parent) if input_.is_file() else pvs.find_folders(input_)),
filenames=input_ if input_.is_file() else None,
class_names=class_names,
extensions=img_extensions)
elif isinstance(primary_parameter, float):
img_dir, xml_dir = pvs.find_folders(base_dir)
splits = {n: v for n, v in dataset_inputs.items() if isinstance(v, float)}
datasets, class_names = pvs.split_and_register('',
img_dir=img_dir,
xml_dir=xml_dir,
splits=splits,
extensions=img_extensions,
rnd_gen=seed)
for n, file_ids in datasets.items():
with (pathlib.Path(MODEL_OUTPUT_DIR) / f'{n}.txt').open('w') as stream:
stream.write('\n'.join([str(f) for f in file_ids]))
else:
            raise ValueError('Invalid input for train data. Please provide a path to a file containing image names, '
                             f'a path to a folder containing images, or a float to split the images under \'{base_dir}/images\'.')
for n, v in datasets.items():
if not isinstance(v, pathlib.Path):
logger.info(f"{n} with {len(v)} examples")
else:
logger.info(f"{n} from {v}")
return datasets, class_names
def get_train_cfg(self, datasets, class_names):
''' This function prepares the training config.'''
cfg = get_cfg()
cfg.merge_from_file(get_config_file('Base-EarlyStopping'))
cfg.merge_from_file(get_config_file('Base-Evaluation'))
cfg.merge_from_file(get_config_file(self.model_name))
cfg.SEED = int(self.seed)
cfg.SOLVER.EARLY_STOPPING.ENABLED = bool(self.early_stopping)
cfg.OUTPUT_DIR = str(MODEL_OUTPUT_DIR)
cfg.SOLVER.MAX_ITER = int(self.n_steps)
cfg.SOLVER.BASE_LR = float(self.base_lr)
cfg.SOLVER.IMS_PER_BATCH = int(self.batch_size)
cfg.SOLVER.WARMUP_ITERS = max(int(0.01 * cfg.SOLVER.MAX_ITER), 0)
if bool(self.lr_decay):
cfg.SOLVER.STEPS = [cfg.SOLVER.MAX_ITER * p for p in (0.5, 0.75, 0.9)]
else:
cfg.SOLVER.STEPS = []
train_name = 'train'
validation_name = 'validation'
cfg.DATASETS.TRAIN = (train_name, )
if validation_name in datasets.keys():
cfg.DATASETS.TEST = (validation_name, )
if self.eval_frequency > 1:
cfg.TEST.EVAL_PERIOD = int(self.eval_frequency)
else:
cfg.TEST.EVAL_PERIOD = int(cfg.SOLVER.MAX_ITER * float(self.eval_frequency))
else:
cfg.DATASETS.TEST = None
cfg.TEST.EVAL_PERIOD = -1
cfg.set_new_allowed(True)
cfg.TRAINING_INFO = CfgNode()
cfg.TRAINING_INFO.THING_CLASSES = class_names
cfg.TRAINING_INFO.TASK = 'OBJECT_DETECTION'
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(class_names)
cfg.EVAL.LOG_METRICS = bool(self.log_metrics_aif)
for aug in ['RANDOM_LIGHTING', 'RANDOM_BRIGHTNESS', 'RANDOM_SATURATION', 'RANDOM_CONTRAST', 'RANDOM_ROTATION', 'CROP']:
if cfg.INPUT.get(aug, None) is not None:
cfg.INPUT[aug].ENABLED = self.additional_augmentations
cfg.freeze()
return cfg
if __name__ == '__main__':
ObjectDetectionTrain()
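# --- Editor's note: hedged sketch, not part of the original module. ---------
# `get_train_cfg` above interprets `evaluation_frequency` either as an absolute
# step count (> 1) or as a fraction of the maximum number of training steps
# (<= 1). The helper name below is hypothetical; it only isolates that rule.
def _sketch_resolve_eval_period(eval_frequency, max_iter):
    if eval_frequency > 1:
        return int(eval_frequency)                    # evaluate every `eval_frequency` steps
    return int(max_iter * float(eval_frequency))      # evaluate every fraction of max_iter
# e.g. _sketch_resolve_eval_period(0.1, 3000) == 300; _sketch_resolve_eval_period(500, 3000) == 500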
# --- end of file: /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/pipelines/batch_pipelines/object_detection_train.py (source: pypi) ---
import logging
from typing import Dict, Iterable, Union, Callable
import numpy as np
import torch.utils.data as torchdata
import detectron2
from detectron2.config import configurable, CfgNode
from detectron2.data.samplers import InferenceSampler
from detectron2.data.build import trivial_batch_collator, _test_loader_from_config, worker_init_reset_seed
from detectron2.data.samplers import (
RepeatFactorTrainingSampler,
TrainingSampler
)
from torch.utils.data import DataLoader, Dataset, Sampler
try:
from detectron2.data.samplers.distributed_sampler import RandomSubsetTrainingSampler
except ImportError:
RandomSubsetTrainingSampler = None
from detectron2.data import DatasetMapper
from detectron2.data.common import MapDataset, DatasetFromList
from detectron2.data import transforms as T
from detectron2.data.build import build_batch_data_loader
from sap_computer_vision.data.image_classification import DatasetMapperClassification, get_classification_dataset_dicts
from sap_computer_vision.data.samplers import PKTripletSampler, TripletTrainSampler, TripletReservoirSampler, triplet_collator_fn
from .augs import build_augmentations, generate_aug_cfg_node
def _test_loader_from_config_batched(cfg, *args, **kwargs):
return {**_test_loader_from_config(cfg, *args, **kwargs),
"batch_size": cfg.SOLVER.get('IMS_PER_BATCH_EVAL', cfg.SOLVER.IMS_PER_BATCH)}
@configurable(from_config=_test_loader_from_config_batched)
def build_detection_test_loader_batched(dataset, *, mapper, sampler=None, num_workers=0, batch_size=1) -> DataLoader:
"""
    Similar to `build_detection_train_loader`, but uses a configurable batch size
    (``IMS_PER_BATCH_EVAL``, falling back to ``IMS_PER_BATCH``, when built from a config)
    and :class:`InferenceSampler`. This sampler coordinates all workers to
    produce the exact set of all samples.
This interface is experimental.
Parameters
----------
dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
or a map-style pytorch dataset. They can be obtained by using
:func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
mapper: callable
a callable which takes a sample (dict) from dataset
and returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
    sampler: torch.utils.data.sampler.Sampler or None, optional, default=None
A sampler that produces
indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
which splits the dataset across all workers.
num_workers: int, optional, default=0:
Number of parallel data loading workers
batch_size: int, optional, default=1
        Batch size.
Returns
----------
torch.DataLoader
A torch DataLoader, that loads the given detection
dataset, with test-time transformation and batching.
    Examples
    --------
    ::
data_loader = build_detection_test_loader(
DatasetRegistry.get("my_test"),
mapper=DatasetMapper(...))
# or, instantiate with a CfgNode:
data_loader = build_detection_test_loader(cfg, "my_test")
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = InferenceSampler(len(dataset))
batch_sampler = torchdata.sampler.BatchSampler(sampler, batch_size, drop_last=False)
data_loader = torchdata.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
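# --- Editor's note: hedged sketch, not part of the original module. ---------
# The wiring above (sampler -> BatchSampler -> DataLoader with a trivial
# collate function) can be reproduced with plain torch primitives on a toy
# dataset; the function name and toy data below are hypothetical.
def _sketch_batched_test_loader():
    import torch.utils.data as td
    data = list(range(10))
    sampler = td.SequentialSampler(data)                    # stands in for InferenceSampler
    batch_sampler = td.BatchSampler(sampler, batch_size=4, drop_last=False)
    loader = td.DataLoader(data, batch_sampler=batch_sampler,
                           collate_fn=lambda batch: batch)  # trivial collation: keep the list
    return [batch for batch in loader]                      # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]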
def _classification_train_loader_from_config(cfg: Dict,
mapper: Union[Callable, None]=None,
*,
dataset: Union[Dataset, None]=None,
sampler: Union[Sampler, None]=None,
triplet_excludes: Union[Dict, None]=None,
collate_fn: Union[None, Callable]=None) -> Dict:
"""Function to create classification/distance metric learning dataloader from cfg.
    This function is not intended to be used directly.
Check :func:`build_classification_train_loader` for all parameters not documented here.
Parameters
----------
triplet_excludes: None or dict, optional, default=None
Triplet exclude dict passed to sampler, if sampler supports it.
Check sap_computer_vision.data.samplers for more details on the different samplers.
        This parameter is meant for distance metric learning.
Returns
----------
dict
kwargs for :func:`build_classification_train_loader`
"""
if dataset is None:
dataset = get_classification_dataset_dicts(cfg.DATASETS.TRAIN)
if mapper is None:
mapper = DatasetMapperClassification(cfg, True) # pylint: disable=E1125, E1121
aspect_ratio_grouping = cfg.DATALOADER.ASPECT_RATIO_GROUPING
if sampler is None:
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
dataset, cfg.DATALOADER.REPEAT_THRESHOLD
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
elif sampler_name == "RandomSubsetTrainingSampler":
if RandomSubsetTrainingSampler is None:
                raise ImportError('To use `RandomSubsetTrainingSampler` detectron2 > 0.5.0 is needed.'
f' (installed: {detectron2.__version__}).')
else:
sampler = RandomSubsetTrainingSampler(len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO)
elif sampler_name == "PKSampler":
class_ids = np.array([d["class_id"] for d in dataset], dtype=int)
sampler = PKTripletSampler(p=cfg.DATALOADER.PK_SAMPLER.P_CLASSES_PER_BATCH,
k=cfg.DATALOADER.PK_SAMPLER.K_EXAMPLES_PER_CLASS,
class_ids=class_ids,
ignore_class_weights=cfg.DATALOADER.PK_SAMPLER.IGNORE_CLASS_WEIGHTS,
rng=cfg.SEED,
excludes=triplet_excludes,
return_batch=True,
infinite_stream=True)
aspect_ratio_grouping = False
collate_fn = trivial_batch_collator if collate_fn is None else collate_fn
elif sampler_name == "TripletSampler":
class_ids = np.array([d["class_id"] for d in dataset], dtype=int)
sampler = TripletTrainSampler(class_ids=class_ids,
n_triplets_per_batch=cfg.SOLVER.IMS_PER_BATCH,
rng=cfg.SEED,
excludes=triplet_excludes,
return_batch=True,
infinite_stream=True)
aspect_ratio_grouping = False
collate_fn = triplet_collator_fn if collate_fn is None else collate_fn
elif sampler_name == "TripletReservoirSampler":
class_ids = np.array([d["class_id"] for d in dataset], dtype=int)
sampler = TripletReservoirSampler(class_ids=class_ids,
n_triplets_per_batch=cfg.SOLVER.IMS_PER_BATCH,
k_examples_per_class=cfg.DATALOADER.TRIPLET_RESERVOIR_SAMPLER.K_EXAMPLES_PER_CLASS,
reservoir_of_n_batches=cfg.DATALOADER.TRIPLET_RESERVOIR_SAMPLER.N_BATCHES_PER_RESERVOIR,
refresh_after_n_batches=cfg.DATALOADER.TRIPLET_RESERVOIR_SAMPLER.REFRESH_EVERY_N_STEPS,
n_random_batches_start=cfg.DATALOADER.TRIPLET_RESERVOIR_SAMPLER.NUM_STEPS_RANDOM,
strategy=cfg.DATALOADER.TRIPLET_RESERVOIR_SAMPLER.TRIPLET_STRATEGY,
rng=cfg.SEED,
excludes=triplet_excludes,
return_batch=True,
infinite_stream=True)
aspect_ratio_grouping = False
collate_fn = triplet_collator_fn if collate_fn is None else collate_fn
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return {
"dataset": dataset,
"sampler": sampler,
"mapper": mapper,
"total_batch_size": sampler.batch_size if isinstance(sampler, PKTripletSampler) else cfg.SOLVER.IMS_PER_BATCH,
"aspect_ratio_grouping": aspect_ratio_grouping,
"collate_fn": collate_fn,
"num_workers": cfg.DATALOADER.NUM_WORKERS,
}
@configurable(from_config=_classification_train_loader_from_config)
def build_classification_train_loader(
dataset, *, mapper, total_batch_size, sampler=None, aspect_ratio_grouping=True, num_workers=0, collate_fn=None) -> DataLoader:
"""
Build a dataloader for image classification with some default features.
This interface is experimental.
Parameters
----------
dataset: list or torch.utils.data.Dataset
A list of dataset dicts,
or a pytorch dataset (either map-style or iterable). It can be obtained
by using :func:`DatasetCatalog.get` or :func:`get_class_dataset_dicts`.
mapper: callable
A callable which takes a sample (dict) from dataset and
returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
total_batch_size: int
total batch size across all workers. Batching
simply puts data into a list.
sampler: torch.utils.data.sampler.Sampler or None):
A sampler that produces indices to be applied on ``dataset``.
If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
which coordinates an infinite random shuffle sequence across all workers.
Sampler must be None if ``dataset`` is iterable.
    aspect_ratio_grouping: bool, optional, default=True
whether to group images with similar
aspect ratio for efficiency. When enabled, it requires each
element in dataset be a dict with keys "width" and "height".
num_workers: int
number of parallel data loading workers
collate_fn: None or callable
Collate functions passed to the Dataloader. If None a default collate function
depending on the selected Sampler will be used.
Returns
----------
torch.utils.data.DataLoader:
a dataloader. Each output from it is a ``list[mapped_element]`` of length
``total_batch_size / num_workers``, where ``mapped_element`` is produced
by the ``mapper``.
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if isinstance(dataset, torchdata.IterableDataset):
assert sampler is None, "sampler must be None if dataset is IterableDataset"
else:
if sampler is None:
sampler = TrainingSampler(len(dataset))
assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
if getattr(sampler, 'return_batch', False):
#dataset = ToIterableDataset(dataset, sampler)
return DataLoader(
dataset,
batch_sampler=sampler,
num_workers=num_workers,
collate_fn=collate_fn,
worker_init_fn=worker_init_reset_seed
)
else:
return build_batch_data_loader(
dataset,
sampler,
total_batch_size,
aspect_ratio_grouping=aspect_ratio_grouping,
num_workers=num_workers,
)
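# --- Editor's note: hedged sketch, not part of the original module. ---------
# When a sampler yields whole batches (`return_batch=True`, e.g. PKTripletSampler),
# `build_classification_train_loader` above passes it to the DataLoader as a
# `batch_sampler`. Any iterable of index lists behaves the same way, as this
# hypothetical toy example shows.
def _sketch_batch_sampler_dispatch():
    import torch.utils.data as td
    data = ['a', 'b', 'c', 'd']
    loader = td.DataLoader(data, batch_sampler=[[0, 1], [2, 3]],
                           collate_fn=lambda batch: batch)
    return [batch for batch in loader]    # [['a', 'b'], ['c', 'd']]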
def _classification_test_loader_from_config_batched(cfg, dataset_name, mapper=None):
"""
Uses the given `dataset_name` argument (instead of the names in cfg), because the
standard practice is to evaluate each test set individually (not combining them).
"""
if isinstance(dataset_name, str):
dataset_name = [dataset_name]
dataset = get_classification_dataset_dicts(dataset_name)
if mapper is None:
mapper = DatasetMapperClassification(cfg, False) # pylint: disable=E1125, E1121
return {"dataset": dataset,
"mapper": mapper,
"num_workers": cfg.DATALOADER.NUM_WORKERS,
"batch_size": cfg.SOLVER.get('IMS_PER_BATCH_EVAL', cfg.SOLVER.IMS_PER_BATCH)}
@configurable(from_config=_classification_test_loader_from_config_batched)
def build_classification_test_loader_batched(dataset, *, mapper, sampler=None, num_workers=0, batch_size=1):
"""
    Similar to `build_classification_train_loader`, but uses a configurable batch size
    (``IMS_PER_BATCH_EVAL``, falling back to ``IMS_PER_BATCH``, when built from a config)
    and :class:`InferenceSampler`. This sampler coordinates all workers to
    produce the exact set of all samples.
This interface is experimental.
Parameters
----------
dataset: list or torch.utils.data.Dataset
a list of dataset dicts,
or a map-style pytorch dataset. They can be obtained by using
:func:`DatasetCatalog.get` or :func:`get_classification_dataset_dicts`.
mapper: callable
a callable which takes a sample (dict) from dataset
and returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
sampler: torch.utils.data.sampler.Sampler or None
a sampler that produces
indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
which splits the dataset across all workers.
num_workers: int
number of parallel data loading workers
batch_size: int
        Batch size.
Returns
----------
DataLoader: a torch DataLoader
That loads the given classification
dataset, with test-time transformation and batching.
Examples:
::
data_loader = build_classification_test_loader(
DatasetRegistry.get("my_test"),
mapper=DatasetMapper(...))
# or, instantiate with a CfgNode:
data_loader = build_classification_test_loader(cfg, "my_test")
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = InferenceSampler(len(dataset))
batch_sampler = torchdata.sampler.BatchSampler(sampler, batch_size, drop_last=False)
data_loader = torchdata.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
class DatasetMapperWithAdditionalAugmentaions(DatasetMapper):
"""This DatasetMapper extends the default detectron2.data.DatasetMapper
    to accept a list of additional augmentations and to append a final
resize transformation to the list if cfg.INPUT.FIXED_IMAGE_SIZE is set.
"""
@classmethod
def from_config(cls,
cfg: 'CfgNode',
is_train: bool = True,
additional_augs_orignal_image: Iterable['T.Transform']=None,
additional_augs_resized_image: Iterable['T.Transform']=None,
) -> Dict:
"""Classmethod to create an instance based on the config.
Check detectron configs mechanism.
Parameters
----------
cfg: CfgNode
Config
        is_train: bool, optional, default=True
            Indicator if the DatasetMapper is used for training.
            This enables the training-time augmentations.
        additional_augs_orignal_image: None or Iterable of T.Transform
            Optional list of additional augmentations. Those augmentations are applied before
            the image is resized to its final size.
        additional_augs_resized_image: None or Iterable of T.Transform
            Optional list of additional augmentations. Those augmentations are applied after
            the image is resized to its final size.
Returns
-------
dict
Dict with the relevant kwargs. This dict can be consumed by the
__init__ function.
"""
ret = super(DatasetMapperWithAdditionalAugmentaions, cls).from_config(cfg, is_train)
cfg_augs = generate_aug_cfg_node(cfg, cfg.INPUT if is_train else CfgNode({}), is_train)
ret['augmentations'] = build_augmentations(cfg_augs, cfg.INPUT.FORMAT)
if additional_augs_orignal_image is not None:
ret['augmentations'] = [*additional_augs_orignal_image] + ret['augmentations']
if additional_augs_resized_image is not None:
ret['augmentations'].extend(additional_augs_resized_image)
return ret
# --- end of file: /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/data/data_build.py (source: pypi) ---
from dataclasses import dataclass
from itertools import combinations, permutations
from copy import deepcopy
from typing import Callable, Iterable, Union, Tuple, Dict, Set
import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import skewnorm
import torch
from torch import nn
from functools import lru_cache
class TripletGenerator:
"""Class to generate (anchor, positive_example, negative_example) triplets.
This class supports various ways to generate triplets usable for learning with triplet loss.
"""
@dataclass
class Subset:
"""Class representing a subset of TripletGenerator.data.
        Normally instances of the class are generated through TripletGenerator.sample_subset(...).
Attributes
----------
base_set: TripletGenerator
The TripletGenerator from which the subset was sampled.
        indices: np.array of ints
Indices of the subset examples
example_vectors: np.array or None, optional, default=None
Feature vectors of the examples
"""
base_set: "TripletGenerator"
indices: np.ndarray
example_vectors: np.ndarray = None
def build_maxmin_triplets(self, metric: Union[str, Callable]='euclidean', **kwargs) -> Tuple['np.array', 'np.array', 'np.array']:
"""Build the hardest triplet in the dataset.
For the hardest triplets the distance between anchor and the positive example
is maximized and the distance between anchor and the negative example is minized.
To calculate the distance scipy.distance.pdist is used. See its documentation for details.
Parameters
----------
metric: str or callable
Metric used for the pdist call.
Check scipy.distance.pdist documentation
**kwargs
All keywords are passed to pdist call.
Returns
-------
np.array
Anchor indices
np.array
Positive example indices
np.array
Negative example indices
"""
if self.example_vectors is None:
raise ValueError('Please provide example_vectors through the \'set_example_vectors\' functions!')
dist_matrix = pdist(self.example_vectors, metric=metric, **kwargs)
pos, neg, pos_masks = [], [], {}
class_ids_set = self.class_ids
for idx in range(len(self.indices)):
indices_pos, indices_feat_pos = _get_indices_pdist_matrix_condensed(class_ids_set, idx, pos_masks, same_class=True, use_torch=False)
pos.append(self.indices[indices_feat_pos[np.argmax(dist_matrix[indices_pos])]])
indices_neg, indices_feat_neg = _get_indices_pdist_matrix_condensed(class_ids_set, idx, pos_masks, same_class=False, use_torch=False)
neg.append(self.indices[indices_feat_neg[np.argmin(dist_matrix[indices_neg])]])
return self.indices, np.array(pos), np.array(neg)
@property
def class_ids(self) -> 'np.array':
return self.base_set.class_ids[self.indices]
def build_all_possible_triplets(self, anchor_pos_combinations_unique=False) -> Tuple['np.array', 'np.array', 'np.array']:
"""Build all possible triplets.
Parameters
----------
            anchor_pos_combinations_unique: bool, optional, default=False
                Whether (anchor, pos) pairs should be unique.
                If True, only (1, 2, 3) is returned.
                If False, both (1, 2, 3) and (2, 1, 3) are returned.
Returns
-------
np.array
Anchor indices
np.array
Positive example indices
np.array
Negative example indices
"""
class_ids_set = self.class_ids
class_ids_unique = np.unique(class_ids_set)
triplets = []
for class_id in class_ids_unique:
mask = class_ids_set == class_id
indices_of_class = np.where(mask)[0]
indices_of_not_class = np.where(np.logical_not(mask))[0]
if anchor_pos_combinations_unique:
ap_pairs = combinations(indices_of_class, 2)
else:
ap_pairs = permutations(indices_of_class, 2)
triplets.extend([(*pair, idx_neg) for pair in ap_pairs for idx_neg in indices_of_not_class])
triplets = np.array(triplets)
return self.indices[triplets[:, 0]], self.indices[triplets[:, 1]], self.indices[triplets[:, 2]]
def build_random_triplets(self, size, replace=False) -> Tuple['np.array', 'np.array', 'np.array']:
"""Build uniform random triplets
Parameters
----------
size: int
Number of triplets.
replace: bool, optional, default=False
Sample anchors with replacement.
Returns
-------
np.array
Anchor indices
np.array
Positive example indices
np.array
Negative example indices
"""
class_ids_set = self.class_ids
anchors, pos, neg = self.base_set.build_random_triplets_for_ids(
class_ids_set,
excludes=self.base_set.excludes,
size=size,
rng=self.base_set.rng,
replace=replace)
return self.indices[anchors], self.indices[pos], self.indices[neg]
def build_triplets_accourding_to_strategy(self, strategy, dist_norm=2.) -> Tuple['np.array', 'np.array', 'np.array']:
"""Build using 'create_triplets_from_pk_sample'.
This function is also used inside of the TripletDistanceLearner and supports different
strategy. Check documentation for the function for more details.
Parameters
----------
strategy: length 2 tuple of strs or floats between 0. and 1.
Tuple of strategies (pos_strategy, neg_strategy) used for sampling the positive and
negative examples. Supported strategies are '*', 'rand', 'min', 'max' or floats
between 0. and 1.
dist_norm: float, optional, default=2
Norm used to calculate the distance.
Check torch.nn.functional.pdist for details.
Returns
-------
np.array
Anchor indices
np.array
Positive example indices
np.array
Negative example indices
"""
strategy = build_triplet_strategy(strategy)
triplets = create_triplets_from_pk_sample(torch.from_numpy(self.example_vectors),
torch.from_numpy(self.class_ids),
*strategy,
dist_norm=dist_norm)
triplets = [i.numpy() for i in triplets]
return self.indices[triplets[0]], self.indices[triplets[1]], self.indices[triplets[2]]
def set_example_vectors(self, example_vectors):
if example_vectors is not None and len(example_vectors) != len(self.indices):
                raise ValueError('The provided example_vectors is not of the same length as the examples in the subset!')
self.example_vectors = example_vectors
def __init__(self,
class_ids: Iterable[int],
excludes: Union[None, Dict[int, Set]]=None,
rng: Union[None, int, 'np.random.Generator']=None,
example_vectors: Union[None, 'np.array']=None,
global_indices: Union[None, Iterable[int]]=None,
ignore_class_weights=False):
"""Create instance of TripletGenerator.
Parameters
----------
class_ids: Iterable of ints castable to np.array
Class ids
excludes: None or dict, optional, default=None
Triplet exclude dict passed to sampler, if sampler supports it.
Check sap_computer_vision.data.samplers for more details on the different samplers.
This parameters is meant for distance metric learning.
        rng: None or int or np.random.Generator, optional, default=None
            Seed for the np.random.Generator or a np.random.Generator.
            Check https://numpy.org/doc/stable/reference/random/generator.html
            for more info
example_vectors: None or np.array, optional, default=None
Features vectors for all examples. Has to be of same length as
class_ids.
        global_indices: None or np.array, optional, default=None
            Indices mapping.
            If None, the first entry of class_ids is returned as index 0, etc.
Has to be of same length as class_ids.
"""
self.class_ids = np.asarray(class_ids)
self.classes, self.class_counts = np.unique(class_ids, return_counts=True)
self.ignore_class_weights = ignore_class_weights
self.use_excludes = excludes is not None
self.excludes, self.pos_masks_classes, self.pos_example_mask = self._prepare_excludes(class_ids, excludes=excludes)
self.neg_masks_classes = {}
if global_indices is not None:
global_indices = np.asarray(global_indices)
self.global_indices = global_indices
if example_vectors is not None:
example_vectors = np.asarray(example_vectors)
self.example_vectors = example_vectors
if not isinstance(rng, np.random.Generator):
if isinstance(rng, int) and rng < 0:
rng = None
rng = np.random.default_rng(seed=rng)
self.rng = rng
def _adjust_class_weights(self, new_class, class_weights=None):
if class_weights is None:
class_weights = self.class_counts.copy()
excluded_classes = self.excludes.get(new_class, set()).union(set([new_class]))
class_weights_mask = [c_i in excluded_classes for c_i in self.classes]
w_excluded = np.sum(class_weights[class_weights_mask])
if w_excluded == np.sum(class_weights):
raise ValueError('Excludes led to 0 remaining classes! Please relax the excludes!')
class_weights[class_weights_mask] = 0.
class_weights /= (1 - w_excluded)
return class_weights
def sample_subset(self, p: int, k: int, ignore_class_weights: bool=False) -> 'TripletGenerator.Subset':
"""Sample a subset containing p classes with k examples each.
Parameters
----------
p: int
Number of classes in the subset.
k: int
Number of examples in the subset.
        ignore_class_weights: bool, optional, default=False
Whether class_weights should be ignored when sampling the subset.
Returns
-------
TripletGenerator.Subset
Subset containing p classes with k examples each.
"""
if ignore_class_weights:
class_weights = np.ones_like(self.class_counts).astype(np.float32)
else:
class_weights = self.class_counts.copy().astype(np.float32)
class_weights[class_weights < k] = 0
class_weights /= np.sum(class_weights)
if not self.use_excludes:
selected_classes = self.rng.choice(self.classes, size=p, p=class_weights, replace=False)
else:
selected_classes = [self.rng.choice(self.classes, p=class_weights, replace=False)]
for i in range(p-1):
class_weights = self._adjust_class_weights(new_class=selected_classes[-1], class_weights=class_weights)
selected_classes.append(self.rng.choice(self.classes, p=class_weights, replace=False))
image_idx = [self.rng.choice(list(self.pos_masks_classes.setdefault(class_id, _create_pos_mask(class_id, self.class_ids))), size=k, replace=False) for class_id in selected_classes]
indices = np.array([i for image_l in image_idx for i in image_l])
return TripletGenerator.Subset(base_set=self,
indices=self.global_indices[indices] if self.global_indices is not None else indices,
example_vectors=self.example_vectors[indices] if self.example_vectors is not None else None)
def build_random_triplets(self, size: int, replace: bool=False) -> Tuple['np.array', 'np.array', 'np.array']:
"""Sample uniform random triplets
Parameters
----------
size: int
Number of triplets.
replace: bool, optional, default=False
Sample anchors with replacement.
Returns
-------
np.array
Anchor indices
np.array
Positive example indices
np.array
Negative example indices
"""
a, p, n = self._build_random_triplets_for_ids(
class_ids=self.class_ids,
examples_mask=self.pos_example_mask,
excludes=self.excludes,
size=size,
rng=self.rng,
replace=replace,
pos_masks=self.pos_masks_classes,
neg_masks=self.neg_masks_classes)
if self.global_indices is None:
return a, p, n
else:
return self.global_indices[a], self.global_indices[p], self.global_indices[n]
@staticmethod
def _prepare_excludes(class_ids, excludes=None, pos_masks=None, force_bidirectional_excludes=True):
if excludes is None:
excludes = {}
else:
excludes = deepcopy(excludes)
if pos_masks is None:
pos_masks = {}
examples_mask = np.ones_like(class_ids, dtype=bool)
for c_i, count in zip(*np.unique(class_ids, return_counts=True)):
try:
e_i = excludes[c_i]
except KeyError:
e_i = set([c_i])
else:
e_i = set(e_i).union(set([c_i]))
excludes[c_i] = e_i
if force_bidirectional_excludes:
for c_ij in excludes[c_i]:
if c_ij != c_i:
try:
e_j = excludes[c_ij]
except KeyError:
e_j = set([c_i])
else:
e_j = set(e_j).union(set([c_i]))
excludes[c_ij] = e_j
if count == 1:
pos_mask = pos_masks.setdefault(c_i, _create_pos_mask(c_i, class_ids))
examples_mask[list(pos_mask)] = False
return excludes, pos_masks, examples_mask
@staticmethod
def build_random_triplets_for_ids(class_ids, excludes=None, size=1, rng=None, replace=True):
"""Static function to generate uniform random triplets.
Parameters
----------
class_ids: Iterable of ints castable to np.array
Class ids
excludes: None or dict, optional, default=None
Triplet exclude dict passed to sampler, if sampler supports it.
Check sap_computer_vision.data.samplers for more details on the different samplers.
This parameters is meant for distance metric learning.
        rng: None or int or np.random.Generator, optional, default=None
            Seed for the np.random.Generator or a np.random.Generator.
            Check https://numpy.org/doc/stable/reference/random/generator.html
            for more info
size: int
Number of triplets.
        replace: bool, optional, default=True
Sample anchors with replacement.
Returns
-------
np.array
Anchor indices
np.array
Positive example indices
np.array
Negative example indices
"""
excludes, pos_masks, examples_mask= TripletGenerator._prepare_excludes(class_ids=class_ids, excludes=excludes)
neg_masks = {}
if not isinstance(rng, np.random.Generator):
rng = np.random.default_rng(seed=rng)
return TripletGenerator._build_random_triplets_for_ids(class_ids=class_ids,
examples_mask=examples_mask,
excludes=excludes,
size=size,
rng=rng,
replace=replace,
pos_masks=pos_masks,
neg_masks=neg_masks)
@staticmethod
def _build_random_triplets_for_ids(class_ids, examples_mask, excludes, size, rng, replace, pos_masks, neg_masks):
try:
selected_idx = rng.choice(np.where(examples_mask)[0], size=size, replace=replace)
except ValueError:
if not replace:
raise ValueError(f'Tried to sample {size} triplets without replacement, but there are only '
f'{np.sum(examples_mask)} images from classes with at least 2 images!')
idx = set(range(len(class_ids)))
anchors, pos, neg = [], [], []
for idx_anchor, c_i in zip(selected_idx, class_ids[selected_idx]):
pos_mask = pos_masks.setdefault(c_i, _create_pos_mask(c_i, class_ids))
neg_mask = neg_masks.setdefault(c_i, _create_neg_mask(excludes[c_i], class_ids, pos_masks))
idx_pos = rng.choice(list(pos_mask.difference(set([idx_anchor]))))
try:
idx_neg = rng.choice(list(idx.difference(neg_mask)))
except ValueError:
                raise ValueError('Tried to sample a negative example from an empty list. This is most likely due to too rigorous excludes. '
                                 'Please try to relax the excludes or remove classes with strong excludes from the input.')
anchors.append(idx_anchor)
pos.append(idx_pos)
neg.append(idx_neg)
return np.array(anchors), np.array(pos), np.array(neg)
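# --- Editor's note: hedged sketch, not part of the original module. ---------
# Minimal illustration of the static uniform random triplet sampling above on a
# toy label set; the function name and toy data are hypothetical.
def _sketch_random_triplets():
    toy_class_ids = np.array([0, 0, 1, 1, 2, 2])
    anchors, pos, neg = TripletGenerator.build_random_triplets_for_ids(
        toy_class_ids, size=4, rng=0, replace=True)
    # Every triplet pairs an anchor with a positive of the same class and a
    # negative from a different class.
    assert all(toy_class_ids[a] == toy_class_ids[p] for a, p in zip(anchors, pos))
    assert all(toy_class_ids[a] != toy_class_ids[n] for a, n in zip(anchors, neg))
    return anchors, pos, neg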
def _index_f_squared_to_condense_torch(n, i, j):
return n * i + j - torch.div(((i + 2) * (i + 1)), 2, rounding_mode='trunc')
def _index_f_squared_to_condense_numpy(n, i, j):
return n * i + j - ((i + 2) * (i + 1)) // 2
def _get_indices_pdist_matrix_condensed(labels, idx, pos_masks=None, same_class=True, use_torch=False):
"""Helper function to get indices form a condensed pdist matrix."""
pos_masks = {} if pos_masks is None else pos_masks
if use_torch:
j = torch.arange(len(labels), device=labels.device)
j_original = j.clone()
i = torch.ones_like(j) * int(idx)
else:
j = np.arange(len(labels))
i = np.ones_like(j) * int(idx)
mask = i > j
i[mask], j[mask] = j[mask], i[mask]
pos_mask = pos_masks.setdefault(labels[idx], labels == labels[idx])
if use_torch:
if same_class:
value_mask = pos_mask.clone()
value_mask[idx] = False
else:
value_mask = torch.logical_not(pos_mask)
else:
if same_class:
value_mask = pos_mask.copy()
value_mask[idx] = False
else:
value_mask = np.logical_not(pos_mask)
if use_torch:
return _index_f_squared_to_condense_torch(len(labels), i[value_mask], j[value_mask]), j_original[value_mask]
else:
return _index_f_squared_to_condense_numpy(len(labels), i[value_mask], j[value_mask]), np.where(value_mask)[0]
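# --- Editor's note: hedged sketch, not part of the original module. ---------
# The helpers above map an (i, j) entry of a square distance matrix to its
# position in scipy's condensed pdist output. This hypothetical check verifies
# the mapping on a small random matrix.
def _sketch_condensed_index_check():
    from scipy.spatial.distance import squareform
    x = np.random.default_rng(0).random((5, 3))
    d_condensed = pdist(x)                      # scipy pdist, imported at module level
    d_square = squareform(d_condensed)
    i, j = 1, 3                                 # any pair with i < j
    k = _index_f_squared_to_condense_numpy(len(x), i, j)
    assert np.isclose(d_square[i, j], d_condensed[k])
    return k                                    # == 5 for n=5, i=1, j=3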
def _create_pos_mask(c_i, classes):
return set(np.where(classes == c_i)[0])
def _create_neg_mask(excluded_classes, classes, pos_masks):
ignore_idx = set()
for c_i in excluded_classes:
idx_c_i = pos_masks.setdefault(c_i, _create_pos_mask(c_i, classes))
ignore_idx = ignore_idx.union(idx_c_i)
return ignore_idx
def create_triplets_from_pk_sample(feature_vectors: 'torch.Tensor',
labels: 'torch.Tensor',
strategy_pos: Union[None, str, Callable]=None,
strategy_neg: Union[None, str, Callable]=None,
dist_norm: float =2.,
anchor_pos_combinations_unique: bool=False) -> Tuple['torch.Tensor', 'torch.Tensor', 'torch.Tensor']:
"""Sample triplets from a subset containing p classes and k examples per class according to a given strategy.
This function is meant to be used internally. For example this function is called
by the TripletDistanceLearner or the TripletReservoirSampler.
Parameters
----------
    feature_vectors: torch.Tensor
        Feature vectors for all examples. Has to be of the same length as
        labels.
labels: torch.Tensor
        Label ids
    strategy_pos: None, str or callable, optional, default=None
        Strategy used to determine the positive example.
        Supported strategies are 'min', 'max', 'rand' or '*'/'all'.
        If '*' or 'all' strategy_neg is ignored and all possible triplets
        are returned. 'min'/'max' selects the example with the minimal/
        maximal distance. 'rand' selects a uniform random example.
        If None the default strategy 'rand' is used.
        Alternatively a callable can be passed as a strategy.
        The callable has to return an index as torch.Tensor and is called
        with max_index and the device of the result. The returned index is
        used to access the potential positive examples ordered by their
        distance to the anchor (ascending).
        The intended use is to use a custom distribution instead of uniform
        random sampling. Check SkewedNormalSampler as an example.
    strategy_neg: None, str or callable, optional, default=None
Strategy used to determine the negative example.
Identical to 'strategy_pos'.
dist_norm: float, optional, default=2
Norm used to calculate the distance.
Check torch.nn.functional.pdist for details.
    anchor_pos_combinations_unique: bool, optional, default=False
        This option is only relevant when using '*'/'all' as the strategy.
        The option controls whether (anchor, pos) pairs should be unique.
        If True, only (1, 2, 3) is returned.
        If False, both (1, 2, 3) and (2, 1, 3) are returned.
Returns
-------
torch.Tensor
Anchor indices
torch.Tensor
Positive example indices
torch.Tensor
Negative example indices
"""
indices = torch.arange(len(labels), device=labels.device)
if strategy_pos in ['all', '*'] or strategy_neg in ['all', '*']:
positive_matrix = labels.unsqueeze(1) == labels.unsqueeze(0)
negative_matrix = positive_matrix.logical_not()
if anchor_pos_combinations_unique:
positive_matrix = positive_matrix.triu(diagonal=1)
else:
positive_matrix = positive_matrix.logical_and(torch.eye(*positive_matrix.size(), dtype=bool).logical_not())
anchor, pos, neg = [], [], []
for i, a in enumerate(indices):
for p in indices[positive_matrix[i, :]]:
for n in indices[negative_matrix[i, :]]:
anchor.append(a)
pos.append(p)
neg.append(n)
anchor_idx, pos_idx, neg_idx = torch.stack(anchor), torch.stack(pos), torch.stack(neg)
else:
with torch.no_grad():
pos, neg = [], []
pos_masks = {}
if any([strat in ['min', 'max'] for strat in (strategy_pos, strategy_neg)]) or any([callable(strat) for strat in (strategy_pos, strategy_neg)]):
if isinstance(dist_norm, float) or isinstance(dist_norm, int):
pdist = nn.functional.pdist(feature_vectors, p=float(dist_norm))
else:
raise NotImplementedError
else:
pdist = None
for idx in range(len(feature_vectors)):
if strategy_pos == 'rand':
pos_mask = pos_masks.setdefault(labels[idx], labels == labels[idx])
indices_pos = indices[pos_mask]
selected_idx = torch.randint(0, len(indices_pos), size=(1, ), device=labels.device)[0]
selected_pos = indices_pos[selected_idx]
if selected_pos == idx:
selected_pos = indices_pos[(selected_idx + 1) % len(indices_pos)]
pos.append(selected_pos)
elif strategy_pos in ('min', 'max') or callable(strategy_pos):
indices_pos, indices_feat_pos = _get_indices_pdist_matrix_condensed(labels,
idx,
None,#pos_masks,
same_class=True,
use_torch=True)
if callable(strategy_pos):
distance_order = torch.argsort(pdist[indices_pos])
idx_ = strategy_pos(len(distance_order), labels.device)
pos.append(indices_feat_pos[distance_order[idx_]])
else:
pos.append(indices_feat_pos[torch.argmin(pdist[indices_pos])
if strategy_pos == 'min' else
torch.argmax(pdist[indices_pos])])
else:
raise ValueError(f'`{strategy_pos}` is not a valid strategy to sample positive examples')
if strategy_neg == 'rand':
neg_mask = torch.logical_not(pos_masks.setdefault(labels[idx], labels == labels[idx]))
indices_neg = indices[neg_mask]
selected_idx = torch.randint(0, len(indices_neg), size=(1, ), device=labels.device)[0]
neg.append(indices_neg[selected_idx])
elif strategy_neg in ('min', 'max') or callable(strategy_neg):
indices_neg, indices_feat_neg = _get_indices_pdist_matrix_condensed(labels,
idx,
None,#pos_masks,
same_class=False,
use_torch=True)
if callable(strategy_neg):
distance_order = torch.argsort(pdist[indices_neg])
idx_ = strategy_neg(len(distance_order), labels.device)
neg.append(indices_feat_neg[distance_order[idx_]])
else:
neg.append(indices_feat_neg[torch.argmin(pdist[indices_neg])
if strategy_neg == 'min' else
torch.argmax(pdist[indices_neg])])
else:
                    raise ValueError(f'`{strategy_neg}` is not a valid strategy to sample negative examples')
anchor_idx, pos_idx, neg_idx = indices, torch.stack(pos), torch.stack(neg)
return anchor_idx, pos_idx, neg_idx
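# --- Editor's note: hedged sketch, not part of the original module. ---------
# Minimal illustration of mining triplets from a PK batch with the function
# above; the embeddings and labels are toy data and the name is hypothetical.
def _sketch_pk_triplet_mining():
    feats = torch.randn(8, 16)                        # 8 toy embeddings
    labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])   # p=4 classes, k=2 examples each
    anchors, pos, neg = create_triplets_from_pk_sample(
        feats, labels, strategy_pos='max', strategy_neg='min')
    # 'max'/'min' picks the hardest positive / hardest negative per anchor.
    assert len(anchors) == len(labels)
    return anchors, pos, neg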
class SkewedNormalSampler:
"""Class providng samples indices based on a moved and scaled skewed normal distribution.
The basic idea is to squeeze a skewed normal distribution in the range between 0 and 1
and skew the distribution "to the right" if the modus is > 0.5 and "to the left" if the mode
is < 0.5. The skewness is linearly increased to its max value when moving the mode to the
side.
The distribution is used to have control on the difficulty of sampled triplets. It can be
seen as a relaxation of max min triplets. The class is used when using a float as triplet
sampling strategy.
"""
def __init__(self, loc: float, power: float=1., min_weight: Union[None, float]=None, max_skew: float=10.):
"""Create callable instance to random sample an index according to a
moved and scaled skewed normal distribution.
Parameters
----------
loc: float
Position of the mode. Has to be between 0 and 1.
        power: float, optional, default=1.
Exponent x (dist**x).
Can be used to make the distribution "sharper".
        min_weight: None or float, optional, default=None
            Minimum weight when sampling the index.
            dist'(x) = max(dist(x), min_weight).
max_skew: float, optional, default=10.
Maximal skewness value
"""
self._cache = {}
self.loc = loc
self.power = power
self.min_weight = min_weight
self.max_skew = max_skew
def __repr__(self):
return str(self)
def __str__(self):
return f'SkewedNormalSampler(loc={self.loc}, power={self.power} min_weight={self.min_weight}, max_skew={self.max_skew})'
@staticmethod
def _get_scaled_moved_skewed_normal_distribution(a, max_a=10., power=1.):
used_a = (0.5-a)/0.5 * max_a
scaling = skewnorm.ppf(1-0.01, used_a) - skewnorm.ppf(0.01, used_a)
if used_a == 0:
mode = 0
else:
delta = used_a / np.sqrt(1 + used_a**2)
mu_z = np.sqrt(2 / np.pi) * delta
sigma_z = np.sqrt(1-mu_z**2)
skew = (4-np.pi) / 2 * (delta * np.sqrt(2/np.pi))**3 / (1 - 2*delta**2/np.pi)**(1.5)
mode = mu_z - skew * sigma_z / 2. - np.sign(used_a) / 2. * np.exp(-2*np.pi/np.absolute(used_a))
amplitude = skewnorm.pdf(mode, used_a)
def f_(x):
x = ((x-a)* scaling + mode)
weights = np.power(skewnorm.pdf(x, used_a) / amplitude, power)
return weights
return f_
@staticmethod
def _get_sample_weights_skewed_norm_dist(n_entries, loc, power=1, min_weight=None, max_skew=10.):
loc = np.clip(loc, 0., 1.)
f_ = SkewedNormalSampler._get_scaled_moved_skewed_normal_distribution(loc, power=power, max_a=max_skew)
x = np.linspace(0., 1, n_entries)
        weights = np.ones(1, dtype=np.float32) if n_entries == 1 else f_(x)
if min_weight is not None:
            weights = np.clip(weights, min_weight, None)
weights /= np.sum(weights)
return weights
def __call__(self, n_entries, device):
try:
cumsum = self._cache[n_entries]
except KeyError:
weights = self._get_sample_weights_skewed_norm_dist(n_entries, loc=self.loc, power=self.power, min_weight=self.min_weight, max_skew=self.max_skew)
cumsum = np.cumsum(weights)
cumsum /= cumsum[-1]
cumsum = torch.from_numpy(cumsum).float().to(device)
self._cache[n_entries] = cumsum
val = torch.rand(1, device=device, requires_grad=False)
return int(sum(cumsum < val))
def build_triplet_strategy(strategy: Union[str, float]=None, skewed_pow=3) -> Tuple[Union[str, Callable]]:
"""Function to check strategy and create SkewedNormalSampler when strategy is float.
This function is used by the TripletDistanceLearner and the TripletReservoirSampler.
Parameters
----------
    strategy: tuple of (str or float), optional, default=None
        Strategies used to sample the positive example (strategy[0])
        and the negative example (strategy[1]).
        Options are 'rand'/'min'/'max' or a float between 0 and 1.
        - rand: random
        - min: minimal distance to the anchor
        - max: maximal distance to the anchor
        - all/*: all possible triplets
        - float: loc of a SkewedNormalSampler
Returns
-------
tuple(str/Callable, str/Callable)
        Tuple of the prepared (pos_strategy, neg_strategy).
"""
if (isinstance(strategy, str) and strategy.lower() == 'none') or strategy is None:
strategy = ('rand', 'rand')
    if not isinstance(strategy, (tuple, list)) or len(strategy) != 2:
        raise ValueError('`strategy` for triplet sampling when using the PKSampler/TripletReservoirSampler has to be a tuple (pos_strat, neg_strat).')
pos, neg = strategy
if isinstance(pos, float):
pos = SkewedNormalSampler(pos, power=skewed_pow)
elif isinstance(pos, str):
pos = pos.lower()
if isinstance(neg, float):
neg = SkewedNormalSampler(neg, power=skewed_pow)
elif isinstance(neg, str):
neg = neg.lower()
return (pos, neg)
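# --- Editor's note: hedged sketch, not part of the original module. ---------
# A float strategy is turned into a SkewedNormalSampler by the function above;
# calling the resulting object draws an index biased towards the requested
# position in the distance-sorted candidate list. The names below are
# hypothetical.
def _sketch_float_strategy():
    pos_strategy, neg_strategy = build_triplet_strategy((0.7, 0.3))
    # Both entries are now SkewedNormalSampler instances.
    idx = pos_strategy(10, torch.device('cpu'))   # index into 10 candidates
    assert 0 <= idx < 10
    return pos_strategy, neg_strategy, idx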
# --- end of file: /sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/data/triplet_sampling_utils.py (source: pypi) ---
import logging
from contextlib import ExitStack
from itertools import count
from collections.abc import Iterable as IsIterable
from typing import Callable, Iterator, Dict, List, Set, Union, Tuple, Any
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import Sampler
from torch.utils.data import IterableDataset
from detectron2.engine.train_loop import HookBase
from detectron2.evaluation.evaluator import inference_context
from detectron2.data.build import trivial_batch_collator, build_batch_data_loader
from .triplet_sampling_utils import TripletGenerator, create_triplets_from_pk_sample, build_triplet_strategy
logger = logging.getLogger(__name__)
class PKTripletSampler(Sampler):
""""This sampler returns batches containing p classes and k examples per class.
This sampler can return batches of indices:
[(c1, c1, c5, c5, c4, c4), ..., (c5, c5, c2, c2, c3, c3) ]
or single indices (Make sure to do correct batching later on the
preserve the pk batches):
[c1, c1, c5, c5, c4, c4, ..., c5, c5, c2, c2, c3, c3]
The idea for such a sampler is taken from: https://arxiv.org/pdf/1703.07737.pdf
Attributes
----------
batch_size: int
Effective batch size: p*k
p: int
Number of classes in a batch.
k: int
Number of examples per class.
"""
def __init__(self,
p: int,
k: int,
class_ids: Union['np.array', List[int]],
n_batches: Union[None, int]=None,
excludes: Union[None, Dict[int, Set[int]]]=None,
ignore_class_weights: bool=True,
rng: Union[None, int, 'np.random.Generator']=None,
return_batch: bool=False,
infinite_stream: bool=False):
""""Create a sampler.
Parameters
----------
p: int
Number of classes in a batch.
k: int
Number of examples per class.
class_ids: np.array or list of ints
Class ids of the examples in the dataset.
        n_batches: None or int, optional, default=None
            If None the length of the iterator will be len(class_ids).
        excludes: None or dict(int, set(int)), optional, default=None
            To exclude classes from being sampled together with other classes.
            Provide a dict with int as key and a set of class ids of the
            excluded classes. This prevents all classes from the set from
            being sampled if the class used as key for the set is in the batch.
        ignore_class_weights: bool, optional, default=True
            If True all classes are sampled uniformly.
            If False the p classes are sampled according to their frequency in the dataset.
        rng: None or int or np.random.Generator, optional, default=None
            Seed for the np.random.Generator or a np.random.Generator.
            Check https://numpy.org/doc/stable/reference/random/generator.html
            for more info
        return_batch: bool, optional, default=False
            Whether the Sampler should return batches of indices or single indices of the
            examples.
        infinite_stream: bool, optional, default=False
            If set to True the sampler returns an infinite stream of batches/single
            indices.
"""
assert p >= 2 and isinstance(p, int), "`p` must be int >= 2."
        assert k >= 2 and isinstance(k, int), "`k` must be int >= 2."
self.p = p
self.k = k
self._generator = TripletGenerator(class_ids=class_ids, excludes=excludes, rng=rng, ignore_class_weights=ignore_class_weights)
self.return_batch = return_batch
# Setup sampling
if n_batches is None:
n_batches = len(class_ids)
self.n_batches = n_batches
self.infinite_stream = infinite_stream
def __len__(self) -> int:
if self.infinite_stream:
            raise TypeError('Sampler is set to produce an infinite stream of samples, so length is not defined!')
else:
if self.return_batch:
return self.n_batches
else:
return self.n_batches * self.batch_size
@property
def batch_size(self):
return self.p * self.k
def __iter__(self) -> Iterator[int]:
iterator = count(0) if self.infinite_stream else range(self.n_batches)
for _ in iterator:
subset = self._generator.sample_subset(p=self.p, k=self.k)
if self.return_batch:
yield subset.indices
else:
for i in subset.indices:
yield i
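# --- Editor's note: hedged sketch, not part of the original module. ---------
# Minimal illustration of the PK batching described in the docstring above;
# the toy labels and the function name are hypothetical.
def _sketch_pk_sampler_usage():
    toy_class_ids = np.array([0, 0, 1, 1, 2, 2, 3, 3])
    sampler = PKTripletSampler(p=2, k=2, class_ids=toy_class_ids,
                               n_batches=3, rng=0, return_batch=True)
    batches = list(sampler)
    # Each batch holds p * k = 4 indices: 2 classes with 2 examples each.
    assert len(batches) == 3 and all(len(b) == sampler.batch_size for b in batches)
    return batches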
class TripletTrainSampler(Sampler):
"""This sampler returns uniform random sampled triplets.
The shape of the output can be controlled with the attributes
    'flatten' and 'return_batch':
- (batch_size=2, flatten=False, return_batch=True):
[((t1_anchor, t1_pos, t1_neg), (t2_anchor, t2_pos, t2_neg)), ...]
- (batch_size=2, flatten=True, return_batch=True):
[(t1_anchor, t1_pos, t1_neg, t2_anchor, t2_pos, t2_neg), ...]
    - (batch_size=2, flatten=False, return_batch=False):
[t1_anchor, t1_pos, t1_neg, t2_anchor, t2_pos, t2_neg, ...]
Attributes
----------
batch_size: int
Effective batch size: n_triplets_per_batch*3 (flatten=True) or
simply n_triplets_per_batch (flatten=False)
sampler_n_batches_at_once: int, default=10
        This parameter only exists to optimize performance a little bit.
        The time it takes to generate triplets is linear in the number of triplets,
        but there is also a significant overhead for every call (check the TripletGenerator
        implementation details). So calling it for every batch is slower. Creating
        a lot of unused triplets is also not ideal. Normally you can simply ignore this
        parameter and use the default value.
"""
sampler_n_batches_at_once = 10
def __init__(self,
class_ids,
n_triplets_per_batch,
n_batches=None,
excludes=None,
rng: Union[None, int, 'np.random.Generator']=None,
return_batch: bool=False,
flatten: bool=True,
infinite_stream: bool=False):
""""Create a sampler.
Parameters
----------
class_ids: np.array or list of ints
Class ids of the examples in the dataset.
n_triplets_per_batch: int
Number of triplets per batch.
        n_batches: None or int, optional, default=None
            If None the length of the iterator will be len(class_ids).
        excludes: None or dict(int, set(int)), optional, default=None
            To exclude classes from being sampled together with other classes.
            Provide a dict with an int as key and a set of class ids as the
            excluded classes. This prevents all classes from the set from
            being sampled if the class used as key for the set is in the batch.
        rng: None or int or np.random.Generator, optional, default=None
            Seed for the np.random.Generator or a np.random.Generator.
            Check https://numpy.org/doc/stable/reference/random/generator.html
            for more info.
        return_batch: bool, optional, default=False
            Whether the sampler should return batches of indices or single
            indices of the examples.
        flatten: bool, optional, default=True
            If True the batch shape is (n_triplets_per_batch*3, ).
            If False the batch shape is (n_triplets_per_batch, 3).
        infinite_stream: bool, optional, default=False
            If set to True the sampler returns an infinite stream of batches/single
            indices.
"""
self.generator = TripletGenerator(class_ids=class_ids, excludes=excludes, rng=rng)
self.return_batch = return_batch
self.flatten = True if not return_batch else flatten
self.infinite_stream = infinite_stream
# Setup sampling
        if n_batches is None:
            n_batches = len(class_ids)
        self.n_batches = n_batches
self.n_triplets_per_batch = n_triplets_per_batch
def __len__(self) -> int:
if self.infinite_stream:
            raise TypeError('Sampler is set to produce an infinite stream of samples, so length is not defined!')
else:
if self.return_batch:
return self.n_batches
else:
return self.n_batches * self.batch_size
@property
def batch_size(self) -> int:
if self.flatten:
return self.n_triplets_per_batch * 3
else:
return self.n_triplets_per_batch
def __iter__(self) -> Iterator[int]:
iterator = count(0) if self.infinite_stream else range(self.n_batches)
batches_returned = 0
for _ in iterator:
new_batches = min(self.sampler_n_batches_at_once,
self.n_batches - batches_returned if self.n_batches > 0 else self.sampler_n_batches_at_once)
size = self.n_triplets_per_batch * new_batches
triplet_array = np.vstack(self.generator.build_random_triplets(size=size, replace=True)).T
for data in _iter_triplet_data(triplet_array,
self.n_triplets_per_batch,
flatten=self.flatten,
return_batch=self.return_batch):
yield data
batches_returned += new_batches
if self.n_batches > 0 and batches_returned >= self.n_batches:
return
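
# Illustrative usage sketch (not part of the original module): drawing uniformly
# sampled triplets with TripletTrainSampler. The class ids and parameters below
# are hypothetical toy values.
def _example_triplet_train_sampler():
    import numpy as np
    class_ids = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])  # 3 classes, 3 examples each
    sampler = TripletTrainSampler(class_ids, n_triplets_per_batch=2, n_batches=4, return_batch=True)
    for batch in sampler:
        # flatten defaults to True, so each batch is a flat array of 2 * 3 = 6 indices,
        # i.e. two (anchor, pos, neg) triplets back to back
        print(batch)
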
class TripletReservoirSampler(Sampler):
"""This sampler creates triplets similar to TripletSampler
but creates the triplets based on the distance/similarities of the
examples. The batches of triplets are sampled from a reservoir.
The reservoirs contain p classes with k examples each.
"""
def __init__(self,
class_ids: Union['np.array', List[int]],
n_triplets_per_batch: int,
k_examples_per_class: int,
strategy: Tuple[Union[str, float]]=('max', 'min'),
reservoir_of_n_batches: int=10,
n_random_batches_start: int=10,
refresh_after_n_batches: Union[int, None]=None,
n_batches: Union[None, int]=None,
example_vectors: Union[None, 'np.array']=None,
example_vectors_func: Union[None, Callable]=None,
excludes: Union[None, Dict[int, Set[int]]]=None,
rng: Union[None, int, 'np.random.Generator']=None,
return_batch: bool=False,
flatten: bool=True,
infinite_stream: bool=False):
""""Create a sampler.
Parameters
----------
class_ids: np.array or list of ints
Class ids of the examples in the dataset.
n_triplets_per_batch: int
Number of triplets per batch.
k_examples_per_class: int
Number of examples per class
        strategy: tuple of (str or float), optional, default=('max', 'min')
            Strategy used to sample the positive example (strategy[0])
            and the negative example (strategy[1]).
Options are 'rand'/'min'/'max' or a float between 0 and 1.
- rand: random
- min: minimal distance to the anchor
- max: maximal distance to the anchor
- float: loc of triplet_sampling_utils.SkedNormalSampler
reservoir_of_n_batches: int, optional, default=10
Number of batches sampled from each reservoir.
        n_random_batches_start: int, optional, default=10
            Number of uniformly random sampled warm-up batches at the start of the training.
refresh_after_n_batches: None or int, optional, default=None
Number of batches sampled from a single reservoir.
If None refresh_after_n_batches = reservoir_of_n_batches
n_batches: None or int, optional, default=None
            If None the length of the iterator will be len(class_ids).
example_vectors: None or np.array, optional, default=None
            Feature vectors for all examples. Has to be of the same length as
class_ids. To calculate the feature vectors dynamically for each
batch set sampler.example_vectors_func.
example_vectors_func: None or callable, optional, default=None
Callable calculating the feature vectors of the examples in the
reservoir. The callable is called with the reservoir.
Check sap_computer_vision.data.triplet_sampling_utils.TripletGenerator.Subset
for details.
excludes: None or dict(int, set(int)), optional, default=None
To exclude classes from being sampled together with other classes.
Provide a dict with int as key and a set of class ids of the
            excluded classes. This prevents all classes from the set from
            being sampled if the class used as key for the set is in the batch.
        rng: None or int or np.random.Generator, optional, default=None
            Seed for the np.random.Generator or a np.random.Generator.
            Check https://numpy.org/doc/stable/reference/random/generator.html
            for more info.
        return_batch: bool, optional, default=False
            Whether the sampler should return batches of indices or single
            indices of the examples.
        flatten: bool, optional, default=True
            If True the batch shape is (n_triplets_per_batch*3, ).
            If False the batch shape is (n_triplets_per_batch, 3).
        infinite_stream: bool, optional, default=False
            If set to True the sampler returns an infinite stream of batches/single
            indices.
"""
self._generator = TripletGenerator(class_ids=class_ids, excludes=excludes, rng=rng, example_vectors=example_vectors)
self.return_batch = return_batch
self.refresh_after_n_batches = refresh_after_n_batches if refresh_after_n_batches is not None else reservoir_of_n_batches
self.n_triplets_per_batch = n_triplets_per_batch
self.reservoir_of_n_batches = reservoir_of_n_batches
self.k_examples_per_class = k_examples_per_class
self.n_random_batches_start = n_random_batches_start
self._random_triplets = n_random_batches_start > 0
self.triplet_strategy = build_triplet_strategy(strategy)
self.example_vectors_func = example_vectors_func
self.return_batch = return_batch
self.flatten = True if not return_batch else flatten
self.infinite_stream = infinite_stream
        if n_batches is None:
            n_batches = len(class_ids)
        self.n_batches = n_batches
@property
def p_classes_in_reservoir(self):
        return int(np.ceil((self.n_triplets_per_batch * self.reservoir_of_n_batches) / self.k_examples_per_class))
def __len__(self) -> int:
if self.infinite_stream:
            raise TypeError('Sampler is set to produce an infinite stream of samples, so length is not defined!')
else:
if self.return_batch:
return self.n_batches
else:
return self.n_batches * self.batch_size
@property
def batch_size(self):
if self.flatten:
return self.n_triplets_per_batch * 3
else:
return self.n_triplets_per_batch
def get_reservoir(self):
reservoir = self._generator.sample_subset(
p=self.p_classes_in_reservoir,
k=self.k_examples_per_class)
if self.example_vectors_func is not None:
reservoir.set_example_vectors(self.example_vectors_func(reservoir))
return reservoir
def __iter__(self) -> Iterator[int]:
iterator = count(0) if self.infinite_stream else range(self.n_batches)
batches_returned = 0
for _ in iterator:
if self._random_triplets:
new_batches = min(self.n_random_batches_start, self.n_batches)
size = self.n_triplets_per_batch * new_batches
triplet_array = np.vstack(self._generator.build_random_triplets(size=size, replace=True)).T
self._random_triplets = False
else:
reservoir = self.get_reservoir()
triplet_array = reservoir.build_triplets_accourding_to_strategy((self.triplet_strategy[0],
self.triplet_strategy[1]))
triplet_array = np.array(triplet_array).T
self._generator.rng.shuffle(triplet_array)
new_batches = min(self.refresh_after_n_batches,
self.n_batches - batches_returned if self.n_batches > 0 else self.refresh_after_n_batches)
triplet_array = triplet_array[:new_batches*self.n_triplets_per_batch]
for data in _iter_triplet_data(triplet_array,
self.n_triplets_per_batch,
flatten=self.flatten,
return_batch=self.return_batch):
yield data
batches_returned += new_batches
if self.n_batches > 0 and batches_returned >= self.n_batches:
return
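
# Illustrative sketch (not part of the original module): a TripletReservoirSampler
# fed with precomputed example vectors instead of a dynamic `example_vectors_func`.
# All values below are hypothetical toy data; in a real training the vectors would
# come from the model (see PredictReservoirHook below).
def _example_triplet_reservoir_sampler():
    import numpy as np
    rng = np.random.default_rng(0)
    class_ids = np.repeat(np.arange(5), 10)                  # 5 classes, 10 examples each
    example_vectors = rng.normal(size=(len(class_ids), 8))   # fake 8-dim embeddings
    sampler = TripletReservoirSampler(class_ids,
                                      n_triplets_per_batch=4,
                                      k_examples_per_class=4,
                                      strategy=('max', 'min'),
                                      reservoir_of_n_batches=4,
                                      n_random_batches_start=1,
                                      n_batches=3,
                                      example_vectors=example_vectors,
                                      return_batch=True)
    for batch in sampler:
        print(batch)  # flat array of 4 * 3 indices per batch
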
class PredictReservoirHook(HookBase):
"""Detectron trainer hook to calculate feature vectors of the reservoir examples.
The tricky part is to create a dataloader for the reservoir."""
def __init__(self, sampler):
self.sampler = sampler
def before_train(self):
"""Function executed before the training.
Assign the 'calculate_example_vectors' of the hook as the example_vectors_func
callable of the sampler.
"""
self.sampler.example_vectors_func = self.calculate_example_vectors
@staticmethod
def _predict_reservoir(cfg, reservoir_data, model, collate_fn=trivial_batch_collator):
with ExitStack() as stack:
if isinstance(model, nn.Module):
stack.enter_context(inference_context(model))
stack.enter_context(torch.no_grad())
batch_size = cfg.SOLVER.get('IMS_PER_BATCH_EVAL', cfg.SOLVER.IMS_PER_BATCH)
data_loader = DataLoader(
dataset=reservoir_data,
batch_size=batch_size,
drop_last=False,
collate_fn=collate_fn)
outputs = []
with torch.no_grad():
model.eval()
for batch_inputs in data_loader:
outputs.append(model(batch_inputs).cpu().detach().numpy())
model.train()
return np.vstack(outputs)
def calculate_example_vectors(self, reservoir: 'TripletGenerator.Subset') -> 'np.array':
"""Function calculating the feature vectors of a reservoir.
        This function is used as the example_vectors_func callable
of the TripletReservoirSampler.
Parameters
----------
reservoir: TripletGenerator.Subset
Reservoir for which the feature vectors should be calculated.
Returns
-------
np.array
Feature vectors of the reservoir
"""
if reservoir.example_vectors is None:
dataset = self.trainer.data_loader.dataset
if isinstance(dataset, IterableDataset):
raise ValueError('TripletReservoirSampler can not be used for `IterableDataset`s.')
reservoir_data = [dataset[idx] for idx in reservoir.indices]
example_vectors = self._predict_reservoir(cfg=self.trainer.cfg,
reservoir_data=reservoir_data,
model=self.trainer.model)
return example_vectors
def triplet_collator_fn(batch: List[Dict[str, Any]]) -> List[Dict[str, Dict[str, Any]]]:
"""Collate function for torch.data.DataLoader to output list of dicts of triplets.
[{'anchor': example_dict, 'pos': example_dict, 'neg': example_dict}...]
Parameters
----------
batch: list of dicts
        List of dicts returned from the DatasetMapper.
    Returns
    -------
list of dicts
List of dicts. Each list entry is a dict
{'anchor': example_dict, 'pos': example_dict, 'neg': example_dict}
"""
assert len(batch) % 3 == 0
return [{'anchor': a, 'pos': p, 'neg': n} for a, p, n in zip(batch[::3], batch[1::3], batch[2::3])]
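
# Minimal sketch (not part of the original module) of how the collate function
# groups a flat list of mapped examples into triplet dicts; the example dicts
# below are placeholders for what a dataset mapper would return.
def _example_triplet_collator():
    batch = [{'file_name': f'img_{i}.jpg'} for i in range(6)]  # two triplets worth of examples
    triplets = triplet_collator_fn(batch)
    assert len(triplets) == 2
    assert set(triplets[0]) == {'anchor', 'pos', 'neg'}
    return triplets
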
def _iter_triplet_data(data, n_triplets_per_batch, flatten, return_batch):
assert len(data.shape) == 2
assert data.shape[1] == 3
_position = 0
while _position < len(data):
start = _position
end = start + n_triplets_per_batch
end = None if end > len(data) else end
s_ = slice(start, end)
current_batch = data[s_, ].flatten() if flatten else data[s_, ]
if return_batch:
yield current_batch
else:
for entry in current_batch:
yield entry
_position += n_triplets_per_batch
class PKTripletStrategySwitcher(HookBase):
"""This sampler returns batches containing p classes and k examples per class.
This sampler can return batches of indices (p=3, k=2):
[(c1, c1, c5, c5, c4, c4), ..., (c5, c5, c2, c2, c3, c3) ]
or single indices (Make sure to do correct batching later on the
preserve the pk batches):
[c1, c1, c5, c5, c4, c4, ..., c5, c5, c2, c2, c3, c3]
The idea for such a sampler is taken from: https://arxiv.org/pdf/1703.07737.pdf
Attributes
----------
next_strat_and_step: (step, (pos_strat, net_strat))
Property to retrieve the next strategy switch:
step (int) and tuple (pos_strat, neg_strat)
"""
def __init__(self, strategies, target):
"""Create a hook with list of strategies.
The hook instance can be registered with a detectron2 trainer.
If using the TripletDistanceTrain from this package the
hook is automatically registered when strategies switches are
defined in the config and the PKSampler is selected.
Parameters
----------
        strategies: list of tuples [(step, (pos_strat, neg_strat)), ...]
Strategies are provided as a list of tuples.
The tuples consist of a step and a tuple with the strategy
for the positive example and the strategy for the negative
example. Check sap_computer_vision.modelling.TripletDistanceLearner
documentation for details about different strategies.
"""
self.strategies = self.check_strategies(strategies)
self.target = target
self._pointer = 0
@property
def next_strat_and_step(self) -> Tuple[int, Union[None, Tuple[Union[str, float], Union[str, float]]]]:
"""Property returning the next stragety and the switch step.
Returns
-------
int
Step for the next switch. -1 if no switch is scheduled.
tuple or None
Tuple (pos_strat, neg_strat) or None if no switch is scheduled."""
try:
return self.strategies[self._pointer]
except IndexError:
return -1, None
@staticmethod
def check_strategies(strategies) -> List[Tuple[int, Union[None, Tuple[Union[str, float], Union[str, float]]]]]:
"""Functions to perform sanity checks of list of strategies.
Invalid tuples are ignored and are logged as warnings.
Parameters
----------
        strategies: list of tuples [(step, (pos_strat, neg_strat)), ...]
Strategies are provided as a list of tuples.
The tuples consist of a step and a tuple with the strategy
for the positive example and the strategy for the negative
example. Check sap_computer_vision.modelling.TripletDistanceLearner
documentation for details about different strategies.
Returns
-------
list of strategy tuples
            Cleaned list of tuples [(step, (pos_strat, neg_strat)), ...]
"""
if not isinstance(strategies, IsIterable):
            logger.warning('Strategies for switches have to be provided as a list of tuples (step, strategy). Strategies will not be switched!')
return []
else:
checked_strategies = []
for i in range(len(strategies)):
try:
step, strat = strategies[i]
except ValueError:
                    logger.warning(f'Strategies for switches have to be provided as a list of tuples (step, strategy). Switch defined as `{strategies[i]}` will be ignored!')
else:
checked_strategies.append((step, strat))
return checked_strategies
def before_train(self):
"""Function executed before the training.
        This function only logs the first scheduled strategy switch.
"""
msg = f'Starting with `{self.target.triplet_strategy}`'
switch_step, next_strat = self.next_strat_and_step
if next_strat is not None:
msg += f'; first strategy switch iter: {switch_step} to `{next_strat}`'
else:
            msg += '; no strategy switch scheduled'
logger.info(msg)
def after_step(self):
"""Function executed after every training step.
        After every training step this function checks if the strategy has to be switched.
Every switch is logged and the next switch is announced.
"""
switch_step, next_strat = self.next_strat_and_step
if self.trainer.iter == switch_step:
try:
new_strat = build_triplet_strategy(next_strat)
except ValueError:
msg = f'Switching to {next_strat} failed, because it is not a valid strategy'
else:
msg = f'Switching to `{new_strat}` iter: {self.trainer.iter}'
self.target.triplet_strategy = new_strat
self._pointer += 1
switch_step, next_strat = self.next_strat_and_step
if next_strat is not None:
msg += f'; next strategy iter: {switch_step} to `{next_strat}`'
else:
                msg += '; no more strategy switches scheduled'
logger.info(msg)
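
# Illustrative sketch (not part of the original module): scheduling strategy
# switches for a sampler that exposes a `triplet_strategy` attribute (e.g. the
# reservoir sampler above). Step numbers and strategies are hypothetical.
def _example_strategy_switcher(sampler):
    switcher = PKTripletStrategySwitcher(strategies=[(1000, ('max', 'min')),
                                                     (5000, (0.5, 0.5))],
                                         target=sampler)
    # register the hook with a detectron2 trainer, e.g. trainer.register_hooks([switcher])
    return switcher
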
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/data/samplers.py
from typing import Callable, List, Union
import detectron2.data.transforms as T_
from detectron2.config import CfgNode
from fvcore.transforms.transform import Transform
from timm.data.auto_augment import rand_augment_transform
from timm.data.random_erasing import RandomErasing
from detectron2.data import detection_utils as utils
from PIL import Image
import numpy as np
ALL_AUGS_KEYS = [
'RANDOM_FLIP',
'CROP',
'RANDOM_LIGHTING',
'RANDOM_BRIGHTNESS',
'RANDOM_SATURATION',
'RANDOM_CONTRAST',
'RAND_AUG',
'CUT_OUT'
]
class RandAugWrapper(Transform):
    def __init__(self, config_str, image_format='BGR', probs=0.5):
super().__init__()
self.image_format = image_format
self.augs = rand_augment_transform(config_str, {})
for op in self.augs.ops:
op.prob = probs
def apply_image(self, img: np.ndarray):
img = utils.convert_image_to_rgb(img, self.image_format)
return utils.convert_PIL_to_numpy(self.augs(Image.fromarray(img)), format=self.image_format)
def apply_coords(self, *args, **kwargs):
raise NotImplementedError
class RandAug(T_.Augmentation):
def __init__(self, cfg_str: str, image_format='BGR', probs: float=0.5):
self.aug = RandAugWrapper(cfg_str, image_format, probs=probs)
def get_transform(self, *args) -> Transform:
return self.aug
class RandomCutOutTransform(Transform):
    def __init__(self, pos_rel_x, pos_rel_y, rel_area, aspect_ratio, random_color=True, per_pixel=True, contain=False, const_color=(127.5, 127.5, 127.5)):
super().__init__()
self.pos_rel_x = np.atleast_1d(pos_rel_x)
self.pos_rel_y = np.atleast_1d(pos_rel_y)
self.rel_area = np.atleast_1d(rel_area)
self.aspect_ratio = np.atleast_1d(aspect_ratio)
self.orientation = np.random.choice(['vertical', 'horizontal'], size=len(self.pos_rel_x), replace=True)
        self.aspect_ratio = np.clip(self.aspect_ratio, 0., 1.)
self.random_color = random_color
self.const_color = const_color
self.per_pixel = per_pixel
self.contain = contain
def add_box(self, img: 'np.ndarray', pos_rel_x, pos_rel_y, rel_area, aspect_ratio, orientation):
h, w = img.shape[0], img.shape[1]
abs_area = rel_area * h * w
factor_h, factor_w = (1., aspect_ratio) if orientation == 'vertical' else (aspect_ratio, 1.)
len_longer_side = np.sqrt(abs_area / (factor_w * factor_h))
box_h, box_w = len_longer_side * factor_h, len_longer_side * factor_w
if self.contain:
if w - box_w < 0:
pos_abs_x = w * 0.5
else:
pos_abs_x = box_w / 2. + (w - box_w) * pos_rel_x
if h - box_h < 0:
pos_abs_y = h * 0.5
else:
pos_abs_y = box_h / 2. + (h - box_h) * pos_rel_y
else:
pos_abs_x, pos_abs_y = pos_rel_x * w, pos_rel_y * h
x_0, x_1 = np.clip([pos_abs_x - (box_w / 2.), pos_abs_x + (box_w / 2.)], 0., w).astype(int)
y_0, y_1 = np.clip([pos_abs_y - (box_h / 2.), pos_abs_y + (box_h / 2.)], 0., h).astype(int)
box = self.get_random_color(img, y_1-y_0, x_1-x_0, self.random_color, self.per_pixel)
img[slice(y_0, y_1), slice(x_0, x_1), :] = box
return img
def get_random_color(self, img, h, w, random_color=True, per_pixel=True):
dtype = img.dtype
col = np.zeros((h, w, 3), dtype=dtype)
if not random_color and not per_pixel:
if self.const_color == 'median':
const_color = np.median(img, axis=(0,1))
else:
const_color = self.const_color
col += np.array(const_color).astype(col.dtype)
elif random_color and not per_pixel:
col += (np.random.uniform(size=3) * 255.).astype(col.dtype)
else:
col += (np.random.uniform(size=col.shape) * 255.).astype(col.dtype)
return col
def apply_image(self, img: np.ndarray):
for x, y, a, r, o in zip(self.pos_rel_x,
self.pos_rel_y,
self.rel_area,
self.aspect_ratio,
self.orientation):
img = self.add_box(img, x, y, a, r, o)
return img
def apply_coords(self, *args, **kwargs):
raise NotImplementedError
class RandomCutOut(T_.Augmentation):
    def __init__(self, min_area, max_area, min_aspect_ratio, max_aspect_ratio=None, random_color=True, per_pixel=True, max_holes=1, contain=False, const_color=(127.5, 127.5, 127.5)):
self.min_area = min_area
self.max_area = max_area
self.min_aspect_ratio = min(min_aspect_ratio, 1.)
self.max_aspect_ratio = max(min(max_aspect_ratio if max_aspect_ratio else 1., 1.), self.min_aspect_ratio)
self.random_color = random_color
self.const_color = const_color
self.per_pixel = per_pixel
self.contain = contain
self.max_holes = max_holes
def get_transform(self, *args) -> Transform:
n_holes = np.random.randint(1, self.max_holes+1)
pos_rel_x = np.random.uniform(size=n_holes)
pos_rel_y = np.random.uniform(size=n_holes)
rel_area = self.min_area + np.random.uniform(size=n_holes) * (self.max_area - self.min_area)
aspect_ratio = self.min_aspect_ratio + np.random.uniform(size=n_holes) * (self.max_aspect_ratio - self.min_aspect_ratio)
return RandomCutOutTransform(
pos_rel_x=pos_rel_x,
pos_rel_y=pos_rel_y,
rel_area=rel_area,
aspect_ratio=aspect_ratio,
random_color=self.random_color,
const_color=self.const_color,
per_pixel=self.per_pixel,
contain=self.contain)
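
# Illustrative sketch (not part of the original module): applying RandomCutOut to a
# single synthetic image outside of a detectron2 augmentation pipeline.
def _example_random_cutout():
    img = np.full((240, 320, 3), 255, dtype=np.uint8)  # plain white test image
    aug = RandomCutOut(min_area=0.05, max_area=0.1,
                       min_aspect_ratio=0.5, max_aspect_ratio=1.0,
                       max_holes=2, contain=True)
    transform = aug.get_transform(img)
    return transform.apply_image(img)  # image with up to 2 rectangular cut-outs
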
def build_augmentations(cfg: CfgNode, img_format: str) -> List['T_.Transform']:
"""Build additional augmentations.
Parameters
----------
    cfg: CfgNode
        Config node containing the augmentation settings.
    img_format: str
        Image format (e.g. 'BGR') used by format-sensitive augmentations.
    Returns
    -------
    list of T_.Transform
        List of augmentations
"""
augmentations = []
if cfg.get('CROP', {}).get('ENABLED', False):
augmentations.append(T_.RandomCrop(cfg.CROP.TYPE, cfg.CROP.SIZE))
if cfg.get('RANDOM_ROTATION', {}).get('ENABLED', False):
angle = cfg.RANDOM_ROTATION.get('ANGLE', [-30., 30])
expand = cfg.RANDOM_ROTATION.get('EXPAND', True)
sample_style = cfg.RANDOM_ROTATION.get('SAMPLE_STYLE', 'range')
if sample_style is not None:
augmentations.append(T_.RandomRotation(angle, expand, sample_style=sample_style))
if cfg.get('RESIZE', {}).get('MODE', None):
if cfg.RESIZE.MODE is not None and cfg.RESIZE.MODE.upper() == 'FIXED':
if isinstance(cfg.RESIZE.FIXED_SIZE, tuple):
h, w = cfg.RESIZE.FIXED_SIZE
else:
h, w = cfg.RESIZE.FIXED_SIZE, cfg.RESIZE.FIXED_SIZE
if not (isinstance(h, int) and isinstance(w, int)):
                raise TypeError('`cfg.RESIZE.FIXED_SIZE` has to be None, int or (int, int).')
augmentations.append(T_.Resize((h, w)))
elif cfg.RESIZE.MODE is not None and cfg.RESIZE.MODE.upper() == 'SHORTEST_EDGE':
augmentations.append(T_.ResizeShortestEdge(
cfg.RESIZE.MIN_SIZE,
cfg.RESIZE.MAX_SIZE,
cfg.RESIZE.SAMPLE_STYLE))
if cfg.get('RANDOM_FLIP', None) is not None and cfg.get('RANDOM_FLIP', None) != 'none':
augmentations.append(T_.RandomFlip(horizontal=cfg.RANDOM_FLIP == "horizontal",
vertical=cfg.RANDOM_FLIP == "vertical"))
if cfg.get('RANDOM_LIGHTING', {}).get('ENABLED', False):
augmentations.append(T_.RandomLighting(cfg.RANDOM_LIGHTING.STRENGTH))
if cfg.get('RANDOM_BRIGHTNESS', {}).get('ENABLED', False):
augmentations.append(T_.RandomBrightness(*cfg.RANDOM_BRIGHTNESS.STRENGTH))
if cfg.get('RANDOM_SATURATION', {}).get('ENABLED', False):
        augmentations.append(T_.RandomSaturation(*cfg.RANDOM_SATURATION.STRENGTH))
if cfg.get('RANDOM_CONTRAST', {}).get('ENABLED', False):
augmentations.append(T_.RandomContrast(*cfg.RANDOM_CONTRAST.STRENGTH))
if cfg.get('RAND_AUG', {}).get('ENABLED', False):
augmentations.append(RandAug(cfg.RAND_AUG.CONFIG_STR, img_format, cfg.RAND_AUG.PROB))
if cfg.get('CUT_OUT', {}).get('ENABLED', False):
        aug = RandomCutOut(*sorted(cfg.CUT_OUT.get('AREA_RANGE', (0.05, 0.1))),
                           *sorted(cfg.CUT_OUT.get('ASPRECT_RATIO_RANGE', (0.5, 1.))),
                           random_color=cfg.CUT_OUT.get('RANDOM_COLOR', True),
                           const_color=cfg.CUT_OUT.get('CONST_COLOR', (127.5, 127.5, 127.5)),
                           per_pixel=cfg.CUT_OUT.get('PER_PIXEL', False),
                           contain=cfg.CUT_OUT.get('CONTAIN', True),
                           max_holes=cfg.CUT_OUT.get('MAX_HOLES', 1))
if cfg.CUT_OUT.get('BEFORE_RESIZE', False):
augmentations = [aug] + augmentations
else:
augmentations.append(aug)
return augmentations
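
# Illustrative sketch (not part of the original module): building the augmentation list
# from a minimal config. The keys mirror the ones queried in `build_augmentations`;
# the values are hypothetical.
def _example_build_augmentations():
    cfg = CfgNode({'RANDOM_FLIP': 'horizontal',
                   'RESIZE': {'MODE': 'FIXED', 'FIXED_SIZE': 224},
                   'CUT_OUT': {'ENABLED': True}})
    return build_augmentations(cfg, img_format='BGR')
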
def generate_aug_cfg_node(cfg, target_cfg_node, is_train=True):
target_cfg_node = target_cfg_node.clone()
target_cfg_node.defrost()
if cfg.INPUT.get('FIXED_IMAGE_SIZE', None):
kwargs = {'MODE': 'FIXED',
'FIXED_SIZE': cfg.INPUT.FIXED_IMAGE_SIZE}
else:
mode = 'SHORTEST_EDGE'
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
kwargs = {'MODE': mode,
'MIN_SIZE': min_size,
'MAX_SIZE': max_size,
'SAMPLE_STYLE': sample_style}
target_cfg_node.RESIZE = CfgNode(kwargs)
return target_cfg_node
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/data/augs.py
import glob
import pathlib
import logging
from typing import Callable, List, Tuple, Union, Dict, Iterable
from collections.abc import Iterable as IsIterable
import numpy as np
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
from .utils import split_image_folder, check_extensions, find_files
__all__ = ["register",
"split_and_register"]
logger = logging.getLogger(__name__)
def build_image_dict(filenames: Union[str, pathlib.Path, List[str], Tuple[str]],
class_names: Union[List[str], None] = None,
labels: Union[Iterable[str], None] = None,
raise_on_missing: bool = True,
extract_class_func: Union[Callable, None] = lambda f: pathlib.Path(f).parent.name,
append_missing_classes: bool = False):
"""Function that will be registered in the dataset catalog to return the
lightweight version of the dataset.
Normally this function is used only through `register` or `split_and_register`.
Make sure to set remove_dir=False and remove_ext=False when generating the splits to feed into here.
Parameters
----------
filenames: str or pathlib.Path or Iterable[str, pathlib.Path]
Names of the files for this dataset.
If single str or pathlib.Path this has to be the path to a file containing the actual filenames.
If iterable the iterable has to contain the actual filenames.
So every filename needs to correspond to an image with the provided path on the filesystem.
Behavior on missing files can be controlled through `raise_on_missing`.
    class_names: List[str] or None
        List of class names; either explicitly provided or determined from the data if None.
    labels: Iterable[str] or None, optional, default=None
        Explicit labels for the examples. If None the label of each example is derived
        with `extract_class_func`.
    extract_class_func: Callable or None, optional, default=lambda f: pathlib.Path(f).parent.name
        Function used to derive the label from the file path. Only used when `labels` is None.
    raise_on_missing: bool, optional, default=True
        Whether an exception should be raised when an image file or annotation file can not be found.
        If `False` missing files will be ignored.
    append_missing_classes: bool
        Completes missing classes when class names are explicitly provided.
        This can be useful when you want to create a consistent label mapping for multiple different datasets.
Returns
----------
List of dicts
A list of dicts with a dict for each example.
Raises
------
FileNotFoundError
If either an xml or image file can not be found.
"""
    if isinstance(filenames, IsIterable) and not isinstance(filenames, (str, pathlib.Path)):
fileids = np.array(filenames, dtype=str)
else:
filenames = pathlib.Path(filenames).resolve()
with PathManager.open(filenames) as f:
fileids = np.loadtxt(f, dtype=str)
if class_names is None:
class_names = []
items = []
for i, fileid in enumerate(fileids):
path = pathlib.Path(fileid)
if path.exists():
if labels is None:
label = extract_class_func(path)
else:
label = labels[i]
try:
class_id = class_names.index(label)
except ValueError:
if append_missing_classes:
class_id = len(class_names)
class_names.append(label)
else:
raise ValueError(f'Got example of class `{label}`. Add to `class_names` or set append_missing_classes `True`.')
item = {
'file_name': str(path),
'label': label,
'class_id': class_id
}
items.append(item)
elif raise_on_missing:
raise FileNotFoundError(path)
return items
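
# Illustrative sketch (not part of the original module): building the lightweight
# dataset dicts directly from a list of hypothetical image paths, with labels
# derived from the parent folder names.
def _example_build_image_dict():
    files = ['data/cat/img1.jpg', 'data/dog/img2.jpg']
    return build_image_dict(files, raise_on_missing=False, append_missing_classes=True)
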
def _parse_file_names_txt(input_):
if isinstance(input_, str) or isinstance(input_, pathlib.Path):
input_ = [input_]
files_names = []
for i in input_:
with pathlib.Path(i).open() as stream:
files_names.extend([l.strip() for l in stream.readlines()])
return files_names
def register(name: str,
base_dir: Union[str, pathlib.Path],
filenames: Union[List[str], List[pathlib.Path]] = None,
labels: List[str] = None,
extensions: Union[str, Iterable[str]] = None,
class_names: Union[List[str], None] = None,
extract_class_func: Union[Callable, None] = lambda f: pathlib.Path(f).parent.name,
append_missing_classes: bool=True,
**additional_dataset_infos):
"""Register a dataset in the detectron2 DatasetCatalog and MetadataCatalog.
After registration the dataset can be referenced in the cfg by simply providing
the `name`.
Parameters
----------
name: str
Name under which the dataset will be registered. Has to be unique.
base_dir: str or pathlib.Path
Path to the directory containing the dataset. Expects to contain subfolders corresponding to each class and
each of those subfolders has to contain the relevant images.
filenames: List[str] or List[pathlib.Path], optional
If not provided will parse base_dir for image files and use all of them.
If provided will simply use the provided filenames instead.
    extensions: str or Iterable[str], optional, default=['*.jpg', '*.jpeg']
        Valid extensions for images. Only used when `filenames` is not provided
        and `base_dir` is scanned for images.
class_names: List[str] or None
List of class names, if provided, otherwise determined automatically from data.
extract_class_func: Callable or None, optional, default=lambda f: pathlib.Path(f).parent.name
        Function used to label the image. The callable is called with the path of the image
and is expected to return a str. Default is to use the name of the parent folder as
the label.
append_missing_classes: bool
        Completes missing classes when class names are explicitly provided.
This can be useful when you want to create consistent label mapping for multiple different datasets.
    **additional_dataset_infos: str
        All additional keyword arguments are considered to be metadata and
        will be registered in the metadata catalog. For all datasets the
        names of the classes and the base directory are stored.
Returns
----------
List of str
List of images in the dataset
List of str
Used class names.
Examples
--------
    Register an image classification dataset with images grouped in class subfolders.
    >>> dataset_path = pathlib.Path('pets')
    >>> register('my_dataset',
                 base_dir=dataset_path,
                 extensions=['*.jpg'])
    ([PosixPath('pets/cat/img1.jpg'), ...], ['cat', 'dog', ...])
"""
if labels is None and extract_class_func is None:
raise ValueError('Either `labels` or `extract_class_func` has to be provided.' )
if filenames is None: # Only base_dir specified, in this case the base_dir is scanned for images
if extensions is None:
extensions = ['*.jpg', '*.jpeg']
extensions = check_extensions(extensions)
filenames = find_files(base_dir, extensions, recursive=True)
labels = None
else:
# Try to figure out what is provided as filenames.
# Possiblities are:
# - paths of images
# - 1 or multiple txt-files containing image paths
        if isinstance(filenames, (str, pathlib.Path)): # Single file -> has to be a file containing image paths
list_files_as_input = True
elif isinstance(filenames, IsIterable):
list_files_as_input = False
filenames = [*filenames]
if len(filenames) == 0:
raise ValueError('Got empty list for `filenames`. Reasons could be that the base_dir does '
'not contain any images matching the any of the extensions or the register function '
'was called with an empty list for `filenames`.')
elif pathlib.Path(filenames[0]).suffix == '.txt':
list_files_as_input = True
if list_files_as_input:
filenames = _parse_file_names_txt(filenames)
labels = None
else:
# Since the paths of the images are directly provided to the function
# nothing has to be done for the image paths
if labels is not None:
labels = [*labels]
if len(labels) != len(filenames):
raise ValueError('`labels` and `filenames` have to be of the same length.')
if labels is None:
labels = [extract_class_func(p) for p in filenames]
if class_names is None or len(class_names) == 0:
class_names = sorted(set(labels))
elif append_missing_classes:
new_class_names = set(labels).difference(set(class_names))
class_names.extend([*new_class_names])
DatasetCatalog.register(name, lambda: build_image_dict(filenames, class_names=class_names, labels=labels, extract_class_func=extract_class_func, append_missing_classes=False))
MetadataCatalog.get(name).set(classes=list(class_names),
base_dir=str(base_dir),
**additional_dataset_infos)
return filenames, class_names
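
# Illustrative sketch (not part of the original module): registering an image
# classification dataset laid out as <base_dir>/<class_name>/<image>.jpg.
# The dataset name and directory are hypothetical.
def _example_register():
    filenames, class_names = register('my_classification_train',
                                      base_dir='data/train',
                                      extensions=['*.jpg', '*.jpeg'])
    # class names are derived from the parent folder names of the images
    return filenames, class_names
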
def split_and_register(basename: Union[str, None],
base_dir: Union[str, pathlib.Path],
splits: Dict[str, float],
rnd_gen: Union[int, None] = 1337,
extensions: Union[str, List[str]] = None,
class_names: Union[List[str], None] = None,
extract_class_func: Union[Callable, None] = lambda f: pathlib.Path(f).parent.name,
stratified: bool = True,
**additional_dataset_infos):
"""Register a dataset in the detectron2 DatasetCatalog and MetadataCatalog.
After registration the dataset can be referenced in the cfg by simply providing
the `name`.
Parameters
----------
basename: str
All datasets will be registered as `<basename>_<split>`
base_dir: str or pathlib.Path
Path to the directory containing the dataset. Expects to contain subfolders corresponding to each class and
each of those subfolders has to contain the relevant images.
splits: dict(str, float)
Dictionary with the names of the splits and their size.
Check `sap_computer_visison.datasets.utils.split` for more details.
rnd_gen: int, np.random.Generator or None, optional, default=None
Random seed or np.random.Generator for the splits. If None no specific seed will be used. This results
in unreproducible splits!
    extensions: str or Iterable[str], optional, default=['*.jpg', '*.jpeg']
        Valid extensions for images used when scanning `base_dir`.
        See `find_files` for more details.
class_names: List[str] or None
List of class names, if provided, otherwise determined automatically from data.
    **additional_dataset_infos: str
        All additional keyword arguments are considered to be metadata for all splits and
        will be registered in the metadata catalog for each split. For all datasets the
        names of the classes, the base directory and the name of the
        split are stored.
Returns
----------
dict(str, List[str])
Name and list of filenames for each dataset
List of str
Used class names.
Examples
--------
    Split data for image classification and register all splits.
    >>> dataset_path = pathlib.Path('data')
    >>> split_and_register('my_dataset',
                           base_dir=dataset_path,
                           splits={'train': 0.7, 'val': 0.15, 'test': 0.15})
    ({'my_dataset_train': ['img1', 'img3', ...],
      'my_dataset_test': ['img2', 'img6', ...],
      'my_dataset_val': ['img4', 'img9', ...]}, ['cat', 'dog', ...])
"""
if extensions is None:
extensions = ['*.jpg', '*.jpeg']
base_dir = pathlib.Path(base_dir)
splits = split_image_folder(
input_dir=pathlib.Path(base_dir),
splits=splits,
extensions=extensions,
extract_class_func=extract_class_func if stratified else None,
rnd_gen=rnd_gen
)
registered_datasets = {}
if class_names is None:
class_names = sorted(set([extract_class_func(p) for split in splits.values() for p in split]))
for split, fileids in splits.items():
name = f'{basename}_{split}' if basename and basename != '' else split
_, class_names_split = register(name=name,
base_dir=base_dir,
filenames=fileids,
split=split,
extract_class_func=extract_class_func,
class_names=class_names,
                                        **additional_dataset_infos)
if len(set(class_names).difference(class_names_split)) > 0:
logger.warning(f"Split `{name}` is missing classes: {', '.join(set(class_names).difference(class_names_split))}")
registered_datasets[name] = fileids
return registered_datasets, class_names
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/datasets/image_folder.py
import copy
import pathlib
import re
from typing import Callable, List, Union, Dict, Iterable
import numpy as np
__all__ = ["split_images_dirs",
"split_image_folder",
"split_image_lists",
"find_files",
"generate_class_ids",
"check_extensions"]
def find_files(input_dir: Union[str, pathlib.Path], extensions: Union[str, List[str]] = None, recursive=False):
"""Find files with given extensions
Parameters
----------
input_dir : str or pathlib.Path
Directory to be searched in.
extensions : str or List[str], optional, default=['*.jpg', '*.jpeg']
List of acceptable extensions.
    recursive : bool, optional, default=False
Whether subdirectories should be included.
Returns
----------
List[pathlib.Path]
List of files
Raises
----------
ValueError
        If an extension is not in the expected format.
"""
if extensions is None:
extensions = ['*.jpg', '*.jpeg']
input_dir = pathlib.Path(input_dir)
images = []
extensions = check_extensions(extensions)
for ext in extensions:
images.extend([*input_dir.glob(f'**/{ext}' if recursive else ext)])
return images
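
# Illustrative sketch (not part of the original module): collecting all jpg and png
# files below a hypothetical directory, including subdirectories.
def _example_find_files():
    return find_files('data/images', extensions=['*.jpg', '*.png'], recursive=True)
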
def generate_class_ids(images, extract_class_func=lambda p: pathlib.Path(p).parent.name, class_names=None, append_missing_classes=True):
"""Check and normalize the file type extensions.
Parameters
----------
images : Iterable[str, pathlib.Path]
Iterable containing list of images.
extract_class_func : callable, optional
Callable accepting the entries of `images` and returning the class label as str.
    class_names : list[str], optional, default=None
        List of class names.
    append_missing_classes : bool, optional, default=True
        Whether unexpected class labels should be added.
        TODO: consider removing; either provide an explicit list to be strict or an empty list to be non-strict.
Returns
----------
List[int]
List of class_ids same length as images
List[str]
Full list of used class_names.
Raises
----------
ValueError
If class label is not in `class_names` and `append_missing_classes=False`.
"""
if class_names is None:
class_names = []
class_ids = []
class_names = [*class_names]
for i in images:
label = extract_class_func(i)
try:
class_id = class_names.index(label)
except ValueError:
if append_missing_classes:
class_id = len(class_names)
class_names.append(label)
else:
                raise ValueError(f'Class `{label}` not in `class_names`. Provide a complete list '
                                 'of class names or set `append_missing_classes` to True.')
class_ids.append(class_id)
return class_ids, class_names
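
# Illustrative sketch (not part of the original module): deriving class ids from the
# parent folder names for a handful of hypothetical image paths.
def _example_generate_class_ids():
    images = ['data/cat/img1.jpg', 'data/dog/img2.jpg', 'data/cat/img3.jpg']
    class_ids, class_names = generate_class_ids(images)
    # class_ids -> [0, 1, 0]; class_names -> ['cat', 'dog']
    return class_ids, class_names
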
def check_extensions(extensions: Iterable[str]):
"""Check and normalize the file type extensions.
Parameters
----------
extensions : Iterable[str]
        List of extensions. Acceptable formats are '.{file_type}' and '*.{file_type}'.
Returns
----------
List[str]
List of file extensions [*.{file_type}]
Raises
----------
ValueError
If extension is not in the expected format.
"""
    r = re.compile(r'^.*\*\.([a-zA-Z0-9.]*)$')
checked_extensions = []
if isinstance(extensions, str):
extensions = [extensions]
for ext in extensions:
if not r.match(ext):
ext_new = f'*{ext}'
if r.match(ext_new):
checked_extensions.append(ext_new)
else:
                raise ValueError(f'The extension `{ext}` is not valid. Please provide '
                                 'extensions starting with `*` and containing only the file type.')
else:
checked_extensions.append(ext)
return checked_extensions
def split_images_dirs(input_dirs: Iterable[Union[str, pathlib.Path]],
splits: Dict[str, float],
output_dir: Union[str, pathlib.Path, None]=None,
extensions: Union[str, List[str]] = None,
remove_ext: bool = False,
remove_dir: bool = False,
rnd_gen: Union[int, None] = None):
"""Collect and split files with specific extensions for multiple folders.
    Each folder is split individually and the splits are merged at the end. This function can be used
to do stratified sampling for image classification cases if images of the same class are grouped
in subfolders.
Parameters
----------
input_dirs : Iterable[str, pathlib.Path]
List of directory used to search for files. In this directory a glob for each given extensions is
executed to select all files.
splits: dict(str, float)
        Dict naming the splits and providing split sizes. If the sum of sizes is <= 1 the numbers are directly used
        as the ratio. If the sum of sizes is < 1 a single split can have `size=None`. This split will consist of
        all remaining files.
        If the sum of sizes is > 1 the ratio of images for each split is `size/sum(sizes)`.
output_dir : str or pathlib.Path or None, optional, default=None
        Directory in which `<split>.txt` files with the list of files per split will be created.
If None no files will be created.
extensions: str or Iterable[str], optional, default = ['*.jpg', '*.jpeg']
All file extensions used in the globs.
remove_ext: bool, optional, default=False
        Remove file extensions from the file lists. This can be useful for splitting files for object detection
datasets in pascal voc style (.jpg + .xml).
remove_dir: bool, optional, default=False
Remove all directories from the file paths.
rnd_gen: int, np.random.Generator or None, optional, default=None
Random seed or np.random.Generator for the splits. If None no specific seed will be used. This results
in unreproducible splits!
Returns
----------
dict(str, List[pathlib.Path]) or dict(pathlib.Path, List[pathlib.Path])
Dict containing a list of files for each split.
If `output_dir` is None the keys of the dict are the keys from `splits`.
        If `output_dir` is not None the keys of the dict are `<output_dir>/<split_name>.txt`,
        the paths of the txt-files containing the names of the files for each split.
Examples
--------
Split data for image classification. Images for every class are in separate
subfolders: data/class1, data/class2, ...
    >>> dataset_path = pathlib.Path('data')
    >>> split_images_dirs(glob.glob(f'{dataset_path}/*/'),
                          {'train': 0.7, 'val': 0.15, 'test': 0.15},
                          remove_dir=False,
                          remove_ext=False)
{'train': ['data/class1/img1.jpg', 'data/class2/img231.jpg', ...],
'val': ['data/class1/img7.jpg', 'data/class2/img21.jpg', ...],
'test': ['data/class1/img9.jpg', 'data/class2/img3.jpg', ...]}
"""
if extensions is None:
extensions = ['*.jpg', '*.jpeg']
splits_images = {}
for input_dir in input_dirs:
splits_i = split_image_folder(input_dir=input_dir,
splits=splits,
extensions=extensions,
remove_ext=remove_ext,
remove_dir=remove_dir,
rnd_gen=rnd_gen)
for split, images_split_i in splits_i.items():
images_split = splits_images.setdefault(split, [])
images_split.extend(images_split_i)
if output_dir:
output_dir = pathlib.Path(output_dir).resolve()
output_files = {}
for name, images_split in splits_images.items():
output_i = output_dir / f'{name}.txt'
with output_i.open('w') as stream:
stream.write('\n'.join(str(i) for i in images_split))
output_files[output_i] = images_split
return output_files
else:
return splits_images
def split_image_folder(input_dir: Union[str, pathlib.Path],
splits: Dict[str, float],
output_dir: Union[str, pathlib.Path, None]=None,
extensions: Union[str, Iterable[str]] = None,
remove_ext: bool = False,
remove_dir: bool = False,
extract_class_func: Union[None, Callable] = None,
rnd_gen: Union[int, None, 'np.random.Generator'] = None):
"""Collect and split files with specific extensions.
This function can be used to split files from a directory into arbitrary splits.
Parameters
----------
input_dir : str or pathlib.Path
Directory used to search for files. In this directory a glob for each given extensions is
executed to select all files.
splits: dict(str, float)
        Dict naming the splits and providing split sizes. If the sum of sizes is <= 1 the numbers are directly used
        as the ratio. If the sum of sizes is < 1 a single split can have `size=None`. This split will consist of
        all remaining files.
        If the sum of sizes is > 1 the ratio of images for each split is `size/sum(sizes)`.
output_dir : str or pathlib.Path or None, optional, default=None
        Directory in which `<split>.txt` files with the list of files per split will be created.
If None no files will be created.
extensions: str or Iterable[str], optional, default = ['*.jpg', '*.jpeg']
All file extensions used in the globs.
remove_ext: bool, optional, default=False
        Remove file extensions from the file lists. This can be useful for splitting files for object detection
datasets in pascal voc style (.jpg + .xml).
remove_dir: bool, optional, default=False
Remove all directories from the file paths.
extract_class_func: None or callable
        Callable returning the class label. It is called for each file with the path as an argument.
        If a callable is provided the split will be done for each class individually to achieve
        stratified sampling.
rnd_gen: int, np.random.Generator or None, optional, default=None
Random seed or np.random.Generator for the splits. If None no specific seed will be used. This results
in unreproducible splits!
Returns
----------
dict(str, List[pathlib.Path]) or dict(pathlib.Path, List[pathlib.Path])
Dict containing a list of files for each split.
If `output_dir` is None the keys of the dict are the keys from `splits`.
        If `output_dir` is not None the keys of the dict are `<output_dir>/<split_name>.txt`,
        the paths of the txt-files containing the names of the files for each split.
Examples
--------
Split data for image classification. Images for every class are in separate
subfolders: data/class1, data/class2, ...
To include files from subfolders use extensions like '**/*.jpg' instead of '*.jpg'.
>>> dataset_path = pathlib.Path('data')
    >>> split_image_folder(dataset_path,
                           {'train': 0.7, 'val': 0.15, 'test': 0.15},
                           extensions=['**/*.jpg', '**/*.jpeg'],
                           extract_class_func=lambda f: f.parent.name)
{'train': ['data/class1/img1.jpg', 'data/class2/img231.jpg', ...],
'val': ['data/class1/img7.jpg', 'data/class2/img21.jpg', ...],
'test': ['data/class1/img9.jpg', 'data/class2/img3.jpg', ...]}
"""
if extensions is None:
extensions = ['*.jpg', '*.jpeg']
splits = copy.deepcopy(splits)
sum_sizes = sum(v for v in splits.values() if v)
got_remaining_split = [v is None for v in splits.values()]
if sum(got_remaining_split) == 0:
got_remaining_split = False
elif sum(got_remaining_split) == 1:
got_remaining_split = True
else:
        raise ValueError('Only one split can have size=None!')
if got_remaining_split and sum_sizes > 1:
raise ValueError('Sum of `size` for all splits > 1, '
'therefore it is treated as relative size definitions. '
'With relative sizes size=None is not allowed')
start = 0.
if sum_sizes > 1:
quotient = sum_sizes
else:
quotient = 1.
split_name_remaining = None
for n, v in splits.items():
if v is not None:
len_split = v / quotient
splits[n] = (start, start+len_split)
start += len_split
else:
split_name_remaining = n
if split_name_remaining:
splits[split_name_remaining] = (start, 1.0)
if not isinstance(rnd_gen, np.random.Generator):
rnd_gen = np.random.default_rng(rnd_gen)
input_dir = pathlib.Path(input_dir)
if output_dir:
output_dir = pathlib.Path(output_dir).resolve()
images = find_files(input_dir, extensions=extensions, recursive=True)
if callable(extract_class_func):
classes = np.array([extract_class_func(img) for img in images])
else:
classes = None
split_files = split_image_lists(images, classes, splits, rnd_gen)
output_files = {}
for n, imgs in split_files.items():
if remove_ext:
imgs = [i.with_suffix('') for i in imgs]
if remove_dir:
imgs = [i.name for i in imgs]
if output_dir:
output_i = output_dir / f'{n}.txt'
with output_i.open('w') as stream:
stream.write('\n'.join(str(i) for i in imgs))
else:
output_i = n
output_files[output_i] = imgs
return output_files
def split_image_lists(images: Iterable[Union[str, pathlib.Path]],
classes: Union[Iterable[int], None],
splits: Dict[str, float],
rnd_gen: Union[int, None, 'np.random.Generator'] = None):
"""Split list of images.
    This function can be used to split a list of images. When class ids are provided
    a stratified split is performed.
Parameters
----------
images : List/array of str or pathlib.Path
Array of image paths.
classes: List/array of int
Array of class ids.
splits: dict(str, float)
Dict naming the splits and providing split sizes. If sum of sizes <= 1. the numbers are directly used
as the ratio. If sum of sizes < 1 a single split can have `size=None`. This splits will consists of
all remaining files.
If sum of sizes > 1 the ratio of images for each split is `size/sum(sizes)`.
rnd_gen: int, np.random.Generator or None, optional, default=None
Random seed or np.random.Generator for the splits. If None no specific seed will be used. This results
in unreproducible splits!
Returns
----------
    dict(str, List)
        Dict containing a list of files for each split, keyed by the names from `splits`.
Examples
--------
Split data for image classification. Images for every class are in separate
subfolders: data/class1, data/class2, ...
    >>> dataset_path = pathlib.Path('data')
    >>> images = find_files(dataset_path, recursive=True)
    >>> classes, class_names = generate_class_ids(images, lambda f: pathlib.Path(f).parent.name)
    >>> split_image_lists(images,
                          classes,
                          {'train': 0.7, 'val': 0.15, 'test': 0.15})
{'train': ['data/class1/img1.jpg', 'data/class2/img231.jpg', ...],
'val': ['data/class1/img7.jpg', 'data/class2/img21.jpg', ...],
'test': ['data/class1/img9.jpg', 'data/class2/img3.jpg', ...]}
"""
classes = np.asarray(classes) if classes is not None else None
images = np.asarray(images)
if not isinstance(rnd_gen, np.random.Generator):
rnd_gen = np.random.default_rng(rnd_gen)
if classes is not None:
split_files = {}
for c in np.unique(classes):
images_c = np.where(classes == c)[0]
rnd_gen.shuffle(images_c)
eps = 1/(len(images_c) + 1)
split_files_i = {}
prev_limit = None
for n, (s, e) in splits.items():
split_files_i[n] = slice(prev_limit, int(np.round(e*len(images_c))) if e + eps < 1 else None)
prev_limit = split_files_i[n].stop
for n, s in split_files_i.items():
images_s = split_files.setdefault(n, [])
indices = np.sort(images_c[s])
images_s.extend([images[idx] for idx in indices])
else:
rnd_gen.shuffle(images)
eps = 1/(len(images) + 1)
split_files = {}
prev_limit = None
for n, (s, e) in splits.items():
split_files[n] = slice(prev_limit, int(np.round(e*len(images))) if e + eps < 1 else None)
prev_limit = split_files[n].stop
split_files = {n: images[s] for n, s in split_files.items()}
return split_files
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/datasets/utils.py
"""Functions to register object detection datasets, that are in a pascal voc style
"""
import pathlib
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union, Dict, Iterable, Any
from collections.abc import Iterable as IsIterable
import numpy as np
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from .utils import split_image_folder, find_files, check_extensions
__all__ = ["register",
"split_and_register",
"get_classnames_from_annotation_files",
"get_classnames_from_filenames_files"]
def find_folders(base_path: Union[str, pathlib.Path],
images_names: Union[None, Iterable[str]]=None,
annotations_names: Union[None, Iterable[str]]=None) -> Tuple[pathlib.Path, pathlib.Path]:
"""Small helper functions to find subfolders for images and
annoation in Pascal VOC datasets.
Parameters
----------
base_path: str or pathlib.Path
Folder to search in
    images_names: Iterable of str or None, optional, default=None
        List of potential folder names containing the images.
        If None the default list ['Images', 'images', 'jpegs', 'jpgs']
        is used.
    annotations_names: Iterable of str or None, optional, default=None
        List of potential folder names containing the annotation xmls.
        If None the default list ['Annotations', 'annotations', 'annotation',
        'Annotation', 'xml', 'xmls'] is used.
Returns
-------
pathlib.Path
Image folder
pathlib.Path
Annotation folder
"""
if images_names is None:
images_names = ['Images', 'images', 'jpegs', 'jpgs']
if annotations_names is None:
annotations_names = ['Annotations', 'annotations', 'annotation', 'Annotation', 'xml', 'xmls']
if len(set(images_names).intersection(set(annotations_names))) != 0:
raise ValueError(f'Names allowed for the image and annotation folder are overlapping! {set(images_names).intersection(set(annotations_names))}')
base_path = pathlib.Path(base_path)
for p in images_names:
image_folder = base_path / p
if image_folder.exists() and image_folder.is_dir():
break
else:
raise ValueError('No image folder found!')
for p in annotations_names:
annotation_folder = base_path / p
if annotation_folder.exists() and annotation_folder.is_dir():
break
else:
raise ValueError('No annotation folder found!')
return image_folder, annotation_folder
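
# Illustrative sketch (not part of the original module): locating the image and
# annotation subfolders of a hypothetical Pascal-VOC-style dataset directory.
def _example_find_folders():
    image_folder, annotation_folder = find_folders('datasets/my_voc_dataset')
    return image_folder, annotation_folder
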
def get_annotations_from_xml(annotation_file: Union[str, pathlib.Path, 'ET.ElementTree'],
class_names: Union[List[str], None] = None,
box_mode: 'BoxMode'=BoxMode.XYXY_ABS) -> Dict[str, Any]:
"""Parse annotations from xml in Pascal VOC format.
Parameters
----------
annotation_file: str/pathlib.Path or ET.ElementTree
Path of the annotation file or already parsed ET.ElementTree
class_names: List[str] or None
List of class names; either explicitly provided or determined from data if None.
box_mode: detectron2.structures.BoxMode
Format of the boxes in the annotations.
Returns
-------
    dict
        Dict containing the parsed annotations and the image size.
"""
if not isinstance(annotation_file, ET.ElementTree):
annotation_file = pathlib.Path(annotation_file)
with annotation_file.open() as f:
tree = ET.parse(f)
else:
tree = annotation_file
r = {
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
bbox = obj.find("bndbox")
if box_mode == BoxMode.XYXY_ABS:
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
else:
            raise NotImplementedError('Currently only `BoxMode.XYXY_ABS` implemented!')
bbox[0] -= 1.0
bbox[1] -= 1.0
        try:
            difficult = bool(int(obj.find("difficult").text))
        except (AttributeError, TypeError, ValueError):
            difficult = False
        try:
            truncated = bool(int(obj.find("truncated").text))
        except (AttributeError, TypeError, ValueError):
            truncated = False
        try:
            pose = obj.find("pose").text
        except AttributeError:
            pose = "Unspecified"
instances.append({"category_id": class_names.index(cls) if class_names else cls,
"bbox": bbox,
"bbox_mode": box_mode,
"difficult": difficult,
"truncated": truncated,
"pose": pose})
r["annotations"] = instances
return r
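
# Illustrative sketch (not part of the original module): parsing a single
# Pascal-VOC-style annotation file. The file path and class list are hypothetical.
def _example_get_annotations_from_xml():
    record = get_annotations_from_xml('datasets/my_voc_dataset/Annotations/img_0001.xml',
                                      class_names=['cat', 'dog'])
    # record contains 'height', 'width' and the instance dicts under 'annotations'
    return record
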
def load_voc_like_instances(img_dir: Union[str, pathlib.Path],
xml_dir: Union[str, pathlib.Path],
filenames: Union[str, pathlib.Path, List[str], Tuple[str]],
class_names: Union[List[str], Tuple[str, ...]],
img_extensions: Union[str, List[str]]='.jpg',
box_mode: Union[str, BoxMode]=BoxMode.XYXY_ABS,
filename_with_extension: bool=False,
raise_on_missing: bool=True) -> Dict[str, Any]:
"""Function that will be registered in the dataset catalog to return the
lightweight version of the dataset.
Normally this function is used only through `register` or `split_and_register`.
Parameters
----------
img_dir: str or pathlib.Path
Path to the directory containing the images.
xml_dir: str or pathlib.Path
Path to the directory containing the annotation xmls.
filenames: str or pathlib.Path or Iterable[str, pathlib.Path]
Names of the files for this dataset.
If single str or pathlib.Path this has to be the path to a file containing the actual filenames.
If iterable the iterable has to contain the actual filenames. During creating of the dicts
the functions is looking for an image with one of the extensions from `img_extensions`.
So every filename needs to match an image with path `<img_dir>/<filename>.<extension>` and
        an annotation file with path `<xml_dir>/<filename>.xml`. If the filenames already include the
        image file extension set `filename_with_extension=True`; `img_extensions` will then be ignored.
Behavior on missing files can be controlled through `raise_on_missing`.
    class_names: List[str] or Tuple[str]
List of all names of the classes in the training. The order of the class names is important
since internally class labels are indices of the classes in this list.
    img_extensions: str or Iterable[str], optional, default='.jpg'
        Valid extensions for images. Only used when `filename_with_extension=False`.
box_mode: detectron2.structures.BoxMode, optional, default=XYXY_ABS
Format of the boxes in the annotation files.
Default is (x1, y1, x2, y2) as absolute pixels
Check detectron2 documentation for formats available.
filename_with_extension: bool, optional, default=False
Whether filenames are with or without img extension.
raise_on_missing: bool, optional, default=True
        Whether an exception should be raised when either the image file or the annotation file
        can not be found. If `False` missing files will be ignored.
Returns
----------
List of dicts
A list of dicts with a dict for each example.
Raises
------
FileNotFoundError
If either an xml or image file can not be found.
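    Examples
    --------
    Build the dataset dicts directly (paths, filenames and class names are illustrative).
    >>> dicts = load_voc_like_instances('data/images',
    ...                                 'data/annotations',
    ...                                 ['img1', 'img2'],
    ...                                 class_names=['cat', 'dog'])
    >>> list(dicts[0].keys())
    ['height', 'width', 'annotations', 'file_name', 'image_id']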
"""
img_dir = pathlib.Path(img_dir).resolve()
annotation_dirname = pathlib.Path(xml_dir).resolve()
if isinstance(filenames, IsIterable) and not isinstance(filenames, str):
fileids = np.array(filenames, dtype=str)
else:
filenames = pathlib.Path(filenames).resolve()
with PathManager.open(filenames) as f:
fileids = np.loadtxt(f, dtype=str)
    # Needs to read many small annotation files. Makes sense to do this locally.
dicts = []
if isinstance(box_mode, str):
box_mode = BoxMode[box_mode]
if isinstance(img_extensions, str):
img_extensions = [img_extensions]
def add_img_extension(path):
if filename_with_extension:
return path
elif len(img_extensions) > 1:
for ext in img_extensions:
ext = ext.replace('*', '')
img_path = path.parent / (path.name + ext)
if img_path.exists():
return img_path
if raise_on_missing:
raise FileNotFoundError(f'Image `{path}` does not exist for any of the extensions {img_extensions}')
else:
return None
else:
img_path = path.with_suffix(img_extensions[0])
if img_path.exists():
return img_path
else:
if raise_on_missing:
raise FileNotFoundError(f'Image `{img_path}` does not exist!')
else:
return None
add_img_extension.with_extension = None
for fileid in fileids:
jpeg_file = add_img_extension(img_dir / fileid)
if not jpeg_file:
continue
anno_file = annotation_dirname / jpeg_file.with_suffix('.xml').name
annotation = get_annotations_from_xml(annotation_file=anno_file, class_names=class_names, box_mode=box_mode)
annotation["file_name"] = str(jpeg_file)
annotation["image_id"] = fileid
dicts.append(annotation)
return dicts
def split_and_register(basename: Union[str, None],
img_dir: Union[str, pathlib.Path],
xml_dir: Union[str, pathlib.Path],
splits: Dict[str, float],
class_names: Union[None, List[str], Tuple[str]]=None,
rnd_gen: Union[int, None]=1337,
extensions: Union[str, List[str]]=None,
box_mode: BoxMode=BoxMode.XYXY_ABS,
**additional_dataset_infos):
"""Register a dataset in the detectron2 DatasetCatalog and MetadataCatalog.
After registration the dataset can be referenced in the cfg by simply providing
the `name`.
Parameters
----------
basename: str
All datasets will be registered as `<basename>_<split>`
img_dir: str or pathlib.Path
Path to the directory containing the images.
xml_dir: str or pathlib.Path
Path to the directory containing the annotation xmls.
splits: dict(str, float)
Dictionary with the names of the splits and their size.
        Check `sap_computer_vision.datasets.utils.split` for more details.
class_names: None or List[str] or Tuple[str], optional, default=None
List of all names of the classes in the training. The order of the class names is important
since internally class labels are indices of the classes in this list.
If `None` class names will be extracted from the annotation files of the dataset. Beware
in some cases especially for smaller test datasets not all classes might be part of a
dataset and this will result in a mixup of labels.
    rnd_gen: int, np.random.Generator or None, optional, default=1337
Random seed or np.random.Generator for the splits. If None no specific seed will be used. This results
in unreproducible splits!
extensions: str or Iterable[str], optional, default=['.jpg', '.jpeg']
Valid extensions for images. Only used when `filename_with_extension=False`.
See `load_voc_like_instances` for more details.
box_mode: detectron2.structures.BoxMode, optional, default=XYXY_ABS
Format of the boxes in the annotation files.
Default is (x1, y1, x2, y2) as absolute pixels
Check detectron2 documentation for formats available.
**additional_dataset_infos: str
        Every additional keyword argument is considered to be metadata for all splits and
will be registered in the metadata catalog for each split. For all datasets the
names of the classes, image directory, xml directory and name of the
split are stored.
Returns
----------
dict(str, List[str])
Name and list of filenames for each dataset
List of str
Used class names.
Examples
--------
Split data for object detection and register train dataset.
>>> dataset_path = pathlib.Path('data')
    >>> split_and_register('my_dataset',
img_dir=dataset_path / 'images',
xml_dir=dataset_path / 'annotations',
splits={'train': 0.7, 'val': 0.15, 'test': 0.15})
({'my_dataset_train': ['img1', 'img3', ...],
'my_dataset_test': ['img2', 'img6', ...],
'my_dataset_val': ['img4', 'img9', ...]}, ['cat', 'dog', ...])
"""
if extensions is None:
extensions = ['*.jpg', '*.jpeg']
splits = split_image_folder(img_dir,
output_dir=None,
extensions=extensions,
splits=splits,
remove_dir=True,
remove_ext=True,
rnd_gen=rnd_gen)
if not class_names:
class_names = get_classnames_from_annotation_files(
xml_dir,
[item for sublist in splits.values() for item in sublist])
registered_datasets = {}
def create_loading_f(filenames):
def _f():
return load_voc_like_instances(img_dir, xml_dir, filenames, class_names=class_names, img_extensions=extensions, box_mode=box_mode)
return _f
for split, fileids in splits.items():
name = f'{basename}_{split}' if basename and basename != '' else split
DatasetCatalog.register(name, create_loading_f(fileids))
MetadataCatalog.get(name).set(thing_classes=list(class_names),
img_dir=str(img_dir),
xml_dir=str(xml_dir),
split=split,
box_mode=box_mode if isinstance(box_mode, BoxMode) else BoxMode[box_mode],
**additional_dataset_infos)
registered_datasets[name] = fileids
return registered_datasets, class_names
def register(name:str,
img_dir: Union[str, pathlib.Path],
xml_dir: Union[str, pathlib.Path],
filenames: Union[str, pathlib.Path, Iterable[Union[str, pathlib.Path]]]=None,
class_names: Union[None, List[str], Tuple[str]]=None,
extensions: Union[str, Iterable[str]]=None,
box_mode: BoxMode=BoxMode.XYXY_ABS,
filename_with_extension: bool=False,
append_missing_classes: bool=True,
**additional_dataset_infos):
"""Register a dataset in the detectron2 DatasetCatalog and MetadataCatalog.
After registration the dataset can be referenced in the cfg by simply providing
the `name`.
Parameters
----------
name: str
Name under which the dataset will be registered. Has to be unique
img_dir: str or pathlib.Path
Path to the directory containing the images.
xml_dir: str or pathlib.Path
Path to the directory containing the annotation xmls.
filenames: str or pathlib.Path or Iterable[str, pathlib.Path]
Names of the files for this dataset.
If single str or pathlib.Path this has to be the path to a file containing the actual filenames.
If iterable the iterable has to contain the actual filenames.
Every filename needs to match an image with path `<img_dir>/<filename>.<extension>` and
an annotation files with path `<xml_dir>/<filename>.xml`.
        If the filenames already include the image file extension, set `filename_with_extension=True`.
        Usually the filenames are only the stem.
class_names: None or List[str] or Tuple[str], optional, default=None
List of all names of the classes in the training. The order of the class names is important
since internally class labels are indices of the classes in this list.
If `None` class names will be extracted from the annotation files of the dataset. Beware
in some cases especially for smaller test datasets not all classes might be part of a
dataset and this will result in a mixup of labels.
extensions: str or Iterable[str], optional, default=['.jpg', '.jpeg']
Valid extensions for images. Only used when `filename_with_extension=False`.
See `img_extensions` from `load_voc_like_instances` for more details.
box_mode: detectron2.structures.BoxMode, optional, default=XYXY_ABS
Format of the boxes in the annotation files.
Default is (x1, y1, x2, y2) as absolute pixels
Check detectron2 documentation for formats available.
filename_with_extension: bool, optional, default=False
Whether filenames are with or without img extension.
append_missing_classes: bool
        If `True`, classes found in the annotation files but missing from `class_names` are appended.
This can be useful when you want to create consistent label mapping for multiple different datasets.
**additional_dataset_infos: str
        Every additional keyword argument is considered to be metadata and
will be registered in the metadata catalog. For all datasets the
names of the classes, image directory and xml directory are stored.
Returns
----------
List of str
List of images in the dataset
List of str
Used class names.
Examples
--------
Split data for object detection and register train dataset.
>>> dataset_path = pathlib.Path('data')
>>> splits = split_image_folder(dataset_path / 'images',
dataset_path,
extensions=['*.jpg', '*.jpeg'],
                                    splits={'train': 0.7, 'val': 0.15, 'test': 0.15},
remove_dir=True,
remove_ext=True)
>>> register('my_dataset_train',
img_dir=dataset_path / 'images',
xml_dir=dataset_path / 'annotations',
filenames=splits['train'])
    (['img1', 'img3', ...], ['cat', 'dog', ...])
"""
if extensions is None:
extensions = ['*.jpg', '*.jpeg']
extensions = check_extensions(extensions)
if filenames is None:
filenames = [f.stem for f in find_files(img_dir, extensions, recursive=False)]
if class_names is None or len(class_names) == 0 or append_missing_classes:
class_names = class_names if class_names is not None else []
if isinstance(filenames, str) or isinstance(filenames, pathlib.Path):
class_names_set = get_classnames_from_filenames_files(xml_dir, filenames)
elif isinstance(filenames, IsIterable):
filenames = [*filenames] # In case of filenames being an iterator/generator
class_names_set = sorted(get_classnames_from_annotation_files(xml_dir, filenames))
else:
            raise RuntimeError(f'Unsupported type for `filenames`: {type(filenames)}')
if append_missing_classes:
new_class_names = set(class_names_set).difference(set(class_names))
class_names.extend([*new_class_names])
else:
class_names = class_names_set
DatasetCatalog.register(name, lambda: load_voc_like_instances(img_dir, xml_dir, filenames, class_names=class_names, img_extensions=extensions, box_mode=box_mode, filename_with_extension=filename_with_extension))
MetadataCatalog.get(name).set(thing_classes=list(class_names),
img_dir=str(img_dir),
xml_dir=str(xml_dir),
box_mode=box_mode if isinstance(box_mode, BoxMode) else BoxMode[box_mode],
**additional_dataset_infos)
return filenames, class_names
def get_classnames_from_filenames_files(xml_dir: Union[str, pathlib.Path],
filenames: Union[str, pathlib.Path, Iterable[Union[str, pathlib.Path]]]):
"""Retrieve classes from annotation files provided via files.
    This function can be useful if datasets are managed as lists of filenames stored in files.
Parameters
----------
xml_dir: str or pathlib.Path
Path to the directory containing xml annotation files
filenames: str or pathlib.Path or Iterable[str, pathlib.Path]
        Path or iterable of paths of files containing the names of the annotation files.
        Annotation filenames with and without extension are valid.
        If a filename has an extension it will be replaced with `.xml`;
        otherwise `.xml` will be appended.
Returns
----------
List of str
        Sorted list of class names retrieved from the annotation xmls. Beware that internally the
        classes are handled as ids derived from the order of this list.
Examples
--------
Split data for object detection and retrieve classes from all sets.
>>> dataset_path = pathlib.Path('data')
>>> splits = split_image_folder(dataset_path / 'images',
dataset_path,
extensions=['*.jpg', '*.jpeg'],
                                    splits={'train': 0.7, 'val': 0.15, 'test': 0.15},
remove_dir=True,
remove_ext=True)
>>> get_classnames_from_filenames_files(dataset_path / 'annotations',
splits.values())
['cat', 'dog', ...]
"""
xml_dir = pathlib.Path(xml_dir)
if isinstance(filenames, pathlib.Path) or isinstance(filenames, str):
filenames = [filenames]
filenames = [str(pathlib.Path(f).resolve()) for f in filenames]
class_names = []
for f in filenames:
with PathManager.open(str(pathlib.Path(f).resolve())) as f:
fileids = np.loadtxt(f, dtype=str)
class_names.extend(get_classnames_from_annotation_files(xml_dir, fileids))
return sorted([*set(class_names)])
def get_classnames_from_annotation_files(xml_dir: Union[str, pathlib.Path],
files: Iterable[Union[str, pathlib.Path]]):
"""Retrieve classes from annotation files provided as iterable of files.
Parameters
----------
xml_dir: str or pathlib.Path
Path to the directory containing xml annotation files
files: Iterable[str, pathlib.Path]
        List of annotation files. Filenames with and without extension are valid.
        If a filename has an extension it will be replaced with `.xml`;
        otherwise `.xml` will be appended.
Returns
----------
List of str
        Sorted list of class names retrieved from the annotation xmls. Beware that internally the
        classes are handled as ids derived from the order of this list.
Examples
--------
Split data for object detection and retrieve classes from all sets.
>>> dataset_path = pathlib.Path('data')
>>> splits = split_image_folder(dataset_path / 'images',
None,
extensions=['*.jpg', '*.jpeg'],
                                    splits={'train': 0.7, 'val': 0.15, 'test': 0.15},
remove_dir=True,
remove_ext=True)
>>> get_classnames_from_annotation_files(dataset_path / 'annotations',
                                              [s for split_i in splits.values() for s in split_i])
['cat', 'dog', ...]
"""
class_names = set()
for fileid in files:
anno_file = xml_dir / pathlib.Path(fileid).with_suffix('.xml').name
if not anno_file.exists():
anno_file = xml_dir / (pathlib.Path(fileid).name + '.xml')
with anno_file.open() as f:
tree = ET.parse(f)
for obj in tree.findall("object"):
cls = obj.find("name").text
class_names.add(cls)
return sorted([*class_names])
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/datasets/pascal_voc_style.py
import pathlib
from typing import List, Union, Dict
import numpy as np
from detectron2.data import DatasetCatalog, MetadataCatalog
from sap_computer_vision.data.triplet_sampling_utils import TripletGenerator
__all__ = ["register",
"create_triplets"]
def build_triplet_dict(triplets: List[Dict[str, Union[str, pathlib.Path]]],
base_dir: Union[str, pathlib.Path, None] = None):
if base_dir is not None:
base_dir = pathlib.Path(base_dir)
items = []
for triplet in triplets:
item = {}
add_item = True
for k in ['pos', 'neg', 'anchor']:
img_path = pathlib.Path(triplet[k])
if base_dir is not None:
img_path = base_dir / img_path
if img_path.exists():
item[k] = {'file_name': str(img_path)}
else:
add_item = False
break
if add_item:
items.append(item)
return items
def register(name: str,
triplets: List[Dict[str, Union[str, pathlib.Path]]],
base_dir: Union[str, pathlib.Path, None] = None,
**additional_dataset_infos):
DatasetCatalog.register(name, lambda: build_triplet_dict(triplets, base_dir))
MetadataCatalog.get(name).set(classes=None,
base_dir=str(base_dir),
**additional_dataset_infos)
return name
def create_triplets(classes, excludes=None, size=1, rng=None, replace=False, example_vectors=None, images=None, return_triplet_dicts=False):
generator = TripletGenerator(class_ids=classes,
excludes=excludes,
rng=rng,
example_vectors=example_vectors)
a, p, n = generator.build_random_triplets(size, replace)
if images:
images = np.array(images)
a, p, n = images[a], images[p], images[n]
if return_triplet_dicts:
return [{'anchor': a_i, 'pos': p_i, 'neg': n_i} for a_i, p_i, n_i in zip(a, p, n)]
else:
return a, p, n
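# Usage sketch (class ids and file names below are purely illustrative):
#
#     triplets = create_triplets(classes=[0, 0, 1, 1, 2, 2],
#                                size=4,
#                                images=['img0.jpg', 'img1.jpg', 'img2.jpg',
#                                        'img3.jpg', 'img4.jpg', 'img5.jpg'],
#                                return_triplet_dicts=True)
#     register('my_triplet_dataset', triplets, base_dir='data/images')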
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/datasets/contrastive.py
from typing import Union, Iterable
import cv2
import numpy as np
import torch
import detectron2.data.detection_utils as utils
def visualize_similarities(image_a: Union[np.ndarray, torch.Tensor],
unpooled_a: Iterable[Union[np.ndarray, torch.Tensor]],
pooled_a: Iterable[Union[np.ndarray, torch.Tensor]],
image_b: Union[np.ndarray, torch.Tensor],
unpooled_b: Iterable[Union[np.ndarray, torch.Tensor]],
pooled_b: Iterable[Union[np.ndarray, torch.Tensor]],
pooling_type: str='average',
image_format: str='BGR',
image_channel_first: Union[str, bool]='auto',
cmap: str='magma',
aggregate: Union[str, None]='mean',
alpha: float=0.5):
"""Implementation of `Visualizing Deep Similarity Networks`https://arxiv.org/abs/1901.00536
It is a function to visualize which part of two similar images contribute the most
to the similarity. It is applicable to conv based distance metric learners using a pooling layer.
Parameters
----------
    image_a/image_b: np.ndarray or torch.Tensor
        First/Second image as an array; the format of the array can be specified with
        `image_format` and `image_channel_first`.
    unpooled_a/unpooled_b: Iterable of np.ndarray or torch.Tensor
        Unpooled feature maps (one entry per layer) of the first/second image.
    pooled_a/pooled_b: Iterable of np.ndarray or torch.Tensor
        Pooled feature vectors (one entry per layer) of the first/second image.
pooling_type: ['average', 'max'], optional, default='average'
Type of pooling layer used in the model.
image_format: str, optional, default='BGR'
One of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
image_channel_first: bool or 'auto'
Whether the color channel of the image is in the first axis of the image arrays.
If 'auto' the color channel axis is determined by np.argmin(image_arr.shape).
    cmap: str or None, optional, default='magma'
        Name of the matplotlib cmap used to create a visualization. The similarity heatmap
        is used as an overlay for the input image.
        If `None` the raw similarity values are returned.
alpha: float, optional, default=0.5
Alpha of the similarity heat map.
    aggregate: str or None, optional, default='mean'
        How the per-layer similarity maps are aggregated ('mean', 'sum' or 'prod');
        other values keep the per-layer maps.
    Returns
    ----------
    sim_ab, sim_ba: np.ndarray
        Similarity heatmaps (or raw similarity values if `cmap=None`) for `image_a` and `image_b`.
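    Examples
    --------
    Illustrative sketch; `extract_features` is a hypothetical helper returning the
    per-layer unpooled feature maps and pooled embeddings of a metric learner.
    >>> unpooled_a, pooled_a = extract_features(image_a)
    >>> unpooled_b, pooled_b = extract_features(image_b)
    >>> heat_a, heat_b = visualize_similarities(image_a, unpooled_a, pooled_a,
    ...                                         image_b, unpooled_b, pooled_b)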
"""
image_a = _image_to_rgb_numpy(image_a, image_format, image_channel_first)
image_b = _image_to_rgb_numpy(image_b, image_format, image_channel_first)
unpooled_a = [_convert_to_numpy(a) for a in unpooled_a]
pooled_a = [_convert_to_numpy(a) for a in pooled_a]
unpooled_b = [_convert_to_numpy(a) for a in unpooled_b]
pooled_b = [_convert_to_numpy(a) for a in pooled_b]
sim_ab, sim_ba = _calculate_similarities(image_a=image_a,
unpooled_a=unpooled_a,
pooled_a=pooled_a,
image_b=image_b,
unpooled_b=unpooled_b,
pooled_b=pooled_b,
pooling_type=pooling_type,
aggregate=aggregate)
if cmap is not None:
try:
from matplotlib import pyplot as plt
except ImportError:
            raise ImportError('To create a visualization from the similarities, matplotlib has to be installed')
cmap = plt.get_cmap(cmap)
min_ = np.min((np.min(sim_ab), np.min(sim_ba)))
max_ = np.max((np.max(sim_ab), np.max(sim_ba)))
        sim_ab = (sim_ab - min_) / (max_ - min_)
        sim_ba = (sim_ba - min_) / (max_ - min_)
if len(sim_ba.shape) == 3:
for i in range(len(sim_ab)):
sim_ab[i] = _apply_heatmap(image_a, sim_ab[i], alpha, cmap)
sim_ba[i] = _apply_heatmap(image_b, sim_ba[i], alpha, cmap)
else:
sim_ab = _apply_heatmap(image_a, sim_ab, alpha, cmap)
sim_ba = _apply_heatmap(image_b, sim_ba, alpha, cmap)
return sim_ab, sim_ba
def _image_to_rgb_numpy(image, image_format, channel_first='auto'):
image = _convert_to_numpy(image)
if channel_first == 'auto':
channel_first = np.argmin(image.shape) == 0
if channel_first:
image = np.moveaxis(image, 0, -1)
return utils.convert_image_to_rgb(image, image_format)
def _apply_heatmap(image, sim, alpha, cmap):
sim_c = cmap(sim)[:, :, :3]
if image.dtype == np.uint8:
sim_c *= 255
sim_c = sim_c.astype(np.uint8)
if alpha < 1:
sim_c = cv2.addWeighted(image, (1-alpha), sim_c, alpha, 0.0)
return sim_c
def _calculate_similarities(image_a,
unpooled_a,
pooled_a,
image_b,
unpooled_b,
pooled_b,
pooling_type='average',
aggregate='mean'):
h_a, w_a, _ = image_a.shape
h_b, w_b, _ = image_b.shape
joined_sim_ab = _calc_sim(unpooled_a, pooled_a, pooled_b, pooling_type=pooling_type, height=h_a, width=w_a, aggregate=aggregate)
joined_sim_ba = _calc_sim(unpooled_b, pooled_b, pooled_a, pooling_type=pooling_type, height=h_b, width=w_b, aggregate=aggregate)
return joined_sim_ab, joined_sim_ba
def _convert_to_numpy(arr):
if isinstance(arr, torch.Tensor):
arr = arr.cpu().numpy()
if not isinstance(arr, np.ndarray):
raise TypeError(f'Expected np.array or torch.Tensor, but got {type(arr)}.')
return arr
def _calc_sim(unpooled_a, pooled_a, pooled_b, pooling_type='average', height=None, width=None, aggregate='mean'):
sim_layers = []
assert len(unpooled_a) == len(pooled_a) == len(pooled_b)
for unpooled_a_i, pooled_a_i, pooled_b_i in zip(unpooled_a, pooled_a, pooled_b):
assert unpooled_a_i.shape[0] == len(pooled_a_i) == len(pooled_b_i)
if pooling_type.lower() == 'average':
normalization = unpooled_a_i.shape[1] * unpooled_a_i.shape[2] * np.linalg.norm(pooled_a_i) * np.linalg.norm(pooled_b_i)
sim_layers.append(np.einsum('ijk,i->jk', unpooled_a_i, pooled_b_i) / normalization)
elif pooling_type.lower() == 'max':
surrogate_unpooled = np.zeros_like(unpooled_a_i)
for c_idx, c_val in enumerate(pooled_a_i):
                idx_x, idx_y = np.where(unpooled_a_i[c_idx] == c_val)
surrogate_unpooled[c_idx, idx_x, idx_y] = c_val / len(idx_x)
normalization = np.linalg.norm(pooled_a_i) * np.linalg.norm(pooled_b_i)
sim_layers.append(np.einsum('ijk,i->jk', surrogate_unpooled, pooled_b_i) / normalization)
stacked = np.array([cv2.resize(s, (width, height), interpolation=cv2.INTER_NEAREST) for s in sim_layers])
if aggregate == 'mean':
stacked = np.mean(stacked, axis=0)
elif aggregate == 'sum':
stacked = np.sum(stacked, axis=0)
elif aggregate == 'prod':
stacked = np.prod(stacked, axis=0)
return stacked
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/utils/deep_similarity.py
from typing import List, Dict
class _VersionedModel:
"""VersionedModel defines versioned moedl specific configs/settings"""
def __init__(self):
self._py_pre_proc_func_import_path = []
self._py_post_proc_func_import_path = []
self._py_model_init_params = "{}"
@property
def id(self):
return self._id
@id.setter
def id(self, v: str):
self._id = v
@property
def model_type(self):
return self._model_type
@model_type.setter
def model_type(self, v: str):
self._model_type = v
@property
def model_path(self):
return self._model_path
@model_path.setter
def model_path(self, v: str):
self._model_path = v
@property
def py_model_interface_filepath(self):
return self._py_model_interface_filepath
@py_model_interface_filepath.setter
def py_model_interface_filepath(self, v: str):
self._py_model_interface_filepath = v
@property
def py_model_interface_class_name(self):
return self._py_model_interface_class_name
@py_model_interface_class_name.setter
def py_model_interface_class_name(self, v: str):
self._py_model_interface_class_name = v
@property
def py_whl_model_interface_import_path(self) -> str:
return self._py_whl_model_interface_import_path
@py_whl_model_interface_import_path.setter
def py_whl_model_interface_import_path(self, v: str):
self._py_whl_model_interface_import_path = v
@property
def py_pre_proc_func_import_path(self) -> List[str]:
return self._py_pre_proc_func_import_path
@py_pre_proc_func_import_path.setter
def py_pre_proc_func_import_path(self, v: List[str]):
self._py_pre_proc_func_import_path = v
@property
def py_post_proc_func_import_path(self) -> List[str]:
return self._py_post_proc_func_import_path
@py_post_proc_func_import_path.setter
def py_post_proc_func_import_path(self, v: List[str]):
self._py_post_proc_func_import_path = v
@property
def py_model_init_params(self) -> str:
return self._py_model_init_params
@py_model_init_params.setter
def py_model_init_params(self, v: str):
self._py_model_init_params = v
@property
def target_cpus(self):
return self._target_cpus
@target_cpus.setter
def target_cpus(self, v: List):
if isinstance(v, list):
self._target_cpus = v
else:
raise ValueError("provided target cpus: {} is not a valid list".format(v))
@property
def trailing_cpus(self):
return self._trailing_cpus
@trailing_cpus.setter
    def trailing_cpus(self, v: List):
if isinstance(v, list):
self._trailing_cpus = v
else:
raise ValueError("provided trailing cpus: {} is not a valid list".format(v))
@property
def niceness(self):
return self._niceness
@niceness.setter
def niceness(self, v: int):
self._niceness = v
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/cfg/versioned_model.py
import ast
import os.path
import re
import sys
import ujson
from pydoc import locate
from typing import Callable, Dict, List
from centaur._.pyvmps.cfg import constants
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.models.exceptions import (
InvalidModelException,
ModelInitFailed,
InvalidPyModelInterfaceClassException,
)
from centaur._.pyvmps.models.python.constants import ExporterVar
from centaur._.pyvmps.utils.common import abs_and_normalize_path, get_abspath
from centaur._.pyvmps.client.constants import PayloadKey
from centaur._.pyvmps.error_handling.errors import python_model_error
import importlib
from centaur._.pyvmps.tracking.tracker import tracker
from centaur._.pyvmps.tracking.constants import Tracking
_logger = get_logger(__name__)
def _validate_model_config(versioned_model_path: str, interface_class_filepath: str, interface_class_name: str) -> Dict:
"""_validate_model_config inspects given file containing implemented predict class and returns a dict
{
"interface_class_abs_dirpath": interface_base_path, # base path of the interface class
"interface_class_filename_without_ext": interface_class_filename_without_ext, # file containing interface class
"interface_class_name": interface_class_name,
"interface_func_defined": func_verbs, # list of methods found in class
"diff_interface_base_path": diff_interface_base_path, # bool indicating if the interface file is in a diff directory than the python model
}
:param versioned_model_path:
:type versioned_model_path: str
    :param interface_class_filepath:
    :type interface_class_filepath: str
    :param interface_class_name:
    :type interface_class_name: str
:rtype: dict
"""
# normalize path
interface_class_abs_filepath = abs_and_normalize_path(interface_class_filepath)
diff_interface_base_path = False
if not os.path.exists(interface_class_abs_filepath):
        # Check if file exists.
        _logger.error("File does not exist: %s", interface_class_abs_filepath)
        raise FileNotFoundError(interface_class_abs_filepath)
interface_base_path, interface = os.path.split(interface_class_abs_filepath)
if not interface.endswith(".py"):
raise InvalidPyModelInterfaceClassException(
"Python model interface class should be a '.py' file, got: {}".format(interface_class_filepath)
)
interface_class_filename_without_ext = interface.rsplit(".", 1)[0]
_logger.debug("versioned_model_path=%s, interface_base_path=%s", versioned_model_path, interface_base_path)
# TODO:
# Normalize all user passed in paths at some proper uniform place.
if versioned_model_path != interface_base_path:
try:
# If interface_class_filename_without_ext can be imported, this means there is another module
# that shares this name. In this case, we raise an Exception.
_ = importlib.import_module(interface_class_filename_without_ext)
raise InvalidPyModelInterfaceClassException(
"Python model interface class module name: {} conflicts with existing modules".format(
interface_class_filename_without_ext
)
)
except ModuleNotFoundError:
diff_interface_base_path = True
with open(interface_class_abs_filepath) as fh:
source_code = fh.read()
func_verbs = extract_func_names(source_code, interface_class_name)
return {
"interface_class_abs_dirpath": interface_base_path,
"interface_class_filename_without_ext": interface_class_filename_without_ext,
"interface_class_name": interface_class_name,
"interface_func_defined": func_verbs,
"diff_interface_base_path": diff_interface_base_path,
}
def extract_func_names(source: str, model_class: str) -> List[str]:
_logger.debug("extract_func_names,model_class=%s", model_class)
funcs: List[str] = []
module_tree = ast.parse(source)
for node in ast.iter_child_nodes(module_tree):
if isinstance(node, ast.ClassDef) and node.name == model_class:
_logger.debug("model_class %s found", model_class)
for class_node in node.body:
if isinstance(class_node, ast.FunctionDef):
func_name: str = class_node.name
if re.match(r"_.*_", func_name) or func_name == ExporterVar.initialize_func_name:
# don't use internal methods as verbs
continue
funcs.append(func_name)
_logger.info("verbs extracted=%s", funcs)
if not funcs:
raise InvalidModelException("No inference methods exposed")
return funcs
def _extract_verb_func_py_model(
versioned_model_path: str, py_model_init_params: str, validated: Dict
) -> Dict[str, Callable]:
interface_class_abs_dirpath = validated["interface_class_abs_dirpath"]
interface_class_filename_without_ext = validated["interface_class_filename_without_ext"]
interface_class_name = validated["interface_class_name"]
interface_func_defined: List[str] = validated["interface_func_defined"]
diff_interface_base_path: bool = validated["diff_interface_base_path"]
# Add versioned_model_path into system path.
if os.path.isdir(versioned_model_path) and versioned_model_path not in sys.path:
sys.path.insert(0, versioned_model_path)
# add parent dir of versioned_model_path into sys path too
parent_path = os.path.dirname(versioned_model_path)
sys.path.insert(0, parent_path)
# Append model interface class to system path if necessary.
if diff_interface_base_path and interface_class_abs_dirpath not in sys.path:
sys.path.append(interface_class_abs_dirpath)
_logger.debug("appended py model interface class dir=%s to sys.path", interface_class_abs_dirpath)
interface_class = locate(f"{interface_class_filename_without_ext}.{interface_class_name}")
_logger.debug("interface_class=%s", type(interface_class))
if not callable(interface_class):
raise Exception("Python Model Interface failed to load.")
else:
interface_obj = interface_class()
# initialize
initialize_func = getattr(interface_obj, ExporterVar.initialize_func_name, None)
if callable(initialize_func):
kwargs = ujson.loads(py_model_init_params)
if versioned_model_path:
initialize_func(get_abspath(versioned_model_path), **kwargs)
else:
initialize_func(None, **kwargs)
verb_funcs: Dict[str, Callable] = {}
for func_name in interface_func_defined:
verb_funcs[func_name] = getattr(interface_obj, func_name)
_logger.info("verb_funcs=%s", verb_funcs)
return verb_funcs
def batch_predict(data: Dict[str, Dict], verb_funcs):
errs = {}
results = {}
for req_id, payload in data.items():
verb = payload[PayloadKey.VERB]
usr_data = payload[PayloadKey.USR_DATA]
kwargs = payload[constants.KWARGS]
if verb in verb_funcs:
verb_func = verb_funcs[verb]
try:
try:
result = verb_func(usr_data, **kwargs)
except TypeError:
result = verb_func(usr_data)
results.update({req_id: result})
except Exception as e:
# Log exception here as predict error is caught here and no longer re-raised
_logger.exception("error calling python model")
errs[req_id] = python_model_error.generate_error(details=repr(e))
finally:
tracker.insert_anchor(req_id, Tracking.PYVMP_PREDICT_DONE.value)
else:
errs[req_id] = python_model_error.generate_error(details=f"'{verb}' is not supported")
tracker.insert_anchor(req_id, Tracking.PYVMP_PREDICT_DONE.value)
return results, errs
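# Illustrative input for `batch_predict` (the request id, verb and payload are made up;
# the key names come from PayloadKey/constants used above):
#
#     data = {
#         "req-1": {
#             PayloadKey.VERB: "predict",
#             PayloadKey.USR_DATA: {"instances": [[1.0, 2.0]]},
#             constants.KWARGS: {},
#         }
#     }
#     results, errs = batch_predict(data, verb_funcs)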
def extract_verb_func_py_wheel(
dotted_path: str, py_model_init_params: str, versioned_model_path: str = ""
) -> Dict[str, Callable]:
verb_funcs: Dict = {}
    try:
        imported_obj = locate(dotted_path)
    except ImportError:
        _logger.exception("Error importing dotted path module")
        raise
if isinstance(imported_obj, type):
# imported_obj is a class
interface_obj = imported_obj()
initialize_func = getattr(interface_obj, ExporterVar.initialize_func_name, None)
if callable(initialize_func):
kwargs = ujson.loads(py_model_init_params)
initialize_func(versioned_model_path, **kwargs)
interface_func_defined: List = []
for func_name in dir(interface_obj):
if re.match(r"_.*_", func_name) or func_name == ExporterVar.initialize_func_name:
# don't use internal methods as verbs
continue
interface_func_defined.append(func_name)
for func_name in interface_func_defined:
verb_funcs[func_name] = getattr(interface_obj, func_name)
_logger.info("verb_funcs=%s", verb_funcs)
return verb_funcs
else:
method_name = (
imported_obj.__qualname__ # type: ignore
) # see https://stackoverflow.com/questions/58108488/what-is-qualname-in-python
verb_funcs[method_name] = imported_obj
return verb_funcs
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/models/python/utils.py
from typing import Callable, Dict, List
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.models.base_model import Model
from centaur._.pyvmps.models.python.utils import _validate_model_config, _extract_verb_func_py_model
from centaur._.pyvmps.utils.common import abs_and_normalize_path
_logger = get_logger(__name__)
class PythonModel(Model):
"""PythonModel."""
def __init__(
self,
versioned_model_path: str,
py_model_interface_filepath: str,
py_model_interface_class_name: str,
py_model_init_params: str,
):
self.versioned_model_path = versioned_model_path
self.py_model_interface_filepath = py_model_interface_filepath
self.py_model_interface_class_name = py_model_interface_class_name
self.py_model_init_params = py_model_init_params
def set_verb_funcs(self, verb_funcs: Dict[str, Callable]):
_logger.debug("init python model")
for verb, func in verb_funcs.items():
_logger.debug("verb=%s,func=%s", verb, func)
setattr(self, verb, func)
self._log_supported_methods()
def load(self) -> bool:
"""load python model
:param versioned_model_path: path to python model
:type versioned_model_path: str
:param model_attrs: Tuple containing path to python interface class, and interface class name
:type model_attrs: Tuple[str, str]
:param model_class:
:type model_class: Type[Model]
:rtype: Tuple[Callable, Any]
"""
versioned_model_path = abs_and_normalize_path(self.versioned_model_path)
self.validated: Dict = _validate_model_config(
self.versioned_model_path, self.py_model_interface_filepath, self.py_model_interface_class_name
)
_logger.debug("Model validated by PyModelLoader")
self.verb_funcs: Dict = _extract_verb_func_py_model(
versioned_model_path, self.py_model_init_params, self.validated
)
self.set_verb_funcs(self.verb_funcs)
return super().load()
def _log_supported_methods(self):
_logger.info("PyModel|supported methods: %s", ",".join([verb for verb in self.verb_funcs.keys()]))
def predict(self, data: Dict, **kwargs):
"""Empty implementation, real method set in load()"""
def get_verbs(self) -> List[str]:
return [verb for verb in self.verb_funcs.keys()]
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/models/python/model.py
import base64
import io
from typing import Dict, List, Union, Tuple
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.models.tf.constants import (
B64_KEY,
COL_FORMAT_NESTED_LIST,
COL_FORMAT_OBJECT,
COL_FORMAT_VALUE,
ROW_FORMAT_LIST_OF_OBJECTS,
ROW_FORMAT_NESTED_LIST,
ROW_FORMAT_VALUE,
)
from centaur._.pyvmps.models.tf.types import (
RowExtractDataPrepFunc,
ColExtractDataPrepFunc,
Tf2RowExtractDataPrepFunc,
Tf2ColExtractDataPrepFunc,
RowPreprocessFunc,
ColPreprocessFunc,
HandleResFunc,
InferenceFunc,
InferenceRes,
RawPayload,
IndivSigData,
)
logger = get_logger(__file__)
def convert_value_to_list(output: Dict) -> Dict:
return {k: v.tolist() for k, v in output.items()}
def output_to_row_format_json(output_to_format: Dict) -> Union[Dict, List]:
# only affects http rest calls
output = output_to_format.copy()
processed_output = convert_value_to_list(output)
num_outputs = len(list(output.keys()))
num_inputs = len(list(output.values())[0])
if num_outputs == 1:
result = list(processed_output.values())[0]
else:
response: List[Dict] = []
for i in range(num_inputs):
temp_dict: Dict = {}
for k, v in processed_output.items():
temp_dict[k] = v[i]
response.append(temp_dict)
result = response
return result
def output_to_col_format_json(output: Dict) -> Dict:
# TODO: Temp fix. Perform single key conversion in HTTPServerUDSMessage
processed_output = convert_value_to_list(output)
return processed_output
def handle_col_format_single_key(output: Dict):
num_outputs = len(list(output.keys()))
if num_outputs == 1:
return list(output.values())[0]
return output
def handle_row_format_single_key(output: Dict):
num_outputs = len(list(output.keys()))
if num_outputs == 1:
return list(output.values())[0]
return output
Base64Str = str
def is_base64(value: str) -> bool:
try:
return base64.b64encode(base64.b64decode(value)).decode("utf-8") == value
except Exception:
logger.exception("error")
return False
def convert_b64_to_byte_array(value: Base64Str) -> bytes:
return io.BytesIO(base64.b64decode(value)).getvalue()
def handle_b64_case(value: str):
if isinstance(value, str) and is_base64(value):
return convert_b64_to_byte_array(value)
return value
# >>>>>>>>> Row Format Functions <<<<<<<<<<<
def _determine_row_format_type(payload: List) -> str:
"""Get the sub-type of the row-formatted payload.
    Assumes the row object type can only be a list or a value, determined by checking the first element of the list.
    Not the best solution, but it will do for now.
:param payload:
:type payload: List
:rtype: str
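    Example (illustrative; the return values are the row-format constants):
    >>> _determine_row_format_type([[1, 2], [3, 4]]) == ROW_FORMAT_NESTED_LIST
    True
    >>> _determine_row_format_type([{"x": 1}]) == ROW_FORMAT_LIST_OF_OBJECTS
    True
    >>> _determine_row_format_type(5) == ROW_FORMAT_VALUE
    True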
"""
if isinstance(payload, list) and len(payload) > 0:
if isinstance(payload[0], list):
return ROW_FORMAT_NESTED_LIST
elif isinstance(payload[0], dict):
return ROW_FORMAT_LIST_OF_OBJECTS
return ROW_FORMAT_VALUE
def row_format_preprocess_data(input_payload: List) -> List:
"""Handle preprocessing of http request body for row-formatted payloads.
Should be called before row_format_create_input_tensors
:param input_payload:
:type input_payload: List
:rtype: List
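    Illustrative example, assuming `B64_KEY` is the literal key "b64":
    >>> row_format_preprocess_data([{"b64": "YWJj"}])
    [b'abc']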
"""
payload = input_payload
for i in range(len(payload)):
        # NOTE: mutates the input payload in place
obj = payload[i]
if isinstance(obj, dict) and len(obj) == 1 and obj.get(B64_KEY):
payload[i] = convert_b64_to_byte_array(obj[B64_KEY])
else:
if isinstance(obj, dict):
for key, value in obj.items():
if isinstance(value, dict) and value.get(B64_KEY):
obj[key] = convert_b64_to_byte_array(value[B64_KEY])
return payload
# >>>> Col format functions <<<<
def _determine_col_format_type(payload: Dict) -> str:
    # Assumes the column object type can only be a list, dict or value, determined by checking the payload type.
    # Not the best solution, but it will do for now.
    # Despite the type hint, the payload can also be a list or a plain value here.
if isinstance(payload, list):
return COL_FORMAT_NESTED_LIST
elif isinstance(payload, dict):
return COL_FORMAT_OBJECT
return COL_FORMAT_VALUE
def col_format_preprocess_data(input_payload: Dict) -> Dict:
"""col_format_preprocess_data.
:param input_payload:
:type input_payload: Dict
:rtype: Dict
"""
payload = input_payload
if isinstance(payload, list):
for i in range(len(payload)):
obj = payload[i]
if isinstance(obj, dict) and len(obj) == 1 and obj.get(B64_KEY):
payload[i] = convert_b64_to_byte_array(obj[B64_KEY])
elif isinstance(payload, dict):
for key, value in payload.items():
if isinstance(value, list):
for index in range(len(value)):
item = value[index]
if isinstance(item, dict) and item.get(B64_KEY):
value[index] = convert_b64_to_byte_array(item[B64_KEY])
return payload
def _execute(
*,
preprocess_func: Tuple[RowPreprocessFunc, ColPreprocessFunc],
extract_data_prep_func: Tuple[RowExtractDataPrepFunc, ColExtractDataPrepFunc],
output_format_func: HandleResFunc,
inference_func: InferenceFunc,
payload: RawPayload,
indiv_sig_data: IndivSigData
) -> InferenceRes:
# reuse for tf2
processed_input = preprocess_func(payload)
predict_input = extract_data_prep_func(processed_input)(indiv_sig_data)
res = inference_func(predict_input)
return output_format_func(res)
def _extract(
*,
preprocess_func: Tuple[RowPreprocessFunc, ColPreprocessFunc],
extract_data_prep_func: Tuple[
RowExtractDataPrepFunc, ColExtractDataPrepFunc, Tf2RowExtractDataPrepFunc, Tf2ColExtractDataPrepFunc
],
payload: RawPayload,
indiv_sig_data: IndivSigData
) -> Dict:
"""_extract is used by BatchPredict functions
:param preprocess_func:
:param extract_data_prep_func:
:param payload:
:param indiv_sig_data:
:rtype: Dict
"""
processed_input = preprocess_func(payload)
return extract_data_prep_func(processed_input)(indiv_sig_data)
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/models/tf/utils.py
from collections import defaultdict
from typing import Dict, List, Collection
import tensorflow as tf
from centaur._.pyvmps.error_handling.constants import ExceptionMsgs as em
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.models.exceptions import InvalidInputException
from centaur._.pyvmps.models.tf.constants import (
COL_FORMAT_NESTED_LIST,
COL_FORMAT_OBJECT,
COL_FORMAT_VALUE,
OUTPUTS,
PREDICTIONS,
ROW_FORMAT_LIST_OF_OBJECTS,
ROW_FORMAT_NESTED_LIST,
ROW_FORMAT_VALUE,
INPUT_KEY_TO_TENSOR,
)
from centaur._.pyvmps.models.tf.utils import (
_determine_col_format_type,
_determine_row_format_type,
output_to_col_format_json,
output_to_row_format_json,
)
from centaur._.pyvmps.models.tf.types import ExtractDataFunc, InputTensorsMap, InferenceRes
_logger = get_logger(__name__)
def row_format_create_input_tensors(payload: List) -> ExtractDataFunc:
"""row_format_create_input_tensors.
:param payload: request payload, ie the body of the http request {"signature_name": "", "instances": ".."}
:type payload: List
:rtype: Callable
"""
    # payload here is the data passed to the tf model
payload_type = _determine_row_format_type(payload)
def _extract_data(indiv_sig_data: Dict) -> InputTensorsMap:
"""Converts the http request body from json format to a Dict with Input Tensor objects as Keys
and input data as values. Output can be passed in as feed dict for TF1
:param indiv_sig_data: Dict containing TENSOR_TO_INPUT_KEY and OUTPUT_KEY_TO_TENSOR keys.
:type indiv_sig_data: Dict
:rtype: Dict[tf.Tensor, List]
"""
_logger.debug("_row_extract_data_start")
input_tensors_tpl = indiv_sig_data[INPUT_KEY_TO_TENSOR]
single_input_tensor = len(input_tensors_tpl) == 1
input_tensors: Dict = defaultdict(list)
if payload_type == ROW_FORMAT_VALUE or payload_type == ROW_FORMAT_NESTED_LIST:
# Row format, value or nested list
if not single_input_tensor:
raise InvalidInputException(em.TF_INVALID_INPUT_KEY_NOT_SPECIFIED)
tensor = list(input_tensors_tpl.values())[0]
            # NOTE: Wrap payload in a list if it's a single value so http_request level batching
            # can append subsequent tensor inputs without any errors
input_tensors[tensor] = [payload] if payload_type == ROW_FORMAT_VALUE else payload
elif payload_type == ROW_FORMAT_LIST_OF_OBJECTS:
# Row format, list of objects
for input_key, tensor in input_tensors_tpl.items():
for obj in payload:
                    if input_key not in obj:
raise InvalidInputException(em.TF_INVALID_INPUT_MISSING_KEY.format(input_key=input_key))
input_tensors[tensor].append(obj[input_key])
_logger.debug("_row_extract_data_end")
return input_tensors
return _extract_data
def row_format_handle_result(model_output: Dict) -> InferenceRes:
"""Convert model_output into format expected when a row-formatted payload was sent
:param model_output: Result produced by model
"""
_logger.debug("_row_format_handle_result")
result = output_to_row_format_json(model_output)
return {PREDICTIONS: result}
def col_format_create_input_tensors(payload: Dict) -> ExtractDataFunc:
"""col_format_create_input_tensors.
:param payload:
"""
payload_type = _determine_col_format_type(payload)
def _extract_data(indiv_sig_data: Dict) -> InputTensorsMap:
_logger.debug("_col_extract_data_start")
input_tensors_tpl = indiv_sig_data[INPUT_KEY_TO_TENSOR]
single_input_tensor = len(input_tensors_tpl) == 1
input_tensors: Dict[tf.Tensor, Collection] = {}
if payload_type == COL_FORMAT_VALUE or payload_type == COL_FORMAT_NESTED_LIST:
if not single_input_tensor:
raise InvalidInputException(em.TF_INVALID_INPUT_KEY_NOT_SPECIFIED)
tensor = list(input_tensors_tpl.values())[0]
            # NOTE: Wrap payload in a list if it's a single value so http_request level batching
            # can append subsequent tensor inputs without any errors
input_tensors[tensor] = [payload] if payload_type == COL_FORMAT_VALUE else payload
elif payload_type == COL_FORMAT_OBJECT:
for input_key, tensor in input_tensors_tpl.items():
if input_key not in payload:
raise InvalidInputException(em.TF_INVALID_INPUT_MISSING_KEY.format(input_key=input_key))
input_tensors[tensor] = payload[input_key]
_logger.debug("_col_extract_data_end")
return input_tensors
return _extract_data
def col_format_handle_result(res: Dict) -> InferenceRes:
_logger.debug("_col_format_handle_result")
result = output_to_col_format_json(res)
return {OUTPUTS: result}
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/models/tf/tf1/payload_utils.py
from typing import Dict, List
import tensorflow as tf
from centaur._.pyvmps.error_handling.constants import ExceptionMsgs as em
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.models.exceptions import InvalidInputException, InvalidInputTensorShape
from centaur._.pyvmps.models.tf.constants import (
COL_FORMAT_NESTED_LIST,
COL_FORMAT_OBJECT,
COL_FORMAT_VALUE,
ROW_FORMAT_LIST_OF_OBJECTS,
ROW_FORMAT_NESTED_LIST,
ROW_FORMAT_VALUE,
SigTensorInfo,
)
from centaur._.pyvmps.models.tf.tf1.payload_utils import (
_determine_col_format_type,
_determine_row_format_type,
col_format_handle_result,
row_format_handle_result,
)
from centaur._.pyvmps.models.tf.types import InferenceRes, Tf2ExtractDataFunc
_logger = get_logger(__name__)
def tf2_row_format_create_input_tensors(payload: List) -> Tf2ExtractDataFunc:
"""row_format_create_input_tensors.
:param payload: request payload, ie the body of the http request {"signature_name": "", "instances": ".."}
:type payload: List
:rtype: Callable
"""
    # payload here is the data passed to the tf model
payload_type = _determine_row_format_type(payload)
def _extract_data(sig_tensor_info: SigTensorInfo) -> Dict:
"""_extract_data.
:param sig_tensor_info:
:type sig_tensor_info: SigTensorInfo
:rtype: Dict[str, tf.EagerTensor]
"""
        input_data_dict: Dict[str, tf.Tensor] = {}
structured_input_sig: Dict = sig_tensor_info.inputs
        single_input_tensor = len(structured_input_sig) == 1
if payload_type == ROW_FORMAT_VALUE or payload_type == ROW_FORMAT_NESTED_LIST:
# Row format, value or nested list
if not single_input_tensor:
raise InvalidInputException(
# "Input keys not specified in data"
em.TF_INVALID_INPUT_KEY_NOT_SPECIFIED
)
key_name = list(structured_input_sig.keys())[0]
payload_dtype = structured_input_sig[key_name].dtype
input_required_tensor_shape: tf.TensorShape = structured_input_sig[key_name].shape
newly_created_tensor = tf.constant(payload[key_name], dtype=payload_dtype)
if not input_required_tensor_shape.is_compatible_with(newly_created_tensor.shape):
raise InvalidInputTensorShape(
# "Expected={}, gave={}".format(
# input_required_tensor_shape, newly_created_tensor.shape
# )
em.TF_INVALID_INPUT_TENSOR_SHAPE.format(
expected_shape=input_required_tensor_shape, received_shape=newly_created_tensor.shape
)
)
            input_data_dict[key_name] = newly_created_tensor
elif payload_type == ROW_FORMAT_LIST_OF_OBJECTS:
for input_name, input_tensor in structured_input_sig.items():
input_data: List = []
for obj in payload:
                    if input_name not in obj:
raise InvalidInputException(
em.TF_INVALID_INPUT_MISSING_KEY.format(input_key=input_name)
# "Data does not contain {} input key".format(input_name)
)
input_data.append(obj[input_name])
tensor_shape: tf.TensorShape = input_tensor.shape
newly_created_tensor = tf.constant(input_data, dtype=input_tensor.dtype)
if not tensor_shape.is_compatible_with(newly_created_tensor.shape):
raise InvalidInputTensorShape(
# "Expected={}, gave={}".format(
# tensor_shape, newly_created_tensor.shape
# )
em.TF_INVALID_INPUT_TENSOR_SHAPE.format(
expected_shape=tensor_shape, received_shape=newly_created_tensor.shape
)
)
input_data_dict[input_name] = newly_created_tensor
return input_data_dict
return _extract_data
def tf2_row_format_handle_result(model_output: Dict) -> InferenceRes:
"""Convert model_output into format expected when a row-formatted payload was sent
:param model_output: Result produced by model
"""
_logger.debug("_row_format_handle_result")
# convert EagerTensor to numpy array first before passing to format_result()
model_output = {k: v.numpy() for k, v in model_output.items()}
return row_format_handle_result(model_output)
def tf2_col_format_create_input_tensors(payload: Dict) -> Tf2ExtractDataFunc:
"""col_format_create_input_tensors.
:param payload:
"""
payload_type = _determine_col_format_type(payload)
def _extract_data(sig_tensor_info: SigTensorInfo) -> Dict:
"""_extract_data.
:param sig_tensor_info:
:rtype: Dict[str, tf.EagerTensor]
"""
        input_data_dict: Dict[str, tf.Tensor] = {}
structured_input_sig: Dict[str, tf.TensorSpec] = sig_tensor_info.inputs
single_input_tensor = len(structured_input_sig) == 1
if payload_type == COL_FORMAT_VALUE or payload_type == COL_FORMAT_NESTED_LIST:
if not single_input_tensor:
raise InvalidInputException(em.TF_INVALID_INPUT_KEY_NOT_SPECIFIED)
key_name = list(structured_input_sig.keys())[0]
payload_dtype = structured_input_sig[key_name].dtype
input_required_tensor_shape: tf.TensorShape = structured_input_sig[key_name].shape
newly_created_tensor = tf.constant(payload[key_name], dtype=payload_dtype)
if not input_required_tensor_shape.is_compatible_with(newly_created_tensor.shape):
raise InvalidInputTensorShape(
# "Expected={}, gave={}".format(
# input_required_tensor_shape, newly_created_tensor.shape
# )
em.TF_INVALID_INPUT_TENSOR_SHAPE.format(
expected_shape=input_required_tensor_shape, received_shape=newly_created_tensor.shape
)
)
            input_data_dict[key_name] = newly_created_tensor
elif payload_type == COL_FORMAT_OBJECT:
for input_key, input_tensor in structured_input_sig.items():
if input_key not in payload:
raise InvalidInputException(
# "Data does not contain {} input key".format(input_key)
em.TF_INVALID_INPUT_MISSING_KEY.format(input_key=input_key)
)
tensor_shape: tf.TensorShape = input_tensor.shape
newly_created_tensor = tf.constant(payload[input_key], dtype=input_tensor.dtype)
if not tensor_shape.is_compatible_with(newly_created_tensor.shape):
raise InvalidInputTensorShape(
# "Expected={}, gave={}".format(
# tensor_shape, newly_created_tensor.shape
# )
em.TF_INVALID_INPUT_TENSOR_SHAPE.format(
expected_shape=tensor_shape, received_shape=newly_created_tensor.shape
)
)
input_data_dict[input_key] = newly_created_tensor
return input_data_dict
return _extract_data
def tf2_col_format_handle_result(res) -> InferenceRes:
"""tf2_col_format_handle_result.
:param res:
"""
_logger.debug("_col_format_handle_result")
res = {k: v.numpy() for k, v in res.items()}
return col_format_handle_result(res)
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/models/tf/tf2/payload_utils.py
from centaur._.pyvmps.cfg.config import MessageType, PyvmpProtocol
from enum import Enum
# pylint: disable=pointless-string-statement
"""
Pyvmp message protocol:
{
"msg_type": "INT_OF_MSG_TYPE",
"__msg_type_corpus(explanatory field, for reference only, no need to be in a real message)": [
"VMP_STARTUP_ERR(1)", "VMP_STARTUP_SUCCEEDED(2)",
"HEALTH_CHECK(3)", "HEALTH_CHECK_OK(4)", "HEALTH_CHECK_ERR(5)",
"RESP_OK(6)", "RESP_ERR(7)",
"VMP_ASK_FOR_REQ(8)", "REQ(9)",
"METRICS(10)",
"M_RELOAD(11)", "M_OFFLOAD(12)", "M_LOAD(13)",
"EXIT(14)", "VMP_EXIT_OK(15)", "VMP_EXIT_ERR(16)"
],
"vmp_startup_succeeded_attrs": {
"vmp_id": "INT_OF_VMP_ID",
},
"req_attrs": {
"recv_ts": 1601626396029,
"api_verb": "predict"
},
"metrics_attrs": {
"name": "METRICS_NAME"
},
"m_reload_attrs": {},
"m_offload_attrs": {},
"m_load_attrs": {},
"err": ["BYTES SLICE"],
"data": ["BYTES SLICE"]
}
"""
class ModelType:
"""ModelType contains constants for supported model types"""
TF = "tf"
PYTHON = "py"
PYTHON_WHEEL = "py_whl"
@classmethod
def all(cls):
return [cls.TF, cls.PYTHON, cls.PYTHON_WHEEL]
class PyvmpStates:
"""PyvmpStates contain the different state constants that Pyvmp can hold"""
INIT = "init"
CONNECTED = "connected"
MODEL_LOAD_FAILURE = "model_load_failure"
MODEL_LOAD_SUCCESS = "model_load_success"
class PayloadKey:
"""PayloadKey contains constants used to describe action to take"""
VERB: str = "api_verb"
USR_DATA: str = "usr_data"
class PyvmpConstants:
"""PyvmpConstants."""
INTERVAL = 60 # health reporter interval
class MessageConstants:
"""MessageConstants."""
ASK_FOR_WORK = {PyvmpProtocol.MSG_TYPE: MessageType.VMP_ASK_FOR_REQ.value}
HEARTBEAT = {PyvmpProtocol.MSG_TYPE: MessageType.HEARTBEAT.value}
HEALTH_CHECK_OK = {PyvmpProtocol.MSG_TYPE: MessageType.HEALTH_CHECK_OK.value}
EXIT_OK = {PyvmpProtocol.MSG_TYPE: MessageType.VMP_EXIT_OK.value}
Tracking = Enum(
"Tracking", ["PYVMP_START_INF", "PYVMP_UNWRAP_BYTES", "PYVMP_PREDICT_DONE", "PYVMP_WRAP_DICT"], start=0
) # type: ignore
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/client/constants.py
import re
import ujson
from io import BytesIO
from collections import defaultdict
from typing import List, Dict, Union, NamedTuple
from requests_toolbelt.multipart import decoder
from requests.structures import CaseInsensitiveDict
from centaur import constants as public_constants
from centaur._.pyvmps.cfg import constants
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.cfg.config import SupportedHTTPCodecs
from centaur._.pyvmps.models.tf import constants as tf_constants
_logger = get_logger(__name__)
class ParsedUsrPayload(NamedTuple):
payload: Union[
dict,
list,
bytes,
"numpy.ndarray", # <- _parse_npy + anonymous field
"pandas.DataFrame", # <- _parse_pd_feather or _prase_pd_parquet + anonymous field
]
content_types: Dict[str, Union[str, list]] = {}
def parse_usr_payload(content: bytes, content_type: List[str], encoding: str = "utf-8") -> ParsedUsrPayload:
_logger.debug("Content-Type=%s, encoding=%s", content_type, encoding)
ct = [x.lower() for x in content_type]
if "application/json" in ct:
return ParsedUsrPayload(payload=_parse_json(content))
multipart_ct = ""
for x in ct:
if "multipart/form-data; boundary=" in x:
multipart_ct = x
break
if multipart_ct:
parsed_payload, cts, tf_sig_name = {}, {}, ""
multipart_data = decoder.MultipartDecoder(content, multipart_ct, encoding)
part_counter = defaultdict(int)
for p in multipart_data.parts:
_logger.debug("multipart/form-data part headers=%s", p.headers)
if len(p.content) == 0:
raise ValueError("empty multipart/form-data part: {}".format(p.headers))
cd_h = p.headers[b"Content-Disposition"]
l_idx = cd_h.index(b'form-data; name="') + 17
r_idx = cd_h.index(b'"', l_idx)
name = cd_h[l_idx:r_idx].decode(encoding)
# Note:
# tf_constants.SIGNATURE_NAME ('signature_name') needs to be treated as a reserved keyword,
# that is: it should only appear in the requests for tf models.
if name == tf_constants.SIGNATURE_NAME:
tf_sig_name = p.content.decode(encoding)
else:
if cts:
if name.startswith(public_constants.UNDERSCORE):
if public_constants.UNDERSCORE in cts:
raise ValueError(
"more than 1 anonymous field (key starts with '%s') is not allowed: %s"
% (public_constants.UNDERSCORE, name)
)
raise ValueError(
"using anonymous field (key starts with '%s') and named field (key not starts with '%s') together is not allowed"
% (public_constants.UNDERSCORE, public_constants.UNDERSCORE)
)
else:
if public_constants.UNDERSCORE in cts:
raise ValueError(
"using anonymous field (key starts with '%s') and named field (key not starts with '%s') together is not allowed"
% (public_constants.UNDERSCORE, public_constants.UNDERSCORE)
)
part_counter[name] += 1
ct_h = p.headers.get(b"Content-Type", b"")
p_ct = p_ct_to_relay = ""
p_v, as_list = p.content, False
if ct_h:
p_ct = p_ct_to_relay = ct_h.decode(encoding)
as_list = constants.ContentTypes.AS_LIST in p_ct
if as_list:
_logger.warning(
"multipart/form-data part '%s' activated '%s' feature",
p.headers,
constants.ContentTypes.AS_LIST,
)
p_ct = p_ct.replace(constants.ContentTypes.AS_LIST, "")
p_ct_to_relay = [p_ct]
parser = _parsers.get(p_ct, None)
if parser:
p_v = parser(p.content)
p_ct_to_relay = [""] if as_list else ""
if as_list:
p_v = [p_v]
if name.startswith(public_constants.UNDERSCORE):
cts[public_constants.UNDERSCORE] = p_ct_to_relay
parsed_payload = p_v
else:
if part_counter[name] == 1:
cts[name] = p_ct_to_relay
parsed_payload[name] = p_v
elif part_counter[name] == 2:
cts[name] = [cts[name], p_ct_to_relay]
parsed_payload[name] = [parsed_payload[name], p_v]
else:
cts[name].append(p_ct_to_relay)
parsed_payload[name].append(p_v)
        filtered_cts = {k: v for k, v in cts.items() if not re.match(r"^[\[\]', ]*$", str(v))}
if tf_sig_name:
return ParsedUsrPayload(
payload={tf_constants.SIGNATURE_NAME: tf_sig_name, tf_constants.INPUTS: parsed_payload},
content_types=filtered_cts,
)
return ParsedUsrPayload(payload=parsed_payload, content_types=filtered_cts)
return ParsedUsrPayload(payload=content)
def _parse_json(data):
return ujson.loads(data)
def _parse_npy(data):
import numpy as np
v = np.load(BytesIO(data), allow_pickle=True)
if not isinstance(v, np.ndarray):
raise ValueError("payload is not a valid ndarray")
return v
def _parse_npz(data):
import numpy as np
v = np.load(BytesIO(data), allow_pickle=True)
if not isinstance(v, np.lib.npyio.NpzFile):
raise ValueError("payload is not a valid 'np.lib.npyio.NpzFile'")
d = {}
for k in v.keys():
d[k] = v[k]
return d
def _parse_pd_feather(data):
import pandas as pd
return pd.read_feather(BytesIO(data))
def _parse_pd_parquet(data):
import pandas as pd
return pd.read_parquet(BytesIO(data))
_parsers = CaseInsensitiveDict(
{
SupportedHTTPCodecs.JSON.value: _parse_json,
SupportedHTTPCodecs.NPY.value: _parse_npy,
SupportedHTTPCodecs.NPZ.value: _parse_npz,
SupportedHTTPCodecs.PD_FEATHER.value: _parse_pd_feather,
        SupportedHTTPCodecs.PD_PARQUET.value: _parse_pd_parquet,
}
)
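# Illustrative usage (hypothetical request body):
#   parsed = parse_usr_payload(b'{"instances": [1, 2, 3]}', ["application/json"])
#   parsed.payload       -> {"instances": [1, 2, 3]}
#   parsed.content_types -> {}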
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/client/usr_payload_parser.py
| 0.631253 | 0.159872 |
usr_payload_parser.py
|
pypi
|
import binascii
import copy
from typing import Callable, Dict, List, Optional, Tuple
import tensorflow as tf
from centaur._.pyvmps.client.pyvmp_models._tf_helper import batch_deconstruct
from centaur._.pyvmps.client.utils import unpack_and_check_verb
from centaur._.pyvmps.error_handling.constants import ExceptionMsgs as em
from centaur._.pyvmps.error_handling.errors import invalid_input # internal_error,
from centaur._.pyvmps.error_handling.errors import invalid_tensor_input, invalid_tensor_signature, tf_graph_error
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.models.base_model import BatchedModel
from centaur._.pyvmps.models.centaur_model import CentaurModelMixin
from centaur._.pyvmps.models.exceptions import (
InvalidInputException,
InvalidInputTensorShape,
InvalidSignatureKeyException,
ModelPredictErrorException,
)
from centaur._.pyvmps.models.tf import constants as tfconstants
from centaur._.pyvmps.models.tf.constants import TfVerbs, SigTensorInfo
from centaur._.pyvmps.models.tf.tf2.model import Tf2Model
from centaur._.pyvmps.models.tf.tf2.payload_utils import (
tf2_col_format_create_input_tensors,
tf2_col_format_handle_result,
tf2_row_format_create_input_tensors,
tf2_row_format_handle_result,
)
from centaur._.pyvmps.models.tf.utils import col_format_preprocess_data, row_format_preprocess_data
from centaur._.pyvmps.tracking.tracker import tracker
from centaur._.pyvmps.tracking.constants import Tracking
from centaur._.pyvmps.utils.common import current_milli_time
BatchedSigDict = Dict
_logger = get_logger(__file__)
row_format_funcs = [row_format_preprocess_data, tf2_row_format_create_input_tensors, tf2_row_format_handle_result]
col_format_funcs = [col_format_preprocess_data, tf2_col_format_create_input_tensors, tf2_col_format_handle_result]
# NOTE: Temp setting of tf verbs since only predict is supported
tf_predict_set = set([TfVerbs.PREDICT])
def _construct_batch_tuple_lists(req_id: str, batched_payload: Dict) -> Tuple[str, str, Callable, Callable]:
req_payload, _ = unpack_and_check_verb(batched_payload, tf_predict_set)
sig: str = req_payload.get(tfconstants.SIGNATURE_NAME, tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
payload_partial_func, output_formatter = batch_deconstruct(req_payload, row_format_funcs, col_format_funcs)
return req_id, sig, payload_partial_func, output_formatter
def _construct_batch_ds(
sig_tensor_dict: Dict,
batch_data_store: Dict,
req_id: str,
sig: str,
partial_extract_func: Callable,
output_formatter_func: Callable,
):
sig_tensor_info: Optional[SigTensorInfo] = sig_tensor_dict.get(sig, None)
if not sig_tensor_info:
raise InvalidSignatureKeyException(em.TF_INVALID_SIGNATURE_KEY.format(signature=sig))
# Convert http request body to tensor-object data mapping
payload_input_data_dict = partial_extract_func(indiv_sig_data=sig_tensor_info)
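    # batch_data_store groups requests by signature: for each signature it keeps the
    # concrete function, the input tensors concatenated along axis 0, and a list of
    # TfMergedRequestsTuple(req_id, start_index, end_index, formatter) entries used
    # to split the batched output back into per-request responses.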
if sig not in batch_data_store:
batch_data_store[sig] = {
tfconstants.CONC_FUNC: sig_tensor_info.conc_func,
tfconstants.MERGED_INPUT_TENSORS: {
input_key: tf.concat([tf_constant_value], axis=0)
for input_key, tf_constant_value in payload_input_data_dict.items()
},
tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER: [
tfconstants.TfMergedRequestsTuple(
req_id, 0, len(payload_input_data_dict[next(iter(payload_input_data_dict))]), output_formatter_func
)
],
}
else:
for (input_key, tf_constant_value) in payload_input_data_dict.items():
batch_data_store[sig][tfconstants.MERGED_INPUT_TENSORS][input_key] = tf.concat(
[batch_data_store[sig][tfconstants.MERGED_INPUT_TENSORS][input_key], tf_constant_value], axis=0
)
next_start_idx = batch_data_store[sig][tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER][-1].end_index
batch_data_store[sig][tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER].append(
tfconstants.TfMergedRequestsTuple(
req_id,
next_start_idx,
next_start_idx + len(payload_input_data_dict[next(iter(payload_input_data_dict))]),
output_formatter_func,
)
)
_logger.debug(
"output_formatter_added=%s", batch_data_store[sig][tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER][-1]
)
def _inner_predict_func(
batch_err_store: Dict,
conc_func: Callable,
merged_input_tensors,
output_formatter_req_id_mapper: List[tfconstants.TfMergedRequestsTuple],
) -> Dict:
"""fetches: Dict[str, tf.Tensor] - output key to tensor dict
feed_dict: input tensor to payload dict
"""
formatted_outputs = dict()
try:
outputs: Dict = conc_func(**merged_input_tensors)
ts = current_milli_time()
for x in output_formatter_req_id_mapper:
temp = copy.copy(outputs)
for k, v in temp.items():
temp[k] = v[x.start_index : x.end_index]
formatted_outputs[x.req_id] = x.formatter(temp)
tracker.insert_anchor(x.req_id, Tracking.PYVMP_PREDICT_DONE.value, ts)
except Exception:
_logger.info("error encountered during batch predict")
for tf_merged_request_tuple in output_formatter_req_id_mapper:
indiv_input_tensors = {
k: v[tf_merged_request_tuple.start_index : tf_merged_request_tuple.end_index]
for k, v in merged_input_tensors.items()
}
try:
indiv_outputs = conc_func(**indiv_input_tensors)
formatted_outputs[tf_merged_request_tuple.req_id] = tf_merged_request_tuple.formatter(indiv_outputs)
except Exception as e:
_logger.info("error_for_single_predict|req_id=%s", tf_merged_request_tuple.req_id)
_logger.debug(str(e))
batch_err_store[tf_merged_request_tuple.req_id] = tf_graph_error.generate_error(details=repr(e))
finally:
tracker.insert_anchor(tf_merged_request_tuple.req_id, Tracking.PYVMP_PREDICT_DONE.value)
return formatted_outputs
def tf2_batch_predict(
batch_payload: Dict[str, Dict], sig_tensor_dict: Dict[str, Dict]
) -> Tuple[Dict[str, Dict], Dict[str, Dict]]:
batch_data_store: Dict[str, BatchedSigDict] = {}
batch_err_store: Dict = {}
model_output: Dict = {}
# RETURN FORMATTED_OUTPUTS
for req_id, req_payload in batch_payload.items():
try:
_construct_batch_ds(sig_tensor_dict, batch_data_store, *_construct_batch_tuple_lists(req_id, req_payload))
except (InvalidInputException, InvalidInputTensorShape) as e:
batch_err_store[req_id] = invalid_tensor_input.generate_error(details=repr(e))
except InvalidSignatureKeyException as e:
batch_err_store[req_id] = invalid_tensor_signature.generate_error(details=repr(e))
except binascii.Error as e:
batch_err_store[req_id] = invalid_input.generate_error(details=repr(e))
except Exception as e:
_logger.exception("unhandled_exeception_caught")
batch_err_store[req_id] = invalid_input.generate_error(details=repr(e))
for _, tensor_tuple in batch_data_store.items():
try:
model_output = {**model_output, **_inner_predict_func(batch_err_store, *tensor_tuple.values())}
except Exception:
_logger.exception("predict_func|unknown_error")
return model_output, batch_err_store
class Tf2ModelPyvmp(Tf2Model, BatchedModel, CentaurModelMixin):
"""Model class with pyvmp logic embedded"""
def _single_predict(self, single_req_payload: Dict[str, Dict]) -> Tuple[Dict, Dict]:
req_id = next(iter(single_req_payload))
batch_payload = single_req_payload[req_id]
payload, _ = unpack_and_check_verb(batch_payload, [TfVerbs.PREDICT])
result_payload = {}
errs = {}
try:
output = super().predict(payload)
result_payload[req_id] = output
except (InvalidInputException, InvalidInputTensorShape) as e:
errs[req_id] = invalid_tensor_input.generate_error(details=repr(e))
except InvalidSignatureKeyException as e:
errs[req_id] = invalid_tensor_signature.generate_error(details=repr(e))
except binascii.Error as e:
errs[req_id] = invalid_input.generate_error(details=repr(e))
except ModelPredictErrorException as e:
errs[req_id] = invalid_input.generate_error(details=repr(e))
except Exception as e:
errs[req_id] = invalid_input.generate_error(details=repr(e))
finally:
tracker.insert_anchor(req_id, Tracking.PYVMP_PREDICT_DONE.value)
return result_payload, errs
def _batch_predict(self, batched_req_dict: Dict[str, Dict]) -> Tuple[Dict, Dict]:
"""To be used by pyvmp to handle http-request level batched payload. Returns 2 dicts, model_outputs and batched_errs
model_outputs stores successful model results
batched_errs store errs.
Both dicts use req_id as keys, where union(model_outputs.keys(), batched_errs.keys()) === batched_req_dict.keys()
:param batched_req_dict: Dict with req_id as key, http-request body as values
:type batched_req_dict: Dict[str, Dict]
:rtype: Tuple[Dict, Dict]
"""
return tf2_batch_predict(batched_req_dict, self.sig_tensor_dict)
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/client/pyvmp_models/tf2.py
| 0.712832 | 0.273826 |
tf2.py
|
pypi
|
import binascii
import copy
from typing import Callable, Dict, List, Optional, Tuple
import tensorflow as tf
from centaur._.pyvmps.client.pyvmp_models._tf_helper import batch_deconstruct
from centaur._.pyvmps.client.utils import unpack_and_check_verb
from centaur._.pyvmps.error_handling.constants import ExceptionMsgs as em
from centaur._.pyvmps.error_handling.errors import ( # internal_error,
invalid_input,
invalid_tensor_input,
invalid_tensor_signature,
tf_graph_error,
)
from centaur._.pyvmps.logger.get_logger import get_logger
from centaur._.pyvmps.models.base_model import BatchedModel
from centaur._.pyvmps.models.centaur_model import CentaurModelMixin
from centaur._.pyvmps.models.exceptions import (
InvalidInputException,
InvalidInputTensorShape,
InvalidSignatureKeyException,
ModelPredictErrorException,
)
from centaur._.pyvmps.models.tf import constants as tfconstants
from centaur._.pyvmps.models.tf.constants import TfVerbs
from centaur._.pyvmps.models.tf.tf1.model import Tf1Model
from centaur._.pyvmps.models.tf.tf1.payload_utils import (
col_format_create_input_tensors,
col_format_handle_result,
row_format_create_input_tensors,
row_format_handle_result,
)
from centaur._.pyvmps.models.tf.utils import col_format_preprocess_data, row_format_preprocess_data
from centaur._.pyvmps.tracking.tracker import tracker
from centaur._.pyvmps.tracking.constants import Tracking
from centaur._.pyvmps.utils.common import current_milli_time
BatchedSigDict = Dict
_logger = get_logger(__file__)
row_format_funcs: List[Callable] = [
row_format_preprocess_data,
row_format_create_input_tensors,
row_format_handle_result,
]
col_format_funcs: List[Callable] = [
col_format_preprocess_data,
col_format_create_input_tensors,
col_format_handle_result,
]
# NOTE: Temp setting of tf verbs since only predict is supported
tf_predict_set = set([TfVerbs.PREDICT])
def _construct_batch_tuple_lists(req_id: str, batch_data: Dict) -> Tuple[str, str, Callable, Callable]:
"""_construct_batch_tuple_lists
:param req_id:
:type req_id: str
:param batch_data:
:type batch_data: Dict
:rtype: Tuple[str, str, Callable, Callable]
"""
req_payload, _ = unpack_and_check_verb(batch_data, tf_predict_set)
sig: str = req_payload.get(
tfconstants.SIGNATURE_NAME, tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
)
payload_partial_func, output_formatter = batch_deconstruct(req_payload, row_format_funcs, col_format_funcs)
return req_id, sig, payload_partial_func, output_formatter
def _construct_batch_ds(
signature_tensor_mapping: Dict,
batch_data_store: Dict,
req_id,
sig,
partial_extract_func: Callable,
output_formatter_func: Callable,
):
tensor_mapping_dict: Optional[Dict] = signature_tensor_mapping.get(sig, None)
if not tensor_mapping_dict:
raise InvalidSignatureKeyException(em.TF_INVALID_SIGNATURE_KEY.format(signature=sig))
# Convert http request body to tensor-object data mapping
# which is fed into sess.run()
input_tensors = partial_extract_func(indiv_sig_data=tensor_mapping_dict)
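    # batch_data_store groups requests by signature: for each signature it keeps the
    # fetches (output-key-to-tensor dict), the merged feed_dict inputs (lists extended
    # per request), and TfMergedRequestsTuple entries used to split the sess.run
    # output back into per-request responses.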
if sig not in batch_data_store:
batch_data_store[sig] = {
tfconstants.FETCHES: tensor_mapping_dict[tfconstants.OUTPUT_KEY_TO_TENSOR],
tfconstants.MERGED_INPUT_TENSORS: input_tensors,
tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER: [
tfconstants.TfMergedRequestsTuple(
req_id, 0, len(input_tensors[next(iter(input_tensors))]), output_formatter_func
)
],
}
else:
for k, v in input_tensors.items():
batch_data_store[sig][tfconstants.MERGED_INPUT_TENSORS][k].extend(v)
next_start_idx = batch_data_store[sig][tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER][-1][2]
end_index = next_start_idx + len(input_tensors[next(iter(input_tensors))])
batch_data_store[sig][tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER].append( # type:ignore
tfconstants.TfMergedRequestsTuple(req_id, next_start_idx, end_index, output_formatter_func)
)
_logger.debug(
"output_formatter_added=%s", batch_data_store[sig][tfconstants.OUTPUT_FORMATTER_REQ_ID_MAPPER][-1]
)
def _inner_predict_func(
sess,
batch_err_store: Dict,
fetches,
feed_dict,
output_formatter_req_id_mapper: List[tfconstants.TfMergedRequestsTuple],
) -> Dict:
"""fetches: Dict[str, tf.Tensor] - output key to tensor dict
feed_dict: input tensor to payload dict
"""
formatted_outputs = dict()
try:
outputs: Dict = sess.run(fetches, feed_dict)
ts = current_milli_time()
for x in output_formatter_req_id_mapper:
temp = copy.copy(outputs)
for k, v in temp.items():
temp[k] = v[x.start_index : x.end_index]
formatted_outputs[x.req_id] = x.formatter(temp)
tracker.insert_anchor(x.req_id, Tracking.PYVMP_PREDICT_DONE.value, ts)
except Exception:
_logger.info("error encountered during batch predict")
for tf_merged_request_tuple in output_formatter_req_id_mapper:
indiv_feed_dict = {
k: v[tf_merged_request_tuple.start_index : tf_merged_request_tuple.end_index]
for k, v in feed_dict.items()
}
try:
indiv_outputs = sess.run(fetches, indiv_feed_dict)
formatted_outputs[tf_merged_request_tuple.req_id] = tf_merged_request_tuple.formatter(indiv_outputs)
except Exception as e:
_logger.info("error_for_single_predict|req_id=%s", tf_merged_request_tuple.req_id)
_logger.debug(str(e))
batch_err_store[tf_merged_request_tuple.req_id] = tf_graph_error.generate_error(details=repr(e))
finally:
tracker.insert_anchor(tf_merged_request_tuple.req_id, Tracking.PYVMP_PREDICT_DONE.value)
return formatted_outputs
def batch_predict(
batch_payload: Dict[str, Dict], signature_tensor_mapping: Dict[str, Dict], sess
) -> Tuple[Dict[str, Dict], Dict[str, Dict]]:
# Data structures to construct response from batch payload
batch_data_store: Dict[str, BatchedSigDict] = {}
batch_err_store: Dict = {}
model_output: Dict = {}
for req_id, req_payload in batch_payload.items():
try:
_construct_batch_ds(
signature_tensor_mapping, batch_data_store, *_construct_batch_tuple_lists(req_id, req_payload)
)
except (InvalidInputException, InvalidInputTensorShape) as e:
batch_err_store[req_id] = invalid_tensor_input.generate_error(details=repr(e))
except InvalidSignatureKeyException as e:
batch_err_store[req_id] = invalid_tensor_signature.generate_error(details=repr(e))
except binascii.Error as e:
batch_err_store[req_id] = invalid_input.generate_error(details=repr(e))
except Exception as e:
_logger.exception("unhandled_exeception_caught")
batch_err_store[req_id] = invalid_input.generate_error(details=repr(e))
for _, tensor_tuple in batch_data_store.items():
try:
model_output = {**model_output, **_inner_predict_func(sess, batch_err_store, *tensor_tuple.values())}
except Exception:
_logger.exception("predict_func|unknown_error")
return model_output, batch_err_store
class Tf1ModelPyvmp(Tf1Model, BatchedModel, CentaurModelMixin):
"""Model class with pyvmp logic embedded"""
def _single_predict(self, single_req_payload: Dict[str, Dict]) -> Tuple[Dict, Dict]:
req_id = next(iter(single_req_payload))
batch_payload = single_req_payload[req_id]
payload, _ = unpack_and_check_verb(batch_payload, [TfVerbs.PREDICT])
result_payload = {}
errs = {}
try:
output = super().predict(payload)
result_payload[req_id] = output
except (InvalidInputException, InvalidInputTensorShape) as e:
errs[req_id] = invalid_tensor_input.generate_error(details=repr(e))
except InvalidSignatureKeyException as e:
errs[req_id] = invalid_tensor_signature.generate_error(details=repr(e))
except binascii.Error as e:
errs[req_id] = invalid_input.generate_error(details=repr(e))
except ModelPredictErrorException as e:
errs[req_id] = invalid_input.generate_error(details=repr(e))
except Exception as e:
errs[req_id] = invalid_input.generate_error(details=repr(e))
finally:
tracker.insert_anchor(req_id, Tracking.PYVMP_PREDICT_DONE.value)
return result_payload, errs
def _batch_predict(self, batched_req_dict: Dict[str, Dict]) -> Tuple[Dict, Dict]:
"""To be used by pyvmp to handle http-request level batched payload. Returns 2 dicts, model_outputs and batched_errs
model_outputs stores successful model results
batched_errs store errs.
Both dicts use req_id as keys, where union(model_outputs.keys(), batched_errs.keys()) === batched_req_dict.keys()
:param batched_req_dict: Dict with req_id as key, http-request body as values
:type batched_req_dict: Dict[str, Dict]
:rtype: Tuple[Dict, Dict]
"""
return batch_predict(batched_req_dict, self.sig_mapping, self.sess)
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/centaur/_/pyvmps/client/pyvmp_models/tf1.py
| 0.772574 | 0.291813 |
tf1.py
|
pypi
|
import json
from typing import List, Generator, Any
import pandas as pd
from pyrfc import ABAPApplicationError, ABAPRuntimeError, LogonError, CommunicationError
from .connection import SAPConnection
from .exceptions import SAPException
class SAP:
def __init__(self, connection: SAPConnection):
self.connection = connection
def _to_dataframe(self, result: list, colunas: list, tamanhos: list) -> pd.DataFrame:
df = pd.DataFrame(columns=colunas)
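        # RFC_READ_TABLE returns each row as one fixed-width string in the 'WA' field;
        # slice it into columns using the cumulative field lengths and strip the padding.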
for j, d in enumerate(result):
resultado = d['WA']
linha_resultado = []
for i, col in enumerate(colunas):
valor = resultado[sum(tamanhos[:i]):sum(tamanhos[:i + 1])]
linha_resultado.append(valor)
df.loc[j] = linha_resultado
df_obj = df.select_dtypes(['object'])
df[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
return df
def get_data_df(self,
table: str,
columns: List[str],
where: str = None,
where_list: List[str] = None,
humanized_columns: List[str] = None,
page_size: int = 1000) -> Generator[pd.DataFrame, None, None]:
fields = []
where_clause = []
if where:
where_clause = [{"TEXT": where}]
elif where_list:
where_clause = [{"TEXT": w} for w in where_list]
if columns:
fields = [{"FIELDNAME": f} for f in columns]
page = 1
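        # Page through the table via RFC_READ_TABLE's ROWSKIPS/ROWCOUNT options,
        # yielding one DataFrame per page until a short page signals the last one.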
while True:
try:
connection = self.connection.get_connection()
start = (page - 1) * page_size
limit = page_size
result = connection.call('RFC_READ_TABLE',
QUERY_TABLE=table,
# DELIMITER='¬',
FIELDS=fields,
OPTIONS=where_clause,
ROWSKIPS=start,
ROWCOUNT=limit)
connection.close()
response_columns = result.get('FIELDS')
                response_columns_length = [int(col.get('LENGTH')) for col in response_columns]
                response_columns = [col.get('FIELDNAME') for col in response_columns]
                data = self._to_dataframe(result['DATA'], response_columns, response_columns_length)
if humanized_columns:
new_humanized_columns = dict(zip(columns, humanized_columns))
data.rename(columns=new_humanized_columns, inplace=True)
yield data
if len(result['DATA']) < page_size:
break
page += 1
except CommunicationError:
raise SAPException('Could not connect to server')
except LogonError:
raise SAPException('Could not log in. Wrong credentials?')
except (ABAPApplicationError, ABAPRuntimeError):
raise SAPException('An error occurred at ABAP level')
def get_data_json(self,
table: str,
columns: List[str],
page: int,
where: str = None,
where_list: List[str] = None,
humanized_columns: List[str] = None,
page_size: int = 1000) -> Any:
fields = []
where_clause = []
if where:
where_clause = [{"TEXT": where}]
elif where_list:
where_clause = [{"TEXT": w} for w in where_list]
if columns:
fields = [{"FIELDNAME": f} for f in columns]
try:
connection = self.connection.get_connection()
start = (page - 1) * page_size
limit = page_size
result = connection.call('RFC_READ_TABLE',
QUERY_TABLE=table,
# DELIMITER='¬',
FIELDS=fields,
OPTIONS=where_clause,
ROWSKIPS=start,
ROWCOUNT=limit)
connection.close()
response_columns = result.get('FIELDS')
            response_columns_length = [int(col.get('LENGTH')) for col in response_columns]
            response_columns = [col.get('FIELDNAME') for col in response_columns]
            data = self._to_dataframe(result['DATA'], response_columns, response_columns_length)
if humanized_columns:
new_humanized_columns = dict(zip(columns, humanized_columns))
data.rename(columns=new_humanized_columns, inplace=True)
return json.loads(data.to_json(orient='records', force_ascii=False))
except CommunicationError:
raise SAPException('Could not connect to server')
except LogonError:
raise SAPException('Could not log in. Wrong credentials?')
except (ABAPApplicationError, ABAPRuntimeError):
raise SAPException('An error occurred at ABAP level')
|
/sap_rfc_data_collector-1.0.2.tar.gz/sap_rfc_data_collector-1.0.2/sap_rfc_data_collector/sap_generic.py
| 0.461502 | 0.179981 |
sap_generic.py
|
pypi
|
[](https://api.reuse.software/info/github.com/SAP/cloud-pysec)
# Description
This project is a python client library called *sap_xssec* for validation of *OAuth access tokens* issued by the *XSUAA*.
### OAuth Authorization Code Flow
A typical web application uses the OAuth authorization code flow for authentication, which is described as follows:
1. A user accesses the web application using a browser.
2. The web application (in typical SAP Cloud Platform applications, this is an application router) acts as OAuth client and redirects
to the OAuth server for authorization.
3. Upon authentication, the web application uses the code issued by the authorization server to request an access token.
4. The web application uses the access token to request data from the OAuth resource server.
The OAuth resource server validates the token using online or offline validation.
For this validation, libraries such as sap_xssec are used.

### Usage
To use this library, you need to pass it the JWT access token that should be validated.
The examples below rely on users and credentials that you should substitute with the ones in your context.
The typical use case for calling this API is from within a container when an HTTP request is received and it must
be checked whether the requester is authorized to execute this method.
In this case, the access token is contained in the authorization header (with keyword `bearer`).
You can remove the prefix `bearer` and pass the remaining string (just as in the following example as `access_token`) to the API.
```python
from sap import xssec
from cfenv import AppEnv
env = AppEnv()
uaa_service = env.get_service(name='<uaa_service_name>').credentials
security_context = xssec.create_security_context(access_token, uaa_service)
```
**Note:** The example above uses the module [`cfenv`](https://pypi.python.org/pypi/cfenv) to retrieve the configuration of the uaa
service instance.
`uaa_service` is a dict that contains the necessary client information and looks like:
```
{
'clientid' : 'example_clientid' // the id of the client
'clientsecret': 'example_clientsecret' // the secret of the client
'url': 'example_url' // the url of the uaa
'uaadomain': 'example_uaadomain' // the domain of the uaa
'verificationkey': 'example_verification key' // (optional) the key used for the verification of the token
}
```
If the `uaadomain` is set in the `uaa_service` and the `jku` and `kid` are set in the incoming token, the key is requested from the uaa. As a fallback, the `verificationkey` configured in `uaa_service` is used for offline validation. Requested keys are cached for 15 minutes to avoid excessive load on the uaa.
The creation function `xssec.create_security_context` is to be used for an end-user token (e.g. for grant_type `password`
or grant_type `authorization_code`) where user information is expected to be available within the token and thus within the security context.
`create_security_context` also accepts a token of grant_type `client_credentials`.
This leads to the creation of a limited *SecurityContext* where certain functions are not available.
For more details please consult the API description in the wiki.
For example, the `security_context` object can then be used to check if a user has a required scope:
```
security_context.check_scope('uaa.user')
```
or to receive the client id of a user:
```
security_context.get_clientid()
```
More details on the API can be found in the [wiki](https://github.com/SAP/cloud-pysec/wiki).
### Offline Validation
sap_xssec offers offline validation of the access token, which requires no additional call to the UAA.
The trust for this offline validation is created by binding the XS UAA service instance to your application.
Inside the credentials section in the environment variable `VCAP_SERVICES`, the key for validation of tokens is included.
By default, the offline validation check will only accept tokens intended for the same OAuth2 client in the same UAA identity zone.
This makes sense and will cover the vast majority of use cases.
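If you only need offline validation, the same `create_security_context` call can be used with a service configuration that provides the `verificationkey`. The following is a minimal sketch with placeholder values; substitute the credentials from your own service binding:
```python
from sap import xssec

# Placeholder configuration; in a real deployment these values come from the
# bound XSUAA service instance (e.g. the credentials section of VCAP_SERVICES).
uaa_service = {
    'clientid': 'example_clientid',
    'clientsecret': 'example_clientsecret',
    'url': 'example_url',
    'uaadomain': 'example_uaadomain',
    'verificationkey': 'example_verificationkey',  # used for offline validation
}

# access_token: the JWT taken from the Authorization header, as in the example above
security_context = xssec.create_security_context(access_token, uaa_service)
```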
⚠️From version 2.1.0, the `SAP_JWT_TRUST_ACL` environment variable is no longer supported.
If you want to enable another (foreign) application to use some of your application's scopes, you can add a ```granted-apps``` marker to your scope in the ```xs-security.json``` file (as in the following example). The value of the marker is a list of applications that is allowed to request a token with the denoted scope.
```JSON
{
"xsappname" : "sample-leave-request-app",
"description" : "This sample application demos leave requests",
"scopes" : [ { "name" : "$XSAPPNAME.createLR",
"description" : "create leave requests" },
{ "name" : "$XSAPPNAME.approveLR",
"description" : "approve leave requests",
"granted-apps" : ["MobileApprovals"] }
],
"attributes" : [ { "name" : "costcenter",
"description" : "costcenter",
"valueType" : "string"
} ],
"role-templates": [ { "name" : "employee",
"description" : "Role for creating leave requests",
"scope-references" : [ "$XSAPPNAME.createLR","JobScheduler.scheduleJobs" ],
"attribute-references": [ "costcenter"] },
{ "name" : "manager",
"description" : "Role for creating and approving leave requests",
"scope-references" : [ "$XSAPPNAME.createLR","$XSAPPNAME.approveLR","JobScheduler.scheduleJobs" ],
"attribute-references": [ "costcenter" ] }
]
}
```
# Configuration
~~To configure whether the *sap-jwt* or the *py-jwt* library should be used for validation of the jwt token,
change the `USE_SAP_PY_JWT` environment variable to `true`.~~
⚠️From version 4.0.0, the `USE_SAP_PY_JWT` environment variable is no longer supported and therefore *py-jwt* is installed by default.
# Requirements
*sap_xssec* requires *python 3.7* or newer.
# Download and Installation
As this package is deployed to PyPI, you can simply add `sap_xssec` as a dependency to your python project or
install this package by running `pip install sap_xssec`.
# Known Issues
# How to obtain support
Open an issue in GitHub.
|
/sap_xssec-4.0.1.tar.gz/sap_xssec-4.0.1/README.md
| 0.590307 | 0.844729 |
README.md
|
pypi
|
class JwtAudienceValidator(object):
'''
Validates if the jwt access token is intended for the OAuth2 client of this
application. The aud (audience) claim identifies the recipients the JWT is
issued for.
Validates whether there is one audience that matches one of the configured
OAuth2 client ids.
'''
DOT = "."
def __init__(self, clientid):
self._clientid = clientid
self._trusted_clientids = set()
self.trusted_clientids = clientid
self._is_foreign_mode = False
@property
def trusted_clientids(self):
return self._trusted_clientids
@trusted_clientids.setter
def trusted_clientids(self, clientid):
if clientid:
self._trusted_clientids.add(clientid)
@property
def is_foreign_mode(self):
        return self._is_foreign_mode
@is_foreign_mode.setter
def is_foreign_mode(self, foreignmode):
self._is_foreign_mode = foreignmode
@property
def clientid(self):
return self._clientid
@clientid.setter
def clientid(self, clientId):
self._clientid = clientId
def configure_trusted_clientId(self, client_id):
if client_id:
self.trusted_clientids.add(client_id)
def validate_token(self, clientId_from_token=None, audiences_from_token=[], scopes_from_token=[]):
self.is_foreign_mode = False
allowed_audiences = self.extract_audiences_from_token(audiences_from_token, scopes_from_token, clientId_from_token)
if (self.validate_same_clientId(clientId_from_token) == True or
self.validate_audience_of_xsuaabrokerclone(allowed_audiences) == True or
self.validate_default(allowed_audiences)==True):
return True
else:
return False
def extract_audiences_from_token(self, audiences_from_token=[], scopes_from_token=[], clientid_from_token=None):
'''
Extracts Audience From Token
'''
audiences = []
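        # Audiences are taken from the aud claim (anything after '.' is treated as a
        # namespace and stripped); if the aud claim is empty they are derived from the
        # scope prefixes, and the token's client id is appended as an extra audience.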
token_audiences = audiences_from_token
for audience in token_audiences:
if audience.find(self.DOT) > -1:
# CF UAA derives the audiences from the scopes.
                # In case the scopes contain namespaces, these need to be removed.
audience = audience[0:audience.find(self.DOT)].strip()
if audience and (audience not in audiences):
audiences.append(audience)
else:
audiences.append(audience)
if len(audiences) == 0:
for scope in scopes_from_token:
if scope.find(self.DOT) > -1:
audience = scope[0 :scope.find(self.DOT)].strip()
if audience :
if (audience not in audiences):
audiences.append(audience)
if (clientid_from_token and (clientid_from_token not in audiences)):
audiences.append(clientid_from_token)
return audiences
def validate_same_clientId(self, clientid_from_token):
if clientid_from_token == self.clientid:
return True
else:
return False
def validate_audience_of_xsuaabrokerclone(self, allowed_audiences):
for configured_clientid in self.trusted_clientids:
if ("!b") in configured_clientid:
# isBrokerClientId
for audience in allowed_audiences:
if (audience.endswith("|" + configured_clientid)):
return True
self.is_foreign_mode=True
return False
def validate_default(self, allowedAudiences):
for configuredClientId in self.trusted_clientids:
if configuredClientId in allowedAudiences:
return True
return False
|
/sap_xssec-4.0.1.tar.gz/sap_xssec-4.0.1/sap/xssec/jwt_audience_validator.py
| 0.661158 | 0.211508 |
jwt_audience_validator.py
|
pypi
|
from typing import List
import websockets
import click
import asyncio
import json
import os
import logging
import aiohttp
import tqdm
import tqdm.asyncio
ENDPOINT = os.environ.get("SAPIENTA_ENDPOINT", "https://sapienta.papro.org.uk")
logger = logging.getLogger("sapientacli")
async def submit_job(file_path: str) -> dict:
"""Submit job for processing, return ID on queue"""
data = {'file': open(file_path, 'rb')}
async with aiohttp.ClientSession() as session:
async with session.post(f"{ENDPOINT}/submit", data=data) as response:
return await response.json()
async def submit_and_subscribe(filename, websocket, job_map):
response = await submit_job(filename)
await websocket.send(json.dumps({"action":"subscribe", "job_id":response['job_id']}))
job_map[response['job_id']] = filename
async def collect_result(job_id, local_filename):
"""Collect the results for the given job and store"""
newpath = infer_result_name(local_filename)
async with aiohttp.ClientSession() as session:
async with session.get(f"{ENDPOINT}/{job_id}/result") as response:
content = await response.text()
with open(newpath,'w') as f:
f.write(content)
def infer_result_name(input_name):
"""Calculate output name for annotated paper"""
nameroot, _ = os.path.splitext(input_name)
newpath = nameroot + "_annotated.xml"
return newpath
async def handle_results(websocket, job_map, total_files):
done = 0
with tqdm.tqdm(desc="annotation progress", total=total_files) as pbar:
while done < total_files:
resptext = await websocket.recv()
try:
resp = json.loads(resptext)
tqdm.tqdm.write(f"{job_map[resp['job_id']]} update: {resp['step']}={resp['status']}")
if resp['step'] == 'annotate' and resp['status'] == 'complete':
await collect_result(resp['job_id'],job_map[resp['job_id']])
pbar.update()
done += 1
except Exception as e:
tqdm.tqdm.write(f"Could not handle response {resptext}: {e}")
async def execute(files: List[str]):
"""This is the real main meat of the app that 'main' wrapps"""
WS_ENDPOINT = ENDPOINT.replace("http","ws", 1)
uri = f"{WS_ENDPOINT}/ws"
async with websockets.connect(uri) as websocket:
        job_map = {}
to_process = set([file for file in files if not os.path.exists(infer_result_name(file))])
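        # Files that already have an annotated result on disk are skipped; the rest
        # are uploaded and subscribed to, and handle_results stores each annotation
        # as its completion message arrives over the websocket.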
for file in files:
if file not in to_process:
print(f"Skip existing {file}")
result_handler = asyncio.create_task(handle_results(websocket, job_map, len(to_process)))
for file in tqdm.tqdm(to_process, desc="upload progress"):
await submit_and_subscribe(file, websocket, job_map)
await result_handler
@click.command()
@click.argument("files", nargs=-1, type=click.Path(file_okay=True, exists=True))
def main(files):
"""Run annotation process"""
logging.basicConfig(level=logging.INFO)
asyncio.get_event_loop().run_until_complete(execute(files))
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
|
/sapientacli-0.2.1-py3-none-any.whl/sapientacli.py
| 0.503906 | 0.164248 |
sapientacli.py
|
pypi
|
import ast
import copy
import glob
import json
import re
from importlib.metadata import entry_points
from pathlib import Path
from shutil import copyfile
from typing import Tuple, Union
from sapientml.executor import PipelineExecutor
from sapientml.generator import CodeBlockGenerator, PipelineGenerator
from sapientml.macros import metric_lower_is_better
from sapientml.params import Code, Dataset, PipelineResult, RunningResult, Task
from sapientml.util.json_util import JSONEncoder
from sapientml.util.logging import setup_logger
from .adaptation.generation.template_based_adaptation import Adaptation
from .explain.main import process as explain
from .params import SimplePipeline, SapientMLConfig, summarize_dataset
from .seeding.predictor import predict
model_dir_path_default = Path(__file__).parent / "models"
logger = setup_logger()
def add_prefix(filename, prefix):
if not prefix:
return filename
return f"{prefix}_{filename}"
class SapientMLGenerator(PipelineGenerator, CodeBlockGenerator):
def __init__(self, **kwargs):
self.config = SapientMLConfig(**kwargs)
self.config.postinit()
eps = entry_points(group="sapientml.code_block_generator")
self.loaddata = eps["loaddata"].load()(**kwargs)
self.preprocess = eps["preprocess"].load()(**kwargs)
def generate_pipeline(self, dataset: Dataset, task: Task):
self.dataset = dataset
self.task = task
logger.info("Generating pipelines...")
dataset, loaddata_block = self.loaddata.generate_code(dataset, task)
dataset, preprocess_block = self.preprocess.generate_code(dataset, task)
code_block = loaddata_block + preprocess_block
dataset, sapientml_results = self.generate_code(dataset, task)
result_pipelines: list[Code] = []
for pipeline in sapientml_results:
pipeline.validation = code_block.validation + pipeline.validation
pipeline.test = code_block.test + pipeline.test
pipeline.train = code_block.train + pipeline.train
pipeline.predict = code_block.predict + pipeline.predict
result_pipelines.append(pipeline)
logger.info("Executing generated pipelines...")
executor = PipelineExecutor()
self.execution_results = executor.execute(
result_pipelines,
self.config.initial_timeout,
Path(dataset.output_dir),
self.config.cancel,
)
logger.info("Evaluating execution results of generated pipelines...")
lower_is_better = self.task.adaptation_metric in metric_lower_is_better
self.evaluate(self.execution_results, lower_is_better)
logger.info("Done.")
return (self._best_pipeline, self._best_pipeline_score), self._candidate_scripts
def generate_code(self, dataset: Dataset, task: Task) -> Tuple[Dataset, list[SimplePipeline]]:
df = dataset.training_dataframe
# Generate the meta-features
logger.info("Generating meta features ...")
dataset_summary = summarize_dataset(df, task) # type: ignore
if dataset_summary.has_inf_value_targets:
raise ValueError("Stopped generation because target columns have infinity value.")
labels = predict(task, dataset_summary)
adapt = Adaptation(
labels,
task,
dataset_summary,
self.config,
)
pipelines = adapt.run_adaptation()
return dataset, pipelines
def evaluate(self, execution_results: list[tuple[Code, RunningResult]], lower_is_better: bool = False) -> None:
self._best_pipeline = None
self._best_pipeline_score = PipelineResult(score=None, metric=None, best_params=None)
candidate_scripts = []
for pipeline, result in execution_results:
if result.returncode == 0:
pipeline_score = self._parse_pipeline_output(result.output)
else:
pipeline_score = PipelineResult(score=None, metric=None, best_params=None)
candidate_scripts.append((pipeline, pipeline_score))
self._candidate_scripts = candidate_scripts
# When an error occurs while running a pipeline, the score becomes None
error_pipelines = [pipeline for pipeline in candidate_scripts if pipeline[1].score is None]
# If none of them have the score, stop ranking them
if len(candidate_scripts) == len(error_pipelines):
return
        # sort so that the best score comes first (descending, or ascending when lower is better)
succeeded_scripts = sorted(
[x for x in candidate_scripts if x[1].score is not None],
key=lambda x: x[1].score,
reverse=(not lower_is_better),
)
failed_scripts = [x for x in candidate_scripts if x[1].score is None]
ranked_candidate_scripts = succeeded_scripts + failed_scripts
best_pipeline_tuple = ranked_candidate_scripts[0]
if best_pipeline_tuple is None:
return
best_pipeline = copy.deepcopy(best_pipeline_tuple[0])
if best_pipeline_tuple[1].best_params is not None:
best_pipeline.test = best_pipeline.test.replace(
"best_params = study.best_params", "best_params = " + str(best_pipeline_tuple[1].best_params)
)
best_pipeline.train = best_pipeline.train.replace(
"best_params = study.best_params", "best_params = " + str(best_pipeline_tuple[1].best_params)
)
self._best_pipeline = best_pipeline
self._best_pipeline_score = best_pipeline_tuple[1]
@staticmethod
def _parse_pipeline_output(output: str):
score = None
best_params = None
metric = None
output_lines = output.splitlines()
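        # Generated pipelines are expected to print lines such as
        #   "best params: {...}" and "RESULT: <metric>: <score>";
        # anything else (or a parse failure) leaves the corresponding field as None.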
try:
for line in output_lines:
if re.match("best params: ", line):
best_params = ast.literal_eval(re.findall("best params: (.+)", line)[0])
elif re.match("RESULT: ", line):
parts = [x.strip() for x in line.split(":")]
metric = parts[-2].strip().split(" ")[0]
score = float(parts[-1])
except Exception:
pass
return PipelineResult(score=score, metric=metric, best_params=best_params)
def save(self, output_dir: Union[Path, str]):
self.output_dir = Path(output_dir)
self.output_dir.mkdir(parents=True, exist_ok=True)
_output_dir = Path(self.dataset.output_dir)
candidate_scripts = self._candidate_scripts
if candidate_scripts:
if self._best_pipeline:
script_body = self._best_pipeline.test.replace(_output_dir.as_posix(), ".")
with open(self.output_dir / add_prefix("final_script.py", self.config.project_name), "w", encoding="utf-8") as f:
f.write(script_body)
script_body = self._best_pipeline.train.replace(_output_dir.as_posix(), ".")
with open(self.output_dir / add_prefix("final_train.py", self.config.project_name), "w", encoding="utf-8") as f:
f.write(script_body)
script_body = self._best_pipeline.predict.replace(_output_dir.as_posix(), ".")
with open(self.output_dir / add_prefix("final_predict.py", self.config.project_name), "w", encoding="utf-8") as f:
f.write(script_body)
with open(
self.output_dir / (add_prefix("final_script", self.config.project_name) + ".out.json"),
"w",
encoding="utf-8",
) as f:
json.dump(self._best_pipeline_score.__dict__, f, cls=JSONEncoder, indent=4)
else:
logger.warning("All candidate scripts failed. Final script is not saved.")
raise RuntimeError("All candidate scripts failed. Final script is not saved.")
# copy libs
lib_path = self.output_dir / "lib"
lib_path.mkdir(exist_ok=True)
eps = entry_points(group="sapientml.export_modules")
for ep in eps:
for file in glob.glob(f"{ep.load().__path__[0]}/*.py"):
copyfile(file, lib_path / Path(file).name)
for index, (script, detail) in enumerate(candidate_scripts, start=1):
# script.dataset.training_data_path is '{user specified dir}/{name}.csv' or '{tmpdir}/training.pkl'
            # If it is the latter, we have to replace {tmpdir} with output_dir.
script_body = script.validation.replace(_output_dir.as_posix(), ".")
with open(self.output_dir / f"{index}_script.py", "w", encoding="utf-8") as f:
f.write(script_body)
self.debug_info = {}
for i, candidate in enumerate(candidate_scripts, start=1):
info = {"content": candidate[0].model_dump(), "run_info": candidate[1].__dict__}
self.debug_info[i] = info
if self.config.debug:
with open(self.output_dir / add_prefix("run_info.json", self.config.project_name), "w", encoding="utf-8") as f:
json.dump(self.debug_info, f, cls=JSONEncoder, indent=4)
if self.config.add_explanation:
self.add_explanation()
def add_explanation(self):
explain(
visualization=True,
eda=True,
dataframe=self.dataset.training_dataframe,
script_path=(self.output_dir / add_prefix("final_script.py", self.config.project_name))
.absolute()
.as_posix(),
target_columns=self.task.target_columns,
problem_type=self.task.task_type,
ignore_columns=self.task.ignore_columns,
skeleton=self._best_pipeline.labels,
explanation=self._best_pipeline.pipeline_json,
run_info=self.debug_info,
internal_execution=True,
timeout=self.config.timeout_for_test,
cancel=self.config.cancel,
)
|
/sapientml_core-0.4.2.tar.gz/sapientml_core-0.4.2/sapientml_core/generator.py
| 0.775902 | 0.202996 |
generator.py
|
pypi
|
from ...enums import Operator
from .predicate import Predicate
class PreprocessingLabel:
def __init__(self, label_name, meta_features, predicates):
self.label_name = label_name
self.meta_features = meta_features
self.predicate_objects = list()
self._build_predicate_objects(predicates)
self.relevant_columns = list()
self.components_before = list()
self.components_after = list()
self.alternative_components = list()
def __str__(self):
return self.label_name
def __repr__(self):
return str(self)
def _build_predicate_objects(self, predicates):
for pred in predicates:
feature_name = pred["feature_name"]
operator = self._get_operator(pred["operator"])
comparison_value = pred["threshold"]
p = Predicate(feature_name, operator, comparison_value)
self.predicate_objects.append(p)
def _get_operator(self, op_string):
if op_string == ">":
return Operator.GREATER_THAN
elif op_string == ">=":
return Operator.GREATER_THAN_OR_EQUAL_TO
elif op_string == "<":
return Operator.LESS_THAN
elif op_string == "<=":
return Operator.LESS_THAN_OR_EQUAL_TO
elif op_string == "==" or op_string == "=":
return Operator.EQUAL_TO
else:
return Operator.NOT_EQUAL_TO
def get_relevant_columns(self, dataset_summary, target, ignore_columns):
rel_columns_list = []
        # approach 1: conjunction: a column is relevant if and only if all of the predicates applicable to that component are true
        # approach 2: disjunction: a column is relevant if and only if at least one of the predicates applicable to that component is true
approach = 2
for column_name, column in dataset_summary.columns.items():
if column_name in ignore_columns:
continue
# error handling for log transform: don't apply if any col value <= 0
if "PREPROCESS:Scaling:log" in self.label_name:
if column.has_negative_value:
continue
result = list() # holds boolean results of all predicates applicable to a column
for p in self.predicate_objects:
# special handling of "target_imbalance_score" feature, since it should only be applied on target column
if p.feature_name == "feature:target_imbalance_score":
if column_name not in target:
result.append(False)
continue
result.append(p.evaluate_predicate(column.meta_features))
if approach == 1: # conjunction
if all(result):
rel_columns_list.append(column_name)
elif approach == 2: # disjunction
if any(result):
rel_columns_list.append(column_name)
return rel_columns_list
|
/sapientml_core-0.4.2.tar.gz/sapientml_core-0.4.2/sapientml_core/adaptation/generation/preprocessing_label.py
| 0.617628 | 0.179189 |
preprocessing_label.py
|
pypi
|
from typing import Literal, Optional
import pandas as pd
from sapientml.params import CancellationToken
from sapientml.util.logging import setup_logger
from .AutoEDA import EDA
from .AutoVisualization import AutoVisualization_Class
from .code_miner import Miner
logger = setup_logger()
def process(
visualization: bool,
eda: bool,
dataframe: pd.DataFrame,
script_path: str,
target_columns: list[str],
problem_type: Literal["regression", "classification"],
ignore_columns: Optional[list[str]] = None,
skeleton: Optional[dict] = None,
explanation: Optional[dict] = None,
run_info: Optional[dict] = None,
internal_execution: bool = False,
timeout: int = 0,
cancel: Optional[CancellationToken] = None,
):
output_files = None
if visualization:
# Call AutoVisualization to generate visualization codes
AV = AutoVisualization_Class()
visualization_code = AV.AutoVisualization(
df=dataframe,
target_columns=target_columns,
problem_type=problem_type,
ignore_columns=ignore_columns,
)
else:
visualization_code = None
if eda:
# handle list(tuple, dict) value in dataframe.
for col in dataframe.columns:
exist_list_values = [x for x in dataframe[col] if type(x) in [list, tuple, dict]]
if len(exist_list_values) > 0:
dataframe[col] = dataframe[col].fillna("").astype(str)
eda = EDA(dataframe, target_columns, log_level=2)
eda.check_consistency(convert=False)
categories, desc = eda.cat_process(threshold=0.01, IQR_activation=True, z_activation=True)
initial_blocks = eda.description
else:
initial_blocks = []
code_miner = Miner(
script_path,
init_blocks=initial_blocks,
visualization_code=visualization_code,
logger=logger,
skeleton=skeleton,
explanation=explanation,
run_info=run_info,
)
output_files = code_miner.save_all(execution=internal_execution, timeout=timeout, cancel=cancel)
return output_files
|
/sapientml_core-0.4.2.tar.gz/sapientml_core-0.4.2/sapientml_core/explain/main.py
| 0.775137 | 0.32184 |
main.py
|
pypi
|
import os
from jinja2 import Environment, FileSystemLoader
from sapientml.generator import CodeBlockGenerator
from sapientml.params import Code, Dataset, Task
template_env = Environment(loader=FileSystemLoader(f"{os.path.dirname(__file__)}/templates"), trim_blocks=True)
ROW_THRESHOLD_FOR_SAMPLING = 100000
def _render(tpl, *args, **kwargs):
code = tpl.render(*args, **kwargs)
return "\n".join([line for line in code.split("\n") if len(line) > 0]) + "\n\n"
class LoadData(CodeBlockGenerator):
def generate_code(self, dataset: Dataset, task: Task):
code = Code()
code.validation = code.test = code.train = code.predict = "# *** GENERATED PIPELINE ***\n\n"
tpl = template_env.get_template("loaddata.py.jinja")
code.validation += _render(tpl, dataset=dataset, task=task, validation=True)
code.test += _render(tpl, dataset=dataset, task=task, validation=False)
tpl = template_env.get_template("loaddata_train.py.jinja")
code.train += _render(tpl, dataset=dataset, task=task, script_type="train")
tpl = template_env.get_template("loaddata_predict.py.jinja")
code.predict += _render(tpl, dataset=dataset, task=task, script_type="predict")
tpl = template_env.get_template("split.py.jinja")
code.validation += _render(tpl, dataset=dataset, task=task, validation=True)
code.test += _render(tpl, dataset=dataset, task=task, validation=False)
tpl = template_env.get_template("subsample.py.jinja")
code.validation += _render(tpl, task=task, sample_size=ROW_THRESHOLD_FOR_SAMPLING)
dataset.training_dataframe = dataset.training_dataframe.drop(dataset.ignore_columns, axis=1, errors="ignore")
if dataset.validation_dataframe is not None:
dataset.validation_dataframe = dataset.validation_dataframe.drop(
dataset.ignore_columns, axis=1, errors="ignore"
)
if dataset.test_dataframe is not None:
dataset.test_dataframe = dataset.test_dataframe.drop(dataset.ignore_columns, axis=1, errors="ignore")
return dataset, code
|
/sapientml_loaddata-0.3.4.tar.gz/sapientml_loaddata-0.3.4/sapientml_loaddata/generator.py
| 0.507568 | 0.271547 |
generator.py
|
pypi
|
# sapiopylib: Official Sapio Informatics Platform Python API
<div align="center">
<img src="https://s3.amazonaws.com/public.exemplareln.com/sapio-pylib/sapio-sciencesofficial-python-api-library.png" alt="logo"><br>
</div>
-----------------
[](https://pypi.org/project/sapiopylib/) [](https://github.com/sapiosciences/sapio-py-tutorials/blob/master/LICENSE) [](https://github.com/sapiosciences/sapio-py-tutorials/issues)
## What is it?
sapiopylib is a powerful Python package, developed and maintained by Sapio Sciences, that provides the ability to create endpoints to manipulate data and make configuration changes within the Sapio lab informatics platform in a quick and straightforward manner.
The package makes it easy to automate changes to and queries of different types of data in the system, ranging from records to notebooks and the entries within them. Intuitive datatypes, such as record models that allow for simple manipulation of data records and their fields, within the package help to make development nearly as straightforward as performing the same tasks in the application.
As well as serving as the most direct way to programmatically alter data in the application, sapiopylib makes it possible to create endpoints to alter and query configurations in the system. Configurations for system data types, lists used by the system, and more can be easily accessed using this package.
## Main Features
Here is a list of major features in this library:
- Support all Sapio REST API functions.
- Manipulate data records with record models using client-based caching. This allows you to batch requests easily for performance. Making your changes in mini-batches also provides transactional commits outside of a webhook context for data record changes.
- Create new temporary data types easily with FormBuilder utility.
- Provides Protocol-Step API as we have defined in Sapio Java API.
- Supports creation of a Flask-based webhook server. Implement additional toolbar buttons, rules, validation logic to customize your ELN experiment, workflows, and user interface.
## Where to get it?
Installation is simple:
```sh
pip install sapiopylib
```
However, you may need to pay attention to the library version to ensure it is compatible with your Sapio Informatics Platform.
The correct versions for each platform can be found in the tutorial github. A branch is created under the 'prior_releases' folder whenever a specific sapiopylib version is released against a platform release. The installation manual inside the tutorial, with the correct branch checked out, references the exact version you should install for that platform.
The most recent release may contain REST calls that will require a bleeding edge version of Sapio Platform that has not yet reached GA status.
## Licenses
sapiopylib along with its tutorials in the github are licensed under MPL 2.0.
pypi.org is granted the right to distribute sapiopylib forever.
This license does not provide any rights to use any other copyrighted artifacts from Sapio Sciences. (And they are typically written in another programming language with no linkages to this library.)
## Dependencies
The following dependencies are required for this package:
- [requests - Requests is an Apache2 Licensed HTTP library, written in Python, for human beings.](https://pypi.org/project/requests/2.7.0/)
- [pandas - pandas is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,
built on top of the Python programming language.](https://pandas.pydata.org/)
- [Flask - A simple framework for building complex web applications.](https://pypi.org/project/Flask/)
- [buslane - A simple implementation of event-bus system with proper type hinting](https://pypi.org/project/buslane/)
- [plotly.py - An open-source, interactive data visualization library for Python](https://pypi.org/project/plotly/)
- [kaleido - Static image export for web-based visualization libraries with zero dependencies](https://pypi.org/project/kaleido/)
## Documentation
All documentations, including code examples and installation guide, are provided at [our sapiopylib tutorial github](https://github.com/sapiosciences/sapio-py-tutorials).
## Getting Help
If you have support contract with Sapio Sciences, please use our technical support channels. [email protected]
If you have any questions about how to use sapiopylib, please visit our tutorial page.
If you would like to report an issue on sapiopylib, or its tutorial content, please feel free to create an issue ticket at the tutorial github.
## About Us
Sapio is at the forefront of the Digital Lab with its science-aware platform for managing all your life science data with its integrated Electronic Lab Notebook, LIMS Software and Scientific Data Management System.
Visit us at https://www.sapiosciences.com/
|
/sapiopylib-2023.6.17.147.tar.gz/sapiopylib-2023.6.17.147/README.md
| 0.693369 | 0.964422 |
README.md
|
pypi
|
import os # Allows checking if using Windows
if os.name != 'nt':  # SAPI via COM is only available on Windows
    raise RuntimeError("Windows is required.")
import comtypes.client # Importing comtypes.client will make the gen subpackage
try:
from comtypes.gen import SpeechLib # comtypes
except ImportError:
# Generate the SpeechLib lib and any associated files
engine = comtypes.client.CreateObject("SAPI.SpVoice")
stream = comtypes.client.CreateObject("SAPI.SpFileStream")
from comtypes.gen import SpeechLib
class Sapi(object):
"""A speech API using the Microsoft SAPI through COM"""
def __init__(self):
super().__init__()
self.voice = comtypes.client.CreateObject('Sapi.SpVoice')
def get_voices(self, name=''):
"""Get a list of voices, search by name optional"""
voice_list = []
voices = self.voice.GetVoices()
        if name != '':
for voice in voices:
if name in voice.GetDescription():
voice_list.append(voice)
break
else:
print('Voice not found')
else:
for voice in voices:
voice_list.append(voice)
return voice_list
def get_voice_names(self):
"""Get the names of all the voices"""
return [voice.GetDescription() for voice in self.get_voices()]
def set_voice(self, voice):
"""Set the voice to the given voice"""
if type(voice) is str:
self.voice.Voice = self.get_voices(voice)[0]
else:
self.voice.Voice = voice
return
def get_audio_outputs(self, name=''):
"""Get the audio outputs, search for the one with the name if given"""
output_list = []
outputs = self.voice.GetAudioOutputs()
        if name != '':
for output in outputs:
if name in output.GetDescription():
output_list.append(output)
break
else:
print('Audio output not found')
else:
for output in outputs:
output_list.append(output)
return output_list
def get_audio_output_names(self):
"""Get the names of all the audio outpus"""
return [output.GetDescription() for output in self.get_audio_outputs()]
def set_audio_output(self, output):
if type(output) is str:
self.voice.AudioOutput = self.get_audio_outputs(output)[0]
else:
self.voice.AudioOutput = output
return
def say(self, message):
self.voice.Speak(message)
return
def set_rate(self, rate):
"""Set the speed of the speaker
-10 is slowest, 10 is fastest"""
self.voice.Rate = rate
def _create_stream(self, filename):
"""Create a file stream handler"""
stream = comtypes.client.CreateObject('Sapi.SpFileStream')
stream.Open(filename, SpeechLib.SSFMCreateForWrite)
return stream
def create_recording(self, filename, message):
"""Make a recording of the given message to the file
The file should be a .wav as the output is
PCM 22050 Hz 16 bit, Little endianness, Signed"""
stream = self._create_stream(filename)
temp_stream = self.voice.AudioOutputStream
self.voice.AudioOutputStream = stream
self.say(message)
self.voice.AudioOutputStream = temp_stream
if __name__ == '__main__':
v = Sapi()
v.set_voice('Anna')
v.get_voice_names()
|
/sapitts-0.1.tar.gz/sapitts-0.1/tts/sapi.py
| 0.643889 | 0.160135 |
sapi.py
|
pypi
|
from datetime import datetime, timedelta
import requests
class Sapling:
def __init__(self, uri, api_key):
self.get_header = {'Authorization': f'Token {api_key}'}
self.put_header = {'Authorization': f'Token {api_key}',
"Content-Type": "application/x-www-form-urlencoded"
}
self.uri = uri
def get_users(self, max_days=1):
"""Gets user from sapling
Keyword Arguments:
max_days {int} -- Maximum days from current date to retrieve user (default: {1})
Returns:
            list -- Active users with a start date between today and max_days days from now
"""
now = datetime.now().date().strftime('%Y-%m-%d')
print(now)
till = (datetime.now() + timedelta(days=max_days)
).date().strftime('%Y-%m-%d')
url = f'https://{self.uri}.saplingapp.io/api/v1/beta/profiles?status=active' + \
f'&start_date[since]={now}&start_date[until]={till}'
response = requests.get(url, headers=self.get_header)
return response.json()['users']
def get_users_backdate(self, max_days=1):
"""Gets user from sapling
Keyword Arguments:
max_days {int} -- Maximum days from current date to retrieve user (default: {1})
Returns:
[type] -- [description]
"""
page = 1
end = datetime.now().date().strftime('%Y-%m-%d')
start = (datetime.now() - timedelta(days=max_days)
).date().strftime('%Y-%m-%d')
print(start, end)
url = f'https://{self.uri}.saplingapp.io/api/v1/beta/profiles?status=active' + \
f'&start_date[since]={start}&start_date[until]={end}&page={page}'
response = requests.get(url, headers=self.get_header)
users = response.json()['users']
page += 1
while True:
if page <= response.json()['total_pages']:
url = f'https://{self.uri}.saplingapp.io/api/v1/beta/profiles?status=active' +\
f'&start_date[since]={start}&start_date[until]={end}&page={page}'
response = requests.get(url, headers=self.get_header)
users += response.json()['users']
print(response.json()['current_page'])
page += 1
continue
break
return users
def get_user_by_guid(self, guid):
"""Get user by guid
Arguments:
guid {str} -- Unique identifier for user
Returns:
dict -- User data
"""
url = f'https://{self.uri}.saplingapp.io/api/v1/beta/profiles/{guid}'
response = requests.get(url, headers=self.get_header)
return response.json()
def update_email(self, guid, email):
"""Update Company Email for user
Arguments:
guid {str} -- Unique identifier
email {str} -- User email
Returns:
int -- status code
"""
url = f'https://{self.uri}.saplingapp.io/api/v1/beta/profiles/{guid}'
response = requests.put(url, headers=self.put_header, data={
'company_email': email})
return response.status_code
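# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of how this client might be used. The
# subdomain ("acme"), API key, guid, and email address are all placeholders;
# the calls follow the methods defined above.
if __name__ == '__main__':
    client = Sapling(uri='acme', api_key='YOUR_API_KEY')
    # Users starting within the next 7 days
    upcoming = client.get_users(max_days=7)
    for user in upcoming:
        print(user.get('guid'), user.get('company_email'))
    # Update the company email of a single user (guid is hypothetical)
    status = client.update_email('00000000-0000-0000-0000-000000000000', 'new.hire@example.com')
    print('update status:', status)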
|
/sapling_lib-0.0.1.tar.gz/sapling_lib-0.0.1/sapling_lib/sapling.py
| 0.72662 | 0.165728 |
sapling.py
|
pypi
|
import requests
import uuid
class SaplingClient:
'''
Sapling client class. Provides a mapping of Python functions to Sapling HTTP REST APIs.
:param api_key: 32-character API key
:type api_key: str
:param timeout: Timeout for API call in seconds. Defaults to 120 seconds.
:type timeout: int
:param hostname: Hostname override for SDK and self-hosted deployments.
:type hostname: str
:param pathname: Pathname override for SDK and self-hosted deployments as well as version requirements.
:type pathname: str
'''
def __init__(
self,
api_key,
timeout=120,
hostname=None,
pathname=None,
):
self.api_key = api_key
self.timeout = timeout
self.hostname = hostname or 'https://api.sapling.ai'
self.pathname = pathname or '/api/v1/'
self.url_endpoint = self.hostname + self.pathname
self.default_session_id = str(uuid.uuid4())
def edits(
self,
text,
session_id=None,
lang=None,
variety=None,
medical=None,
auto_apply=False,
advanced_edits=None,
):
'''
Fetches edits (including for grammar and spelling) for provided text.
:param text: Text to process for edits.
:type text: str
:param session_id: Unique name or UUID of document or portion of text that is being checked
:type session_id: str
:param lang: 2 letter ISO 639-1 language code
:type lang: str
:param variety: Specifies regional English variety preference. Defaults to the configuration in the user Sapling dashboard.
:type variety: str
:param medical: If true, the backend will apply Sapling's medical dictionary.
:type medical: bool
:param auto_apply: Whether to return a field with edits applied to the text
:type auto_apply: bool
:param advanced_edits: Additional edit configurations
:type advanced_edits: dict
Options:
- advanced_edits
- adverbs
- simplifications
- hard_to_read
- qualifiers
- voice
- dei
- gender
- gender_pronoun
- gender_noun
- gender_id
- sensitivity
- disability
- age
- race
- social_class
- violence
\
:rtype: dict
:return:
- edits: List of Edits:
- sentence: Unedited sentence
- sentence_start: Offset of sentence from start of text
- start: Offset of edit start relative to sentence
- end: Offset of edit end relative to sentence
- replacement: Suggested replacement
- error_type: Error type
- general_error_type: General Error type
- applied_text: Transformed text if auto_apply is set.
Supported languages:
- `de`: German (Deutsch)
- `el`: Greek (Ελληνικά)
- `en`: English (US/UK/CA/AU)
- `es`: Spanish (Español)
- `fr`: French (Français) (`fr-fr` and `fr-ca` coming soon)
- `it`: Italian (Italiano)
- `jp`: Japanese (日本語)
- `ko`: Korean (한국어)
- `nl`: Dutch (Nederlands)
- `pl`: Polish (Polski)
- `pt`: Portuguese (Português) (`pt-pt` and `pt-br` coming soon)
- `sv`: Swedish (Svenska)
- `tl`: Tagalog
- `zh`: Chinese (中文)
Supported varieties:
- `us-variety`: American English
- `gb-variety`: British English
- `au-variety`: Australian English
- `ca-variety`: Canadian English
- `null-variety`: Don't suggest changes based on English variety
'''
url = self.url_endpoint + 'edits'
session_id = session_id or self.default_session_id
data = {
'key': self.api_key,
'text': text,
'session_id': session_id,
}
if lang is not None:
data['lang'] = lang
if variety is not None:
data['variety'] = variety
if medical is not None:
data['medical'] = medical
if auto_apply is not None:
data['auto_apply'] = auto_apply
if advanced_edits is not None:
data['advanced_edits'] = advanced_edits
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return resp.json()
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def accept_edit(
self,
edit_uuid,
session_id=None,
):
'''
Use this API endpoint to have Sapling adapt its system over time.
Each suggested edit has an edit UUID. You can pass this information back to Sapling to
indicate the edit suggestion was helpful.
For each unique edit in each document, use the accept or reject API endpoint only once in total.
:param edit_uuid: Opaque UUID of the edit returned from the edits endpoint
:type edit_uuid: str, uuid
:param session_id: Unique name or UUID of text that is being processed
:type session_id: str
'''
url = f'{self.url_endpoint}edits/{edit_uuid}/accept'
session_id = session_id or self.default_session_id
data = {
'key': self.api_key,
'session_id': session_id,
}
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def reject_edit(
self,
edit_uuid,
session_id=None,
):
'''
Use this API endpoint to have Sapling not recommend the same edit anymore.
Each suggested edit has an edit UUID. You can pass this information back to Sapling to
indicate the edit suggestion was not helpful.
For each unique edit in each document, use the accept or reject API endpoint only once in total.
:param edit_uuid: Opaque UUID of the edit returned from the edits endpoint
:type edit_uuid: str, uuid
:param session_id: Unique name or UUID of text that is being processed
:type session_id: str
'''
url = f'{self.url_endpoint}edits/{edit_uuid}/reject'
session_id = session_id or self.default_session_id
data = {
'key': self.api_key,
'session_id': session_id,
}
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def spellcheck(
self,
text,
session_id=None,
min_length=None,
multiple_edits=None,
lang=None,
auto_apply=False,
variety=None,
user_data=None,
):
'''
Fetches spelling (no grammar or phrase level) edits for provided text.
:param text: Text to process for edits.
:type text: str
:param session_id: Unique name or UUID of document or portion of text that is being checked
:type session_id: str
:param min_length: Default is 3. Minimum character length of words to suggest corrections for. Setting this too low will result in much higher false positives.
:type min_length: int
:param multiple_edits: Default is false. If true, will return `candidates` field containing list of other potential corrections for each error.
:type multiple_edits: bool
:param lang: Default is English. Specify a language to spellcheck the text against.
:type lang: str
:param auto_apply: Whether to return a field with edits applied to the text. Cannot be set with multiple_edits option.
:type auto_apply: bool
:param advanced_edits: additional edit checking options
:type advanced_edits: dict
:param variety: Specifies regional English variety preference. Defaults to the configuration in the user Sapling dashboard.
:type variety: str
:rtype: list[dict]
Supported languages:
- `en`: English
- `ar`: عربي
- `bg`: български
- `ca`: català
- `cs`: čeština
- `da`: dansk
- `de`: Deutsch
- `el`: Ελληνικά
- `es`: español
- `et`: eesti keel
- `fa`: فارسی
- `fi`: suomi
- `fr`: français (`fr-fr` and `fr-ca` coming soon)
- `he`: עִבְרִית
- `hi`: हिन्दी
- `hr`: hrvatski
- `hu`: magyar nyelv
- `id`: bahasa Indonesia
- `is`: íslenska
- `it`: italiano
- `jp/ja`: 日本語
- `ko`: 한국어
- `lt`: lietuvių kalba
- `lv`: latviešu valoda
- `nl`: Nederlands
- `no`: norsk
- `pl`: polski
- `pt`: português
- `ro`: limba română
- `ru`: русский
- `sk`: slovenčina
- `sq`: shqip
- `sr`: srpski
- `sv`: svenska
- `th`: ภาษาไทย
- `tl`: Tagalog / ᜆᜄᜎᜓᜄ᜔
- `tr`: Türkçe
- `uk`: Українська мова
- `vi`: Tiếng Việt
- `zh`: 中文
Supported varieties:
- `us-variety`: American English
- `gb-variety`: British English
- `au-variety`: Australian English
- `ca-variety`: Canadian English
- `null-variety`: Don't suggest changes based on English variety
'''
url = self.url_endpoint + 'spellcheck'
session_id = session_id or self.default_session_id
data = {
'key': self.api_key,
'text': text,
'session_id': session_id,
}
if min_length is not None:
data['min_length'] = min_length
if multiple_edits is not None:
data['multiple_edits'] = multiple_edits
if lang is not None:
data['lang'] = lang
if auto_apply is not None:
data['auto_apply'] = auto_apply
if variety is not None:
data['variety'] = variety
if user_data is not None:
data['user_data'] = user_data
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
resp_json = resp.json()
return resp_json.get('edits')
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def complete(
self,
query,
session_id=None,
):
'''
Provides predictions of the next few characters or words
:param query: Text to get completions against.
:type query: str
:param session_id: Unique name or UUID of document or portion of text that is being checked
:type session_id: str
'''
url = self.url_endpoint + 'complete'
session_id = session_id or self.default_session_id
data = {
'key': self.api_key,
'query': query,
'session_id': session_id,
}
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
resp_json = resp.json()
return resp_json
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def accept_complete(
self,
complete_uuid,
query,
completion,
session_id=None,
):
'''
Use this API endpoint to have Sapling improve completions over time.
Each suggested autocomplete has a UUID. You can pass this information back to Sapling to
indicate the suggestion was helpful.
:param complete_uuid: Opaque UUID of the edit returned from the complete endpoint.
:type complete_uuid: str, uuid
:param query: The query text passed to the complete endpoint.
:type query: str
:param completion: The suggested completion text returned from the complete endpoint.
:type completion: str
'''
url = f'{self.url_endpoint}complete/{complete_uuid}/accept'
session_id = session_id or uuid.uuid4()
data = {
'key': self.api_key,
'session_id': session_id,
'context': {
'query': query,
'completion': completion,
}
}
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def aidetect(
self,
text,
sent_scores=None,
):
'''
Score a piece of text on how likely it was generated by AI.
:param text: Text to score for AI-generated content.
:type text: str
:param sent_scores: If true, each sentence will also be scored individually.
:type sent_scores: bool
:rtype: dict
:return:
- score: float between 0 and 1, probability that text is AI generated
- sentence_scores: If sent_scores is set, will return a list of scores per sentence.
- text: text that was processed
'''
url = f'{self.url_endpoint}aidetect'
data = {
'key': self.api_key,
'text': text,
}
if sent_scores is not None:
data['sent_scores'] = sent_scores
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return resp.json()
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def chunk_text(
self,
text,
max_length,
step_size=None,
):
'''
Break an input text into blocks of length at most max_length. When splitting the text, the API follows the following preference stack:
page break > paragraph breaks > line breaks > tabs > punctuation > all other whitespace
:param text: Text to be chunked
:type text: str
:param max_length: Maximum length of text segments.
:type max_length: integer
:param step_size: Size of window to look for split points.
:type step_size: integer
:rtype: dict
:return:
- chunks: List of resulting chunks
'''
url = f'{self.url_endpoint}ingest/chunk_text'
data = {
'key': self.api_key,
'text': text,
'max_length': max_length
}
if step_size is not None:
data['step_size'] = step_size
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return resp.json()
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def chunk_html(
self,
html,
max_length,
step_size=None,
):
'''
Break an input text into blocks of length at most max_length. When splitting the text, the API follows the following preference stack:
page break > paragraph breaks > line breaks > tabs > punctuation > all other whitespace
Note: This endpoint not only breaks up the HTML but also discards all HTML tags, resulting in plain text.
:param html: HTML to be chunked
:type html: str
:param max_length: Maximum length of text segments.
:type max_length: integer
:param step_size: Size of window to look for split points.
:type step_size: integer
:rtype: dict
:return:
- chunks: List of resulting chunks representing the segmented text contained within the HTML
'''
url = f'{self.url_endpoint}ingest/chunk_html'
data = {
'key': self.api_key,
'html': html,
'max_length': max_length
}
if step_size is not None:
data['step_size'] = step_size
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return resp.json()
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
def postprocess(
self,
text,
session_id,
operations,
):
'''
Performs a variety of operations that are useful for working with the outputs of an NLP (whether human or AI) system. These include:
- Fixing or restoring punctuation
- Fixing capitalization
- Fixing or restoring whitespace
Example use cases include repairing transcriptions or captions.
:param text: Text to postprocess
:type text: str
:param session_id: Unique name or UUID of document or portion of text that is being processed
:type session_id: str
:param operations: Operations to apply. The currently accepted operations are:
- capitalize
- punctuate
- fixspaces
:type operations: list[str]
:rtype: list[dict]
:return:
Same as the edits endpoint:
- sentence: Unedited sentence
- sentence_start: Offset of sentence from start of text
- start: Offset of edit start relative to sentence
- end: Offset of edit end relative to sentence
- replacement: Suggested replacement
- error_type: Error type
- general_error_type: General Error type
'''
url = f'{self.url_endpoint}postprocess'
data = {
'key': self.api_key,
'text': text,
'session_id': session_id,
'operations': operations,
}
resp = requests.post(
url,
json=data,
timeout=self.timeout,
)
if 200 <= resp.status_code < 300:
return resp.json()
raise Exception(f'HTTP {resp.status_code}: {resp.text}')
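# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of calling the client defined above. The
# API key and sample text are placeholders; the fields accessed on each edit
# (sentence, start, end, replacement) follow the response format documented
# in the `edits` docstring.
if __name__ == '__main__':
    client = SaplingClient(api_key='YOUR_32_CHARACTER_API_KEY')
    result = client.edits('Lets get started, we writes to you about the thing.', session_id='demo-doc')
    for edit in result.get('edits', []):
        # start/end offsets are relative to the containing sentence
        original = edit['sentence'][edit['start']:edit['end']]
        print(f"{original!r} -> {edit['replacement']!r} ({edit['error_type']})")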
|
/sapling-py-4.0.0.tar.gz/sapling-py-4.0.0/sapling/client.py
| 0.812123 | 0.218024 |
client.py
|
pypi
|
from lib import with_line_numbers
from split import Split
import traceback
class ConfigError(Exception):
"""Thrown when a Config cannot parse."""
def __init__(self, msg, *args):
self.msg = msg % args
def __str__(self):
return self.msg
class Config(object):
"""Represents a sapling split configuration. Configurations can contain any valid python code
but need only define a splits list containing the splits in a git repository. Each split is a
dict that must have the following entries:
'name': a logical name for the split
'paths': the paths this split is comprised of relative to the root of the git repository
"""
__slots__ = ('_splits')
def __init__(self, repo, data = None):
if data is None or len(data) == 0 or data.isspace():
self._splits = {}
else:
self._splits = Config._parse(repo, data.strip())
@classmethod
def _parse(cls, repo, config):
local_config = {}
try:
exec(config, {}, local_config)
except StandardError:
raise ConfigError("Problem parsing config:\n%s\n\n%s", with_line_numbers(config),
traceback.format_exc())
Config._validate(local_config)
splits = {}
for splitmap in local_config['splits']:
split = Config._parse_split(repo, splitmap)
splits[split.name] = split
return splits
@classmethod
def _parse_split(cls, repo, splitmap):
name = splitmap.pop('name')
patterns = splitmap.pop('paths')
try:
return Split(repo, name, patterns)
except KeyError:
raise ConfigError("Problem creating split: %s\n%s\n\n%s", name, splitmap,
traceback.format_exc())
@classmethod
def _validate(cls, config):
if 'splits' in config:
for split in config['splits']:
Config._validate_split(split)
@classmethod
def _validate_split(cls, split):
problems = []
if 'name' not in split:
problems.append("split must define a 'name'")
if 'paths' not in split:
problems.append("split must define 'paths'")
if len(problems) > 0:
raise ConfigError("Invalid split %s has the following problems:\n\t%s", split,
'\n\t'.join(problems))
@property
def splits(self):
"""A dict of the configured Splits keyed by their names."""
return self._splits
def __str__(self):
return "Config(%s)" % ", ".join('%s => %s' % (x, y) for (x, y) in self.splits.items())
|
/sapling-0.0.11.tar.gz/sapling-0.0.11/saplib/config.py
| 0.720467 | 0.247805 |
config.py
|
pypi
|
import git
import gitdb
import lib
import os
import re
import StringIO
class Split(object):
"""Represents a split of a git repository off to a remote repository. A Split maps one or more
subtrees of a containing git repository as a logical unit that can be pushed to or pulled from its
remote."""
__slots__ = ('_repo', '_name', '_paths', '_excludes')
def __init__(self, repo, name, patterns):
"""Creates a new Split over the given repo with the specified logical name. The patterns
specify paths to include in the split and regular expressions to prune out sub-paths. Paths to
include are taken as relative to the root of the repo and can either be directory paths, in
which case the full directory tree is retained in the split, or an individual file path.
Excludes are distinguished with a leading ! character with the rest of the pattern forming a
path regular expression to match files gathered from the path patterns that should be pruned.
For example, the following patterns would specify a split that grabs a top-level README, and the
src/ tree except for any OWNERS files contained within:
[ 'README', 'src', '!.+/OWNERS$' ]"""
self._repo = repo
self._name = name
paths = []
excludes = set()
for pattern in patterns:
if pattern.startswith('!'):
excludes.add(re.compile(pattern[1:]))
else:
paths.append(os.path.normpath(pattern))
self.paths = paths
self._excludes = excludes
@property
def name(self):
"""The logical name of this Split."""
return self._name
@property
def paths(self):
"The paths this split is comprised of."
return self._paths
@paths.setter
def paths(self, value):
self._paths = self._validate_paths(value)
def _validate_paths(self, paths):
tree = self._current_tree()
for path in paths:
try:
tree / path
except KeyError:
raise KeyError("Invalid path: %s" % path)
return paths
def commits(self, reverse = True):
"""Returns an iterator over the commits in the current head that instersect this split. By
default commits are returned oldest first, but this can be overridden by specifying
'reverse' = False"""
refspec = self._current_head()
return git.Commit.iter_items(self._repo, refspec, self.paths, reverse = reverse)
class ApplyListener(object):
def on_start(self, commit_count):
pass
def on_commit(self, original_commit, new_commit):
pass
def on_finish(self):
pass
def apply(self, branch_name, apply_listener = ApplyListener()):
"""Applies this split over the commits to the named branch and returns the tip commit. An
ApplyListener callback can be passed to track progress of the split; otherwise, a no-op
ApplyListener is used. If there are no (new) commits to split None is returned."""
commits = list(self.commits())
if not commits:
return None
commit_count = len(commits)
apply_listener.on_start(commit_count)
try:
if not commits:
return None
parent = None
branch = lib.find(self._repo.branches,
lambda branch: branch.name == branch_name,
lambda: self._repo.create_head(branch_name))
for commit in commits:
index_path = '/tmp/%s.index' % branch_name
if os.path.exists(index_path):
os.remove(index_path)
index = git.IndexFile(self._repo, index_path)
for item in self._subtrees(commit):
if self._is_included(item):
index.add([item])
else:
index.add(item.traverse(lambda item, depth: self._is_included(item)))
synthetic_tree = index.write_tree()
parents = [] if parent is None else [ parent ]
parent = self._copy_commit(commit, synthetic_tree, parents)
apply_listener.on_commit(commit, parent)
branch.commit = parent
return parent
finally:
apply_listener.on_finish()
def _copy_commit(self, orig_commit, tree, parents):
new_commit = git.Commit(self._repo, git.Commit.NULL_BIN_SHA, tree, orig_commit.author,
orig_commit.authored_date, orig_commit.author_tz_offset,
orig_commit.committer, orig_commit.committed_date,
orig_commit.committer_tz_offset,
"%s\n(sapling split of %s)" % (orig_commit.message, orig_commit.hexsha),
parents, orig_commit.encoding)
return self._write_commit(new_commit)
def _write_commit(self, commit):
stream = StringIO.StringIO()
commit._serialize(stream)
stream_len = stream.tell()
stream.seek(0)
istream = self._repo.odb.store(gitdb.IStream(git.Commit.type, stream_len, stream))
commit.binsha = istream.binsha
return commit
def _subtrees(self, commit = None, ignore_not_found = True):
if commit is None:
commit = self._current_head_commit()
for path in self.paths:
try:
yield commit.tree / path
except KeyError as e:
if not ignore_not_found:
raise e
def _is_included(self, item):
return item.type == "blob" and not self._is_excluded(item)
def _is_excluded(self, item):
for exclude in self._excludes:
if exclude.match(item.path):
return True
return False
def _current_tree(self):
return self._current_head_commit().tree
def _current_head_commit(self):
return self._current_head().commit
def _current_head(self):
return self._repo.head
def __str__(self):
return "Split(name=%s, paths=%s, excludes=%s)" % (
self._name,
self.paths,
[ exclude.pattern for exclude in self._excludes ]
)
|
/sapling-0.0.11.tar.gz/sapling-0.0.11/saplib/split.py
| 0.759939 | 0.337258 |
split.py
|
pypi
|
<h1 align="center">
<img width="30%" src="img/logo.png" />
<br />
</h1>
`saplings` is a static analysis tool for Python. Given a program, `saplings` will build object hierarchies for every module imported in the program. Object hierarchies are dependency trees where the root node represents a module and each child represents an attribute of its parent. These can be useful for [type inference](https://en.wikipedia.org/wiki/Duck_typing) and mining data about how a module is used.
<img src="img/demo.gif" />
- Builds object hierarchies (attribute trees) for every module imported in a program
- Tracks modules by default, but can build trees for _any_ object
- Tracks object flow through many complex paths in a program without executing it
## Installation
> Requires Python 3.X.
You can install `saplings` with `pip`:
```bash
$ pip install saplings
```
## Usage
Using saplings takes only two steps. First, convert your input program into an [Abstract Syntax Tree (AST)](https://en.wikipedia.org/wiki/Abstract_syntax_tree) using the `ast` module. Then, import the `Saplings` object and initialize it with the root node of the AST.
```python
import ast
from saplings import Saplings
my_program = open("path_to_your_program.py", "r").read()
program_ast = ast.parse(my_program)
my_saplings = Saplings(program_ast)
```
That's it. To access the object hierarchies, simply call the `get_trees` method in your `Saplings` object, like so:
```python
my_saplings.get_trees() # => [ObjectNode(), ObjectNode(), ..., ObjectNode()]
```
For more advanced usage of the `Saplings` object, read the docstring [here]().
### Printing an Object Hierarchy
`get_trees` returns a list of `ObjectNode`s, each representing the root node of an object hierarchy. Each node has the following attributes:
* **`name` _(str)_:** Name of the object
* **`is_callable` _(bool)_:** Whether the object is callable (i.e. has `__call__` defined)
* **`order` _(int)_:** Indicates the type of connection to the parent node (e.g. `0` is an attribute of the parent, `1` is an attribute of the output of the parent when called, etc.); `-1` if node is root
* **`frequency` _(int)_:** Number of times the object is referenced (i.e. appears) in the program
* **`children` _(list)_:** List of child nodes
To pretty-print a tree, simply pass its root node into the `render_tree` generator, like so:
```python
from saplings import render_tree
trees = my_saplings.get_trees()
root_node = trees[0]
for branches, node in render_tree(root_node):
print(f"{branches}{node}")
```
```
numpy (NC, -1)
+-- random (NC, 0)
| +-- randn (C, 0)
| +-- __sub__ (C, 1)
| | +-- shape (NC, 1)
| | +-- __index__ (C, 1)
| +-- sum (C, 1)
+-- matmul (C, 0)
+-- expand_dims (C, 0)
+-- T (NC, 1)
```
(Here, `NC` indicates a non-callable node and `C` a callable node. `-1`/`0`/`1` indicate the order of the node's connection to its parent).
To create a dictionary representation of a tree, pass its root node into the `dictify_tree` function, like so:
```python
from saplings import dictify_tree
dictify_tree(root_node)
```
```python
{
"numpy": {
"is_callable": False,
"order": -1,
"frequency": 1,
"children": [
{"random": ...},
{"matmul": ...},
{"expand_dims": ...}
]
}
}
```
### Interpreting the object hierarchy
Each node is an _object_ and an object can either be _callable_ (i.e. has `__call__` defined) or _non-callable_. Links between nodes each have an _order_ –– a number which describes the relationship between a node and its parent. If a node is a 0th-order child of its parent object, then it's an attribute of that object. If it's a 1st-order child, then it's an attribute of the output of the parent object when it's called, and so on. For example:
```python
my_parent = module.my_obj
my_parent.attr # attr is a 0th-order child of my_obj
my_parent().attr # attr is a 1st-order child of my_obj
my_parent()().attr # attr is a 2nd-order child of my_obj
```
#### What counts as a function?
In Python, subscripts, comparisons, and binary operations are all just syntactic sugar for function calls, and are treated by saplings as such. Here are some example "translations:"
```python
my_obj['my_sub'] # => my_obj.__index__('my_sub')
my_obj + 10 # => my_obj.__add__(10)
my_obj == None # => my_obj.__eq__(None)
```
## Limitations
Saplings _[statically analyzes](https://en.wikipedia.org/wiki/Static_program_analysis)_ the usage of a module in a program, meaning it doesn't actually execute any code. Instead, it traverses the program's AST and tracks "object flow," i.e. how an object is passed through a program via variable assignments and calls of user-defined functions and classes. To demonstrate this idea, consider this example of [currying](https://en.wikipedia.org/wiki/Currying) and the tree saplings produces:
```python
import torch
def compose(g, f):
def h(x):
return g(f(x))
return h
def F(x):
return x.T
def G(x):
return x.sum()
composed_func = compose(F, G)
composed_func(torch.tensor())
```
<p align="center">
<img width="25%" src="img/currying.png" />
</p>
Saplings identifies `tensor` as an attribute of `torch`, then follows the object as it's passed into `composed_func`. Because saplings has an understanding of how `composed_func` is defined, it can analyze how the object is used _within_ the function and capture the `T` and `sum` sub-attributes.
While saplings can track object flow through many complex paths in a program, I haven't tested every edge case, and there are some situations where saplings produces inaccurate trees. Below is a list of all the failure modes I'm aware of (and currently working on fixing). If you discover a bug or missing feature that isn't listed here, please [create an issue](https://github.com/shobrook/saplings/issues/new) for it.
### Data Structures
As of right now, saplings can't track _assignments_ to comprehensions, generator expressions, dictionaries, lists, tuples, or sets. It can, however, track object flow _inside_ these data structures. For example, consider the following:
```python
import numpy as np
vectors = [np.array([0]), np.array([1]), np.array([2])]
vectors[0].mean()
```
Saplings can capture `array` and add it to the `numpy` object hierarchy, but it cannot capture `mean`, and thus produces the following tree:
<p align="center">
<img width="25%" src="img/data_structures.png" />
</p>
This limitation can have some unexpected consequences. For example, functions that return multiple values with one `return` statement (e.g. `return a, b, c`) are actually returning tuples. Therefore, the output of those functions won't be tracked by saplings. The same logic applies to variable unpacking with `*` and `**`.
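Here's a hedged sketch of that consequence (the function and array below are made up for illustration):
```python
import numpy as np

def min_and_max(x):
    return x.min(), x.max()   # a single return statement, so this returns a tuple

lo, hi = min_and_max(np.array([1, 2, 3]))   # unpacking the returned tuple
lo.item()   # `item` is not captured, since the tuple output isn't tracked
```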
### Control Flow
Handling control flow is tricky. Tracking object flow in loops and conditionals requires making assumptions about what code actually executes. For example, consider the following:
```python
import numpy as np
for x in np.array([]):
print(x.mean())
```
Because saplings only does _static_ analysis and not type inference, it doesn't know that `np.array([])` is an empty list, and that therefore the loop never executes. In this situation, capturing `mean` and adding the `__index__ -> mean` subtree to `numpy -> array` would be a false positive, since `x` (i.e. the output of `np.array().__iter__()`) is never defined. To handle this, saplings _should_ branch out and produce two possible trees for this module –– one that assumes the loop doesn't execute, and one that assumes it does:
<p align="center">
<img width="50%" src="img/for_loop.png" />
</p>
But as of right now, saplings will only produce the tree on the right –– that is, we assume the bodies of `for` loops are always executed (because they usually are).
Below are the assumptions saplings makes for other control flow elements.
#### `while` loops
`while` loops are processed under the same assumption as `for` loops –– that is, the body of the loop is assumed to execute, even if the loop condition evaluates to `False`.
#### `if`/`else` blocks
Saplings processes `if` and `else` blocks more conservatively than loops. It tracks object flow within these blocks but doesn't allow changes to the namespace to persist into the parent scope. For example, given:
```python
import numpy as np
X = np.array()
if condition:
X = np.matrix()
else:
X.mean()
X = None
Y = np.array()
print(X.sum())
print(Y.max())
```
saplings will produce the following tree:
<p align="center">
<img width="40%" src="img/if_else_1.png" />
</p>
Notice how the value of `X` becomes ambiguous after we exit the `if` block, since we don't know if `condition` evaluates to `True` or `False`. To handle this, saplings simply stops tracking any variable that's defined in the outer scope, like `X`, if it's modified inside an `if`/`else` block. Similarly, notice how there exists an execution path where `Y` is never defined and `Y.max()` throws an error. To handle this, saplings assumes that any variable defined inside an `if`/`else` block, such as `Y`, doesn't persist into the outer scope.
Both of these assumptions are made in attempt to reduce false positives and false negatives. But ideally, saplings would branch out and produce two separate trees for this module –– one that assumes the `if` block executes and another that assumes the `else` block executes, like so:
<p align="center">
<img width="65%" src="img/if_else_2.png" />
</p>
#### `try`/`except` blocks
`try`/`except` blocks are handled similarly to `if`/`else` blocks –– that is, changes to the namespace made in either block do not persist in the outer scope.
Notably, `try` and `else` blocks are treated as a single block, since `else` is only executed if `try` executes without exceptions. And `finally` blocks are treated as separate from the control flow, since code in here always executes regardless of whether an exception is thrown.
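As a rough sketch of how these assumptions would play out (hypothetical code; the `finally` behavior follows from the description above):
```python
import numpy as np

try:
    X = np.load("data.npy")   # assignment inside `try` doesn't persist into the outer scope
except IOError:
    X = np.zeros(3)           # neither does this one
finally:
    Y = np.ones(3)            # `finally` is treated as normal control flow, so this persists

X.mean()   # not captured: X is considered ambiguous after the try/except
Y.sum()    # captured as an attribute of numpy.ones
```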
#### `return`, `break`, and `continue` statements
All code underneath a `return`, `break`, or `continue` statement is assumed not to execute and will not be analyzed. For example, consider this:
```python
import numpy as np
for x in range(10):
y = np.array([x])
continue
y.mean()
```
It may be the case that `mean` is actually an attribute of `np.array`, but saplings will not capture this since `y.mean()` is never executed.
Notably, saplings doesn't apply this assumption to statements inside control flow blocks. For example, if the `continue` statement above was changed to:
```python
if condition:
continue
```
Then `mean` _would_ be captured by saplings as an attribute of `np.array`.
### Functions
<!--#### Conditional return types
`saplings` can generally track module and user-defined functions, but there are some edge cases it cannot handle. For example, because module functions must be treated as black-boxes to `saplings`, conditional return types cannot be accounted for. Consider the following code and trees that saplings produces:
```python
import my_module
my_module.foo(5).attr1()
my_module.foo(10).attr2()
```
However, suppose `my_module.foo` is defined in the backend as:
```python
def foo(x):
if x <= 5:
return ObjectA()
else:
return ObjectB()
```
and `ObjectB` doesn't have `attr1` as an attribute. Then, saplings will have incorrectly treated `attr1` and `attr2` as attributes of the same object.-->
#### Recursion
Saplings cannot process recursive function calls. Consider the following example:
```python
import some_module
def my_recursive_func(input):
if input > 5:
return my_recursive_func(input - 1)
elif input > 1:
return some_module.foo
else:
return some_module.bar
output = my_recursive_func(5)
output.attr()
```
We know this function returns `some_module.foo`, but saplings cannot tell which base case is hit, and therefore can't track the output. To avoid false positives, we assume this function returns nothing, and thus `attr` will not be captured and added to the object hierarchy. The tree saplings produces is:
<p align="center">
<img width="35%" src="img/recursion.png" />
</p>
#### Generators
Generators aren't processed as iterables. Instead, saplings ignores `yield`/`yield from` statements and treats the generator like a normal function. For example, given:
```python
import some_module
def my_generator():
yield from some_module.some_items
for item in my_generator():
print(item.name)
```
`__index__ -> name` won't be added as a subtree to `some_module -> some_items`, and so the tree produced by saplings will look like this:
<p align="center">
<img width="35%" src="img/generators.png" />
</p>
Notably, this limitation will only produce false negatives –– not false positives.
#### Anonymous Functions
While the _bodies_ of anonymous (`lambda`) functions are processed, object flow through assignments and calls of those functions is not tracked. For example, given:
```python
import numpy as np
trans_diag = lambda x: np.diagonal(x.T)
trans_diag(np.random.randn(5, 5))
```
saplings will produce the following tree:
<p align="center">
<img width="45%" src="img/anonymous.png" />
</p>
Notice that `T` is not captured as an attribute of `numpy.random.randn`, but `diagonal` is captured as an attribute of `numpy`. This is because the body of the `lambda` function is processed by saplings, but the assignment to `trans_diag` is not recognized, and therefore the function call is not processed.
### Classes
Saplings can track object flow in static, class, and instance methods, getter and setter methods, class and instance variables, classes defined within classes, and class closures (i.e. functions that return classes). Notably, it can keep track of the state of each instance of a user-defined class. Consider the following program and the tree saplings produces:
```python
import torch.nn as nn
from torch import tensor
class Perceptron(nn.Module):
loss = None
def __init__(self, in_channels, out_channels):
super(Perceptron, self).__init__()
self.layer = nn.Linear(in_channels, out_channels)
self.output = Perceptron.create_output_layer()
@staticmethod
def create_output_layer():
def layer(x):
return x.mean()
return layer
@classmethod
def calculate_loss(cls, output, target):
cls.loss = output - target
return cls.loss
def __call__(self, x):
x = self.layer(x)
return self.output(x)
model = Perceptron(1, 8)
output = model(tensor([10]))
loss = Perceptron.calculate_loss(output, 8)
```
<p align="center">
<img width="50%" src="img/class.png" />
</p>
While saplings can handle many common usage patterns for user-defined classes, such as the ones above, there are some things saplings can't handle yet. Below are all the limitations I'm aware of:
#### Class Modifications
In the example above, calling the class method `Perceptron.calculate_loss` should change the value of the class variable `loss`. However, saplings cannot track modifications to a class when it's passed into a function. Saplings _can_ handle when a class is modified in the scope in which it was defined, like so:
```python
Perceptron.loss = tensor()
Perceptron.loss.item()
```
Here, `item` would be captured and added to the tree as an attribute of `tensor`. But if the class is modified via an alias, like so:
```python
NeuralNet = Perceptron
NeuralNet.loss = tensor()
Perceptron.loss.item()
```
Then saplings won't capture `item`. Saplings also can't propagate class modifications to existing instances of the class. For example, continuing the code above:
```python
model = Perceptron(1, 8)
Perceptron.loss = tensor()
model.loss.item()
```
Because the change to `loss`, a class variable, won't propagate to `model`, an instance of `Perceptron`, `item` won't be captured as an attribute of `tensor`.
#### Inheritance
Saplings cannot recognize inherited methods or variables in user-defined classes. For example, given:
```python
import some_module
class MyClass(module.Foo):
def __init__(self, x):
self.bar(x)
```
saplings will not recognize `bar` as an attribute of `module.Foo`, despite `bar` being an inherited method. This limitation also holds true when the base class is user-defined.
#### Metaclasses
Once I learn what metaclasses actually are and how to use them, I'll get around to handling them in saplings. But for now this is on the bottom of my to-do list since 99.9% of Python users also don't know what the hell metaclasses are.
### Miscellaneous
#### `global` and `nonlocal` statements
`global` statements are used inside functions to declare that a variable belongs to the global namespace. Saplings doesn't recognize these statements, though, and doesn't change the namespace accordingly. For example, given:
```python
import some_module
my_var = some_module.foo
def my_func():
global my_var
my_var = None
my_func()
my_var.bar()
```
saplings will produce a tree with `bar` as an attribute of `foo`. This would be a false positive since calling `my_func` sets `my_var` to `None`, and of course `None` doesn't have `bar` as an attribute.
`nonlocal` statements are similar to `global`s, except they allow you to modify variables declared in outer scopes. And like `global`s, saplings doesn't recognize `nonlocal` statements.
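A sketch of the analogous `nonlocal` case (hypothetical code, mirroring the `global` example above):
```python
import some_module

def outer():
    my_var = some_module.foo
    def inner():
        nonlocal my_var
        my_var = None
    inner()
    my_var.bar()   # false positive: saplings still treats my_var as some_module.foo

outer()
```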
#### Built-in functions
None of Python's [built-in functions](https://docs.python.org/3/library/functions.html) are recognized by saplings. For example, consider the `enumerate` function:
```python
import some_module
for index, item in enumerate(some_module.items):
print(item.some_attr)
```
saplings won't capture `some_attr` as an attribute of `some_module.items.__iter__`, which it would have if `some_module.items` weren't wrapped by `enumerate`.
|
/saplings-4.3.1.tar.gz/saplings-4.3.1/README.md
| 0.685529 | 0.983053 |
README.md
|
pypi
|
# sapporo-service
[](https://github.com/sapporo-wes/sapporo-service/actions?query=workflow%3Apytest)
[](https://github.com/sapporo-wes/sapporo-service/actions?query=workflow%3Aflake8)
[](https://github.com/sapporo-wes/sapporo-service/actions?query=workflow%3Aisort)
[](https://github.com/sapporo-wes/sapporo-service/actions?query=workflow%3Amypy)
[](https://zenodo.org/badge/latestdoi/220937589)
[](http://www.apache.org/licenses/LICENSE-2.0)
<img src="https://raw.githubusercontent.com/sapporo-wes/sapporo/main/logo/sapporo-service.svg" width="400" style="display: block; margin-left: auto; margin-right: auto; margin-top: 30px; margin-bottom: 30px;" alt="sapporo-service logo">
[Japanese Document](https://github.com/sapporo-wes/sapporo-service/blob/main/README_ja.md)
The sapporo-service is a standard implementation conforming to the [Global Alliance for Genomics and Health](https://www.ga4gh.org) (GA4GH) [Workflow Execution Service](https://github.com/ga4gh/workflow-execution-service-schemas) (WES) API specification.
Also, we have extended the API specification.
Please check [SwaggerHub - sapporo-wes](https://app.swaggerhub.com/apis/suecharo/sapporo-wes/sapporo-wes-1.0.1-oas3).
One of sapporo-service's features is the abstraction of workflow engines, making it easy to convert various workflow engines into WES.
Currently, the following workflow engines have been confirmed to work.
- [cwltool](https://github.com/common-workflow-language/cwltool)
- [nextflow](https://www.nextflow.io)
- [Toil (experimental)](https://toil.ucsc-cgl.org)
- [cromwell](https://github.com/broadinstitute/cromwell)
- [snakemake](https://snakemake.readthedocs.io/en/stable/)
- [ep3 (experimental)](https://github.com/tom-tan/ep3)
- [StreamFlow (experimental)](https://github.com/alpha-unito/streamflow)
Another feature of the sapporo-service is the mode that can only execute workflows registered by the system administrator.
This feature is useful when building a WES in a shared HPC environment.
## Install and Run
The sapporo-service supports Python 3.7 or newer.
```bash
$ pip3 install sapporo
$ sapporo
```
### Docker
You can also launch the sapporo-service with Docker.
To use Docker-in-Docker (DinD), you must mount `docker.sock`, `/tmp`, etc.
```bash
# Launch
$ docker compose up -d
# Launch confirmation
$ docker compose logs
```
## Usage
The help for the sapporo-service startup command is as follows:
```bash
$ sapporo --help
usage: sapporo [-h] [--host] [-p] [--debug] [-r] [--disable-get-runs]
[--disable-workflow-attachment]
[--run-only-registered-workflows] [--service-info]
[--executable-workflows] [--run-sh] [--url-prefix]
Implementation of a GA4GH workflow execution service that can easily support
various workflow runners.
optional arguments:
-h, --help show this help message and exit
--host Host address of Flask. (default: 127.0.0.1)
-p , --port Port of Flask. (default: 1122)
--debug Enable debug mode of Flask.
-r , --run-dir Specify the run dir. (default: ./run)
--disable-get-runs Disable endpoint of `GET /runs`.
--disable-workflow-attachment
Disable `workflow_attachment` on endpoint `Post
/runs`.
--run-only-registered-workflows
Run only registered workflows. Check the registered
workflows using `GET /service-info`, and specify
`workflow_name` in the `POST /run`.
--service-info Specify `service-info.json`. The
supported_wes_versions, system_state_counts and
workflows are overwritten in the application.
--executable-workflows
Specify `executable-workflows.json`.
--run-sh Specify `run.sh`.
--url-prefix Specify the prefix of the url (e.g. --url-prefix /foo
-> /foo/service-info).
```
### Operating Mode
There are two startup modes in the sapporo-service.
- Standard WES mode (Default)
- Execute only registered workflows mode
These are switched with the startup argument `--run-only-registered-workflows`.
It can also be switched by giving `True` or `False` to the environment variable `SAPPORO_ONLY_REGISTERED_WORKFLOWS`.
**Startup arguments take priority over environment variables.**
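For example, the following two invocations are equivalent (assuming `sapporo` is on your `PATH`):
```bash
$ sapporo --run-only-registered-workflows
$ SAPPORO_ONLY_REGISTERED_WORKFLOWS=True sapporo
```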
#### Standard WES mode
As the API specifications, please check [SwaggerHub - sapporo-wes - RunWorkflow](https://app.swaggerhub.com/apis/suecharo/sapporo-wes/sapporo-wes-1.0.1-oas3#/default/RunWorkflow).
**Unlike the standard WES API specification, the sapporo-service requires you to specify `workflow_engine_name` in the request parameters of `POST /runs`.**
We consider this omission a mistake in the standard WES API specification and have requested that it be fixed.
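As a rough sketch of such a request (the form fields other than `workflow_engine_name` follow the standard WES API, and the workflow URL is a placeholder; see the `curl` examples referenced later in this README for exact payloads):
```bash
$ curl -X POST \
    -F 'workflow_url=https://example.com/workflow.cwl' \
    -F 'workflow_type=CWL' \
    -F 'workflow_type_version=v1.0' \
    -F 'workflow_params={}' \
    -F 'workflow_engine_name=cwltool' \
    http://127.0.0.1:1122/runs
```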
#### Execute only registered workflows mode
As the API specifications for executing only registered workflows mode, please check [SwaggerHub - sapporo-wes](https://app.swaggerhub.com/apis/suecharo/sapporo-wes/sapporo-wes-1.0.0):
It conforms to the standard WES API.
The changes are as follows:
- Executable workflows are returned by `GET /executable_workflows`.
- Specify `workflow_name` instead of `workflow_url` in `POST /runs`.
The executable workflows are managed at [`executable_workflows.json`](https://github.com/sapporo-wes/sapporo-service/blob/main/sapporo/executable_workflows.json).
Also, the schema for this definition is [`executable_workflows.schema.json`](https://github.com/sapporo-wes/sapporo-service/blob/main/sapporo/executable_workflows.schema.json). The default location of these files is under the application directory of the sapporo-service. You can override them using the startup argument `--executable-workflows` or the environment variable `SAPPORO_EXECUTABLE_WORKFLOWS`.
For more information, see [SwaggerUI - sapporo-wes - GetExecutableWorkflows](https://app.swaggerhub.com/apis/suecharo/sapporo-wes/sapporo-wes-1.0.1-oas3#/default/GetExecutableWorkflows).
### Run Dir
The sapporo-service manages the submitted workflows, workflow parameters, output files, etc., on the file system.
You can override the location of run dir by using the startup argument `--run-dir` or the environment variable `SAPPORO_RUN_DIR`.
The run dir structure is as follows:
```bash
$ tree run
.
├── 29
└── 29109b85-7935-4e13-8773-9def402c7775
├── cmd.txt
├── end_time.txt
├── exe
│ └── workflow_params.json
├── exit_code.txt
├── outputs
│ ├── ERR034597_1.small.fq.trimmed.1P.fq
│ ├── ERR034597_1.small.fq.trimmed.1U.fq
│ ├── ERR034597_1.small.fq.trimmed.2P.fq
│ ├── ERR034597_1.small.fq.trimmed.2U.fq
│ ├── ERR034597_1.small_fastqc.html
│ └── ERR034597_2.small_fastqc.html
├── outputs.json
├── run.pid
├── run_request.json
├── start_time.txt
├── state.txt
├── stderr.log
├── stdout.log
└── workflow_engine_params.txt
├── 2d
│   └── ...
└── 6b
    └── ...
```
So, you can initialize or delete an individual run simply by removing its directory with `rm`.
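For example, to delete the run shown in the tree above:
```bash
$ rm -rf run/29/29109b85-7935-4e13-8773-9def402c7775
```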
The request body of `POST /runs` is fairly complex.
Examples using `curl` are provided in [GitHub - sapporo/tests/curl](https://github.com/sapporo-wes/sapporo-service/tree/main/tests/curl_example/post_runs).
Please use them as a reference.
### `run.sh`
We use [`run.sh`](https://github.com/sapporo-wes/sapporo-service/blob/main/sapporo/run.sh) to abstract the workflow engine.
When `POST /runs` is called, the sapporo-service forks the execution of `run.sh` after dumping the necessary files into the run dir. Therefore, you can adapt various workflow engines to WES by editing `run.sh`.
The default position of `run.sh` is under the application directory of the sapporo-service. You can override it using the startup argument `--run-sh` or the environment variable `SAPPORO_RUN_SH`.
### Other Startup Arguments
You can change the host and port used by the application by using the startup arguments (`--host` and `--port`) or the environment variables `SAPPORO_HOST` and `SAPPORO_PORT`.
The following three startup arguments and environment variables limit the WES.
- `--disable-get-runs`
- `SAPPORO_GET_RUNS`: `True` or `False`.
- Disable `GET /runs`.
- When the WES is exposed to an unspecified number of users, anyone who knows a run_id can inspect that run's contents and cancel other people's runs.
It is hard to guess a run_id by brute force, though, because run_ids are generated automatically with `uuid4`.
- `--disable-workflow-attachment`
- `SAPPORO_WORKFLOW_ATTACHMENT`: `True` or `False`.
- Disable `workflow_attachment` in `POST /runs`.
- The `workflow_attachment` field is used to attach files for executing workflows.
- There is a security concern because anything can be attached.
- `--url-prefix`.
- `SAPPORO_URL_PREFIX`.
- Set the URL PREFIX.
- If `--url-prefix /foo/bar` is set, `GET /service-info` becomes `GET /foo/bar/service-info`.
The contents of the response of `GET /service-info` are managed in [`service-info.json`](https://github.com/sapporo-wes/sapporo-service/blob/main/sapporo/service-info.json). The default location of `service-info.json` is under the application directory of the sapporo-service. You can override by using the startup argument `--service-info` or the environment variable `SAPPORO_SERVICE_INFO`.
### Generate download link
The sapporo-service provides the files and directories under run_dir as download links.
For more information, see [SwaggerUI - sapporo-wes - GetData](https://app.swaggerhub.com/apis/suecharo/sapporo-wes/sapporo-wes-1.0.1-oas3#/default/GetData).
### Parse workflow
The sapporo-service provides the feature to check the workflow document's type, version, and inputs.
For more information, see [SwaggerUI - sapporo-wes - GetData](https://app.swaggerhub.com/apis/suecharo/sapporo-wes/sapporo-wes-1.0.1-oas3#/default/GetData).
### Generate RO-Crate
After workflow execution completes, the sapporo-service generates an RO-Crate from the run_dir and writes it to `ro-crate-metadata.json` in the run_dir.
You can download the RO-Crate by using `GET /runs/{run_id}/ro-crate/data/ro-crate-metadata.json`.
And, you can generate RO-Crate from the run_dir as follows:
```bash
# At Sapporo run_dir
$ ls
cmd.txt run.sh state.txt
exe/ run_request.json stderr.log
executable_workflows.json sapporo_config.json stdout.log
outputs/ service_info.json workflow_engine_params.txt
run.pid start_time.txt yevis-metadata.yml
# Execute sapporo/ro_crate.py script
$ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v $PWD:$PWD -w $PWD ghcr.io/sapporo-wes/sapporo-service:latest python3 /app/sapporo/ro_crate.py $PWD
```
Please see [ro-crate-metadata-example.json](./tests/ro-crate-metadata-example.json) as an example.
## Development
You can start the development environment as follows:
```bash
$ docker compose -f docker-compose.dev.yml up -d --build
$ docker compose -f docker-compose.dev.yml exec app bash
# inside container
$ sapporo
```
We use [flake8](https://pypi.org/project/flake8/), [isort](https://github.com/timothycrosley/isort), and [mypy](http://mypy-lang.org) as linters.
```bash
$ bash ./tests/lint_and_style_check/flake8.sh
$ bash ./tests/lint_and_style_check/isort.sh
$ bash ./tests/lint_and_style_check/mypy.sh
$ bash ./tests/lint_and_style_check/run_all.sh
```
We use [pytest](https://docs.pytest.org/en/latest/) as the test framework.
```bash
$ pytest .
```
## Add new Workflow Engines to Sapporo Service
Have a look at the [`run.sh`](https://github.com/sapporo-wes/sapporo-service/blob/main/sapporo/run.sh) script called from Python.
This shell script receives a request that specifies a workflow engine such as `cwltool` and invokes the corresponding `run_cwltool` bash function.
That function executes a shell command to start a Docker container for the workflow engine and monitors its exit status.
For a complete example, please refer to this pull request: <https://github.com/sapporo-wes/sapporo-service/pull/29>
## License
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0). See the [LICENSE](https://github.com/sapporo-wes/sapporo-service/blob/main/LICENSE).
## Notice
Please note that this repository is participating in a study into sustainability of open source projects. Data will be gathered about this repository for approximately the next 12 months, starting from 2021-06-16.
Data collected will include number of contributors, number of PRs, time taken to close/merge these PRs, and issues closed.
For more information, please visit [our informational page](https://sustainable-open-science-and-software.github.io/) or download our [participant information sheet](https://sustainable-open-science-and-software.github.io/assets/PIS_sustainable_software.pdf).
|
/sapporo-1.4.9.tar.gz/sapporo-1.4.9/README.md
| 0.489259 | 0.854217 |
README.md
|
pypi
|
import json
from datetime import datetime
from typing import ClassVar, Optional
import pyairtable.api
import pyairtable.formulas as pyOps
import pyairtable.utils
from pyairtable.api.table import Table
from .crons import CronResponse, CronStat, CronStorage, FetchStrategy
class AirtableStorage(CronStorage):
"""store cron results in Airtable"""
run_id: str = ""
PROJECT_NAME: ClassVar[str]
TABLE_TASKS: ClassVar[Table]
TABLE_RUNS: ClassVar[Table]
TABLE_STATS: ClassVar[Table]
@classmethod
def get_env_params(cls) -> tuple[str, str]:
"""Return env name and env id on airtable"""
raise NotImplementedError
async def record_task(self) -> None:
"""Register the task meta info on Airtable."""
env_name, env_id = self.get_env_params()
query = {"Env": env_name, "Name": self.task_name}
task_info = self.TABLE_TASKS.first(formula=pyOps.match(query), fields=["Name", "Env"]) # type: ignore
if task_info:
self.task_id = task_info["id"]
microapp = self.task_name.split(".")[1]
fields = {"Project": self.PROJECT_NAME, "Name": self.task_name, "Microapp": microapp, "Env": [env_id]}
if self.task_id:
task_record = self.TABLE_TASKS.update(self.task_id, fields=fields)
else:
task_record = self.TABLE_TASKS.create(fields=fields)
self.task_id = task_record["id"]
async def record_run_start(self) -> None:
"""Record in the DB that the crontask has started."""
kwargs = self.task.kwargs
strategy: Optional[FetchStrategy] = kwargs.get("strategy")
run = self.TABLE_RUNS.create(
fields={
"Task": [self.task_id],
"Status": "Running",
"Batch Size": kwargs.get("batch_size", 0),
"Strategy": strategy.name if strategy else "NONE",
"Arguments": json.dumps({k: v for k, v in kwargs.items() if k not in ["batch_size", "strategy"]}),
"Started": pyairtable.utils.datetime_to_iso_str(datetime.utcnow()),
}
)
self.run_id = run["id"]
async def record_run_end(self, response: CronResponse) -> None:
"""Record in the DB that the crontask has ended."""
self.TABLE_RUNS.update(
self.run_id,
fields={
"Response": json.dumps(response),
"Status": "Error" if "error" in response else "Success",
"Ended": pyairtable.utils.datetime_to_iso_str(datetime.utcnow()),
},
)
async def record_stats(self, stats: list[CronStat]) -> None:
"""Record un the DB stats about data to process by this cron."""
for stat in stats:
self.TABLE_STATS.create(fields={"Task": [self.task_id], "Key": stat.name, "Value": stat.value})
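# --- Subclass sketch (not part of the original module) ---
# A hypothetical concrete storage showing what a project would typically
# provide: the project name, the three Airtable tables, and the env lookup.
# The API key, base id, table names, and env values are placeholders, and the
# Table constructor is assumed to follow the pyairtable 1.x style
# (api_key, base_id, table_name).
class MyProjectStorage(AirtableStorage):
    PROJECT_NAME = "my-project"
    TABLE_TASKS = Table("AIRTABLE_API_KEY", "appXXXXXXXXXXXXXX", "Tasks")
    TABLE_RUNS = Table("AIRTABLE_API_KEY", "appXXXXXXXXXXXXXX", "Runs")
    TABLE_STATS = Table("AIRTABLE_API_KEY", "appXXXXXXXXXXXXXX", "Stats")

    @classmethod
    def get_env_params(cls) -> tuple[str, str]:
        # (env name, Airtable record id of that env)
        return "production", "recXXXXXXXXXXXXXX"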
|
/sapx-0.1.20-py3-none-any.whl/sap/worker/crons_airtable.py
| 0.770896 | 0.189371 |
crons_airtable.py
|
pypi
|
import typing
# do not remove: forces an updated reply-exchange format for better AMQP exchange structures
from kombu.pidbox import Mailbox
Mailbox.reply_exchange_fmt = "%s.reply.pidbox"
class CeleryConfig:
"""Default config params for all celery applications."""
proj_node: str
is_prod: bool
task_default_exchange: str
task_default_queue: str
task_default_routing_key: str
event_exchange: str
event_queue_prefix: str
accept_content: list[str] = ["application/json"]
task_serializer: str = "json"
result_serializer: str = "json"
broker_transport_options: dict[str, typing.Any]
worker_hijack_root_logger: bool = False
worker_concurrency: int
broker_pool_limit: int
task_create_missing_queues: bool
task_acks_late: bool
task_acks_on_failure_or_timeout: bool
task_reject_on_worker_lost: bool
def __init__(self, proj_name: str, is_prod: bool) -> None:
"""Initialize config."""
self.is_prod = is_prod
celery_app_name = f"celery.{self.proj_node}.{proj_name}"
self.task_default_exchange = celery_app_name
self.task_default_queue = celery_app_name
self.task_default_routing_key = celery_app_name
self.event_exchange = f"celeryev.{self.proj_node}.{proj_name}"
self.event_queue_prefix = f"celeryev.{self.proj_node}.{proj_name}"
self.broker_transport_options = {"client_properties": {"connection_name": celery_app_name}}
class LambdaCeleryConfig(CeleryConfig):
"""Default config params for lambda celery applications."""
proj_node: str = "lambda"
def __init__(self, proj_name: str, is_prod: bool) -> None:
"""Initialize config."""
super().__init__(proj_name=proj_name, is_prod=is_prod)
self.worker_concurrency = 2 if is_prod else 1
self.broker_pool_limit = 4 if is_prod else 2
self.task_create_missing_queues = False
self.task_acks_late = True
self.task_acks_on_failure_or_timeout = False
self.task_reject_on_worker_lost = True
class CronCeleryConfig(CeleryConfig):
"""Default config params for cron celery applications."""
proj_node: str = "cron"
def __init__(self, proj_name: str, is_prod: bool) -> None:
"""Initialize config."""
super().__init__(proj_name=proj_name, is_prod=is_prod)
self.worker_concurrency = 2 if is_prod else 1
self.broker_pool_limit = 4 if is_prod else 2
self.task_create_missing_queues = False
self.task_acks_late = False
self.task_acks_on_failure_or_timeout = True
self.task_reject_on_worker_lost = False
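# Hedged usage sketch (illustrative): one of the config classes above can be handed to a
# Celery app via config_from_object. The project name and broker URL are assumptions.
if __name__ == "__main__":  # pragma: no cover
    import celery

    app = celery.Celery("cron.myproject", broker="amqp://guest@localhost//")
    app.config_from_object(CronCeleryConfig(proj_name="myproject", is_prod=False))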
|
/sapx-0.1.20-py3-none-any.whl/sap/worker/config.py
| 0.645008 | 0.1585 |
config.py
|
pypi
|
import asyncio
from typing import Any
import celery
import celery.bootsteps
import celery.worker.consumer
import kombu
from kombu.transport.base import StdChannel # Channel
from sap.loggers import logger
from .packet import SignalPacket
from .utils import match_amqp_topics
class LambdaTask(celery.Task):
"""
    A lambda task is a task that runs on a specific event, usually after receiving a packet (message).
    The lambda will be connected to an AMQP queue and will listen
    to packets sent to that queue that match the packet's topic pattern.
"""
    time_limit: int = 60 * 1  # 1 minute
packet: SignalPacket
def __init__(self, **kwargs: Any) -> None:
"""Initialize lambda arguments."""
self.name = self.get_name()
def get_name(self) -> str:
"""Return a human-readable name for this lambda."""
return self.__module__.split(".lambdas", maxsplit=1)[0] + "." + str(self.__name__)
async def handle_process(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""Perform pre-check such as authentication and run the task."""
raise NotImplementedError
async def test_process(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""Call this method to launch the task in test cases."""
return await self.handle_process(*args, **kwargs)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the task."""
logger.debug(f"Running task={self.get_name()} {args=} {kwargs=}")
return asyncio.run(self.handle_process(*args, **kwargs))
def register_lambda(lambda_task_class: type[LambdaTask]) -> LambdaTask:
"""Register the Lambda Task to make it discoverable by task runner (celery)."""
return lambda_task_class()
class LambdaWorker(celery.bootsteps.ConsumerStep):
"""Celery worker that consumes packets (messages) sent to lambda queues."""
packets: list[SignalPacket]
name: str
def _get_queues(self, channel: StdChannel) -> list[kombu.Queue]:
"""Retrieve the list of AMQP queues associated to each packet signal."""
queue_list: list[kombu.Queue] = []
for packet in self.packets:
# declare fallback queue
params = packet.queue_get_params(task_name=self.name, is_fallback=True)
params["exchange"] = kombu.Exchange(name=params["exchange"], type="topic", channel=channel, durable=True)
queue_fallback = kombu.Queue(**params, channel=channel)
queue_fallback.declare()
# declare primary queue
params = packet.queue_get_params(task_name=self.name, is_fallback=False)
params["exchange"] = kombu.Exchange(name=params["exchange"], type="topic", channel=channel, durable=True)
queue_primary = kombu.Queue(**params, channel=channel)
queue_primary.declare()
# only listen to primary queue
queue_list.append(queue_primary)
return queue_list
def get_consumers(self, channel: StdChannel) -> list[kombu.Consumer]:
"""
Create packet consumers.
The consumers are the entrypoint of
the application once celery starts receiving messages.
"""
return [
kombu.Consumer(
channel,
queues=self._get_queues(channel),
callbacks=[self.consume],
accept=["json"],
prefetch_count=10,
)
]
def consume(self, body: dict[str, Any], message: kombu.Message) -> None:
"""
Run the celery worker and consume messages.
This is the entrypoint of the application once celery starts receiving messages.
All packets received are sent to this function that will acknowledge reception and dispatch
to registered Lambda tasks.
"""
topic = message.delivery_info["routing_key"]
headers = message.headers
is_retry = headers and headers.get("x-death")
if is_retry:
logger.debug(f"Consuming worker name={self.name} topic={topic} body={body} headers={headers}")
try:
self._propagate_signal(body, message)
except Exception as exc: # pylint: disable=broad-except
logger.exception(exc)
message.reject()
else:
message.ack()
def _propagate_signal(self, body: dict[str, Any], message: kombu.Message) -> None:
"""
Execute each lambda task that registered to that packet signal.
Lambda tasks are all executed asynchronously and simultaneously through other background celery workers.
        Sometimes this can lead to duplicate key errors or integrity errors.
"""
topic = message.delivery_info["routing_key"]
for task in self.get_task_list():
if match_amqp_topics(task.packet.topic, topic):
# logger.debug(f"Matching task.packet.topic={task.packet.topic} topic={topic} task={task.get_name()}")
identifier = body.get("identifier") or body.get("card_pid") or body.get("clover_id")
task.apply_async(args=(identifier,), kwargs=body["kwargs"], time_limit=60)
def get_task_list(self) -> list[LambdaTask]:
"""Retrieve the list of lambda tasks to execute."""
raise NotImplementedError
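# Hedged usage sketch (illustrative): a minimal concrete lambda task. The packet wiring is
# omitted because SignalPacket's constructor is not shown here; the task name and payload
# handling below are assumptions.
if __name__ == "__main__":  # pragma: no cover

    @register_lambda
    class EchoLambda(LambdaTask):
        """Hypothetical task that simply logs what it receives."""

        async def handle_process(self, identifier: str, **kwargs: Any) -> dict[str, Any]:
            logger.debug(f"Echoing {identifier=} {kwargs=}")
            return {"identifier": identifier}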
|
/sapx-0.1.20-py3-none-any.whl/sap/worker/lambdas.py
| 0.813424 | 0.164852 |
lambdas.py
|
pypi
|
import asyncio
import logging
from dataclasses import dataclass
from enum import Enum, IntEnum
from typing import Any, Callable, ClassVar, Optional, TypedDict
from unittest import mock
import celery
import celery.schedules
from beanie.odm.queries.find import FindMany
from sap.loggers import logger
from sap.settings import SapSettings
class FetchStrategy(IntEnum):
"""Define if new or old data should be fetched."""
NEW: int = 1
OLD: int = 2
class CronResponseStatus(Enum):
"""Status of the crontask after it finish running."""
SUCCESS: str = "Success"
ABORTED: str = "Aborted"
ERROR: str = "Error"
class CronResponse(TypedDict, total=False):
"""Define a standard cron task response."""
error: dict[str, str]
result: dict[str, int]
status: str
@dataclass
class CronStat:
"""Metric that gives insight into data to be processed by a cron."""
name: str
value: int
class BaseCronTask(celery.Task):
"""Define how cron task classes should be structured."""
expires = 60 * 60 # automatically expires if not run within 1 hour
    time_limit = 60 * 60 * 3  # default to 3 hours, automatically kill the task if it exceeds the limit
name: str
args: list[Any] = []
kwargs: dict[str, Any] = {}
schedule: celery.schedules.crontab
logger: logging.Logger = logger
def __init__(self, **kw_args: Any) -> None:
"""Initialize the cron task."""
self.name = self.get_name()
for k, v in kw_args.items():
setattr(self, k, v)
@classmethod
def get_name(cls) -> str:
"""Get Name of the current Task."""
return cls.__module__.split(".crons", maxsplit=1)[0] + "." + str(cls.__name__)
def get_queryset(self, *, batch_size: Optional[int] = None, **kwargs: Any) -> Any:
"""Fetch the list of elements to process."""
raise NotImplementedError
async def get_stats(self) -> list[CronStat]:
"""Give stats about the number of elements left to process."""
raise NotImplementedError
async def process(self, *, batch_size: int = 100, **kwargs: Any) -> Any:
"""Run the cron task and process elements."""
raise NotImplementedError
async def handle_process(self, *args: Any, **kwargs: Any) -> CronResponse:
"""Run the task and save meta info to Airtable."""
response: CronResponse
try:
result = await self.process(**self.kwargs)
except Exception as exc: # pylint: disable=broad-except;
if not SapSettings.is_env_prod:
raise
self.logger.exception(exc)
response = {
"error": {"class": exc.__class__.__name__, "message": str(exc)},
"status": CronResponseStatus.ERROR.value,
}
else:
response = {"result": result, "status": CronResponseStatus.SUCCESS.value}
return response
async def test_process(self, filter_queryset: Callable[[FindMany[Any]], FindMany[Any]]) -> CronResponse:
"""Call this method to launch the task in test cases.
        filter_queryset: This allows you to run extra filtering on the data being processed.
Useful if you want to limit the data processing to a specific sample.
"""
original_get_queryset = self.get_queryset
def mock_get_queryset(batch_size: Optional[int] = None, **kwargs: Any) -> FindMany[Any]:
"""Replace the normal filter by a new test filter."""
queryset = original_get_queryset(batch_size=batch_size, **kwargs)
return filter_queryset(queryset)
with mock.patch.object(self, "get_queryset", side_effect=mock_get_queryset):
return await self.handle_process()
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the task."""
logger.debug(f"Running task={self.get_name()} {args=} {kwargs=}")
return asyncio.run(self.handle_process(*args, **kwargs))
class CronStorage:
"""
Interface that store cron results in a database.
Results can be used to collect statistics about cron runs.
"""
task: BaseCronTask
task_id: Optional[str] = None
task_name: str
def __init__(self, task: BaseCronTask):
"""Initialize the storage."""
self.task = task
self.task_name = task.get_name()
async def record_task(self) -> None:
"""Register task to database and return database id of the task."""
raise NotImplementedError
async def record_run_start(self) -> None:
"""Record in the DB that the crontask has started."""
raise NotImplementedError
async def record_run_end(self, response: CronResponse) -> None:
"""Record in the DB that the crontask has ended."""
raise NotImplementedError
async def record_stats(self, stats: list[CronStat]) -> None:
"""Record un the DB stats about data to process by this cron."""
raise NotImplementedError
class TestStorage(CronStorage):
"""Dummy storage used when running test cases."""
async def record_task(self) -> None:
"""Register task to database and return database id of the task."""
async def record_run_start(self) -> None:
"""Record in the DB that the crontask has started."""
async def record_run_end(self, response: CronResponse) -> None:
"""Record in the DB that the crontask has ended."""
async def record_stats(self, stats: list[CronStat]) -> None:
"""Record un the DB stats about data to process by this cron."""
# StorageT = TypeVar("StorageT", bound=CronStorage)
class CronTask(BaseCronTask):
"""Define a cron task and its storage."""
storage_class: ClassVar[type[CronStorage]] = CronStorage
storage: CronStorage
def __init__(self, **kwargs: Any) -> None:
"""Initialize cron task and storage."""
super().__init__(**kwargs)
self.storage = self.storage_class(task=self)
def get_queryset(self, *, batch_size: Optional[int] = None, **kwargs: Any) -> Any:
"""Fetch the list of elements to process."""
raise NotImplementedError
async def process(self, *, batch_size: int = 100, **kwargs: Any) -> Any:
"""Run the cron task and process elements."""
raise NotImplementedError
async def get_stats(self) -> list[CronStat]:
"""Give stats about the number of elements left to process."""
raise NotImplementedError
async def handle_process(self, *args: Any, **kwargs: Any) -> CronResponse:
"""Run the task and save meta info to Airtable."""
# Record task
await self.storage.record_task()
# Record run start
await self.storage.record_run_start()
# Run the task
response = await super().handle_process(*args, **kwargs)
# Record run end
await self.storage.record_run_end(response=response)
# Compute stats
stats = await self.get_stats()
# Record stats
await self.storage.record_stats(stats=stats)
return response
async def test_process(self, filter_queryset: Callable[[FindMany[Any]], FindMany[Any]]) -> CronResponse:
"""Call this method to launch the task in test cases."""
self.storage = TestStorage(task=self)
return await super().test_process(filter_queryset)
def register_crontask(
crontask_class: type[CronTask],
schedule: celery.schedules.crontab,
kwargs: Optional[dict[str, Any]] = None,
) -> CronTask:
"""Register a task on the worker servers."""
return crontask_class(schedule=schedule, kwargs=kwargs or {})
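# Hedged usage sketch (illustrative): a minimal cron task registered to run hourly. The
# default storage_class raises NotImplementedError when used, so a real project would point
# it at a concrete storage; the task and schedule below are assumptions.
if __name__ == "__main__":  # pragma: no cover

    class NoopCron(CronTask):
        """Hypothetical cron that processes nothing."""

        async def process(self, *, batch_size: int = 100, **kwargs: Any) -> dict[str, int]:
            return {"processed": 0}

        async def get_stats(self) -> list[CronStat]:
            return [CronStat(name="pending", value=0)]

    noop_cron = register_crontask(NoopCron, schedule=celery.schedules.crontab(minute=0))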
|
/sapx-0.1.20-py3-none-any.whl/sap/worker/crons.py
| 0.909345 | 0.248295 |
crons.py
|
pypi
|
from datetime import datetime
from enum import Enum
from typing import TYPE_CHECKING, Any, Mapping, Optional, Type, TypeVar, Union
from pymongo.client_session import ClientSession
import beanie
import pydantic
from beanie import PydanticObjectId
from .exceptions import Object404Error
if TYPE_CHECKING:
from beanie.odm.documents import DocType
from beanie.odm.interfaces.find import DocumentProjectionType
class DocSourceEnum(Enum):
"""Source where a document has been fetched from."""
WEBHOOK: str = "webhook"
CRON: str = "cron"
class _DocMeta(pydantic.BaseModel):
"""Meta Data allowing to keep trace of Documents versioning and updates."""
version: int = 0 # version of the document being imported
source: Optional[DocSourceEnum] = None # where the data is coming from: webhook, cron
created: Optional[datetime] = None # when the document was first imported
updated: Optional[datetime] = None # when the document was last updated
deleted: Optional[datetime] = None # when the document was deleted, (deleted document may be retained for logging)
class DocMeta(pydantic.BaseModel):
"""Manage meta data and ensure that it's correctly set."""
doc_meta: _DocMeta = _DocMeta()
@pydantic.root_validator
@classmethod
def validate_doc_meta(cls, values: dict[str, Any]) -> dict[str, Any]:
"""Validate doc meta on each model update."""
doc_meta: _DocMeta = values["doc_meta"]
doc_meta.updated = datetime.utcnow()
doc_meta.created = doc_meta.created or doc_meta.updated
return values
class Document(beanie.Document):
"""Subclass beanie.Document that add handy methods."""
doc_meta: _DocMeta = _DocMeta()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
@pydantic.root_validator
@classmethod
def validate_doc_meta(cls, values: dict[str, Any]) -> dict[str, Any]:
"""Validate doc meta on each model update."""
doc_meta: _DocMeta = values["doc_meta"]
doc_meta.updated = datetime.utcnow()
doc_meta.created = doc_meta.created or doc_meta.updated
return values
@classmethod
async def get_or_404(
cls: Type["DocType"],
document_id: Union[PydanticObjectId, str],
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
with_children: bool = False,
**pymongo_kwargs: Any,
) -> "DocType":
"""Get document by id or raise 404 error if document does not exist."""
doc_id = document_id if isinstance(document_id, PydanticObjectId) else PydanticObjectId(document_id)
result = await super().get(
document_id=doc_id,
session=session,
ignore_cache=ignore_cache,
fetch_links=fetch_links,
with_children=with_children,
**pymongo_kwargs,
)
if not result:
raise Object404Error
return result
@classmethod
async def find_one_or_404(
cls: Type["DocType"],
*args: Union[Mapping[str, Any], bool],
projection_model: Optional[Type["DocumentProjectionType"]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
with_children: bool = False,
**pymongo_kwargs: Any,
) -> "DocType":
"""Find document from query or raise 404 error if document does not exist."""
result: Optional["DocType"] = await super().find_one(
*args,
projection_model=projection_model,
session=session,
ignore_cache=ignore_cache,
fetch_links=fetch_links,
with_children=with_children,
**pymongo_kwargs,
)
if not result:
raise Object404Error
return result
DocT = TypeVar("DocT", bound=Document)
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
|
/sapx-0.1.20-py3-none-any.whl/sap/beanie/document.py
| 0.908193 | 0.185357 |
document.py
|
pypi
|
import inspect
from copy import copy
from typing import Any, List, Optional, Type
from beanie.odm.fields import ExpressionField, Link, LinkInfo, LinkTypes
from beanie.odm.utils.init import Initializer as Initializer
from pydantic import BaseModel
from pydantic.fields import ModelField
from pydantic.typing import get_origin
def detect_link(field: ModelField) -> Optional[LinkInfo]:
"""
It detects link and returns LinkInfo if any found.
:param field: ModelField
:return: Optional[LinkInfo]
"""
if issubclass(field.type_, Link):
if field.allow_none is True:
return LinkInfo(
field=field.name,
model_class=field.sub_fields[0].type_, # type: ignore
link_type=LinkTypes.OPTIONAL_DIRECT,
)
return LinkInfo(
field=field.name,
model_class=field.sub_fields[0].type_, # type: ignore
link_type=LinkTypes.DIRECT,
)
if (
inspect.isclass(get_origin(field.outer_type_))
and issubclass(get_origin(field.outer_type_), list) # type: ignore
and len(field.sub_fields) == 1 # type: ignore
):
internal_field = field.sub_fields[0] # type: ignore
if internal_field.type_ == Link:
if field.allow_none is True:
return LinkInfo(
field=field.name,
model_class=internal_field.sub_fields[0].type_, # type: ignore
link_type=LinkTypes.OPTIONAL_LIST,
)
return LinkInfo(
field=field.name,
model_class=internal_field.sub_fields[0].type_, # type: ignore
link_type=LinkTypes.LIST,
)
return None
def init_document_fields(cls) -> None:
"""
Init class fields.
:return: None
"""
cls.update_forward_refs()
def check_nested_links(link_info: LinkInfo, prev_models: List[Type[BaseModel]]) -> None:
if link_info.model_class in prev_models:
return
for k, v in link_info.model_class.__fields__.items():
nested_link_info = detect_link(v)
if nested_link_info is None:
continue
if link_info.nested_links is None:
link_info.nested_links = {}
link_info.nested_links[v.name] = nested_link_info
new_prev_models = copy(prev_models)
new_prev_models.append(link_info.model_class)
check_nested_links(nested_link_info, prev_models=new_prev_models)
if cls._link_fields is None:
cls._link_fields = {}
for k, v in cls.__fields__.items():
path = v.alias or v.name
setattr(cls, k, ExpressionField(path))
link_info = detect_link(v)
if link_info is not None:
cls._link_fields[v.name] = link_info
check_nested_links(link_info, prev_models=[])
cls._hidden_fields = cls.get_hidden_fields()
Initializer.init_document_fields = staticmethod(init_document_fields)
|
/sapx-0.1.20-py3-none-any.whl/sap/beanie/patch.py
| 0.754915 | 0.163445 |
patch.py
|
pypi
|
from typing import Callable, Optional, Type, TypeVar, Union
from beanie import Document, PydanticObjectId, operators
from beanie.odm.fields import ExpressionField, LinkInfo
from .document import DocT
from .link import Link
RDocT = TypeVar("RDocT", bound=Document) # Related Model Type
async def prefetch_related(item_list: list[DocT], to_attribute: str) -> None:
"""
Optimize fetching of a related attribute of one-to-one relation.
Fetch related attribute efficiently in order to avoid multiple queries that could kill the db.
Example:
```python
class ProductCategory(Document):
name: str
class Product(Document):
category: ProductCategory
price: float
product_list = await Product.find().to_list()
    await prefetch_related(product_list, 'category')
```
    If there are 70 products and 10 categories, `prefetch_related` will only
    perform 1 query instead of 70 to retrieve the category of every product.
```
for product in product_list:
print(product.category.name)
```
"""
# If item_list is empty there is no need to continue.
if not item_list:
return
# Find the model of the related attributed
link_fields = type(item_list[0]).get_link_fields()
assert link_fields
related_field: LinkInfo = link_fields[to_attribute]
assert issubclass(related_field.model_class, Document)
related_model: Type[Document] = related_field.model_class
def get_related_id(item_: Document) -> Optional[PydanticObjectId]:
"""Return the id of the related object."""
link: Optional[Link[Document]] = getattr(item_, to_attribute)
if link:
return link.ref.id
return None
    # Fetch the related attribute and map it to each item in the item_list
related_item_ids = list(set(get_related_id(item) for item in item_list))
related_item_list = await related_model.find(operators.In(related_model.id, related_item_ids)).to_list()
for item in item_list:
link: Optional[Link[Document]] = getattr(item, to_attribute)
if link:
related_item = next((rel for rel in related_item_list if rel.id == link.ref.id), None)
setattr(link, "doc", related_item)
async def prefetch_related_children(
item_list: list[DocT],
to_attribute: str,
related_model: type[RDocT],
related_attribute: str,
filter_func: Optional[
Callable[
[list[RDocT], DocT],
Union[None, RDocT, list[RDocT]],
]
] = None,
) -> None:
"""
Optimize fetching of a related attributes of one-to-many relation.
Fetch related attribute efficiently in order to avoid multiple queries that could kill the db.
Example:
```python
class ProductCategory(Document):
name: str
class Product(Document):
category: ProductCategory
price: float
category_list = await ProductCategory.find().to_list()
    await prefetch_related_children(
        category_list, to_attribute='products', related_model=Product, related_attribute='category'
    )
    ```
    If there are 70 products and 10 categories, `prefetch_related_children` will only
    perform 1 query instead of 10 to retrieve the products of every category.
```python
for category in category_list:
for product in category.products:
print(product.price)
```
"""
if not filter_func:
filter_func = lambda related_items, item: related_items
item_ids = list(set(item.id for item in item_list))
related_expression: ExpressionField = getattr(getattr(related_model, related_attribute), "id")
related_item_list = await related_model.find(
operators.In(related_expression, item_ids), sort="-doc_meta.created"
).to_list()
for item in item_list:
related_items = []
for rel in related_item_list:
rel_link: Link[RDocT] = getattr(rel, related_attribute)
if item.id == rel_link.ref.id:
related_items.append(rel)
setattr(item, to_attribute, filter_func(related_items, item))
def prepare_search_string(search_text: str) -> str:
"""Clean and reformat the search string."""
res = search_text.strip()
if "@" in res and not '"' in res:
res = f'"{res}"'
return res
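# Hedged sketch (illustrative): the filter_func argument of prefetch_related_children can
# post-process the children fetched for each parent. The attribute and model names below are
# assumptions; related children arrive sorted by "-doc_meta.created", so index 0 is the newest.
async def _example_prefetch_latest(category_list: list[DocT], product_model: type[RDocT]) -> None:
    """Keep only the most recent product per category (illustrative sketch)."""
    await prefetch_related_children(
        category_list,
        to_attribute="latest_product",
        related_model=product_model,
        related_attribute="category",
        filter_func=lambda products, category: products[0] if products else None,
    )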
|
/sapx-0.1.20-py3-none-any.whl/sap/beanie/query.py
| 0.944382 | 0.819388 |
query.py
|
pypi
|
import typing
import urllib.parse
import httpx
from sap.loggers import logger
from . import exceptions
class RestData(dict[str, typing.Any]):
"""A response data from a REST client request."""
response: typing.Optional[httpx.Response] = None
class RestClient:
"""Async Rest API Client.
An async wrapper around any Rest API.
Common errors are handled by the wrapper.
"""
basic_username: str = ""
basic_password: str = ""
base_url: str = ""
def __init__(self, basic_username: str = "", basic_password: str = "") -> None:
"""Initialize the API client."""
self.basic_username = basic_username
self.basic_password = basic_password
async def get(self, path: str, *, params: typing.Optional[dict[str, typing.Union[str, int]]] = None) -> RestData:
"""Retrieve an object."""
return await self.request("GET", path, params=params)
async def post(
self,
path: str,
*,
json: typing.Optional[dict[str, typing.Any]] = None,
files: typing.Optional[list[tuple[str, tuple[str, bytes, str]]]] = None,
) -> RestData:
"""Create an object."""
return await self.request("POST", path, json=json, files=files)
async def put(self, path: str, *, json: dict[str, typing.Any]) -> RestData:
"""Update an object."""
return await self.request("PUT", path, json=json)
async def delete(self, path: str, *, json: typing.Optional[dict[str, typing.Any]] = None) -> RestData:
"""Remove an object."""
return await self.request("DELETE", path, json=json)
def _get_client(self) -> httpx.AsyncClient:
"""Get retrieve client with headers."""
auth = None
if self.basic_username or self.basic_password:
auth = httpx.BasicAuth(self.basic_username, self.basic_password)
return httpx.AsyncClient(auth=auth)
async def request(
self,
method: str,
path: str,
*,
json: typing.Optional[dict[str, typing.Any]] = None,
params: typing.Optional[dict[str, typing.Union[str, int]]] = None,
files: typing.Optional[list[tuple[str, tuple[str, bytes, str]]]] = None,
) -> RestData:
"""Perform an HTTPS request on the Rest API."""
url: str = path if "://" in path else urllib.parse.urljoin(self.base_url, path)
async with self._get_client() as client:
response = await client.request(method, url, json=json, params=params, files=files)
return await self.get_response_data(response)
@staticmethod
async def get_response_data(response: httpx.Response) -> RestData:
"""Extract data from Rest API response and raise exceptions when applicable."""
response_data = RestData()
if "application/json" in response.headers.get("content-type", ""):
response_data = RestData(response.json())
response_data.response = response
if response.status_code >= 300:
logger.debug("Bad response from Rest API code=%d data=%s", response.status_code, str(response_data))
if response.status_code >= 500: # pragma: no cover
raise exceptions.Rest503Error(data=response_data)
if response.status_code == 404:
if "text/html" in response.headers["content-type"]:
raise exceptions.Rest405Error(data=response_data)
raise exceptions.Rest404Error(data=response_data)
if response.status_code in exceptions.RestErrorMap:
raise exceptions.RestErrorMap[response.status_code](data=response_data)
response.raise_for_status()
return response_data
class BeansClient(RestClient):
"""Async Beans API Client.
An async wrapper around the Beans API.
Common errors are handled by the wrapper.
"""
base_url: str = "https://api.trybeans.com/v3/"
def __init__(self, access_token: str) -> None:
"""Initialize the API client."""
super().__init__(basic_username=access_token)
@classmethod
async def get_access_token(cls, code: str, beans_public: str, beans_secret: str) -> RestData:
"""Retrieve access_token to be use to perform API request on behalf on a merchant."""
async with httpx.AsyncClient(auth=httpx.BasicAuth(beans_public, beans_secret)) as client:
response = await client.get(urllib.parse.urljoin(cls.base_url, f"core/auth/integration_key/{code}"))
integration_key = await cls.get_response_data(response)
if isinstance(integration_key["card"], dict): # making the code future proof with the upcoming API update
integration_key["card"] = integration_key["card"]["id"]
return integration_key
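# Hedged usage sketch (illustrative): any other REST backend can reuse RestClient by pinning
# base_url and credentials. The service URL, path and token below are assumptions.
if __name__ == "__main__":  # pragma: no cover

    class ExampleClient(RestClient):
        """Hypothetical client for a token-authenticated API."""

        base_url = "https://api.example.com/v1/"

    async def demo() -> None:
        client = ExampleClient(basic_username="example-token")  # hypothetical token auth
        data = await client.get("status", params={"verbose": 1})
        print(dict(data), data.response.status_code if data.response else None)

    # To run: asyncio.run(demo()) -- performs a live HTTP call against the hypothetical API.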
|
/sapx-0.1.20-py3-none-any.whl/sap/rest/client.py
| 0.759136 | 0.21713 |
client.py
|
pypi
|
import typing
# ---------------------------------------
# --------- Rest API ERROR -------------
# ---------------------------------------
# Those errors are returned by the Rest API
class RestAPIError(Exception):
"""Base Rest API error."""
code: int = 0
message: str = ""
data: typing.Optional[dict[str, str]] = None
def __repr__(self) -> str:
"""Display a string representation of the object."""
return f"<{self.__class__.__name__}: {self.message}>"
def __str__(self) -> str:
"""Display a string representation of the error."""
return str(self.message)
def __init__(self, *args: object, data: typing.Optional[dict[str, typing.Any]] = None) -> None:
"""Add error data."""
super().__init__(*args)
self.data = data
if not data:
return
# verify if `error` is in data and if it is not empty
if error_ := data.get("error"):
if isinstance(error_, str):
self.message = error_
elif isinstance(error_, list) and isinstance(error_[0], str):
self.message = ". ".join(error_)
elif isinstance(error_, dict) and "message" in error_:
self.message = error_["message"]
# verify if `message` is in data and if it is not empty
if message_ := data.get("message"):
if isinstance(message_, str):
self.message = message_
class Rest400Error(RestAPIError):
"""Data invalid."""
code = 400
message = "Invalid data"
class Rest401Error(RestAPIError):
"""Access token declined."""
code = 401
message = "Authentication refused."
class Rest403Error(RestAPIError):
"""Insufficient permission to access the requested data."""
code = 403
message = "Permission error."
class Rest404Error(RestAPIError):
"""Path or object does not exist."""
code = 404
message = "Data not found."
class Rest405Error(RestAPIError):
"""The method is not allowed is on this path."""
code = 405
message = "Method or path not allowed."
class Rest409Error(RestAPIError):
"""There was a conflict due to a duplicate transaction."""
code = 409
message = "Conflict or duplicate transaction."
class Rest503Error(RestAPIError):
"""The Rest server is down."""
code = 503
message = "The Rest server is unreachable."
RestErrorMap = {
400: Rest400Error,
422: Rest400Error,
401: Rest401Error,
403: Rest403Error,
404: Rest404Error,
405: Rest405Error,
409: Rest409Error,
# Server Error
500: Rest503Error,
501: Rest503Error,
502: Rest503Error,
503: Rest503Error,
}
|
/sapx-0.1.20-py3-none-any.whl/sap/rest/exceptions.py
| 0.79858 | 0.229546 |
exceptions.py
|
pypi
|
import base64
import re
from enum import Enum
from typing import TYPE_CHECKING, Any, Mapping, Optional
from fastapi import Request
if TYPE_CHECKING:
from pydantic.error_wrappers import ErrorDict
def pydantic_format_errors(error_list: list["ErrorDict"]) -> dict[str, dict[str, Any]]:
"""Format pydantic ErrorDict with listed loc to dict format.
[{'loc': ('a', 'b'), 'msg': 'message', 'type': 'value_error.str.regex'}]
=>
{'a': {'b': {'msg': 'message', 'type': 'value_error.str.regex'}}}
"""
result = {}
for error in error_list:
loc = error["loc"]
error_dict: dict[str, Any] = {"msg": error["msg"], "type": error["type"]}
if "ctx" in error:
error_dict["ctx"] = error["ctx"]
for x in loc[:0:-1]:
error_dict = {str(x): error_dict}
result[str(loc[0])] = error_dict
return result
class FlashLevel(Enum):
"""Fash message levels."""
INFO: str = "info"
ERROR: str = "error"
SUCCESS: str = "success"
class Flash:
"""Toast messaging backend.
Good applications and user interfaces are all about feedback.
If the user does not get enough feedback they will probably end up hating the application.
This provides a really simple way to give feedback to a user with the flashing system.
The flashing system basically makes it possible to record a message at the end of a request
and access it next request and only next request.
This is based on https://flask.palletsprojects.com/en/2.2.x/patterns/flashing/
"""
@classmethod
def add_message(cls, request: Request, message: str, level: FlashLevel = FlashLevel.INFO) -> None:
"""Record a message to be displayed to the user."""
if "_messages" not in request.session:
request.session["_messages"] = []
request.session["_messages"].append({"message": message, "level": level.value})
@classmethod
def get_messages(cls, request: Request) -> list[str]:
"""Get flashed messages in the template."""
messages: list[str] = []
if "_messages" in request.session:
messages = request.session.pop("_messages")
request.session["_messages"] = []
return messages
def base64_url_encode(text: str) -> str:
"""Encode a b64 for use in URL query by removing `=` character."""
return base64.urlsafe_b64encode(text.encode()).rstrip(b"\n=").decode("ascii")
def base64_url_decode(text: str) -> str:
"""Decode a URL safely encoded b64."""
return base64.urlsafe_b64decode(text.encode().ljust(len(text) + len(text) % 4, b"=")).decode()
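# Hedged sketch (illustrative): the two helpers above are designed as inverses, which makes
# them handy for packing identifiers into URL query parameters. A quick round-trip check:
def _example_base64_round_trip() -> None:
    """Round-trip the URL-safe base64 helpers on a sample value (illustrative)."""
    original = "user@example.com"
    encoded = base64_url_encode(original)
    assert "=" not in encoded  # padding stripped, safe for query strings
    assert base64_url_decode(encoded) == original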
def merge_dict_deep(dict_a: dict[str, Any], dict_b: dict[str, Any], path: Optional[list[str]] = None) -> dict[str, Any]:
"""
Deep merge dictionaries. Merge b into a.
```python
a = {1:{"a":{A}}, 2:{"b":{B}}}
b = {2:{"c":{C}}, 3:{"d":{D}}}
print(merge_dict_deep(a, b))
# result
{1:{"a":{A}}, 2:{"b":{B},"c":{C}}, 3:{"d":{D}}}
```
"""
# source: https://stackoverflow.com/questions/7204805/how-to-merge-dictionaries-of-dictionaries/7205107#7205107
if path is None:
path = []
for key in dict_b:
if key in dict_a:
if isinstance(dict_a[key], dict) and isinstance(dict_b[key], dict):
merge_dict_deep(dict_a[key], dict_b[key], path + [str(key)])
elif dict_a[key] == dict_b[key]:
pass # same leaf value
else: # b value is more recent
dict_a[key] = dict_b[key]
else:
dict_a[key] = dict_b[key]
return dict_a
unflatten_regex = re.compile(r"(?P<key_parent>\w+)\[(?P<key_child>\w+)\]")
def unflatten_form_data(form_data: Mapping[str, Any]) -> dict[str, Any]:
"""
Un-flatten a form data and return the corresponding cascading dict.
```python
form_data = { "user[first_name]": "John", "user[last_name]": "Doe"}
    print(unflatten_form_data(form_data))
```
The result will be:
```python
{ "user": {"first_name": "John", "last_name": "Doe"}}
```
"""
res: dict[str, Any] = {}
for key, value in form_data.items():
if reg_match := unflatten_regex.match(key):
key_parent, key_child = reg_match.groups()
res.setdefault(key_parent, {})
res[key_parent][key_child] = value
else:
res[key] = value
return res
|
/sapx-0.1.20-py3-none-any.whl/sap/fastapi/utils.py
| 0.838812 | 0.584686 |
utils.py
|
pypi
|
from dataclasses import dataclass
from typing import Any, Generic, Optional
import pydantic
from fastapi import Request
from sap.beanie.document import DocT
from .exceptions import Validation422Error
from .serializers import SerializerT, WSerializerT
from .utils import Flash, FlashLevel, merge_dict_deep, pydantic_format_errors, unflatten_form_data
@dataclass
class FormValidation(Generic[WSerializerT]):
"""Return the result of data validation for a form serializer."""
data: dict[str, Any]
errors: dict[str, Any]
serializer: Optional[WSerializerT]
async def validate_form(
request: Request,
serializer_write_class: type[WSerializerT],
serializer_read_class: Optional[type[SerializerT]] = None,
instance: Optional[DocT] = None,
) -> FormValidation[WSerializerT]:
"""Check that a submitted form pass validation."""
form_data: dict[str, Any] = {}
if serializer_read_class and instance:
# Means this is an update. So we first populate existing data
serializer_read: SerializerT = serializer_read_class.read(instance=instance)
form_data = serializer_read.dict()
form_data_received = await request.form()
form_data = merge_dict_deep(form_data, unflatten_form_data(form_data_received))
form_errors: dict[str, Any] = {}
async def run_validation() -> WSerializerT:
"""Run serializer validation."""
serializer_: WSerializerT = serializer_write_class(**form_data, instance=instance)
await serializer_.run_async_validators()
return serializer_
serializer_write: Optional[WSerializerT] = None
try:
serializer_write = await run_validation()
except pydantic.ValidationError as err:
form_errors = pydantic_format_errors(err.errors())
msg = "Les informations soumises ne sont pas valides."
form_errors["__root__"] = {"msg": msg}
Flash.add_message(request, msg, level=FlashLevel.ERROR)
except (AssertionError, Validation422Error) as err:
form_errors["__root__"] = {"msg": str(err)}
Flash.add_message(request, str(err), level=FlashLevel.ERROR)
return FormValidation(serializer=serializer_write, data=form_data, errors=form_errors)
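# Hedged usage sketch (illustrative): a POST handler would typically branch on the
# FormValidation result. The serializer class and the response shape are assumptions.
async def _example_handle_form(request: Request, serializer_write_class: type[WSerializerT]) -> dict[str, Any]:
    """Sketch of how a route could react to validate_form (illustrative)."""
    validation = await validate_form(request, serializer_write_class=serializer_write_class)
    if validation.errors:
        # Re-render the form with the errors and the previously submitted data.
        return {"ok": False, "errors": validation.errors, "data": validation.data}
    assert validation.serializer is not None  # valid submission
    return {"ok": True}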
|
/sapx-0.1.20-py3-none-any.whl/sap/fastapi/forms.py
| 0.900906 | 0.162115 |
forms.py
|
pypi
|
import numpy as np
from random import sample
from PIL import ImageOps, Image
def load_images(img_path, list_of_img_names) -> list:
"""
    Load all the images from the given folder and convert them to grayscale.
"""
imgs_list = []
for image_name in list_of_img_names:
        img = Image.open(f"{img_path}/{image_name}")
img = ImageOps.grayscale(img)
imgs_list.append(img)
return imgs_list
def get_shuffled_idxs(*, imgs_list, step=1) -> tuple[list, list, int]:
"""
    Find and return the shuffled pixel indices of each image:
[{column1: [row1],
column2: [row2],
...},
...]
"""
shuffled_imgs_idxs = []
keys_list = []
total_length = 0
for img in imgs_list:
img_size = img.size # tuple (x, y)
x, y = (i // step for i in img_size)
keys_y = list(range(0, y * step, step))
vals_x = list(range(0, x * step, step))
img_indxs = {key: sample(vals_x, x) for key in keys_y}
keys_list.append(keys_y)
shuffled_imgs_idxs.append(img_indxs)
total_length += x * y
return shuffled_imgs_idxs, keys_list, total_length
def add_noise(img, scale=0.2707) -> np.ndarray:
"""
    Add noise to the image using the
    (img + img * noise) formula
"""
# Create noise
size = img.shape
noise = np.random.rayleigh(scale=scale, size=size)
# Add noise
noised_img = img + img * noise
noised_img = np.where(noised_img <= 255, noised_img, 255)
return noised_img
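# Hedged sketch (illustrative): applying the multiplicative Rayleigh noise above to a
# synthetic grayscale array; the shape, fill value and scale are arbitrary choices.
def example_add_noise() -> np.ndarray:
    """Return a noised 64x64 constant image, clipped to the 0-255 range (illustrative)."""
    clean = np.full((64, 64), 128.0)
    noised = add_noise(clean, scale=0.2707)
    assert noised.max() <= 255
    return noised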
def add_borders(img, win_size) -> Image.Image:
"""
    Create an image with mirrored and
    flipped borders of width win_size
"""
x = img.size[0]
y = img.size[1]
# Sides
left_side = img.crop((0, 0, win_size, y))
right_side = img.crop((x - win_size, 0, x, y))
top_side = img.crop((0, 0, x, win_size))
bottom_side = img.crop((0, y - win_size, x, y))
    # Flipped or mirrored sides
rot_left_side = ImageOps.mirror(left_side)
rot_right_side = ImageOps.mirror(right_side)
rot_top_side = ImageOps.flip(top_side)
rot_bottom_side = ImageOps.flip(bottom_side)
# Corners
top_left = left_side.crop((0, 0, win_size, win_size))
top_right = right_side.crop((0, 0, win_size, win_size))
bottom_left = left_side.crop((0, y - win_size, win_size, y))
bottom_right = right_side.crop((0, y - win_size, win_size, y))
    # Flipped and mirrored corners
rot_top_left = ImageOps.flip(ImageOps.mirror(top_left))
rot_top_right = ImageOps.flip(ImageOps.mirror(top_right))
rot_bottom_left = ImageOps.flip(ImageOps.mirror(bottom_left))
rot_bottom_right = ImageOps.flip(ImageOps.mirror(bottom_right))
# Create new image
size = (x + 2 * win_size, y + 2 * win_size)
new_image = Image.new("L", size=size)
# Add corners
new_image.paste(rot_top_left, (0, 0))
new_image.paste(rot_top_right, (win_size + x, 0))
new_image.paste(rot_bottom_left, (0, win_size + y))
new_image.paste(rot_bottom_right, (x + win_size, y + win_size))
# Add sides
new_image.paste(rot_top_side, (win_size, 0))
new_image.paste(rot_bottom_side, (win_size, win_size + y))
new_image.paste(rot_left_side, (0, win_size))
new_image.paste(rot_right_side, (x + win_size, win_size))
    # Paste the original image in the center
new_image.paste(img, (win_size, win_size))
return new_image
|
/sar_handler-0.1.8.tar.gz/sar_handler-0.1.8/sar_handler/image_processing.py
| 0.603231 | 0.510192 |
image_processing.py
|
pypi
|
import numpy as np
import warnings
def anisodiff(img, niter=1, kappa=50, gamma=0.1, step=(1., 1.), option=1, ploton=False):
"""
Anisotropic diffusion.
Usage:
imgout = anisodiff(im, niter, kappa, gamma, option)
Arguments:
img - input image
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
ploton - if True, the image will be plotted on every iteration
Returns:
imgout - diffused image.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence diffusion
across step edges. A large value reduces the influence of intensity
gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between adjacent
pixels differs in the x and y axes
Diffusion equation 1 favours high contrast edges over low contrast ones.
Diffusion equation 2 favours wide regions over smaller ones.
Reference:
P. Perona and J. Malik.
    Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
Original MATLAB code by Peter Kovesi
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal
Department of Pharmacology
University of Oxford
<[email protected]>
June 2000 original version.
March 2002 corrected diffusion eqn No 2.
July 2012 translated to Python
"""
# ...you could always diffuse each color channel independently if you
# really want
if img.ndim == 3:
warnings.warn("Only grayscale images allowed, converting to 2D matrix")
img = img.mean(2)
# initialize output array
img = img.astype('float32')
imgout = img.copy()
# initialize some internal variables
deltaS = np.zeros_like(imgout)
deltaE = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
gS = np.ones_like(imgout)
gE = gS.copy()
# create the plot figure, if requested
if ploton:
import pylab as pl
from time import sleep
fig = pl.figure(figsize=(20, 5.5), num="Anisotropic diffusion")
ax1, ax2 = fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)
ax1.imshow(img, interpolation='nearest')
ih = ax2.imshow(imgout, interpolation='nearest', animated=True)
ax1.set_title("Original image")
ax2.set_title("Iteration 0")
fig.canvas.draw()
for ii in range(niter):
# calculate the diffs
deltaS[:-1, :] = np.diff(imgout, axis=0)
deltaE[:, :-1] = np.diff(imgout, axis=1)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gS = np.exp(-(deltaS/kappa)**2.)/step[0]
gE = np.exp(-(deltaE/kappa)**2.)/step[1]
elif option == 2:
gS = 1./(1.+(deltaS/kappa)**2.)/step[0]
gE = 1./(1.+(deltaE/kappa)**2.)/step[1]
# update matrices
E = gE*deltaE
S = gS*deltaS
# subtract a copy that has been shifted 'North/West' by one
        # pixel. Don't ask questions. Just do it. Trust me.
NS[:] = S
EW[:] = E
NS[1:, :] -= S[:-1, :]
EW[:, 1:] -= E[:, :-1]
# update the image
imgout += gamma*(NS+EW)
if ploton:
iterstring = "Iteration %i" % (ii+1)
ih.set_data(imgout)
ax2.set_title(iterstring)
fig.canvas.draw()
# sleep(0.01)
return imgout
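# Hedged usage sketch (illustrative): denoising a 2D array with a few iterations, using
# parameter values inside the ranges suggested by the docstring above.
def example_anisodiff() -> np.ndarray:
    """Run Perona-Malik diffusion on a random test image (illustrative)."""
    noisy = (np.random.rand(128, 128) * 255).astype('float32')
    return anisodiff(noisy, niter=10, kappa=50, gamma=0.2, option=1)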
def anisodiff3(stack, niter=1, kappa=50, gamma=0.1, step=(1., 1., 1.), option=1, ploton=False):
"""
3D Anisotropic diffusion.
Usage:
stackout = anisodiff(stack, niter, kappa, gamma, option)
Arguments:
stack - input stack
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (z,y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
ploton - if True, the middle z-plane will be plotted on every
iteration
Returns:
stackout - diffused stack.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence diffusion
across step edges. A large value reduces the influence of intensity
gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between adjacent
pixels differs in the x,y and/or z axes
Diffusion equation 1 favours high contrast edges over low contrast ones.
Diffusion equation 2 favours wide regions over smaller ones.
Reference:
P. Perona and J. Malik.
    Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
Original MATLAB code by Peter Kovesi
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal
Department of Pharmacology
University of Oxford
<[email protected]>
June 2000 original version.
March 2002 corrected diffusion eqn No 2.
July 2012 translated to Python
"""
# ...you could always diffuse each color channel independently if you
# really want
if stack.ndim == 4:
warnings.warn("Only grayscale stacks allowed, converting to 3D matrix")
stack = stack.mean(3)
# initialize output array
stack = stack.astype('float32')
stackout = stack.copy()
# initialize some internal variables
deltaS = np.zeros_like(stackout)
deltaE = deltaS.copy()
deltaD = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
UD = deltaS.copy()
gS = np.ones_like(stackout)
gE = gS.copy()
gD = gS.copy()
# create the plot figure, if requested
if ploton:
import pylab as pl
from time import sleep
showplane = stack.shape[0]//2
fig = pl.figure(figsize=(20, 5.5), num="Anisotropic diffusion")
ax1, ax2 = fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)
ax1.imshow(stack[showplane, ...].squeeze(), interpolation='nearest')
ih = ax2.imshow(stackout[showplane, ...].squeeze(),
interpolation='nearest', animated=True)
ax1.set_title("Original stack (Z = %i)" % showplane)
ax2.set_title("Iteration 0")
fig.canvas.draw()
    for ii in range(niter):
# calculate the diffs
deltaD[:-1, :, :] = np.diff(stackout, axis=0)
deltaS[:, :-1, :] = np.diff(stackout, axis=1)
deltaE[:, :, :-1] = np.diff(stackout, axis=2)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gD = np.exp(-(deltaD/kappa)**2.)/step[0]
gS = np.exp(-(deltaS/kappa)**2.)/step[1]
gE = np.exp(-(deltaE/kappa)**2.)/step[2]
elif option == 2:
gD = 1./(1.+(deltaD/kappa)**2.)/step[0]
gS = 1./(1.+(deltaS/kappa)**2.)/step[1]
gE = 1./(1.+(deltaE/kappa)**2.)/step[2]
# update matrices
D = gD*deltaD
E = gE*deltaE
S = gS*deltaS
# subtract a copy that has been shifted 'Up/North/West' by one
        # pixel. Don't ask questions. Just do it. Trust me.
UD[:] = D
NS[:] = S
EW[:] = E
UD[1:, :, :] -= D[:-1, :, :]
NS[:, 1:, :] -= S[:, :-1, :]
EW[:, :, 1:] -= E[:, :, :-1]
# update the image
stackout += gamma*(UD+NS+EW)
if ploton:
iterstring = "Iteration %i" % (ii+1)
ih.set_data(stackout[showplane, ...].squeeze())
ax2.set_title(iterstring)
fig.canvas.draw()
# sleep(0.01)
return stackout
|
/sar_handler-0.1.8.tar.gz/sar_handler-0.1.8/sar_handler/fastaniso.py
| 0.895694 | 0.697519 |
fastaniso.py
|
pypi
|
import pandas as pd
from pathlib import Path
from math import floor
from torch import Tensor
from numpy import newaxis
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
# Paths
main_data_path = Path("../data")
scv_folder = main_data_path / "csv_files" # scv_folder
img_path = main_data_path / "images"
class _CustomBigDataLoader:
def __init__(self, *, scv_folder, dataset_name, batch_size, train_size, is_train):
self.scv_folder = scv_folder
self.dataset_name = dataset_name
self.main_path = scv_folder / dataset_name
self.batch_size = batch_size
self.train_size = train_size
self.test_batches, self.real_test_samples = self._get_test_amount()
self.skip_rows = self.real_test_samples
self.is_train = is_train
self.counter = 0
if self.is_train:
self.data = pd.read_csv(self.main_path, chunksize=self.batch_size,
header=None, index_col=None, iterator=True)
else:
self.data = pd.read_csv(self.main_path, chunksize=self.batch_size,
header=None, index_col=None, iterator=True,
skiprows=self.skip_rows)
def _get_len_data(self) -> int:
idx_start = self.dataset_name.find("L") + 1
idx_finish = self.dataset_name.find(".")
length = int(self.dataset_name[idx_start:idx_finish])
return length
def _get_test_amount(self) -> tuple:
length = self._get_len_data()
        test_samples = int(length * self.train_size)
        test_batches = floor(test_samples / self.batch_size)
real_test_samples = test_batches * self.batch_size
return test_batches, real_test_samples
def __iter__(self):
return self
def __next__(self):
if self.is_train:
if self.counter < self.test_batches:
self.counter += 1
raw_chunk = self.data.get_chunk()
x, y = self._prepare_chunk(raw_chunk)
return x, y
raise StopIteration
else:
raw_chunk = self.data.get_chunk()
x, y = self._prepare_chunk(raw_chunk)
return x, y
def _prepare_chunk(self, raw_chunk):
x = raw_chunk.drop(columns=raw_chunk.shape[1] - 1)
y = raw_chunk[raw_chunk.shape[1] - 1]
x = Tensor(x.to_numpy()).float()
y = Tensor(y.to_numpy()[:, newaxis]).float()
return x, y
def get_train_test_big_data(*, scv_folder, dataset_name, batch_size, train_size):
train = _CustomBigDataLoader(scv_folder=scv_folder,
dataset_name=dataset_name,
batch_size=batch_size,
train_size=train_size,
is_train=True)
test = _CustomBigDataLoader(scv_folder=scv_folder,
dataset_name=dataset_name,
batch_size=batch_size,
train_size=train_size,
is_train=False)
return train, test
class _CustomSmallDataLoader(Dataset):
def __init__(self, x, y) -> None:
super().__init__()
self.x = Tensor(x).float()
self.y = Tensor(y[:, newaxis]).float()
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return len(self.x)
def get_train_test_small_data(*, scv_folder, dataset_name, batch_size, test_size=0.2, split=True):
main_path = scv_folder / dataset_name
data = pd.read_csv(main_path, header=None)
y = data[data.shape[1] - 1].to_numpy()
x = data.iloc[:, 0:data.shape[1]-1].to_numpy()
if split:
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size, shuffle=True)
train_data = DataLoader(_CustomSmallDataLoader(X_train, y_train), batch_size=batch_size, shuffle=False)
test_data = DataLoader(_CustomSmallDataLoader(X_test, y_test), batch_size=batch_size, shuffle=False)
return train_data, test_data
else:
return DataLoader(_CustomSmallDataLoader(x, y), batch_size=batch_size, shuffle=True)
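# Hedged usage sketch (illustrative): loading a small csv dataset into PyTorch dataloaders.
# The file name is an assumption and must exist under scv_folder for this to run.
if __name__ == "__main__":  # pragma: no cover
    train_loader, test_loader = get_train_test_small_data(
        scv_folder=scv_folder,
        dataset_name="dataset_L1000.csv",  # hypothetical file
        batch_size=32,
        test_size=0.2,
    )
    for x_batch, y_batch in train_loader:
        print(x_batch.shape, y_batch.shape)
        break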
|
/sar_handler-0.1.8.tar.gz/sar_handler-0.1.8/sar_handler/csv_dataloader.py
| 0.698227 | 0.350922 |
csv_dataloader.py
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sara_distributions-0.1.tar.gz/sara_distributions-0.1/sara_distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
from array import array
from typing import Dict
import json
from sara_sdk.client.requests import fetch
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, update as _update, delete as _delete, create as _create
from requests import post
RESOURCE = "webhook/endpoints"
def list(session: Session = None, **filters):
"""
    List an array of endpoints
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of endpoints
Returns:
result (json): returns the result of the request as json
Example:
>>> list()
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def create(url: Dict, session: Session = None):
"""
Create a new endpoint
Args:
        url (Dict): payload containing the url of the endpoint
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> create("https://endpoint.url")
"""
result = _create(resource=RESOURCE, payload=url, session=session)
return result
def retrieve(uuid: str, session: Session = None):
"""
    Retrieve an endpoint
Args:
uuid (string): uuid of the endpoint
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource=RESOURCE, id=uuid, session=session)
return result
def update(uuid: str, url: Dict, session: Session = None):
"""
    Update an endpoint
Args:
uuid (string): uuid of the endpoint
url (string): url of the endpoint
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "url": "https://endpoint.url" })
"""
data = {"url": url}
result = _update(resource=RESOURCE, id=uuid, payload=url, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
    Delete an endpoint
Args:
uuid (string): uuid of the endpoint
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(resource=RESOURCE, id=uuid, session=session)
return result
def list_relations(endpoint: str, session: Session = None, **filters):
"""
List a array of relations
Args:
endpoint (string): uuid of the endpoint
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of relations
Returns:
result (json): returns the result of the request as json
Example:
        >>> list_relations("endpoint_uuid")
"""
result = _list(resource="{}/{}/relations".format(RESOURCE,
endpoint), session=session, **filters)
return result
def create_relations(endpoint: str, robot: array, topic: array, session: Session = None):
"""
Create a new relation
Args:
endpoint (string): uuid of the endpoint
topic (string): name of the topics
robot (string): uuid of the robots
session (Session): Used only if want to use a different session instead default
Returns:
result (string): returns the result of the request as string
Example:
>>> create_relations("endpoint_uuid", [robots], [topics])
"""
data = {"robots": robot, "topics": topic}
result = fetch(method=post, path="{}/{}/relations".format(RESOURCE,
endpoint), payload=data, session=session)
return result.content.decode("utf-8")
def delete_relations(endpoint: str, uuid: str, session: Session = None):
"""
Delete a relation
Args:
endpoint (string): uuid of the endpoint
uuid (string): uuid of the relation
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> delete_relations("endpoint_uuid", "relation_uuid")
"""
result = _delete(
resource="{}/{}/relations".format(RESOURCE, endpoint), id=uuid, session=session)
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/webhook/endpoints.py
| 0.89733 | 0.265214 |
endpoints.py
|
pypi
|
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, update as _update, delete as _delete, create as _create, attach, detach
RESOURCE = "iam/clients"
def list(session: Session = None, **filters):
"""
List a array of clients
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of clients
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="client name")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def attachRobot(robot: str, client: str, session: Session = None):
"""
Attach a robot to a client
Args:
robot (string): robot uuid to attach on client
client (string): client uuid that the robot is going to be attached
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> attachRobot("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = attach(RESOURCE, type="robot", this=robot,
that=client, session=session)
return result
def detachRobot(robot: str, client: str, session: Session = None):
"""
Detach a robot from a client
Args:
robot (string): robot uuid to detach from client
client (string): client uuid that the robot is, to be detached of
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> detachRobot("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = detach(RESOURCE, type="robot", this=robot,
that=client, session=session)
return result
def attachUser(user: str, client: str, session: Session = None):
"""
Attach a user to a client
Args:
user (string): user uuid to attach on client
client (string): client uuid that the user is going to be attached
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> attachUser("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = attach(RESOURCE, type="user", this=user,
that=client, session=session)
return result
def detachUser(user: str, client: str, session: Session = None):
"""
Detach a user from a client
Args:
user (string): user uuid to detach from client
client (string): client uuid that the user is, to be detached of
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> detachUser("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = detach(RESOURCE, type="user", this=user,
that=client, session=session)
return result
def slug(slug: str, session=None):
"""
Get the data of an client by passing the slug
Args:
slug (str): slug of the client
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> slug("test-client")
"""
result = _list("iam/slugs", session=session, pk=slug)
return result
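# Illustrative usage sketch (not part of the original SDK): looks a client up by slug
# and attaches a robot and a user to it. The uuids below are placeholders and the
# default Session is assumed to be configured.
def _example_client_setup():
    client = slug("test-client")  # resolve the client record by its slug
    client_uuid = "f3a4d78a-4540-4d46-a2ed-00e613a65d4a"  # placeholder client uuid
    attachRobot("<robot-uuid>", client_uuid)
    attachUser("<user-uuid>", client_uuid)
    return client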
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/iam/clients.py
| 0.894075 | 0.284272 |
clients.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import attach, detach, retrieve as _retrieve, list as _list, update as _update, delete as _delete, create as _create
RESOURCE = "iam/policies"
def list(session: Session = None, **filters):
"""
List an array of policies
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of policies
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="policy name")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a policy by uuid
Args:
uuid (string): policy uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session)
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a policy by passing uuid and an model (Data to update)
Args:
uuid (string): policy uuid to retrieve
model (Dict): A dictionary with the data the will be updated on policy
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "new name" })
"""
result = _update(RESOURCE, id=uuid, payload=model, session=session)
return result
def create(model: Dict, session: Session = None):
"""
Create a policy by passing an model (Data)
Args:
model (Dict): A dictionary with the data the will be used to create an policy
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
"""
result = _create(RESOURCE, payload=model, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
Delete a policy by passing uuid
Args:
uuid (string): policy uuid to delete
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session)
return result
def attachAction(action: str, policy: str, session: Session = None):
"""
Attach a action to a policy
Args:
action (string): action uuid to attach on policy
policy (string): policy uuid that the action is going to be attached
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> attachAction("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = attach(RESOURCE, type="actions", this=action,
that=policy, session=session)
return result
def detachAction(action: str, policy: str, session: Session = None):
"""
Detach a action from a policy
Args:
action (string): action uuid to detach from policy
policy (string): policy uuid that the action is, to be detached of
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> detachAction("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = detach(RESOURCE, type="actions", this=action,
that=policy, session=session)
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/iam/policies.py
| 0.920178 | 0.31732 |
policies.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, update as _update, delete as _delete, create as _create, attach, detach
RESOURCE = "iam/groups"
def list(session: Session = None, **filters):
"""
List an array of groups
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of groups
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="group name")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a group by uuid
Args:
uuid (string): group uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session)
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a group by passing uuid and an model (Data to update)
Args:
uuid (string): group uuid to retrieve
model (Dict): A dictionary with the data the will be updated on group
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "new name" })
"""
result = _update(RESOURCE, id=uuid, payload=model, session=session)
return result
def create(model: Dict, session: Session = None):
"""
Create a group by passing an model (Data)
Args:
model (Dict): A dictionary with the data the will be used to create an group
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
"""
result = _create(RESOURCE, payload=model, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
Delete a group by passing uuid
Args:
uuid (string): group uuid to delete
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session)
return result
def attachUser(user: str, group: str, session: Session = None):
"""
Attach a user to a group
Args:
user (string): user uuid to attach on group
group (string): group uuid that the user is going to be attached
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> attachUser("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = attach(RESOURCE, type="user", this=user,
that=group, session=session)
return result
def detachUser(user: str, group: str, session: Session = None):
"""
Detach a user from a group
Args:
user (string): user uuid to detach from group
group (string): group uuid that the user is, to be detached of
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> detachUser("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = detach(RESOURCE, type="user", this=user,
that=group, session=session)
return result
def attachPolicy(policy: str, group: str, session: Session = None):
"""
Attach a policy to a group
Args:
policy (string): policy uuid to attach on group
group (string): group uuid that the policy is going to be attached
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> attachPolicy("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = attach(RESOURCE, type="policy", this=policy,
that=group, session=session)
return result
def detachPolicy(policy: str, group: str, session: Session = None):
"""
Detach a policy from a group
Args:
policy (string): policy uuid to detach from group
group (string): group uuid that the policy is, to be detached of
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> detachPolicy("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = detach(RESOURCE, type="policy", this=policy,
that=group, session=session)
return result
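# Illustrative usage sketch (not part of the original SDK): creates a group, grants it
# a policy, adds a user and then tears everything down again. Reading "uuid" from the
# create() response is an assumption about the payload shape.
def _example_group_lifecycle():
    group = create({"name": "example group"})
    group_uuid = group.get("uuid")  # assumed response field
    attachPolicy("<policy-uuid>", group_uuid)
    attachUser("<user-uuid>", group_uuid)
    detachUser("<user-uuid>", group_uuid)
    delete(group_uuid)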
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/iam/groups.py
| 0.927273 | 0.293117 |
groups.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, update as _update, delete as _delete, create as _create
RESOURCE = "iam/robots"
def list(session: Session = None, **filters):
"""
List an array of robots
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of robots
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,fleet="3ddc1eb5-8433-4eca-a95d-ff2d688cc2fc")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a robot by uuid
Args:
uuid (string): Robot uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session)
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a robot by passing uuid and an model (Data to update)
Args:
uuid (string): Robot uuid to retrieve
model (Dict): A dictionary with the data the will be updated on robot
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "new name" })
"""
result = _update(RESOURCE, id=uuid, payload=model, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
Delete a robot by passing uuid
Args:
uuid (string): Robot uuid to delete from client
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session)
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/iam/robots.py
| 0.928393 | 0.264655 |
robots.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, update as _update, delete as _delete, create as _create, attach, detach
RESOURCE = "iam/fleets"
def list(session: Session = None, **filters):
"""
List an array of fleets
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of fleets
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="fleetname")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a fleet by uuid
Args:
uuid (string): fleet uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session)
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a fleet by passing uuid and an model (Data to update)
Args:
uuid (string): fleet uuid to retrieve
model (Dict): A dictionary with the data the will be updated on fleet
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "new name" })
"""
result = _update(RESOURCE, id=uuid, payload=model, session=session)
return result
def create(model: Dict, session: Session = None):
"""
Create a fleet by passing an model (Data)
Args:
model (Dict): A dictionary with the data the will be used to create an fleet
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
"""
result = _create(RESOURCE, payload=model, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
Delete a fleet by passing uuid
Args:
uuid (string): fleet uuid to delete
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session)
return result
def attachRobot(robot: str, fleet: str, session: Session = None):
"""
Attach a robot to a fleet
Args:
robot (string): robot uuid to attach on fleet
fleet (string): fleet uuid that the robot is going to be attached
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> attachRobot("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = attach(RESOURCE, type="robot", this=robot,
that=fleet, session=session)
return result
def detachRobot(robot: str, fleet: str, session: Session = None):
"""
Detach a robot from a fleet
Args:
robot (string): robot uuid to detach from fleet
fleet (string): fleet uuid that the robot is, to be detached of
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of request as json
Example:
>>> detachRobot("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f3a4d78a-4540-4d46-a2ed-00e613a65d4a")
"""
result = detach(RESOURCE, type="robot", this=robot,
that=fleet, session=session)
return result
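# Illustrative usage sketch (not part of the original SDK): creates a fleet and moves a
# robot into it. Reading "uuid" from the create() response is an assumption about the
# payload shape; the robot uuid is a placeholder.
def _example_fleet_setup():
    fleet = create({"name": "example-fleet"})
    fleet_uuid = fleet.get("uuid")  # assumed response field
    attachRobot("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", fleet_uuid)
    return retrieve(fleet_uuid)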
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/iam/fleets.py
| 0.928563 | 0.312023 |
fleets.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, update as _update, delete as _delete, create as _create
RESOURCE = "iam/users"
def list(session: Session = None, **filters):
"""
List an array of users
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of users
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="user name")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a user by uuid
Args:
uuid (string): user uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session)
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a user by passing uuid and an model (Data to update)
Args:
uuid (string): user uuid to retrieve
model (Dict): A dictionary with the data the will be updated on user
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "new name" })
"""
result = _update(RESOURCE, id=uuid, payload=model, session=session)
return result
def create(model: Dict, session: Session = None):
"""
Create a user by passing an model (Data)
Args:
model (Dict): A dictionary with the data the will be used to create an user
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
"""
result = _create(RESOURCE, payload=model, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
Delete a user by passing uuid
Args:
uuid (string): user uuid to delete
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session)
return result
def me(session: Session = None):
"""
Get the data from the user logged
Args:
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> me()
"""
result = _retrieve(RESOURCE, id="me", session=session)
return result
def verifyUserByEmail(email: str, session: Session = None):
"""
Verify user by email
Args:
email (string): email to validate
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> verifyUserByEmail("[email protected]")
"""
result = _retrieve(RESOURCE, id="verifyUserByEmail",
session=session, email=email)
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/iam/users.py
| 0.890449 | 0.264335 |
users.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import list as _list, list_paginated as _list_paginated, create as _create, retrieve as _retrieve, delete as _delete
RESOURCE = "missions/tags"
def list(session: Session = None, **filters):
"""
List an array of tags
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of tags
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="tag name")
"""
result = _list(resource=RESOURCE, session=session,version="v2", **filters)
return result
def list_paginated(session: Session = None, **filters):
"""
List iterator of pages of tags
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of tags
Returns:
result (json): returns the result of the request as json by page
Example:
>>> next(list_paginated(page=1,page_size=10,name="tag name"))
"""
result = _list_paginated(resource=RESOURCE, session=session,
version="v2", **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a tag by passing uuid
Args:
uuid (UUID): tag uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session, version="v2")
return result
def create(model: Dict, session: Session = None):
"""
Create a tag
Args:
model (Dict): A dictionary with the tag data, e.g. {"name": "tag name"}
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> create("tag name")
"""
result = _create(RESOURCE, payload=model, session=session,version="v2")
return result
def delete(uuid: str, session: Session = None):
"""
Delete a tag
Args:
uuid (str): id of the tag
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session, version="v2")
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/missions/tags.py
| 0.882763 | 0.184327 |
tags.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list_paginated as _list_paginated, list as _list, update as _update, delete as _delete, create as _create
RESOURCE = "missions/stages"
def list(session: Session = None, **filters):
"""
List an array of stages
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of stages
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="stage name")
"""
result = _list(resource=RESOURCE, session=session, version="v2", **filters)
return result
def list_paginated(session: Session = None, **filters):
"""
List iterator of pages of stages
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of stages
Returns:
result (json): returns the result of the request as json by page
Example:
>>> next(list_paginated(page=1,page_size=10,name="stage name"))
"""
result = _list_paginated(resource=RESOURCE, session=session,
version="v2", **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a stage by uuid
Args:
uuid (string): stage uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session, version="v2")
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a stage by passing uuid and an model (Data to update)
Args:
uuid (string): stage uuid to retrieve
model (Dict): A dictionary with the data the will be updated on stage
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "new name" })
"""
result = _update(RESOURCE, id=uuid, payload=model,
session=session, version="v2")
return result
def create(model: Dict, session: Session = None):
"""
Create a stage by passing an model (Data)
Args:
model (Dict): A dictionary with the data the will be used to create an stage
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
"""
result = _create(RESOURCE, payload=model, session=session, version="v2")
return result
def delete(uuid: str, session: Session = None):
"""
Delete a stage by passing uuid
Args:
uuid (string): stage uuid to delete
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session, version="v2")
return result
def list_steps(stage: str, session: Session = None, **filters):
"""
List an array of steps of one stage
Args:
stage (string): stage uuid to retrieve
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of steps
Returns:
result (json): returns the result of the request as json
Example:
>>> list_steps("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", page=1,page_size=10,name="step name")
"""
result = _list(resource="{}/{}/steps".format(RESOURCE, stage),
session=session, version="v2", **filters)
return result
def retrieve_steps(stage: str, step: str, session: Session = None):
"""
Retrieve a step of one stage by uuid
Args:
stage (string): stage uuid to retrieve
step (string): step uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve_steps("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource="{}/{}/steps".format(RESOURCE,
stage), id=step, session=session, version="v2")
return result
def list_params(stage: str, session: Session = None, **filters):
"""
List an array of params of one stage
Args:
stage (string): stage uuid to retrieve
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of params
Returns:
result (json): returns the result of the request as json
Example:
>>> list_params("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", page=1,page_size=10,name="param name")
"""
result = _list(resource="{}/{}/params".format(RESOURCE, stage),
session=session, version="v2", **filters)
return result
def retrieve_params(stage: str, param: str, session: Session = None):
"""
Retrieve a param of one stage by uuid
Args:
stage (string): stage uuid to retrieve
param (string): param uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve_params("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource="{}/{}/params".format(RESOURCE,
stage), id=param, session=session, version="v2")
return result
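# Illustrative usage sketch (not part of the original SDK): walks the nested
# stage -> steps/params routes defined above. Indexing into a "results" list is an
# assumption about the paginated payload shape.
def _example_stage_details(stage_uuid="f8b85a7a-4540-4d46-a2ed-00e6134ee84a"):
    stage = retrieve(stage_uuid)
    steps = list_steps(stage_uuid)
    params = list_params(stage_uuid)
    first_step = steps.get("results", [{}])[0]  # assumed payload shape
    return stage, first_step, params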
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/missions/stages.py
| 0.917709 | 0.294587 |
stages.py
|
pypi
|
from typing import Dict
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list_paginated as _list_paginated, list as _list, update as _update, delete as _delete, create as _create
RESOURCE = "missions/steps"
def list(session: Session = None, **filters):
"""
List an array of steps
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of steps
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="step name")
"""
result = _list(resource=RESOURCE, session=session, version="v2", **filters)
return result
def list_paginated(session: Session = None, **filters):
"""
List iterator of pages of steps
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of steps
Returns:
result (json): returns the result of the request as json by page
Example:
>>> next(list_paginated(page=1,page_size=10,name="step name"))
"""
result = _list_paginated(resource=RESOURCE, session=session,
version="v2", **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a step by uuid
Args:
uuid (string): step uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session, version="v2")
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a step by passing uuid and an model (Data to update)
(Only super step can do it!)
Args:
uuid (string): step uuid to retrieve
model (Dict): A dictionary with the data the will be updated on step
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "new name" })
"""
result = _update(RESOURCE, id=uuid, payload=model,
session=session, version="v2")
return result
def create(model: Dict, session: Session = None):
"""
Create a step by passing an model (Data)
(Only super step can do it!)
Args:
model (Dict): A dictionary with the data the will be used to create an step
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
"""
result = _create(RESOURCE, payload=model, session=session, version="v2")
return result
def delete(uuid: str, session: Session = None):
"""
Delete a step by passing uuid
(Only super step can do it!)
Args:
uuid (string): step uuid to delete
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, id=uuid, session=session, version="v2")
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/missions/steps.py
| 0.919466 | 0.273344 |
steps.py
|
pypi
|
from typing import Dict
import json
from sara_sdk.common.session import Session
from ...utils.rest import list as _list, list_paginated as _list_paginated, create as _create, retrieve as _retrieve
from requests import get, post, delete as _delete, patch
from ...client.requests import fetch
RESOURCE = "missions"
def list(robot: str, session: Session = None, **filters):
"""
List an array of missions
Args:
robot (UUID): robot to return a mission
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of missions
Returns:
result (json): returns the result of the request as json
Example:
>>> list("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", page=1, page_size=10)
"""
filters["robot_id"] = robot
result = _list(resource=RESOURCE, session=session, version="v2", **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a mission by passing uuid
Args:
uuid (UUID): mission uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(RESOURCE, id=uuid, session=session, version="v2")
return result
def list_paginated(robot: str, session: Session = None, **filters):
"""
List iterator of pages of missions
Args:
robot (UUID): robot to return a mission
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of missions
Returns:
result (json): returns the result of the request as json by page
Example:
>>> next(list_paginated("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", page=1, page_size=10))
"""
filters["robot_id"] = robot
result = _list_paginated(resource=RESOURCE, session=session,
version="v2", **filters)
return result
def last(robot: str, session: Session = None):
"""
Retrieve the last mission by robot id
Args:
robot (UUID): robot to return a mission
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> last("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _list(RESOURCE+"/last", session=session,
version="v2", robot_id=robot)
return result
def create(robot: str, stages: Dict, session: Session = None):
"""
Create a mission by passing an model (Data)
Args:
robot (UUID): robot uuid to create mission
stages (Dict): A dictionary with the data the will be used to create an mission
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
"""
model = {
"robot": robot,
"stages": json.dumps(stages)
}
result = _create(RESOURCE, payload=model, session=session, version="v2")
return result
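# Illustrative usage sketch (not part of the original SDK): create() above JSON-encodes
# the stages payload, so callers pass a plain dict. The stage schema shown here
# ("stages" list with a stage uuid and params) is hypothetical, for illustration only.
def _example_create_mission():
    robot_uuid = "f8b85a7a-4540-4d46-a2ed-00e6134ee84a"
    stages = {"stages": [{"stage": "<stage-uuid>", "params": {}}]}  # hypothetical schema
    return create(robot_uuid, stages)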
def retry(mission: str, session: Session = None):
"""
Retry a mission by passing uuid
Args:
mission (UUID): mission uuid to retry
session (Session): Used only if want to use a different session instead default
Returns:
result (bool): True if the request was accepted (status 202), False otherwise
Example:
>>> retry("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = fetch(path=RESOURCE+"/"+mission+"/retry",
session=session, method=post, version="v2")
if result and result.status_code == 202:
return True
return False
def cancel(mission: str, session: Session = None):
"""
Cancel a mission by passing uuid
Args:
mission (UUID): mission uuid to cancel
session (Session): Used only if want to use a different session instead default
Returns:
result (bool): True if the request was accepted (status 202), False otherwise
Example:
>>> cancel("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = fetch(path=RESOURCE+"/"+mission+"/cancel",
session=session, method=post, version="v2")
if result and result.status_code == 202:
return True
return False
def pause(mission: str, session: Session = None):
"""
Pause a mission by passing uuid
Args:
mission (UUID): mission uuid to pause
session (Session): Used only if want to use a different session instead default
Returns:
result (bool): True if the request was accepted (status 202), False otherwise
Example:
>>> pause("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = fetch(path=RESOURCE+"/"+mission+"/pause",
session=session, method=post, version="v2")
if result and result.status_code == 202:
return True
return False
def resume(mission: str, session: Session = None):
"""
Resume a mission by passing uuid
Args:
mission (UUID): mission uuid to resume
session (Session): Used only if want to use a different session instead default
Returns:
result (bool): True if the request was accepted (status 202), False otherwise
Example:
>>> resume("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = fetch(path=RESOURCE+"/"+mission+"/resume",
session=session, method=post, version="v2")
if result and result.status_code == 202:
return True
return False
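# Illustrative usage sketch (not part of the original SDK): the control helpers above
# return True only when the API accepts the request with HTTP 202, so they can be
# chained defensively.
def _example_pause_then_resume(mission_uuid):
    if pause(mission_uuid):
        # ...wait for an operator decision, then try to continue the mission
        return resume(mission_uuid)
    return False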
def list_tags(mission: str, session: Session = None):
"""
List an array of mission tags
Args:
mission (UUID): mission to return a mission tags
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> list_tags("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _list(resource="{}/{}/tags".format(RESOURCE,
mission), session=session, version="v2")
return result
def retrieve_tags(mission: str, tag: str, session: Session = None):
"""
Retrieve a mission tag by passing uuid
Args:
mission (UUID): mission uuid to retrieve
tag (UUID): tag uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve_tags("f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource="{}/{}/tags".format(RESOURCE,
mission), id=tag, session=session, version="v2")
return result
def list_steps(mission: str, session: Session = None):
"""
List an array of steps from one mission
Args:
mission (UUID): mission to return a mission steps
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> list_steps("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _list(resource="{}/{}/steps".format(RESOURCE,
mission), session=session, version="v2")
return result
def retrieve_steps(mission: str, step: str, session: Session = None):
"""
Retrieve a step of a mission by passing uuid
Args:
mission (UUID): mission uuid to retrieve
step (UUID): step uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve_steps("f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource="{}/{}/steps".format(RESOURCE,
mission), id=step, session=session, version="v2")
return result
def list_stages(mission: str, session: Session = None):
"""
List an array of stages of one mission
Args:
mission (UUID): mission to return its stages
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> list_stages("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _list(resource="{}/{}/stages".format(RESOURCE,
mission), session=session, version="v2")
return result
def retrieve_stages(mission: str, stage: str, session: Session = None):
"""
Retrieve a stage of one mission by passing uuid
Args:
mission (UUID): mission uuid to retrieve
stage (UUID): stage uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve_stages("f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource="{}/{}/stages".format(RESOURCE,
mission), id=stage, session=session, version="v2")
return result
def list_stages_steps(mission: str, stage: str, session: Session = None):
"""
List an array of steps of one stage of one mission
Args:
mission (UUID): mission to return its stages
stage (UUID): stage to return its steps
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> list_stages_steps("f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _list(resource="{}/{}/stages/{}/steps".format(RESOURCE,
mission, stage), session=session, version="v2")
return result
def retrieve_stages_steps(mission: str, stage: str, step: str, session: Session = None):
"""
Retrieve a step of one stage of one mission by passing uuid
Args:
mission (UUID): mission uuid to retrieve
stage (UUID): stage uuid to retrieve
step (UUID): step uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve_stages_steps("f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource="{}/{}/stages/{}/steps".format(RESOURCE,
mission, stage), id=step, session=session, version="v2")
return result
def list_stages_params(mission: str, stage: str, session: Session = None):
"""
List an array of params of one stage of one mission
Args:
mission (UUID): mission to return its stages
stage (UUID): stage to return its params
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> list_stages_params("f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _list(resource="{}/{}/stages/{}/params".format(RESOURCE,
mission, stage), session=session, version="v2")
return result
def retrieve_stage_params(mission: str, stage: str, param: str, session: Session = None):
"""
Retrieve a param of one stage of one mission by passing uuid
Args:
mission (UUID): mission uuid to retrieve
stage (UUID): stage uuid to retrieve
param (UUID): param uuid to retrieve
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve_stage_params("f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a","f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _retrieve(resource="{}/{}/stages/{}/params".format(RESOURCE,
mission, stage), id=param, session=session, version="v2")
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/missions/missions.py
| 0.828245 | 0.21844 |
missions.py
|
pypi
|
from typing import Dict
from ...client.requests import fetch
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, list_paginated as _list_paginated, update as _update, delete as _delete, create as _create
RESOURCE = "srs/relationships"
def list(session: Session = None, **filters):
"""
List relationships
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of relationships
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="relationship name")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def list_paginated(session: Session = None, **filters):
"""
List iterator of relationships pages
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of relationships
Returns:
result (json): returns the result of the request as json by page
Example:
>>> next(list_paginated(page=1,page_size=10,name="relationship name"))
"""
result = _list_paginated(resource=RESOURCE, session=session,
version="v2", **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a relationship by uuid
Args:
uuid (UUID): uuid to return a relationship
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("3daa992b-ccca-4920-bcfd-24015d8f2f10")
"""
result = _retrieve(RESOURCE, uuid, session=session)
return result
def create(bucket: str, session: Session = None):
"""
Create a relationship
Args:
bucket (str): uuid of the bucket for the relationship
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> create("515488d3-0262-4879-ab2f-7da8d107e447")
"""
result = _create(RESOURCE, payload={"bucket": bucket}, session=session)
return result
def update(uuid: str, bucket: str, session: Session = None):
"""
Update a relationship
Args:
uuid (UUID): uuid to update a relationship
bucket (str): uuid of the new bucket for the relationship
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("515488d3-0262-4879-ab2f-7da8d107e447", {"bucket": "3daa992b-ccca-4920-bcfd-24015d8f2f10"})
"""
model = {
"uuid": uuid,
"bucket": bucket
}
result = _update(RESOURCE, uuid, payload=model, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
Delete a relationship
Args:
uuid (UUID): uuid to delete a relationship
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("515488d3-0262-4879-ab2f-7da8d107e447")
"""
result = _delete(RESOURCE, uuid, session=session)
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/srs/relationships.py
| 0.921442 | 0.230227 |
relationships.py
|
pypi
|
from typing import Dict
from ...client.requests import fetch
from requests import get, post
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, list_paginated as _list_paginated, update as _update, delete as _delete, create as _create
RESOURCE = "srs/buckets"
def list(session: Session = None, **filters):
"""
List buckets
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of buckets
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="bucket name")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def list_paginated(session: Session = None, **filters):
"""
List iterator of buckets pages
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of buckets
Returns:
result (json): returns the result of the request as json by page
Example:
>>> next(list_paginated(page=1,page_size=10,name="bucket name"))
"""
result = _list_paginated(resource=RESOURCE, session=session,
version="v2", **filters)
return result
def retrieve(uuid: str, session: Session = None):
"""
Retrieve a bucket by uuid
Args:
uuid (UUID): uuid to return a bucket
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> retrieve("230dd69a-0470-4542-93ef-8fb75ff4e3be")
"""
result = _retrieve(RESOURCE, uuid, session=session)
return result
def create(name: str, locality: str, type: str, session: Session = None):
"""
Create a bucket
Args:
name (str): name of the bucket
locality (str): locality of the bucket
type (str): type of the bucket ('PUBLIC' or 'PROTECTED')
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> create({ "name": "bucket-test-public", "locality": "sp_ribeirao_sy", "type": "PUBLIC" })
"""
model = {
"name": name,
"locality": locality,
"type": type
}
result = _create(RESOURCE, payload=model, session=session)
return result
def update(uuid: str, model: Dict, session: Session = None):
"""
Update a bucket
Args:
uuid (UUID): uuid of the bucket
model (Dict): A dictionary with the fields to update ('name', 'locality' and/or 'type')
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> update("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", { "name": "bucket-test-public", "type": "PUBLIC" })
"""
result = _update(RESOURCE, uuid, payload=model, session=session)
return result
def delete(uuid: str, session: Session = None):
"""
Delete a bucket
Args:
uuid (UUID): uuid of the bucket
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = _delete(RESOURCE, uuid, session=session)
return result
def upload(uuid: str, session: Session = None):
"""
Upload a file to a bucket
Args:
uuid (UUID): uuid of the bucket
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> upload("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", file=open("test.txt", "rb"))
"""
result = fetch(
RESOURCE+"/"+uuid+"/upload",
session=session,
method="POST"
)
return result
def download(uuid: str, session: Session = None):
"""
Download a file from a bucket
Args:
uuid (UUID): uuid of the bucket
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> download("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = fetch(
RESOURCE+"/"+uuid+"/download",
session=session,
method="GET"
)
return result
def create_folder(name: str, session: Session = None):
"""
Create a folder in a bucket
Args:
name (str): name of the folder
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> create_folder("test")
"""
model = {
"name": name
}
result = _create(RESOURCE+"/"+name, payload=model, session=session)
return result
def delete_object(key: str, session: Session = None):
"""
Delete an object in a bucket
Args:
key (str): key of the object
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> delete_object("test.txt")
"""
result = _delete(RESOURCE+"/"+key, session=session)
return result
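# Illustrative usage sketch (not part of the original SDK): creates a protected bucket
# and calls the upload/download routes for it. Reading "uuid" from the create()
# response is an assumption about the payload shape.
def _example_bucket_roundtrip():
    bucket = create(name="bucket-test", locality="sp_ribeirao_sy", type="PROTECTED")
    bucket_uuid = bucket.get("uuid")  # assumed response field
    upload(bucket_uuid)  # raw upload response from the API
    return download(bucket_uuid)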
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/srs/buckets.py
| 0.878477 | 0.23144 |
buckets.py
|
pypi
|
from typing import Dict
from ...client.requests import fetch
from sara_sdk.common.session import Session
from ...utils.rest import list as _list, list_paginated as _list_paginated, create as _create
RESOURCE = "srs/activities"
def list(session: Session = None, **filters):
"""
List activities
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of activities
Returns:
result (json): returns the result of the request as json
Example:
>>> list(page=1,page_size=10,name="bucket name")
"""
result = _list(resource=RESOURCE, session=session, **filters)
return result
def list_paginated(session: Session = None, **filters):
"""
List iterator of activities pages
Args:
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of activities
Returns:
result (json): returns the result of the request as json by page
Example:
>>> next(list_paginated(page=1,page_size=10))
"""
result = _list_paginated(resource=RESOURCE, session=session,
version="v2", **filters)
return result
def create(robot: str, operation: str, payload: str, session: Session = None):
"""
Create an activity
Args:
robot (str): robot uuid
operation (str): operation to create an activity ('DownloadFile', 'UploadFile', 'ExecuteFile')
payload (str): filename, including path
session (Session): Used only if want to use a different session instead default
Returns:
result (json): returns the result of the request as json
Example:
>>> create({ "robot": "f8b85a7a-4540-4d46-a2ed-00e6134ee84a", "operation": "DownloadFile", "payload": "test.txt" })
"""
model = {
"robot": robot,
"operation": operation,
"payload": payload
}
result = _create(RESOURCE, payload=model, session=session)
return result
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/srs/activities.py
| 0.913534 | 0.219338 |
activities.py
|
pypi
|
from typing import Dict
from ...client.requests import fetch
from requests import get
from sara_sdk.common.session import Session
from ...utils.rest import retrieve as _retrieve, list as _list, list_paginated as _list_paginated, update as _update, delete as _delete, create as _create
RESOURCE = "telemetry"
def listDiagnostics(robotId: str,session: Session = None, **filters):
"""
List robot telemetry diagnostics
Args:
robotId (str): robotId to return telemetry diagnostics
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of telemetry
Returns:
result (json): returns the result of the request as json
Example:
>>> list("f8b85a7a-4540-4d46-a2ed-00e6134ee84a", page=1,page_size=10)
"""
result = _list(resource="{}/{}/diagnostics".format(RESOURCE, robotId), session=session, **filters)
return result
def isOnline(robotId: str,session: Session = None, **filters):
"""
Check robot connection status
Args:
robotId (str): robotId to return connection status
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of telemetry
Returns:
result (json): returns the result of the request as json
Example:
>>> list("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
result = fetch(path="{}/{}/isonline".format(RESOURCE, robotId), method=get, session=session, **filters)
return result.content.decode("utf-8")
def connection(robotId: str, middleware: str, session: Session = None, **filters):
"""
Check middleware status
Args:
robotId (str): robotId to check middleware status
middleware (str): middleware to check ('mission-bridge','mqtt-bridge','webrtc-signaling-proxy')
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of telemetry
Returns:
result (json): returns the result of the request as json
Example:
>>> list("f8b85a7a-4540-4d46-a2ed-00e6134ee84a")
"""
filters["middleware"] = middleware
result = _list(resource="{}/{}/connection".format(RESOURCE, robotId), session=session, **filters)
return result
def srsFeedback(feedbackId: str,session: Session = None, **filters):
"""
Retrieve last robot feedback
Args:
feedbackId (str): feedbackId to return a telemetry
session (Session): Used only if want to use a different session instead default
filters (Any): filters to pass to filter the list of telemetry
Returns:
result (json): returns the result of the request as json
Example:
>>> list("U_a21685f7-aca5-4ac7-b93f-5ec711478858")
"""
result = _list(resource="{}/srs-feedback/{}".format(RESOURCE, feedbackId), session=session, **filters)
return result
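# Illustrative usage sketch (not part of the original SDK): checks connectivity before
# pulling diagnostics. isOnline() returns the decoded response body as a string, so the
# exact value compared here ("true") is an assumption.
def _example_robot_health(robot_uuid="f8b85a7a-4540-4d46-a2ed-00e6134ee84a"):
    if isOnline(robot_uuid) == "true":  # assumed response body
        return listDiagnostics(robot_uuid, page=1, page_size=10)
    return connection(robot_uuid, "mqtt-bridge")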
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/core/telemetry/telemetry.py
| 0.906269 | 0.15084 |
telemetry.py
|
pypi
|
from requests import get, post, delete as _delete, patch
from ..client.requests import fetch
def retrieve(resource, id, session=None,version="v1", **kwargs):
"""
Retrieve function to request a GET passing id to the API
Args:
resource (string): the route to access on the api
id (string): uuid to the resource
session (Session): session where to get the authorization (only needed if you don't want
to use the DEFAULT_SESSION)
kwargs (any): will be used as query to the route
Returns:
Json: The json of the content of the response sent by the api
Example:
>>> retrieve(resource="iam/robots", "09594aae-7e88-4c8b-b4e5-095c6e785509")
"""
path = "{endpoint}/{id}".format(endpoint=resource, id=id)
json = fetch(method=get, path=path, query=kwargs, version=version, session=session).json()
return json
def list_paginated(resource, session=None, version="v1", **kwargs):
"""
Paginated list function that requests successive GETs and yields one page of results at a time
Args:
resource (string): the route to access on the api
session (Session): session where to get the authorization (only needed if you don't want
to use the DEFAULT_SESSION)
kwargs (any): will be used as query to the route
Returns:
Json: yields the "results" list of each page sent by the api
Example:
>>> list(resource="iam/robots")
"""
page = kwargs.get("page", 1)
while True:
json = fetch(method=get, path=resource, query={
**kwargs, "page": page}, session=session, version=version).json()
yield json.get("results", [])
if json.get("next") is None:
break
page += 1
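# Illustrative usage sketch (not part of the original SDK): list_paginated is a
# generator, so callers can stream pages until the API stops reporting a "next" page.
def _example_iterate_robots(session=None):
    robots = []
    for page in list_paginated("iam/robots", session=session, page_size=50):
        robots.extend(page)  # each yielded value is the "results" list of one page
    return robots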
def list(resource, session=None, version="v1", **kwargs):
"""
List function to request a GET to receive a list of objects
Args:
resource (string): the route to access on the api
session (Session): session where to get the authorization (only needed if you don't want
to use the DEFAULT_SESSION)
kwargs (any): will be used as query to the route
Returns:
Json: The json of the content of the response sent by the api
Example:
>>> list(resource="iam/robots")
"""
json = fetch(method=get, path=resource,
query=kwargs, session=session, version=version).json()
return json
def create(resource, payload, session=None, version="v1", **kwargs):
"""
Create function to request a POST to create a new instance of resource
Args:
resource (string): the route to access on the api
payload (Dict): A dict with data to use to create the new resource
session (Session): session where to get the authorization (only needed if you don't want
to use the DEFAULT_SESSION)
kwargs (any): will be used as query to the route
Returns:
Json: The json of the content of the response sended by the api
Example:
>>> create(resource="iam/robots", payload=payload)
"""
json = fetch(method=post, path=resource, session=session,
query=kwargs, payload=payload, version=version).json()
return json
def delete(resource, id, session=None, version="v1"):
"""
Delete function to request a DELETE passing id to the API
Args:
resource (string): the route to access on the api
id (string): uuid to the resource
session (Session): session where to get the authorization from (only needed if you
don't want to use the DEFAULT_SESSION)
Returns:
Json: The json content of the response sent by the API
Example:
>>> delete("iam/robots", "09594aae-7e88-4c8b-b4e5-095c6e785509")
"""
path = "{endpoint}/{id}".format(endpoint=resource, id=id)
json = fetch(method=_delete, path=path,
session=session, version=version).json()
return json
def update(resource, id, payload, session=None, version="v1"):
"""
Update function to request a PATCH passing id to the API
Args:
resource (string): the route to access on the api
id (string): uuid to the resource
session (Session): session where to get the authorization from (only needed if you
don't want to use the DEFAULT_SESSION)
payload (Dict): A dict with data to use to update the resource
Returns:
Json: The json content of the response sent by the API
Example:
>>> update("iam/robots", "09594aae-7e88-4c8b-b4e5-095c6e785509", payload=payload)
"""
path = "{endpoint}/{id}".format(endpoint=resource, id=id)
json = fetch(method=patch, path=path,
payload=payload, session=session, version=version).json()
return json
def attach(resource, type, this, that, session=None, version="v1"):
"""
Attach function to attach something (this) to other entity (that)
Args:
resource (string): the route to access on the api
type (string): type of entity that is going to be attached
this (string): the uuid of the object that is going to be attached to the entity
that (string): uuid of the entity
session (Session): session where to get the authorization from (only needed if you
don't want to use the DEFAULT_SESSION)
Returns:
Json: The json content of the response sent by the API
Example:
>>> attach("iam/groups", "user", "9871a24a-89ff-4ba1-a350-458244e8244d", "00ea03f1-5b2e-4787-8aa6-745961c6d506")
"""
body = {
type: this
}
# Workaround to userGroup route
if type == "user" and resource == "iam/groups":
type = "UserGroup"
if type == "actions" and resource == "iam/policies":
type = "Permissions"
path = "{endpoint}/{that}/attach{type}".format(
endpoint=resource, that=that, type=type.capitalize())
if resource == "iam/clients":
body = {
type: this,
'client': that
}
path = "{endpoint}/attach{type}".format(
endpoint=resource, type=type.capitalize())
json = fetch(method=post, path=path, payload=body,
session=session, version=version).json()
return json
def detach(resource, type, this, that, session=None, version="v1"):
"""
Detach function to detach something (this) from another entity (that)
Args:
resource (string): the route to access on the api
type (string): type of entity that is going to be detached
this (string): the uuid of the object that is going to be detached from the entity
that (string): uuid of the entity
session (Session): session where to get the authorization from (only needed if you
don't want to use the DEFAULT_SESSION)
Returns:
Json: The json content of the response sent by the API
Example:
>>> detach("iam/groups", "user", "9871a24a-89ff-4ba1-a350-458244e8244d", "00ea03f1-5b2e-4787-8aa6-745961c6d506")
"""
body = {
type: this
}
# Workaround to userGroup route
if type == "user" and resource == "iam/groups":
type = "UserGroup"
if type == "actions" and resource == "iam/policies":
type = "Action"
path = "{endpoint}/{that}/attach{type}".format(
endpoint=resource, that=that, type=type.capitalize())
if resource == "iam/clients":
body = {
type: this,
'client': that
}
path = "{endpoint}/attach{type}".format(
endpoint=resource, type=type.capitalize())
json = fetch(method=_delete, path=path,
payload=body, session=session, version="v1").json()
return json
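# Minimal usage sketch (not part of the original module). The resource names
# and the uuid are the placeholders used in the docstrings above, and a valid
# DEFAULT_SESSION (or an explicit Session) is assumed to be configured.
if __name__ == "__main__":
    robots = list("iam/robots")  # GET iam/robots
    robot = retrieve("iam/robots", "09594aae-7e88-4c8b-b4e5-095c6e785509")
    for page in list_paginated("iam/robots"):  # iterate over all result pages
        print(len(page), "objects in this page")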
|
/sara-sdk-1.1.0.tar.gz/sara-sdk-1.1.0/sara_sdk/utils/rest.py
| 0.834407 | 0.183612 |
rest.py
|
pypi
|
import math
import random
import time
from six.moves import range
class DiscreteRandomVariable(object):
""" Simulates a Discrete Random Variable """
def __init__(self, values):
""" <values>: a list of (v, p) pairs where p is the relative
probability for the value v """
self.dist = list() # the scaled distribution of the variable
c = 0.0
for v, p in values:
self.dist.append((v, c))
c += p
self.ub = c # the upperbound of self.dist
# we'll use binary search, which is simpler with a list of
# length 2**n -- thus we'll fake the center of the list:
self.vc = 2**int(math.ceil(math.log(len(self.dist), 2))) // 2  # integer division keeps indices ints on Python 3
def pick(self):
""" picks a value accoriding to the given density """
v = random.uniform(0, self.ub)
d = self.dist
c = self.vc - 1
s = self.vc
while True:
s = s // 2
if s == 0:
break
if v <= d[c][1]:
c -= s
else:
c += s
# we only need this logic when increasing c
while len(d) <= c:
s = s // 2
c -= s
if s == 0:
break
# we may have converged from the left, instead of the right
if c == len(d) or v <= d[c][1]:
c -= 1
return d[c][0]
class DiscreteRandomVariable_alt(object):
""" Simulates a Discrete Random Variable alternatively"""
def __init__(self, values):
""" <values>: a list of (v, p) pairs where p is the relative
probability for the value v """
if len(values) > 0:
m = max([p for v, p in values])
assert(m != 0)
values = [(v, p / m) for v, p in values]
self.values = values
def pick(self):
while True:
idx = random.randint(0, len(self.values) - 1)
v, p = self.values[idx]
if p >= random.uniform(0, 1):
return v
if __name__ == '__main__':
# Curiously enough, the complexity of both algorithms seems to
# be the same. Is random.randint O(log(n)) !?
ne = 100000
nt = 10000
values = [(random.uniform(0, 10), random.uniform(0, 10))
for i in range(ne)]
drv = DiscreteRandomVariable(values)
drva = DiscreteRandomVariable_alt(values)
print("Starting..")
bt = time.time()
for i in range(nt):
drv.pick()
print((time.time() - bt) / float(nt))
bt = time.time()
for i in range(nt):
drva.pick()
print((time.time() - bt) / float(nt))
# vim: et:sta:bs=2:sw=4:
|
/sarah-0.1.4.tar.gz/sarah-0.1.4/src/drv.py
| 0.663451 | 0.513059 |
drv.py
|
pypi
|
# Documentation for AMR Context Extraction
This document provides an overview of how the proposed tool can be installed and used. It attempts to document all necessary details to set up the tool, and provides some run guides.
## Overview
This tool can be used to extract the neighborhood of the target Antimicrobial Resistance (AMR) genes from the assembly graph.
It can also be used to simulate sequence reads from some reference genomes (through ART), run MetaSPAdes to assemble the simulated reads and then reconstruct the neighborhood of the AMR genes.
## Installation
### Step I: install the dependencies
Our tool relies on several dependencies, including Python, Prokka, RGI, BLAST, Bandage, MetaSPAdes (in case the assembly graph has not already been generated) and ART (in case of simulating reads).
The most straightforward way to install this tool's dependencies is using bioconda.
#### Cloning the tool repository
`git clone https://github.com/beiko-lab/AMR_context`
Now, move to AMR_context directory.
#### Installing bioconda
Make sure [bioconda](https://bioconda.github.io/user/install.html) has been installed and the channels are set properly as follows.
```
conda config --add channels defaults
conda config --add channels bioconda
conda config --add channels conda-forge
```
#### Creating a new conda environment
It's recommended to set up a conda environment first, so packages and
versions don't get mixed up with system versions.
`conda create -n amr_context python=3.6.10`
#### Activating the conda environment
`conda activate amr_context`
#### Installing BLAST
The installation instructions are available for [Linux/Unix/MacOS](https://www.ncbi.nlm.nih.gov/books/NBK52640/) and [Windows](https://www.ncbi.nlm.nih.gov/books/NBK52637/). For more details, please refer to [Blast+ user manual](https://www.ncbi.nlm.nih.gov/books/NBK279690/).
#### Installing dependencies through conda
```
conda install pip
conda install rgi=5.1.1
conda install prokka
conda install art
conda install bandage
```
Links to their repositories can be found here:
- prokka: https://github.com/tseemann/prokka
- RGI: https://github.com/arpcard/rgi
- Bandage: https://rrwick.github.io/Bandage/
- ART: https://www.niehs.nih.gov/research/resources/software/biostatistics/art/
Note: In case prokka can't be installed through bioconda, I suggest using the docker
container [staphb/prokka](https://hub.docker.com/r/staphb/prokka) by running the following command:
`docker pull staphb/prokka:latest`. Please note that the PROKKA_COMMAND_PREFIX variable in params.py needs to be updated with an appropriate value, which is probably an empty string unless prokka is run through docker.
#### Installing python requirements
Note: Make sure that you are in the root directory of this tool (AMR_context).
`pip install -r requirements.txt`
### Step II: Testing
#### Updating params.py
Make sure to update the following parameter in code/params.py.
- PROKKA_COMMAND_PREFIX (it probably should either be an empty string or the command from docker if you installed prokka via docker)
Note: you might also need to update the following parameters in code/params.py to provide the path to Bandage, ART and SPAdes, in case they have not been installed via conda.
- BANDAGE_PATH (the path to access bandage executable file)
- ART_PATH (the path to access art_illumina directory)
- SPADES_PATH (the path to spades.py)
#### Running the test code
To run the code, make sure you are in the created conda environment.
To activate it, run:
`conda activate amr_context`
and then run the code by:
`python code/full_pipeline.py`
Note: You don't need to install and set the parameters for ART and SPAdes if the assembly graph is provided as an input.
#### Expected results
All results will be available in test directory.
Here is the list of important directories and files that can be seen there and a short description of their content.
- metagenome.fasta: a file containing all ref genomes.
- metagenome_1.fq and metagenome_2.fq: reads simulated by ART from metagenome.fasta
- AMR_info: if reference genomes are available, this directory contains the list of identified AMR sequences, their extracted neighborhood and annotation.
- AMR_info/sequences/: The sequence of each identified AMR is stored here, with a name similar to its original name (the file name is generated by calling `code/utils.py::restricted_amr_name_from_modified_name(amr_name_from_title(amr_original_name))`).
- AMR_info/ref_annotations/: the annotation details are stored here.
- AMR_info/AMR_ref_neighborhood.fasta: all extracted neighborhood sequences are stored in this file.
- AMR_info/ref_neighborhood_annotations.csv: the summary of annotation info is stored in this csv file.
- spade_output: This directory contains metaSPAdes assembly outputs. The most important files are `assembly_graph_with_scaffolds.gfa` and `contigs.fasta`.
- sequences_info/sequences_info_{params.seq_length}/: This directory stores the information of extracted neighborhood sequences from the assembly graph.
- sequences_info/sequences_info_{params.seq_length}/sequences/: the extracted sequences in the neighborhood of each AMR are stored in a file like `ng_sequences_{AMR_NAME}_{params.seq_length}_{DATE}.txt`.
For each extracted sequence, the first line denotes the corresponding path, where the nodes representing the AMR sequence are placed in '[]'.
The next line denotes the extracted sequence where the AMR sequence is in lower case letters and the neighborhood is in upper case letters.
- sequences_info/sequences_info_{params.seq_length}/paths_info/: The information of nodes representing the AMR neighborhood including their name, the part of the sequence represented by each node (start position and end position) as well as their coverage is stored in a file like `ng_sequences_{AMR_NAME}_{params.seq_length}_{DATE}.csv`
- annotations/annotations_{params.seq_length}: The annotation details are stored in this directory.
- annotations/annotations_{params.seq_length}/annotation_{AMR_NAME}_{params.seq_length}: this directory contains all annotation details.
- prokka_dir_extracted{NUM}_{DATE}: it contains the output of prokka for annotation of a sequence extracted from the neighborhood of the target AMR gene in the assembly graph.
- rgi_dir: contains RGI annotation details for all extracted neighborhood sequences of the target AMR gene.
- annotation_detail_{AMR_NAME}.csv: the list of annotations of all extracted sequences for an AMR gene
- trimmed_annotation_info_{AMR_NAME}.csv: the list of unique annotations of all extracted sequences for an AMR gene
- coverage_annotation_{GENE_COVERAGE_THRESHOLD}_{AMR_NAME}.csv:
the list of the annotations in which the gene coverage difference from the AMR gene coverage is less than GENE_COVERAGE_THRESHOLD value.
- vis_annotation.csv: this csv file contains the annotations of extracted sequences from both ref genomes and the graph and is used for visualization
- gene_comparison_<AMR_NAME>.png: An image visualizing annotations
- annotations/annotations_{params.seq_length}/not_found_annotation_amrs_in_graph.txt: the list of all AMRs available in the reference genomes but not identified in the graph.
- evaluation/evaluation_{params.seq_length}/summaryMetrics_up_down_{GENE_COVERAGE_THRESHOLD}_{DATE}.csv: This csv file contains the calculated precision and sensitivity for all AMRs comparing the sequences extracted from the graph with those of the ref genomes.
## Exploring the code
### Optional parameters to set
The list of all parameters that can be set in this tool is provided in code/params.py, and it includes the following parameters that can also be set via the command line (see the example invocation after this list):
- -h, --help
> show help message and exit
- --task TASK or [FIRST_TASK, LAST_TASK]
> which task would you like to do? For the entire pipeline choose 0; otherwise either provide a number representing one of the following tasks or two numbers to denote the start and end tasks (all tasks in between will be run too). Here is the list: metagenome_creation = 1, read_simulation = 2, assembly = 3, graph_neighborhood = 4, sequence_neighborhood = 5, neighborhood_annotation = 6, neighborhood_evaluation = 7
- --amr_file AMR_FILE, -A AMR_FILE
> the path of the files containing the AMR genes sequence
- --ref_genome_files REF_GENOME_FILES [REF_GENOME_FILES ...]
> the address of reference genomes; it can be a file, a list of files or a directory
- --main_dir MAIN_DIR, -M MAIN_DIR
> the main dir to store all results
- --read_length READ_LENGTH
> the length of simulated reads can be either 150 or 250
- --spades_thread_num SPADES_THREAD_NUM
> the number of threads used for MetaSPAdes
- --spades_output_dir SPADES_OUTPUT_DIR
> the output dir to store MetaSPAdes results
- --graph_distance GRAPH_DISTANCE, -D GRAPH_DISTANCE
> the maximum distance of neighborhood nodes to be extracted from the AMR gene
- --seq_length SEQ_LENGTH, -L SEQ_LENGTH
> the length of AMR gene's neighbourhood to be extracted
- --ng_seq_files NEIGHBORHOOD_SEQ_FILE
> the address of the files (directory) containing all extracted neighborhood sequences in assembly graph
- --ng_path_info_files NG_PATH_INFO_FILES
> the address of the files (directory) containing all path information for extracted neighborhood sequences in assembly graph
- --gfa_file GFA_FILE
> the address of the file for assembly graph
- --contig_file CONTIG_FILE
> the address of the file containing contigs after assembly
- --genome_amr_files GENOME_AMR_FILES [GENOME_AMR_FILES ...]
> the address of the files containing genome after AMR insertion
- --reads [READs]
> the address of the files containing paired-end reads
- --spades_error_correction SPADES_ERROR_CORRECTION
> Whether to turn on or off error correction in MetaSPAdes
- --use_RGI USE_RGI
> Whether to contribute RGI annotation in Prokka result
- --RGI_include_loose RGI_INCLUDE_LOOSE
> Whether to include loose cases in RGI result
- --find_amr_genes {BOOLEAN}
> Whether to assume the AMR genes (in metagenome) are known or to look for them in assembly graph
- --amr_identity_threshold AMR_IDENTITY_THRESHOLD
> the threshold used for amr alignment: a hit is returned if identity/coverage >= threshold
- --path_node_threshold PATH_NODE_THRESHOLD
> the threshold used for recursive pre_path and post_path search as long as the length of the path is less than this threshold
- --path_seq_len_percent_threshold PATH_SEQ_LEN_PERCENT_THR
> the threshold used for recursive pre_seq and post_seq until we have this percentage of the required length after which we just extract from the longest neighbor
- --ref_genomes_available {BOOLEAN}
> Whether we have access to reference genome(s)
- --multi_processor {BOOLEAN}
> Whether to use multi processors for parallel programming
- --core_num CORE_NUM
> the number of cores used in case of parallel programming
- --coverage_thr COVERAGE_THRESHOLD
> coverage threshold to check if an annotated gene is truly AMR neighbor or just a false positive
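A hypothetical example invocation that extracts the AMR neighborhood from an existing assembly graph (all paths are placeholders; only flags described above are used, and the exact values depend on your data):
```
python code/full_pipeline.py --task 4 --gfa_file <assembly_graph_with_scaffolds.gfa> \
    --amr_file <AMR_genes.fasta> --main_dir <output_dir> --seq_length 1000
```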
### Python files
#### 1- full_pipeline.py
This is the core file to run all the steps available in our tool, including concatenating ref genomes into a single file, simulating reads, assembling reads, extracting the AMR neighborhood, annotating AMR neighborhood sequences and evaluation (in case ref genomes are available).
To run, make sure that parameters are set in code/params.py:
`python code/full_pipeline.py`
#### 2- extract_neighborhood.py
This is the main file to extract the neighborhood of an AMR gene from an assembly graph.
To run:
```
python code/extract_neighborhood.py --amr/-A <AMR gene file path in FASTA format>
--gfa/-G <GFA assembly graph>
--length/-L <length of the linear sequence around AMR gene to be extracted (default = 1000)>
--main_dir <the output directory to store the results>
```
#### 3- find_amrs_in_sample.py
This code is used to find all AMRs available in a metagenome sample, extract their neighborhood sequences and annotate them.
To run:
```
python code/find_amrs_in_sample.py --db <metagenome file path>
--seq <fasta file containing all AMR sequences>
```
Note: it reads 3 parameters from params.py:
- params.PROKKA_COMMAND_PREFIX
- params.use_RGI
- params.RGI_include_loose
#### 4- amr_neighborhood_in_contigs.py
This code is used to find the neighborhood of AMRs in a contig file, annotate them, compare them with those of the ref genomes and calculate the sensitivity and precision.
To run:
`python code/amr_neighborhood_in_contigs.py`
NOTE: It reads the required parameters from code/params.py; the most important parameters that need to be set correctly there are:
- params.seq_length
- params.contig_file
- params.amr_identity_threshold
- params.amr_files
- params.ref_ng_annotations_file
- params.main_dir
NOTE: The results are available in the following directory:
`params.main_dir+'contigs_output_'+<params.seq_length>`
#### 5- annotation_visualization.py
This file is used to visualize sequences annotations.
To run:
```
python annotation_visualization.py --csvfile <annotation file path>
--title <the image title> --output <the output image name>
```
|
/sarand-1.0.1.tar.gz/sarand-1.0.1/README.md
| 0.506836 | 0.971699 |
README.md
|
pypi
|
import collections
from cycler import cycler
from .basic_rng import RandomGenerator
class StyleRandomizer:
"""
Gets random matplotlib styles
"""
brights = 'purple', '#cccccc', "white", "magenta", 'yellow', 'lime', "cyan", "orange", "red", "xkcd:lightblue"
backgrounds = "#ffdddd", '#ddffdd', '#ddddff'
darks = 'black', '#800000', '#008000', '#000080'
grid_axis = 'x', 'y'
markers = '*', ' '
dashes = '-.', '-', '--'
padding_range = -8, 8
figsize_range = 4, 6
font_size = 'large', 'small'
def __init__(self, seed=0, **kwargs):
self._rng = RandomGenerator(seed)
self._usages = collections.defaultdict(int)
for k, v in kwargs.items():
assert hasattr(self, k)
setattr(self, k, v)
def pick(self, items):
"""
Pick a random item from the given list, prioritizing items that have been picked the fewest times so far
"""
min_usage = min(self._usages[item] for item in items)
min_used_items = [item for item in items if self._usages[item] == min_usage]
item = self._rng.choose(min_used_items)
self._usages[item] += 1
return item
def pick_range(self, bottom, top, divisions=1000):
return self._rng.rand(divisions + 1) / divisions * (top - bottom) + bottom
def get_random_style(self):
"""
Get a random style pack
"""
colors = cycler('color', list({self.pick(self.brights) for _ in range(10)}))
updated_rc_params = {
'axes.edgecolor' : self.pick(self.brights),
'axes.facecolor' : self.pick(self.backgrounds),
'axes.grid' : True,
'axes.grid.axis' : self.pick(self.grid_axis),
'axes.grid.which' : 'both',
'axes.labelcolor' : self.pick(self.darks),
'axes.labelsize' : self.pick(self.font_size),
'axes.labelpad' : self.pick_range(*self.padding_range),
'axes.linewidth' : 4,
'axes.labelweight' : 'bold',
'axes.spines.bottom': True,
'axes.spines.left': True,
'axes.spines.right': False,
'axes.spines.top': True,
'axes.titlepad' : self.pick_range(*self.padding_range),
'axes.titlesize' : self.pick(self.font_size),
'axes.xmargin' : 0.2,
'axes.ymargin' : 0.2,
'axes.prop_cycle' : colors,
'axes.unicode_minus' : False,
'figure.figsize' : [self.pick_range(*self.figsize_range) for _ in range(2)],
'font.size' : 8,
'font.stretch' : 'ultra-expanded',
'grid.color' : self.pick(self.darks),
'grid.linestyle' : self.pick(self.dashes),
'grid.linewidth' : 2,
'legend.edgecolor' : self.pick(self.darks),
'legend.facecolor' : self.pick(self.backgrounds),
'legend.handleheight' : self.pick_range(*self.padding_range),
'legend.shadow' : True,
'legend.markerscale' : 2,
'lines.marker' : self.pick(self.markers),
'xtick.color' : self.pick(self.brights),
'ytick.color' : self.pick(self.brights),
'patch.facecolor' : self.pick(self.backgrounds),
}
return updated_rc_params
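# Minimal usage sketch (not part of the original module): applies a randomly
# generated style pack to matplotlib's global rcParams. matplotlib itself is
# assumed to be installed; it is not imported by this module.
if __name__ == "__main__":
    import matplotlib
    randomizer = StyleRandomizer(seed=42)
    matplotlib.rcParams.update(randomizer.get_random_style())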
|
/sarcastic-color-scheme-0.2.0.tar.gz/sarcastic-color-scheme-0.2.0/sarcastic_color_scheme/get_random_style.py
| 0.76207 | 0.30319 |
get_random_style.py
|
pypi
|
# **SarcGraph**
[](https://www.python.org/)  [](https://github.com/Sarc-Graph/sarcgraph#license)
[](https://github.com/Sarc-Graph/sarcgraph/actions/workflows/black_flake8.yml) [](https://codecov.io/gh/Sarc-Graph/sarcgraph) [](https://sarc-graph.readthedocs.io/en/latest/?badge=latest)
## **Table of Contents**
* [Project Summary](#summary)
* [Installation Instructions](#install)
* [Contents](#contents)
* [Tutorial](#tutorial) - [Notebooks](https://github.com/Sarc-Graph/sarcgraph/tree/main/tutorials)
* [Validation](#validation)
* [References to Related Work](#references)
* [Contact Information](#contact)
* [Acknowledgements](#acknowledge)
## **Project Summary** <a name="summary"></a>
**SarcGraph** is a tool for automatic detection, tracking and analysis of
z-discs and sarcomeres in movies of beating *human induced pluripotent stem
cell-derived cardiomyocytes (hiPSC-CMs)*.
<br />
<center><img src="figures/intro.png" width=30%></center>
<br />
SarcGraph was initially introduced in [Sarc-Graph: Automated segmentation, tracking, and analysis of sarcomeres in hiPSC-derived cardiomyocytes](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1009443).
This package is created to make SarcGraph more accessible to the broader
research community.
**For more information visit [SarcGraph documentation](https://sarc-graph.readthedocs.io/en/latest/).**
## **Installation Instructions** <a name="install"></a>
### **Get a copy of the [SarcGraph repository](https://github.com/Sarc-Graph/sarcgraph) on your local machine**
You can do this by clicking the green ``<> code`` button and selecting ``Download Zip`` or by running the following command in terminal:
```bash
git clone https://github.com/Sarc-Graph/sarcgraph.git
```
### **Create and activate a conda virtual environment**
1. Install [Anaconda](https://docs.anaconda.com/anaconda/install/) on your local machine.
2. Open a terminal and move to the directory of the ``sarcgraph`` repository. Then, type the following command in the terminal to create a virtual environment and install the required packages:
```bash
cd sarcgraph
conda env create --file=environment.yml
```
3. Activate your virtual environment.
```bash
conda activate sarcgraph
```
### **Install SarcGraph**
SarcGraph can be installed using ``pip``:
```bash
pip install sarcgraph
```
## **Contents** <a name="contents"></a>
```bash
|___ sarcgraph
| |___ docs/
| |___ figures/
| |___ *.png
| |___ samples/
| |___ sarcgraph/
| |___ __init__.py
| |___ sg.py
| |___ sg_tools.py
| |___ tests/
| |___ tutorials/
| |___ *.ipynb
```
## **Tutorial** <a name="tutorial"></a>
This GitHub repository contains a folder called ``tutorials`` that contains demos to extensively show how this package can be used to analyze videos or images of hiPSC-CMs.
### **Package Contents** <a name="whats-in-package"></a>
The package contains two separate modules: `sg` for sarcomere detection and tracking and `sg_tools` for running further analysis and visualizations.
#### **sarcgraph.sg** <a name="sarcgraph.py"></a>
`sarcgraph.sg` module takes a video/image file as input (more details in tutorials). This module then processes the input file to detect and track z-discs and sarcomeres through running 3 tasks:
- Z-disc Segmentation,
- Z-disc Tracking,
- Sarcomere Detection.
Here is a list of functions developed for each task:
- `zdisc_segmentation`: Detect z-discs in each frame of the input video/image and saves the following information into a pandas `DataFrame`:
> - `frame`: (frame number)
> - `x` and `y`: (X and Y position of the center of a z-disc)
> - `p1_x`, `p1_y` and `p2_x`, `p2_y`: (X and Y position of both ends of a z-disc)
- `zdisc_tracking`: Tracks detected z-discs in the input video over all frames and adds the following information to the pandas `DataFrame`:
> - `particle`: (z-disc id)
> - `freq`: (number of frames in which a z-disc is tracked)
- `sarcomere_detection`: Detects sarcomeres in the input video/image using tracked z-discs `DataFrame` and saves the following information into a new pandas `DataFrame`:
> - `frame`: (frame number)
> - `sarc_id`: (sarcomere id)
> - `x` and `y`: (X and Y position of the center of a sarcomere)
> - `length`: (sarcomere length)
> - `width`: (sarcomere width)
> - `angle`: (sarcomere angle)
> - `zdiscs`: (ids of the two z-discs forming a sarcomere)
#### **sarcgraph.sg_tools** <a name="sarcgraph_tools.py"></a>
`sarcgraph.sg_tools` module consists of 3 subclasses:
- `TimeSeries`: Process timeseries of detected and tracked sarcomeres
> - `sarcomeres_gpr()`: Applies Gaussian Process Regression (GPR) on each recovered timeseries characteristic of all detected sarcomeres to reduce the noise and fill in the missing data
- `Analysis`: Extract more information from detected sarcomeres characteristics timeseries
> - `compute_F_J`: Computes the average deformation gradient (F) and its jacobian (J)
> - `compute_OOP`: Computes the Orientation Order Parameter (OOP)
> - `compute_metrics`: Computes {OOP, C_iso, C_OOP, s_til, s_avg} as defined in the [SarcGraph paper](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1009443)
> - `compute_ts_params`: Computes timeseries constants (contraction time, relaxation time, flat time, period, offset)
> - `create_spatial_graph`: Generates a spatial graph of tracked z-discs where edges indicate sarcomeres and edge weights indicate the ratio of the frames in which each sarcomere is detected
- `Visualization`: Visualize detected sarcomeres information
> - `zdiscs_and_sarcs`: Visualizes detected z-discs and sarcomeres in the chosen frame
> - `contraction`: Visualizes detected sarcomeres in every frame as a gif file
> - `normalized_sarcs_length`: Plots normalized length of all detected sarcomeres vs frame number
> - `OOP`: Plots recovered Orientational Order Parameter
> - `F`: Plots recovered deformation gradient
> - `J`: Plots recovered deformation jacobian
> - `F_eigenval_animation`: Visualizes the eigenvalues of F vs frame number
> - `timeseries_params`: Visualizes time series parameters
> - `dendrogram`: Clusters timeseries and plots as a dendrogram of the clusters
> - `spatial_graph`: Visualizes the spatial graph
> - `tracked_vs_untracked`: Visualizes metrics that compare the effect of tracking sarcomeres in a video vs only detecting sarcomeres in each frame without tracking
To use this module an object of the class `SarcGraphTools` should be created by setting the `input_dir` to the folder that contains the output saved from running full sarcomere detection and timeseries processing on the input data.
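The snippet below is a minimal, *hypothetical* usage sketch assembled only from the names mentioned in this README; the exact import paths, signatures and attribute names are assumptions, so please treat the notebooks in `tutorials` as the authoritative reference.
```python
from sarcgraph.sg import SarcGraph            # assumed detection/tracking entry point
from sarcgraph.sg_tools import SarcGraphTools

sg = SarcGraph()                                             # assumed constructor
sarcomeres = sg.sarcomere_detection("samples/sample_0.avi")  # placeholder input movie

tools = SarcGraphTools(input_dir="./sarcgraph-output")  # folder with previously saved outputs
tools.time_series.sarcomeres_gpr()       # denoise/complete the recovered timeseries (assumed access pattern)
tools.analysis.compute_metrics()         # OOP, C_iso, C_OOP, s_til, s_avg
tools.visualization.zdiscs_and_sarcs(0)  # plot z-discs and sarcomeres for frame 0
```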
## Validation <a name="validation"></a>
To validate our methods and ensure correct implementation, we generated challenging synthetic videos with characteristics similar to beating hiPSC-CMs. We used these videos to evaluate the sarcomere detection algorithm by comparing recovered metrics to their known ground truth. The figure shows this process for one of many tested validation examples.
<br />
<center><img src="figures/validation.png" width=75%></center>
<br />
## References to Related Work <a name="references"></a>
* Zhao, B., Zhang, K., Chen, C. S., & Lejeune, E. (2021). Sarc-graph: Automated segmentation, tracking, and analysis of sarcomeres in hiPSC-derived cardiomyocytes. PLoS Computational Biology, 17(10), e1009443.
* Allan, D. B., Caswell, T., Keim, N. C., Wel, C. M. van der, & Verweij, R. W. (2023). Soft-matter/trackpy: v0.6.1 (Version v0.6.1). Zenodo. https://doi.org/10.5281/zenodo.7670439
* Toepfer, C. N., Sharma, A., Cicconet, M., Garfinkel, A. C., Mücke, M., Neyazi, M., Willcox, J. A., Agarwal, R., Schmid, M., Rao, J., & others. (2019). SarcTrack: An adaptable software tool for efficient large-scale analysis of sarcomere function in hiPSC-cardiomyocytes. Circulation Research, 124(8), 1172–1183.
* Morris, T. A., Naik, J., Fibben, K. S., Kong, X., Kiyono, T., Yokomori, K., & Grosberg, A. (2020). Striated myocyte structural integrity: Automated analysis of sarcomeric z-discs. PLoS Computational Biology, 16(3), e1007676.
* Pasqualin, C., Gannier, F., Yu, A., Malécot, C. O., Bredeloux, P., & Maupoil, V. (2016). SarcOptiM for ImageJ: High-frequency online sarcomere length computing on stimulated cardiomyocytes. American Journal of Physiology-Cell Physiology, 311(2), C277–C283.
* Ribeiro, A. J. S., Schwab, O., Mandegar, M. A., Ang, Y.-S., Conklin, B. R., Srivastava, D., & Pruitt, B. L. (2017). Multi-imaging method to assay the contractile mechanical output of micropatterned human iPSC-derived cardiac myocytes. Circulation Research, 120(10), 1572–1583. https://doi.org/10.1161/CIRCRESAHA.116.310363
## Contact Information <a name="contact"></a>
For information about this software, please get in touch with [Saeed Mohammadzadeh](mailto:[email protected]) or [Emma Lejeune](mailto:[email protected]).
## Acknowledgements <a name="acknowledge"></a>
|
/sarcgraph-0.1.1.tar.gz/sarcgraph-0.1.1/README.md
| 0.493409 | 0.953405 |
README.md
|
pypi
|
import datetime
import numpy
from .redis import to_redis, unpack
class Scan:
def __init__(self, redis, key, meta):
self.redis = redis
self.key = key
self.meta = meta
self.rid = '.'.join(key.rsplit(':', 2)[-2:])
dt = datetime.datetime.fromtimestamp(meta['start_ts'])
self.meta['start_dt'] = dt
size = meta['total_nb_points']
dtypes = []
ref_dtypes = []
bitmap_dtypes = []
for i, col in enumerate(meta['columns']):
name = col['name']
is_ref = col.get('value_ref_enabled', False)
dtype = col['dtype'].replace('str', 'object')
if col['shape'] and col['shape'][0] > 1:
dtype = 'object'
if is_ref:
ref_dtypes.append((name, dtype))
dtype = 'object'
bitmap_dtypes.append((name, 'b'))
dtypes.append((name, dtype))
self._data = numpy.empty(size, dtype=dtypes)
self._points = numpy.zeros(size, dtype=bitmap_dtypes)
self.closed = False
def __getattr__(self, name):
return self.meta[name]
def __getitem__(self, name):
if isinstance(name, str):
return self.data[name]
else:
data = self._data[name]
return {f: data[f] for f in data.dtype.fields}
def __len__(self):
points = self._points
size = min(points[f].argmin() for f in points.dtype.fields)
if not size and all(points[0]):
size = len(points)
return size
def set(self, point_nb, record):
for name, value in record.items():
self._set_point(point_nb, name, value)
def _set_point(self, point_nb, name, value):
if name not in self._data.dtype.fields:
# ignore unknown fields for now
return
col = self._data[name]
if col.dtype == 'object':
v = col[point_nb]
if v is None:
v = {}
key = 'ref' if isinstance(value, str) else 'value'
v[key] = value
value = v
col[point_nb] = value
self._points[name][point_nb] = True
def close(self, tail=None):
if tail is not None:
self.meta.update(tail)
dt = datetime.datetime.fromtimestamp(self.meta['end_ts'])
self.meta['end_dt'] = dt
self.closed = True
@property
def data(self):
result = {}
for field in self._data.dtype.fields:
data, pts = self._data[field], self._points[field]
n = pts.argmin()
result[field] = data if not n and all(pts) else data[:n]
return result
def handle(redis, scan, etype, key, data):
data = unpack(data)
if etype == 'header':
data['redis'] = key
scan = Scan(redis, key, data)
elif etype == 'record':
base_key, index = key.rsplit(':points:', 1)
if scan is None:
scan = load_scan_from_redis(redis, base_key)
elif scan.key == base_key:
point_nb = int(index.split(':', 1)[0])
scan.set(point_nb, data)
else:
# ignore out of order scan (maybe a previous scan
# delayed post-processing data)
return
elif etype == 'tail':
scan.close(data)
return scan, etype, key, data
class ScanStream:
def __init__(self, redis, prefix):
self.redis = to_redis(redis)
self.prefix = prefix
self._pubsub = None
self.scan = None
def __iter__(self):
if self._pubsub is not None:
raise RuntimeError('Stream already running')
self.scan = None
with self.redis.pubsub() as pubsub:
self._pubsub = pubsub
pubsub.subscribe(self.prefix)
for event in pubsub.listen():
msg_type = event['type']
if msg_type == 'subscribe':
continue
elif msg_type == 'unsubscribe':
break
key, etype, data = event['data'].split(b'|', 2)
key, etype = key.decode(), etype.decode()
result = handle(self.redis, self.scan, etype, key, data)
if result is None: # (probably out of order delayed event)
continue
self.scan = result[0]
yield result
def close(self):
if self._pubsub is None:
return
self._pubsub.unsubscribe()
self._pubsub = None
self.scan = None
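# Minimal usage sketch (not part of the original module). The Redis URL and
# channel prefix below are placeholders, and to_redis() is assumed to accept
# the same kind of value your deployment already passes to ScanStream.
if __name__ == "__main__":
    stream = ScanStream("redis://localhost:6379/0", prefix="scan_events")
    try:
        for scan, etype, key, data in stream:
            if etype == "tail":  # the scan has finished: print a short summary
                print(scan.rid, len(scan), "points")
    finally:
        stream.close()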
|
/sardana-streams-0.1.0.tar.gz/sardana-streams-0.1.0/sardana_streams/client.py
| 0.529263 | 0.211865 |
client.py
|
pypi
|
.. _sardana-glossary:
===========
Glossary
===========
.. glossary::
:sorted:
``>>>``
The default Python prompt of the interactive shell. Often seen for code
examples which can be executed interactively in the interpreter.
``...``
The default Python prompt of the interactive shell when entering code for
an indented code block or within a pair of matching left and right
delimiters (parentheses, square brackets or curly braces).
ADC
In electronics, an analog-to-digital converter (ADC) is a system that
converts an analog signal e.g. voltage into its digital representation.
argument
A value passed to a function or method, assigned to a named local
variable in the function body. A function or method may have both
positional arguments and keyword arguments in its definition.
Positional and keyword arguments may be variable-length: ``*`` accepts
or passes (if in the function definition or call) several positional
arguments in a list, while ``**`` does the same for keyword arguments
in a dictionary.
Any expression may be used within the argument list, and the evaluated
value is passed to the local variable.
attribute
A value associated with an object which is referenced by name using
dotted expressions. For example, if an object *o* has an attribute
*a* it would be referenced as *o.a*.
dictionary
An associative array, where arbitrary keys are mapped to values. The
keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods.
Called a hash in Perl.
class
A template for creating user-defined objects. Class definitions
normally contain method definitions which operate on instances of the
class.
closed loop
A.k.a feedback loop, occurs when outputs of a system are routed back
as inputs as part of a chain of cause-and-effect that forms a circuit
or loop. In case of motion systems, closed loop positioning uses the
position sensors e.g. encoders to measure the system's output. The
measured signal is looped back to the control unit as input and is used
to correct the moveable's position.
expression
A piece of syntax which can be evaluated to some value. In other words,
an expression is an accumulation of expression elements like literals,
names, attribute access, operators or function calls which all return a
value. In contrast to many other languages, not all language constructs
are expressions. There are also :term:`statement`\s which cannot be used
as expressions, such as :func:`print` or :keyword:`if`. Assignments
are also statements, not expressions.
function
A series of statements which returns some value to a caller. It can also
be passed zero or more arguments which may be used in the execution of
the body. See also :term:`argument` and :term:`method`.
generator
A function which returns an iterator. It looks like a normal function
except that it contains :keyword:`yield` statements for producing a series
of values usable in a for-loop or that can be retrieved one at a time with
the :func:`next` function. Each :keyword:`yield` temporarily suspends
processing, remembering the location execution state (including local
variables and pending try-statements). When the generator resumes, it
picks-up where it left-off (in contrast to functions which start fresh on
every invocation).
.. index:: single: generator expression
generator expression
An expression that returns an iterator. It looks like a normal expression
followed by a :keyword:`for` expression defining a loop variable, range,
and an optional :keyword:`if` expression. The combined expression
generates values for an enclosing function::
>>> sum(i*i for i in range(10)) # sum of squares 0, 1, 4, ... 81
285
IDE
Integrated Development Environment is a software application that provides
comprehensive facilities to computer programmers for software development.
interactive
Python has an interactive interpreter which means you can enter
statements and expressions at the interpreter prompt, immediately
execute them and see their results. Just launch ``python`` with no
arguments (possibly by selecting it from your computer's main
menu). It is a very powerful way to test out new ideas or inspect
modules and packages (remember ``help(x)``).
interpreted
Python is an interpreted language, as opposed to a compiled one,
though the distinction can be blurry because of the presence of the
bytecode compiler. This means that source files can be run directly
without explicitly creating an executable which is then run.
Interpreted languages typically have a shorter development/debug cycle
than compiled ones, though their programs generally also run more
slowly. See also :term:`interactive`.
iterable
An object capable of returning its members one at a
time. Examples of iterables include all sequence types (such as
:class:`list`, :class:`str`, and :class:`tuple`) and some non-sequence
types like :class:`dict` and :class:`file` and objects of any classes you
define with an :meth:`__iter__` or :meth:`__getitem__` method. Iterables
can be used in a :keyword:`for` loop and in many other places where a
sequence is needed (:func:`zip`, :func:`map`, ...). When an iterable
object is passed as an argument to the built-in function :func:`iter`, it
returns an iterator for the object. This iterator is good for one pass
over the set of values. When using iterables, it is usually not necessary
to call :func:`iter` or deal with iterator objects yourself. The ``for``
statement does that automatically for you, creating a temporary unnamed
variable to hold the iterator for the duration of the loop. See also
:term:`iterator`, :term:`sequence`, and :term:`generator`.
iterator
An object representing a stream of data. Repeated calls to the iterator's
:meth:`next` method return successive items in the stream. When no more
data are available a :exc:`StopIteration` exception is raised instead. At
this point, the iterator object is exhausted and any further calls to its
:meth:`next` method just raise :exc:`StopIteration` again. Iterators are
required to have an :meth:`__iter__` method that returns the iterator
object itself so every iterator is also iterable and may be used in most
places where other iterables are accepted. One notable exception is code
which attempts multiple iteration passes. A container object (such as a
:class:`list`) produces a fresh new iterator each time you pass it to the
:func:`iter` function or use it in a :keyword:`for` loop. Attempting this
with an iterator will just return the same exhausted iterator object used
in the previous iteration pass, making it appear like an empty container.
More information can be found in :ref:`typeiter`.
key function
A key function or collation function is a callable that returns a value
used for sorting or ordering. For example, :func:`locale.strxfrm` is
used to produce a sort key that is aware of locale specific sort
conventions.
A number of tools in Python accept key functions to control how elements
are ordered or grouped. They include :func:`min`, :func:`max`,
:func:`sorted`, :meth:`list.sort`, :func:`heapq.nsmallest`,
:func:`heapq.nlargest`, and :func:`itertools.groupby`.
There are several ways to create a key function. For example, the
:meth:`str.lower` method can serve as a key function for case insensitive
sorts. Alternatively, an ad-hoc key function can be built from a
:keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also,
the :mod:`operator` module provides three key function constructors:
:func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and
:func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO
<sortinghowto>` for examples of how to create and use key functions.
keyword argument
Arguments which are preceded with a ``variable_name=`` in the call.
The variable name designates the local name in the function to which the
value is assigned. ``**`` is used to accept or pass a dictionary of
keyword arguments. See :term:`argument`.
lambda
An anonymous inline function consisting of a single :term:`expression`
which is evaluated when the function is called. The syntax to create
a lambda function is ``lambda [arguments]: expression``
list
A built-in Python :term:`sequence`. Despite its name it is more akin
to an array in other languages than to a linked list since access to
elements is O(1).
list comprehension
A compact way to process all or part of the elements in a sequence and
return a list with the results. ``result = ["0x%02x" % x for x in
range(256) if x % 2 == 0]`` generates a list of strings containing
even hex numbers (0x..) in the range from 0 to 255. The :keyword:`if`
clause is optional. If omitted, all elements in ``range(256)`` are
processed.
method
A function which is defined inside a class body. If called as an attribute
of an instance of that class, the method will get the instance object as
its first :term:`argument` (which is usually called ``self``).
See :term:`function` and :term:`nested scope`.
namespace
The place where a variable is stored. Namespaces are implemented as
dictionaries. There are the local, global and built-in namespaces as well
as nested namespaces in objects (in methods). Namespaces support
modularity by preventing naming conflicts. For instance, the functions
:func:`__builtin__.open` and :func:`os.open` are distinguished by their
namespaces. Namespaces also aid readability and maintainability by making
it clear which module implements a function. For instance, writing
:func:`random.seed` or :func:`itertools.izip` makes it clear that those
functions are implemented by the :mod:`random` and :mod:`itertools`
modules, respectively.
nested scope
The ability to refer to a variable in an enclosing definition. For
instance, a function defined inside another function can refer to
variables in the outer function. Note that nested scopes work only for
reference and not for assignment which will always write to the innermost
scope. In contrast, local variables both read and write in the innermost
scope. Likewise, global variables read and write to the global namespace.
new-style class
Any class which inherits from :class:`object`. This includes all built-in
types like :class:`list` and :class:`dict`. Only new-style classes can
use Python's newer, versatile features like :attr:`__slots__`,
descriptors, properties, and :meth:`__getattribute__`.
object
Any data with state (attributes or value) and defined behavior
(methods). Also the ultimate base class of any :term:`new-style
class`.
positional argument
The arguments assigned to local names inside a function or method,
determined by the order in which they were given in the call. ``*`` is
used to either accept multiple positional arguments (when in the
definition), or pass several arguments as a list to a function. See
:term:`argument`.
Python 3000
Nickname for the Python 3.x release line (coined long ago when the release
of version 3 was something in the distant future.) This is also
abbreviated "Py3k".
Pythonic
An idea or piece of code which closely follows the most common idioms
of the Python language, rather than implementing code using concepts
common to other languages. For example, a common idiom in Python is
to loop over all elements of an iterable using a :keyword:`for`
statement. Many other languages don't have this type of construct, so
people unfamiliar with Python sometimes use a numerical counter instead::
for i in range(len(food)):
print(food[i])
As opposed to the cleaner, Pythonic method::
for piece in food:
print(piece)
sequence
An :term:`iterable` which supports efficient element access using integer
indices via the :meth:`__getitem__` special method and defines a
:meth:`__len__` method that returns the length of the sequence.
Some built-in sequence types are :class:`list`, :class:`str`,
:class:`tuple`, and :class:`unicode`. Note that :class:`dict` also
supports :meth:`__getitem__` and :meth:`__len__`, but is considered a
mapping rather than a sequence because the lookups use arbitrary
:term:`immutable` keys rather than integers.
singleton pattern
Singleton pattern is a software design pattern that restricts the
instantiation of a class to one "single" instance. This is useful when
exactly one object is needed to coordinate actions across the system.
slice
An object usually containing a portion of a :term:`sequence`. A slice is
created using the subscript notation, ``[]`` with colons between numbers
when several are given, such as in ``variable_name[1:3:5]``. The bracket
(subscript) notation uses :class:`slice` objects internally (or in older
versions, :meth:`__getslice__` and :meth:`__setslice__`).
statement
A statement is part of a suite (a "block" of code). A statement is either
an :term:`expression` or one of several constructs with a keyword, such
as :keyword:`if`, :keyword:`while` or :keyword:`for`.
stepper
A stepper motor (or step motor) is a brushless DC electric motor that
divides a full rotation into a number of equal steps. The motor's
position can then be commanded to move and hold at one of these steps
without any feedback sensor (an open-loop controller), as long as the
motor is carefully sized to the application.
triple-quoted string
A string which is bound by three instances of either a quotation mark
(") or an apostrophe ('). While they don't provide any functionality
not available with single-quoted strings, they are useful for a number
of reasons. They allow you to include unescaped single and double
quotes within a string and they can span multiple lines without the
use of the continuation character, making them especially useful when
writing docstrings.
type
The type of a Python object determines what kind of object it is; every
object has a type. An object's type is accessible as its
:attr:`__class__` attribute or can be retrieved with ``type(obj)``.
plugin
See :term:`plug-in`.
plug-in
a plug-in (or plugin) is a set of software components that adds
specific abilities to a larger software application. If supported,
plug-ins enable customizing the functionality of an application. For
example, plug-ins are commonly used in web browsers to play video,
scan for viruses, and display new file types.
PLC
A programmable logic controller (PLC) is an industrial digital computer
which has been ruggedised and adapted for the control of manufacturing
processes, such as assembly lines, or robotic devices, or any activity
that requires high reliability control e.g. equipment or personal
protection.
MCA
Multichannel Analyzer (MCA) is a device that sorts incoming pulses by their amplitude (pulse height) into a histogram of channels, producing a spectrum; it is commonly used in X-ray and gamma-ray spectroscopy.
CCD
A charge-coupled device (CCD) is a device for the movement of electrical
charge, usually from within the device to an area where the charge can
be manipulated, for example conversion into a digital value. This is
achieved by "shifting" the signals between stages within the device one
at a time. CCDs move charge between capacitive bins in the device, with
the shift allowing for the transfer of charge between bins.
API
An application programming interface (API) is a particular set of rules
and specifications that software programs can follow to communicate with
each other. It serves as an interface between different software
programs and facilitates their interaction, similar to the way the user
interface facilitates interaction between humans and computers.
An API can be created for applications, libraries, operating systems,
etc., as a way of defining their "vocabularies" and resources request
conventions (e.g. function-calling conventions). It may include
specifications for routines, data structures, object classes, and
protocols used to communicate between the consumer program and the
implementer program of the API.
CLI
A command-line interface (CLI) is a mechanism for interacting with a
computer operating system or software by typing commands to perform
specific tasks. This text-only interface contrasts with the use of a
mouse pointer with a graphical user interface (:term:`GUI`) to click on
options, or menus on a text user interface (TUI) to select options.
This method of instructing a computer to perform a given task is
referred to as "entering" a command: the system waits for the user
to conclude the submitting of the text command by pressing the "Enter"
key (a descendant of the "carriage return" key of a typewriter keyboard).
A command-line interpreter then receives, parses, and executes the
requested user command. The command-line interpreter may be run in a
text terminal or in a terminal emulator window as a remote shell client
such as PuTTY. Upon completion, the command usually returns output to
the user in the form of text lines on the CLI. This output may be an
answer if the command was a question, or otherwise a summary of the
operation.
GUI
A graphical user interface (GUI) is a type of user interface that
allows users to interact with electronic devices with images rather
than text commands. GUIs can be used in computers, hand-held devices
such as MP3 players, portable media players or gaming devices,
household appliances and office equipment. A GUI represents the
information and actions available to a user through graphical icons and
visual indicators such as secondary notation, as opposed to text-based
interfaces (:term:`CLI`), typed command labels or text navigation.
The actions are usually performed through direct manipulation of the
graphical elements.
SDS
Sardana Device server (SDS) is the sardana tango device server
:term:`daemon`.
OS
An operating system (OS) is software, consisting of programs and data,
that runs on computers, manages computer hardware resources, and
provides common services for execution of various application software.
Operating system is the most important type of system software in a
computer system. Without an operating system, a user cannot run an
application program on their computer, unless the application program
is self booting.
daemon
In Unix and other computer multitasking operating systems, a daemon is a
computer program that runs in the background, rather than under the
direct control of a user. They are usually initiated as background
processes. Typically daemons have names that end with the letter "d": for
example, *syslogd*, the daemon that handles the system log, or *sshd*,
which handles incoming SSH connections.
SCADA
supervisory control and data acquisition (SCADA) generally refers to
industrial control systems: computer systems that monitor and control
industrial, infrastructure, or facility-based processes.
client-server model
The client-server model of computing is a distributed application
structure that partitions tasks or workloads between the providers of a
resource or service, called servers, and service requesters, called
clients. Often clients and servers communicate over a computer network
on separate hardware, but both client and server may reside in the same
system. A server machine is a host that is running one or more server
programs which share their resources with clients. A client does not
share any of its resources, but requests a server's content or service
function. Clients therefore initiate communication sessions with servers
which await incoming requests.
user position
Moveable position in user units (See also :term:`dial position`).
Dial and user units are related by the following expressions:
user = sign x dial + offset
dial = controller_position / steps_per_unit
where *sign* is -1 or 1. *offset* can be any number and *steps_per_unit*
must be non zero.
user
See :term:`user position`
dial position
Position in controller units (See also :term:`user position`).
dial
See :term:`dial position`
RoI
*Region of Interest* are samples within a data set identified for a
particular purpose.
.. _plug-in: http://en.wikipedia.org/wiki/Plug-in_(computing)
.. _CCD: http://en.wikipedia.org/wiki/Charge-coupled_device
.. _API: http://en.wikipedia.org/wiki/API
.. _CLI: http://en.wikipedia.org/wiki/Command-line_interface
.. _GUI: http://en.wikipedia.org/wiki/Graphical_user_interface
.. _OS: http://en.wikipedia.org/wiki/Operating_system
.. _daemon: http://en.wikipedia.org/wiki/Daemon_(computing)
.. _SCADA: http://en.wikipedia.org/wiki/SCADA
.. _client-server model: http://en.wikipedia.org/wiki/Client%E2%80%93server_model
.. _ALBA: http://www.cells.es/
.. _ANKA: http://ankaweb.fzk.de/
.. _ELETTRA: http://www.elettra.trieste.it/
.. _ESRF: http://www.esrf.eu/
.. _FRMII: http://www.frm2.tum.de/en/index.html
.. _HASYLAB: http://hasylab.desy.de/
.. _MAX-lab: http://www.maxlab.lu.se/maxlab/max4/index.html
.. _SOLEIL: http://www.synchrotron-soleil.fr/
.. _Tango: http://www.tango-controls.org/
.. _PyTango: http://packages.python.org/PyTango/
.. _Taurus: http://packages.python.org/taurus/
.. _QTango: http://www.tango-controls.org/download/index_html#qtango3
.. _Qt: http://qt.nokia.com/products/
.. _PyQt: http://www.riverbankcomputing.co.uk/software/pyqt/
.. _PyQwt: http://pyqwt.sourceforge.net/
.. _Python: http://www.python.org/
.. _IPython: http://ipython.scipy.org/
.. _ATK: http://www.tango-controls.org/Documents/gui/atk/tango-application-toolkit
.. _Qub: http://www.blissgarden.org/projects/qub/
.. _numpy: http://numpy.scipy.org/
.. _SPEC: http://www.certif.com/
.. _EPICS: http://www.aps.anl.gov/epics/
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/glossary.rst
| 0.92924 | 0.761361 |
glossary.rst
|
pypi
|
.. currentmodule:: sardana.pool.poolpseudomotor
.. _sardana-pseudomotor-api:
=============================
Pseudo motor API reference
=============================
A pseudo motor has ``state`` and ``position`` attributes. The state
indicates at any time if the pseudo motor is stopped, in alarm or moving. The
state is composed from the states of all the physical motors involved in the
pseudo motor. So, if one of the motors is in moving or alarm state, the whole
pseudo motor will be in that state. The position attribute indicates the
current position.
The other pseudo motor's attributes are:
.. _sardana-pseudomotor-api-driftcorrection:
drift correction
----------------
Flag to enable/disable drift correction while calculating physical
motor(s) position(s). When enabled, the write sibling(s) position(s) will
be used; when disabled, the read sibling(s) position(s) will be
used instead. By default drift correction is enabled. For more details,
see :ref:`Drift Correction <drift_section>`.
:attr:`~PoolPseudoMotor.drift_correction`
siblings
--------
List of other pseudo motor objects that belong to the same controller.
:attr:`~PoolPseudoMotor.siblings`
The available operations are:
start move absolute
-------------------
Starts to move the pseudo motor to the given absolute position.
:meth:`~PoolPseudoMotor.start_move`
stop
----
Stops the pseudo motor motion by stopping all the physical motors in an
orderly fashion.
abort
-----
Stops the pseudo motor motion by stopping all the physical motors as
fast as possible (possibly without deceleration, which may cause a loss of position).
release
-------
Releases a hung motion, e.g. when the hardware controller itself has
hung. You should first try stop/abort.
.. seealso::
:ref:`sardana-pseudomotor-overview`
the pseudo-motor overview
:class:`~sardana.tango.pool.PseudoMotor.PseudoMotor`
the pseudo-motor tango device :term:`API`
.. _sardana-pseudomotor-api-position:
PseudoMotor position
--------------------
The pseudomotor's current position can be obtained by reading the
position attribute. The diagram shows the internal sequence of calls.
As shown in the picture, this process is divided into two parts: first
the physical :ref:`motor positions are read <sardana-motor-api-position>`
from the hardware, then these motor positions are used to calculate the
pseudo position.
.. image:: /_static/sardana_server_internal_pseudomotor_read_position_flow.png
:width: 680
:align: center
Motion
------
The most useful thing to do with a pseudo motor is, of course, to move it. To
move a pseudo motor to another absolute position you have to write the value
into the position attribute.
.. image:: /_static/sardana_server_internal_pseudomotor_write_position_flow.png
:width: 680
:align: center
Please refer to
:meth:`~sardana.pool.poolpseudomotor.PoolPseudoMotor.get_siblings_positions`
for more details about the ``use`` and ``write_pos`` arguments. The value of the
latter corresponds to the :ref:`sardana-pseudomotor-api-driftcorrection`
attribute value.
After all calculations are done, the pseudo motor will deploy a motion *job*
into the sardana kernel engine which will trigger a series of calls to the
underlying motor(s) controller.
.. _drift_section:
Drift correction
~~~~~~~~~~~~~~~~
Pseudomotors which have siblings and are based on physical motors with an
inaccurate or a finite precision positioning system could be affected by the
drift effect.
**Why does it happen?**
Each move of a pseudomotor requires calculation of the physical motors'
positions in accordance with the current positions of its siblings.
Consecutive movements of a pseudomotor can accumulate errors
of the positioning system and cause a drift of its siblings.
**Who is affected?**
* **Inaccurate positioning systems** which lead to a discrepancy between
the write and the read position of the physical motors. In this case the
physical motors must have a position sensor e.g. encoder but
must not be configured in :term:`closed loop` (in some special cases,
where the closed loop is not precise enough, the drift effect can be
observed as well). This setup can lead to the situation where write and
read values of the position attribute of the physical motors are
different, e.g. due to lost steps or an inaccurate
*step_per_unit* calibration.
* **Finite precision physical motors**, e.g. :term:`stepper` motors, which are
affected by rounding errors when moving to a position which does not translate
into a discrete number of steps to be commanded to the hardware.
**How is it solved in Sardana?**
Sardana implements drift correction, whose use is optional but enabled
by default for all pseudomotors. It is based on using the write
value, instead of the read value, of the siblings' positions, together with
the new desired position of the pseudomotor being moved, during the
calculation of the physical positions. The write value of the
pseudomotor's position gets updated at each move of the pseudomotor or
of any of the underlying motors.
.. note:: Movements being stopped unexpectedly: abort by the user,
over-travel limit or any other exceptional condition may cause
considerable discrepancy in the motor's write and read positions.
In the subsequent pseudomotor's move, Sardana will also correct this
difference by using the write instead of read values.
The drift correction is configurable with the *DriftCorrection* property
either globally (on the Pool device level) or locally (on each PseudoMotor
device level).
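The following minimal sketch (not Sardana code; it simply applies the slit
equations used in the example below) illustrates why reusing the read value of
the sibling propagates the error, while reusing its last set (write) value does
not:

::

    # Illustrative sketch: physical slit positions for a gap move,
    # with and without drift correction.
    def calc_physicals(gap, offset):
        """Ideal slit equations: right and left blade positions."""
        return offset + gap / 2.0, gap / 2.0 - offset

    # Read-back value of the offset after an inaccurate move
    # (the left motor lost 0.002):
    read_offset = 0.001
    # Last set (write) value of the offset pseudo motor:
    write_offset = 0.0

    # Without drift correction the next gap move reuses the *read* offset,
    # so the positioning error propagates into the new set points:
    print(calc_physicals(2.0, read_offset))   # -> (1.001, 0.999)

    # With drift correction the *write* value of the sibling is used,
    # so the commanded positions do not accumulate the error:
    print(calc_physicals(2.0, write_offset))  # -> (1.0, 1.0)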
**Example**
Let's use the slit pseudomotor controller to visualize the drift effect.
This controller comprises two pseudomotors: gap and offset, each of them based
on the same two physical motors: right and left. In this example we will
simulate the inaccurate positioning of the left motor (losing 0.002 unit
for every 1 unit moved).
*Drift correction disabled*
#. Initial state: gap and offset are at positions 0 (gap totally closed and
offset at the nominal position)
.. sourcecode:: spock
Door_lab_1 [1]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 0.000 0.000 0.000 0.000
Low Not specified Not specified Not specified Not specified
#. Move gap to 1
.. sourcecode:: spock
Door_lab_1 [2]: mv gap 1
The calculation of the physical motors' positions gives us 0.5 for both right
and left (in accordance with the current offset of 0)
.. sourcecode:: spock
Door_lab_1 [3]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 0.500 0.498 0.998 0.001
Low Not specified Not specified Not specified Not specified
We observe that the gap pseudomotor did not reach the desired
position of 1 due to the left motor's positioning problem. The left motor's
write and read position discrepancy of 0.002 causes the gap to reach
only 0.998 and the offset to drift to 0.001.
#. Move gap to 2
.. sourcecode:: spock
Door_lab_1 [4]: mv gap 2
The calculation of the physical motors' positions gives us 1.001 for right
and 0.999 for left (in accordance with the current offset of 0.001).
.. sourcecode:: spock
Door_lab_1 [5]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 1.001 0.997 1.998 0.002
Low Not specified Not specified Not specified Not specified
We observe that the gap pseudomotor did not reach the desired position of 2
due to the left motor's positioning problem. The left motor's write and
read position discrepancy of 0.002 causes the gap to reach only 1.998 and
the offset to drift again by 0.001; the total accumulated drift is 0.002.
#. Move gap to 3
The calculation of the physical motors' positions gives us 1.502 for right
and 1.498 for left (in accordance with the current offset of 0.002).
.. sourcecode:: spock
Door_lab_1 [6]: mv gap 3
Door_lab_1 [7]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 1.502 1.496 2.998 0.003
Low Not specified Not specified Not specified Not specified
We observe that the gap pseudomotor did not reach the desired position of 3
due to the left motor's positioning problem. The left motor's write and
read position discrepancy of 0.002 causes the gap to reach only 2.998 and
the offset to drift by another 0.001; the total accumulated drift is 0.003.
.. figure:: /_static/drift_correction_disabled.png
:align: center
:width: 680
This sketch demonstrates the above example, where the offset drifted by 0.003.
*Drift correction enabled*
#. Initial state: gap and offset are at positions 0 (gap totally closed and
offset at the nominal position)
.. sourcecode:: spock
Door_lab_1 [1]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 0.000 0.000 0.000 0.000
Low Not specified Not specified Not specified Not specified
#. Move gap to 1
.. sourcecode:: spock
Door_lab_1 [2]: mv gap 1
The calculation of the physical motors' positions gives us 0.5 for both right
and left (in accordance with the **last set** offset of 0).
.. sourcecode:: spock
Door_lab_1 [3]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 0.500 0.498 0.998 0.001
Low Not specified Not specified Not specified Not specified
We observe that the gap pseudomotor did not reach the desired position of 1
due to the left motor's positioning problem. The left motor's write and
read position discrepancy of 0.002 causes the gap to reach only 0.998 and
the offset to drift to 0.001.
#. Move gap to 2
.. sourcecode:: spock
Door_lab_1 [4]: mv gap 2
The calculation of the physical motors' positions gives us 1 for right
and 1 for left (in accordance with the **last set** offset of 0).
.. sourcecode:: spock
Door_lab_1 [5]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 1.000 0.998 1.998 0.001
Low Not specified Not specified Not specified Not specified
We observe that the gap pseudomotor did not reach the desired position of 2
due to the left motor's positioning problem. The left motor's write and
read position discrepancy of 0.002 causes the gap to reach only 1.998 and
the offset to drift again by 0.001, but thanks to the drift correction it is
maintained at this value.
#. Move gap to 3
.. sourcecode:: spock
Door_lab_1 [6]: mv gap 3
The calculation of the physical motors' positions gives us 1.5 for right
and 1.5 for left (in accordance with the **last set** offset of 0).
.. sourcecode:: spock
Door_lab_1 [7]: wm right left gap offset
right left gap offset
User
High Not specified Not specified Not specified Not specified
Current 1.500 1.498 2.998 0.001
Low Not specified Not specified Not specified Not specified
We observe that the gap pseudomotor did not reach the desired position of 3
due to the left motor's positioning problem. The left motor's write and
read position discrepancy of 0.002 causes the gap to reach only 2.998 and
the offset to drift again by 0.001, but thanks to the drift correction it is
maintained at this value.
.. figure:: /_static/drift_correction_enabled.png
:align: center
:width: 680
This sketch demonstrates the above example, where the offset's drift was
corrected.
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/devel/api/api_pseudomotor.rst
| 0.943445 | 0.705565 |
api_pseudomotor.rst
|
pypi
|
import time
import springfieldlib
from sardana import State
from sardana.pool.controller import TriggerGateController
class SpringfieldBaseTriggerGateController(TriggerGateController):
"""The most basic controller intended from demonstration purposes only.
This is the absolute minimum you have to implement to set a proper trigger
controller able to get a trigger value, get a trigger state and do an
acquisition.
This example is so basic that it is not even directly described in the
documentation"""
def __init__(self, inst, props, *args, **kwargs):
"""Constructor"""
super(SpringfieldBaseTriggerGateController, self).__init__(
inst, props, *args, **kwargs)
self.springfield = springfieldlib.SpringfieldTriggerHW()
def StateOne(self, axis):
"""Get the specified trigger state"""
springfield = self.springfield
state = springfield.getState(axis)
if state == 1:
return State.On, "Trigger is stopped"
elif state == 2:
return State.Moving, "Trigger is running"
elif state == 3:
return State.Fault, "Trigger has an error"
def StartOne(self, axis, value=None):
"""acquire the specified trigger"""
self.springfield.StartChannel(axis)
def SynchOne(self, axis, synchronization):
self.springfield.SynchChannel(axis, synchronization)
def StopOne(self, axis):
"""Stop the specified trigger"""
self.springfield.stop(axis)
from sardana import DataAccess
from sardana.pool.controller import Type, Description, DefaultValue, Access, FGet, FSet
class SpringfieldTriggerGateController(TriggerGateController):
def __init__(self, inst, props, *args, **kwargs):
super(SpringfieldTriggerGateController, self).__init__(
inst, props, *args, **kwargs)
# initialize hardware communication
self.springfield = springfieldlib.SpringfieldTriggerHW()
# do some initialization
self._triggers = {}
def AddDevice(self, axis):
self._triggers[axis] = True
def DeleteDevice(self, axis):
del self._triggers[axis]
StateMap = {
1: State.On,
2: State.Moving,
3: State.Fault,
}
def StateOne(self, axis):
springfield = self.springfield
state = self.StateMap[springfield.getState(axis)]
status = springfield.getStatus(axis)
return state, status
def SynchOne(self, axis, synchronization):
self.springfield.SynchChannel(axis, synchronization)
def StartOne(self, axis, position):
self.springfield.StartChannel(axis, position)
def StopOne(self, axis):
self.springfield.stop(axis)
def AbortOne(self, axis):
self.springfield.abort(axis)
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/devel/howto_controllers/sf_tg_ctrl.py
| 0.765856 | 0.255466 |
sf_tg_ctrl.py
|
pypi
|
.. currentmodule:: sardana.pool.controller
.. _sardana-pseudomotorcontroller-howto-basics:
======================================
How to write a pseudo motor controller
======================================
This chapter describes how to write a valid Python pseudo motor system
class.
Prerequisites
-------------
Before writing the first Python pseudo motor class for your Device
Pool two checks must be performed:
#. The device pool **PoolPath** property must exist and must point to the
directory which will contain your Python pseudo motor module. The syntax of
this **PoolPath** property is one directory per line.
.. seealso:: Please see :ref:`sardana-pool-api-poolpath`
for more information on setting this property.
#. A ``poolpseudomotor.py`` file is part of the Device Pool distribution and is
located within the :mod:`sardana.pool` module. The directory containing this
module must be in the PYTHONPATH environment variable or it must be part of
the **PoolPath** Device Pool property mentioned above.
Rules
-----
A correct pseudo motor system class must obey the following rules:
#. The pseudo motor system class being written must be a subclass of the
PseudoMotorController class from :mod:`sardana.pool.controller` module
(see example :ref:`below <pseudomotor-example>`).
#. The class variable **motor_roles** should be a tuple of motor role names.
The number of elements in this tuple will determine the number of required
motors for this pseudo motor class. The order in which the roles are defined
is also important as it will determine the index of the motors in the pseudo
motor system.
#. The class variable **pseudo_motor_roles** must be set if the pseudo motor
class being written represents more than one pseudo motor. This variable
must contain a tuple of pseudo motor role names.
The order in which the roles are defined will determine the index of the
pseudo motors in the pseudo motor system. If the pseudo motor class
represents only one pseudo motor then this operation is optional.
If omitted, the value of pseudo_motor_roles will be set to the class name.
#. In case the pseudo motor class needs special properties or attributes,
it is possible to define them as explained in the sections
:ref:`sardana-controller-howto-axis-attributes` and
:ref:`sardana-controller-howto-controller-attributes`.
#. The pseudo motor class must implement a **CalcPseudo** method with the
following signature:
::
number = CalcPseudo(index, physical_pos, curr_pseudo_pos)
The method will receive as argument the index of the pseudo motor for
which the pseudo position calculation is requested. This number refers
to the index in the pseudo_motor_roles class variable.
The physical_pos is a tuple containing the motor positions.
The method body should contain the code to translate the given motor
positions into pseudo motor positions.
The method will return a number representing the calculated pseudo
motor position.
#. The pseudo motor class must implement a **CalcPhysical** method with the
following signature:
::
number = CalcPhysical(index, pseudo_pos, curr_physical_pos)
The method will receive as argument the index of the motor for which
the physical position calculation is requested. This number refers to
the index in the motor_roles class variable.
The pseudo_pos is a tuple containing the pseudo motor positions.
The method body should contain the code to translate the given pseudo
motor positions into motor positions.
The method will return a number representing the calculated motor position.
#. Optional implementation of **CalcAllPseudo** method with the following
signature:
::
()/[]/number = CalcAllPseudo(physical_pos, curr_pseudo_pos)
The method will receive as argument a physical_pos which is a tuple of
motor positions.
The method will return a tuple or a list of calculated pseudo motor
positions. If the pseudo motor class represents a single pseudo motor
then the return value could be a single number.
.. note:: At the time of writing this documentation, the method
**CalcAllPseudo** is not used. It is still available for backward
compatibility.
#. Optional implementation of **CalcAllPhysical** method with the following
signature:
::
()/[]/number = CalcAllPhysical(pseudo_pos, curr_physical_pos)
The method will receive as argument a pseudo_pos which is a tuple of
pseudo motor positions.
The method will return a tuple or a list of calculated motor
positions. If the pseudo motor class requires a single motor then the
return value could be a single number.
.. note:: The default implementations of the **CalcAllPhysical** and
**CalcAllPseudo** methods call **CalcPhysical** and **CalcPseudo**
for each physical motor and pseudo motor respectively. Overriding the
default implementation should only be done if a gain in performance
can be obtained.
.. _pseudomotor-example:
Example
~~~~~~~
One of the most basic examples is the control of a slit. The slit has
two blades with one motor each. Usually the user doesn't want to
control the experiment by directly handling these two motor positions
since they have little meaning from the experiment's perspective.
.. image:: /_static/gap_offset.png
Instead, it would be more useful for the user to control the
experiment by means of changing the gap and offset values. Pseudo
motors gap and offset will provide the necessary interface for
controlling the experiment's gap and offset values respectively.
The calculations that need to be performed are:
::
gap = sl2t+sl2b
offset = (sl2t-sl2b) / 2
and the inverse calculations are:
::
sl2t = offset + gap/2
sl2b = -offset + gap/2
The corresponding Python code would be:
::
"""This module contains the definition of a slit pseudo motor controller
for the Sardana Device Pool"""
__all__ = ["Slit"]
__docformat__ = 'restructuredtext'
from sardana import DataAccess
from sardana.pool.controller import PseudoMotorController
from sardana.pool.controller import DefaultValue, Description, Access, Type
class Slit(PseudoMotorController):
"""A Slit pseudo motor controller for handling gap and offset pseudo
motors. The system uses two real motors sl2t (top slit) and sl2b (bottom
slit)"""
gender = "Slit"
model = "Default Slit"
organization = "Sardana team"
pseudo_motor_roles = "Gap", "Offset"
motor_roles = "sl2t", "sl2b"
ctrl_properties = {'sign': {Type: float,
Description: 'Gap = sign * calculated gap\nOffset = sign * calculated offset',
DefaultValue: 1}, }
axis_attributes = {'example': {Type: int,
Access: DataAccess.ReadWrite,
Description: 'test purposes'}, }
def __init__(self, inst, props, *args, **kwargs):
PseudoMotorController.__init__(self, inst, props, *args, **kwargs)
self._log.debug("Created SLIT %s", inst)
self._example = {}
def CalcPhysical(self, index, pseudo_pos, curr_physical_pos):
half_gap = pseudo_pos[0] / 2.0
if index == 1:
ret = self.sign * (pseudo_pos[1] + half_gap)
else:
ret = self.sign * (half_gap - pseudo_pos[1])
self._log.debug("Slit.CalcPhysical(%d, %s) -> %f",
index, pseudo_pos, ret)
return ret
def CalcPseudo(self, index, physical_pos, curr_pseudo_pos):
gap = physical_pos[1] + physical_pos[0]
if index == 1:
ret = self.sign * gap
else:
ret = self.sign * (physical_pos[0] - gap / 2.0)
return ret
def CalcAllPseudo(self, physical_pos, curr_pseudo_pos):
"""Calculates the positions of all pseudo motors that belong to the
pseudo motor system from the positions of the physical motors."""
gap = physical_pos[1] + physical_pos[0]
return (self.sign * gap,
self.sign * (physical_pos[0] - gap / 2.0))
def SetAxisExtraPar(self, axis, parameter, value):
self._example[axis] = value
def GetAxisExtraPar(self, axis, parameter):
return self._example.get(axis, -1)
What to do when pseudo position cannot be determined?
------------------------------------------------------
Some controllers won't be able to provide the pseudo calculation result because
for some combinations of the physical values it may not exist. For example, for a
``DiscretePseudoMotorController`` with a configuration which has blind regions
in the physical motor's continuous space, when its physical motor stays in one of
these regions the discrete position cannot be determined.
In such cases, as the controller developer, you should raise an exception from
`~sardana.pool.controller.PseudoMotorController.CalcPseudo()` with
a descriptive message explaining why the value could not be calculated. Sardana will
take care of propagating this exception message to the user as feedback.
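For illustration, a minimal and purely hypothetical controller raising such an
exception could look like the sketch below; the blind region limits and the
discrete mapping are made up for the example:

::

    from sardana.pool.controller import PseudoMotorController

    class BlindRegionPseudoMotorController(PseudoMotorController):
        """Hypothetical discrete-like pseudo motor with a blind region."""

        pseudo_motor_roles = ("discrete",)
        motor_roles = ("physical",)

        def CalcPseudo(self, index, physical_pos, curr_pseudo_pos):
            pos = physical_pos[0]
            # physical positions between 10.0 and 20.0 map to no discrete value
            if 10.0 < pos < 20.0:
                raise ValueError(
                    "pseudo position undefined: physical position %f lies "
                    "in the blind region (10.0, 20.0)" % pos)
            return 0 if pos <= 10.0 else 1

        def CalcPhysical(self, index, pseudo_pos, curr_physical_pos):
            # hypothetical inverse mapping: discrete value -> physical set point
            return 5.0 if pseudo_pos[0] == 0 else 25.0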
.. seealso:: For more details on pseudo motors please refer to
:ref:`sardana-pseudomotor-api`
.. _Tango: http://www.tango-controls.org/
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/devel/howto_controllers/howto_pseudomotorcontroller.rst
| 0.923308 | 0.667121 |
howto_pseudomotorcontroller.rst
|
pypi
|
import springfieldlib
from sardana import State
from sardana.pool.controller import MotorController
class SpringfieldBaseMotorController(MotorController):
"""The most basic controller intended from demonstration purposes only.
This is the absolute minimum you have to implement to set a proper motor
controller able to get a motor position, get a motor state and move a
motor.
This example is so basic that it is not even directly described in the
documentation"""
MaxDevice = 128
def __init__(self, inst, props, *args, **kwargs):
"""Constructor"""
super(SpringfieldBaseMotorController, self).__init__(
inst, props, *args, **kwargs)
self.springfield = springfieldlib.SpringfieldMotorHW()
def ReadOne(self, axis):
"""Get the specified motor position"""
return self.springfield.getPosition(axis)
def StateOne(self, axis):
"""Get the specified motor state"""
springfield = self.springfield
state = springfield.getState(axis)
if state == 1:
return State.On, "Motor is stopped"
elif state == 2:
return State.Moving, "Motor is moving"
elif state == 3:
return State.Fault, "Motor has an error"
def StartOne(self, axis, position):
"""Move the specified motor to the specified position"""
self.springfield.move(axis, position)
def StopOne(self, axis):
"""Stop the specified motor"""
self.springfield.stop(axis)
from sardana import DataAccess
from sardana.pool.controller import Type, Description, DefaultValue, Access, FGet, FSet
class SpringfieldMotorController(MotorController):
axis_attributes = {
"CloseLoop": {
Type: bool,
Description: "(de)activates the motor close loop algorithm",
DefaultValue: False,
},
}
def getCloseLoop(self, axis):
return self.springfield.isCloseLoopActive(axis)
def setCloseLoop(self, axis, value):
self.springfield.setCloseLoop(axis, value)
def __init__(self, inst, props, *args, **kwargs):
super(SpringfieldMotorController, self).__init__(
inst, props, *args, **kwargs)
# initialize hardware communication
self.springfield = springfieldlib.SpringfieldMotorHW()
# do some initialization
self._motors = {}
def AddDevice(self, axis):
self._motors[axis] = True
def DeleteDevice(self, axis):
del self._motors[axis]
StateMap = {
1: State.On,
2: State.Moving,
3: State.Fault,
}
def StateOne(self, axis):
springfield = self.springfield
state = self.StateMap[springfield.getState(axis)]
status = springfield.getStatus(axis)
limit_switches = MotorController.NoLimitSwitch
hw_limit_switches = springfield.getLimits(axis)
if hw_limit_switches[0]:
limit_switches |= MotorController.HomeLimitSwitch
if hw_limit_switches[1]:
limit_switches |= MotorController.UpperLimitSwitch
if hw_limit_switches[2]:
limit_switches |= MotorController.LowerLimitSwitch
return state, status, limit_switches
def ReadOne(self, axis):
position = self.springfield.getPosition(axis)
return position
def StartOne(self, axis, position):
self.springfield.move(axis, position)
def StopOne(self, axis):
self.springfield.stop(axis)
def AbortOne(self, axis):
self.springfield.abort(axis)
def DefinePosition(self, axis, position):
self.springfield.setCurrentPosition(axis, position)
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/devel/howto_controllers/sf_motor_ctrl.py
| 0.848078 | 0.364523 |
sf_motor_ctrl.py
|
pypi
|
import time
import springfieldlib
from sardana import State
from sardana.pool.controller import CounterTimerController
class SpringfieldBaseCounterTimerController(CounterTimerController):
"""The most basic controller intended from demonstration purposes only.
This is the absolute minimum you have to implement to set a proper counter
controller able to get a counter value, get a counter state and do an
acquisition.
This example is so basic that it is not even directly described in the
documentation"""
def __init__(self, inst, props, *args, **kwargs):
"""Constructor"""
super(SpringfieldBaseCounterTimerController,
self).__init__(inst, props, *args, **kwargs)
self.springfield = springfieldlib.SpringfieldCounterHW()
def ReadOne(self, axis):
"""Get the specified counter value"""
return self.springfield.getValue(axis)
def StateOne(self, axis):
"""Get the specified counter state"""
springfield = self.springfield
state = springfield.getState(axis)
if state == 1:
return State.On, "Counter is stopped"
elif state == 2:
return State.Moving, "Counter is acquiring"
elif state == 3:
return State.Fault, "Counter has an error"
def StartOne(self, axis, value=None):
"""acquire the specified counter"""
self.springfield.StartChannel(axis)
def LoadOne(self, axis, value, repetitions, latency):
self.springfield.LoadChannel(axis, value)
def StopOne(self, axis):
"""Stop the specified counter"""
self.springfield.stop(axis)
from sardana import DataAccess
from sardana.pool.controller import Type, Description, DefaultValue, Access, FGet, FSet
class SpringfieldCounterTimerController(CounterTimerController):
def __init__(self, inst, props, *args, **kwargs):
super(SpringfieldCounterTimerController, self).__init__(
inst, props, *args, **kwargs)
# initialize hardware communication
self.springfield = springfieldlib.SpringfieldCounterHW()
# do some initialization
self._counters = {}
def AddDevice(self, axis):
self._counters[axis] = True
def DeleteDevice(self, axis):
del self._counters[axis]
StateMap = {
1: State.On,
2: State.Moving,
3: State.Fault,
}
def StateOne(self, axis):
springfield = self.springfield
state = self.StateMap[springfield.getState(axis)]
status = springfield.getStatus(axis)
return state, status
def ReadOne(self, axis):
value = self.springfield.getValue(axis)
return value
def LoadOne(self, axis, value, repetitions, latency):
self.springfield.LoadChannel(axis, value)
def StartOne(self, axis, position):
self.springfield.StartChannel(axis, position)
def StopOne(self, axis):
self.springfield.stop(axis)
def AbortOne(self, axis):
self.springfield.abort(axis)
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/devel/howto_controllers/sf_ct_ctrl.py
| 0.770896 | 0.301137 |
sf_ct_ctrl.py
|
pypi
|
.. currentmodule:: sardana.macroserver.macros.scan
.. _sardana-users-scan:
=====
Scans
=====
Perhaps the most used type of macro is the scan macro. In general terms, we
call a *scan* a macro that moves one or more :ref:`motors <sardana-motor-api>`
and acquires data along the path of the motor(s).
.. note:: Sardana provides a :ref:`Scan Framework <sardana-macros-scanframework>`
for developing scan macros so that the scan macros behave in a consistent way.
Unless otherwise specified, the following discussion applies to scan macros
based on such framework.
The various scan macros mostly differ in how many motors are moved and the
definition of their paths.
Typically, the selection of which data is going to be acquired depends on the
active *measurement group* and is *not* fixed by the macro itself (although
there is no limitation in this sense).
Depending on whether the motors are stopped before acquiring the data or not, we
can classify the scan macros in *step* scans or *continuous* scans,
respectively.
.. figure:: /_static/trend_ascanVSascanc.png
:width: 100%
:figwidth: 80%
:align: center
Trend plot showing a step scan (:class:`ascan` *m_cp1_1 0 1000 8 .5*)
followed by a continuous scan (:class:`ascanc` *m_cp1_1 0 1000 .5*).
The line corresponds to the motor position and the blue shaded areas
correspond to the intervals in which the data acquisition took place.
.. _sardana-users-scan-step:
Step scans
----------
In a step scan, the motors are moved to given points, and once they reach each
point they stop. Then, one or more channels are acquired for a certain amount
of time, and only when the data acquisition is finished, the motors proceed to
the next point.
In this way, the position associated to a data readout is well known and does
not change during the acquisition time.
Some examples of step scan macros are:
:class:`ascan`,
:class:`a2scan`, ...
:class:`dscan`,
:class:`d2scan`, ...
:class:`mesh`.
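For illustration, a typical step scan invocation from Spock looks like this (the
motor name is hypothetical; the parameters are the start position, the final
position, the number of intervals and the integration time):

.. sourcecode:: spock

    Door_lab_1 [1]: ascan mot01 0 10 10 0.2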
.. _sardana-users-scan-continuous:
Continuous scans
----------------
In a continuous scan, the motors are not stopped for acquisition, which
therefore takes place while the motors are moving. The most common reason for
using this type of scan is optimizing the acquisition time by not having to
wait for motors to accelerate and decelerate between acquisitions.
The continuous scans introduce some constraints and issues that should be
considered.
#. If a continuous scan involves moving more than one motor simultaneously
(as it is done, e.g. in :class:`~sardana.macroserver.macros.scan.a2scan`),
then the movements of the motors should be synchronized so that they all
start their path at the same time and finish it at the same time.
#. If motors do not maintain a constant velocity along the path of their
movement, the trajectories followed when using more than one motor may not
be linear.
#. While in step scans it is possible to scan two pseudo-motors that access
the same physical motors (e.g. the *gap* and *offset* of a slit, being both
pseudo-motors accessing the same physical motors attached to each blade of
the slit), in a continuous scan the motions cannot be decoupled in a
synchronized way.
#. Backlash correction is incompatible with continuous scans, so you should
keep in mind that continuous scans should only be done in the backlash-free
direction of the motor (typically, by convention the positive one for a
physical motor).
In order to address the first two issues, the
:ref:`scan framework <sardana-macros-scanframework>` attempts the following:
* If the motors support changing their velocity, Sardana will adjust the
velocities of the motors so that they all start and finish the required path
simultaneously. For motors that specify a range of allowed velocities, this
range will be used (for motors that do not specify a maximum allowed
velocity, the current "top velocity" will be assumed to be the maximum)
* For motors that can maintain a constant velocity after an acceleration phase
(this is the case for most physical motors), Sardana will transparently
extend the user-given path both at the beginning and the end in order to
allow for the motors to move at constant velocity along all the user defined
path (i.e., the motors are allowed time and room to accelerate before
reaching the start of the path and to decelerate after the end of the nominal
path selected by the user)
These two actions can be seen in the following plot of the positions of the two
motors involved in a :class:`~sardana.macroserver.macros.scan.a2scanc`.
.. figure:: /_static/trend_a2scanc.png
:width: 100%
:figwidth: 80%
:align: center
Trend plot showing a two-motor continuous scan
(:class:`a2scanc` *m_cp1_1 100 200 m_cp1_2 0 500 .1*).
The lines correspond to the motor positions and the blue shaded areas correspond to the intervals in
which the data acquisition took place.
Both motors are capable of the same velocity and acceleration, but since the
required scan path for m_cp1_1 is shorter than that for m_cp1_2, its top
velocity has been adjusted (gentler slope for m_cp1_1) so that both motors go
through the user-requested start and stop positions simultaneously.
The same figure also shows how the paths for both motors have been automatically
(and transparently, for the user) extended to guarantee that the user defined
path is followed at constant velocity and that the data acquisition takes place
also while the motors are running at constant velocity.
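As a back-of-the-envelope illustration of the velocity adjustment (not Sardana
code), each motor is simply given a velocity proportional to the length of its
own path so that all paths take the same time; the scan time below is made up:

::

    # Illustrative sketch: per-motor velocities so that all motors cover
    # their scan ranges in the same time (acceleration phases ignored).
    def scan_velocities(ranges, scan_time):
        """ranges: list of (start, stop) user positions; scan_time in seconds."""
        return [abs(stop - start) / scan_time for start, stop in ranges]

    # Ranges taken from the a2scanc example in the figure above,
    # with a hypothetical total scan time of 5 s:
    print(scan_velocities([(100, 200), (0, 500)], 5.0))  # -> [20.0, 100.0]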
The synchronization of movement and acquisition can be done via hardware or
via software. Currently Sardana provides two different interfaces for
continuous scans. They can be easily differentiated by the scan name suffix:
* *c* - allows only software synchronization
* *ct* - allows both software and hardware synchronization (introduced with
SEP6_)
In the *c* type of scans, in order to optimize the acquisition time, Sardana
attempts to perform as many acquisitions as allowed during the scan time. Due
to the uncertainty in the delay times involved, it is not possible to know
beforehand how many acquisitions will be completed. In other words, the number
of acquired points along a continuous scan is not fixed (but it is guaranteed
to be as large as possible). Some examples of continuous scan macros are:
:class:`ascanc`,
:class:`a2scanc`, ...
:class:`dscanc`,
:class:`d2scanc`, ...
:class:`meshc`.
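For illustration, the continuous counterpart of the step scan example shown
earlier could be invoked like this (hypothetical motor name; the parameters are
the start position, the final position and the integration time, with no number
of intervals since the number of acquired points is not fixed):

.. sourcecode:: spock

    Door_lab_1 [2]: ascanc mot01 0 10 0.2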
In the *ct* type of scans, Sardana performs the exact number of acquisitions
selected by the user by means of hardware or software synchronization,
configurable at the
:ref:`measurement group <sardana-measurementgroup-overview>` level.
The software synchronized channels may not follow the synchronization pace and
some acquisitions may need to be skipped. In order to mitigate this risk an
extra latency time can be spent in between the scan points. Another possibility
is to enable data interpolation in order to fill the gaps in the scan records.
Some examples of continuous scan macros are:
:class:`ascanct`,
:class:`a2scanct`, ...
:class:`dscanct`,
:class:`d2scanct`, ...
Currently the *ct* type of continuous scans still does not support acquiring
external attributes, e.g. Tango_ ones; however, it is planned to support this in the
future.
The number of scan points in the *ct* type of continuous scans indirectly depends
on the ``nr_interv`` macro parameter. If passed as a positive number, the
scan will have ``nr_interv`` + 1 scan points (at the start and end of each interval),
but if passed as a negative number the scan will have one point less
(the point corresponding to the end of the last interval won't be acquired).
This also affects the scan motion range: when the last point is acquired,
the motion range is extended in order to acquire that point at constant speed.
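For example (with a hypothetical motor name), ``ascanct mot01 0 10 3 0.1``
acquires ``3 + 1 = 4`` points, nominally at 0, 3.33, 6.67 and 10, while
``ascanct mot01 0 10 -3 0.1`` acquires only 3 points, at 0, 3.33 and 6.67,
since the point at the end of the last interval is skipped.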
The following two sketches depict this difference:
.. figure:: /_static/cont_nr_interv_3.png
:width: 100%
:align: center
Motion and acquisition of continuous scan when ``nr_interv`` > 0.
.. figure:: /_static/cont_nr_interv_-3.png
:width: 100%
:align: center
Motion and acquisition of continuous scan when ``nr_interv`` < 0.
In the above sketches, the dashed line indicates when the motion could
theoretically already start decelerating, as foreseen in SEP6_. However, currently
the deceleration starts after the last ``latency_time`` interval.
.. note::
The creation of two different types of continuous scans is just the result
of the iterative development of the :ref:`Scan Framework <sardana-macros-scanframework>`.
Ideally they will merge into one based on the *ct* approach. This process
may require backwards incompatible changes (up to and including removal of
the affected scan macros) if deemed necessary by the core developers.
Configuration
-------------
Scans are highly configurable using environment variables
(for how to use environment variables, see the environment-related macros in
:ref:`sardana-standard-macro-catalog`).
.. seealso:: For further information about the available Sardana Environment
Variables, check the
:ref:`Environment Variable Catalog <environment-variable-catalog>`
.. _sardana-users-scan-data-storage:
Data storage
------------
Data being produced by scans can be optionally handled by *recorders* and
sent to a variety of destinations. Typical use case is to store the scan data
in a file.
Built-in recorders
^^^^^^^^^^^^^^^^^^
Sardana defines some standard recorders, e.g. the Spock output recorder or the
SPEC file recorder. On the other hand, users may define their own custom recorders.
Sardana provides the following standard recorders (grouped by types):
* file [*]
* FIO_FileRecorder
* NXscanH5_FileRecorder
* SPEC_FileRecorder
* shared memory [*]
* SPSRecorder
* ShmRecorder
* output
* JsonRecorder [*]
* OutputRecorder
[*] The Scan Framework provides mechanisms to enable and select these recorders
using environment variables.
.. _sardana-users-scan-data-storage-nxscanh5_filerecorder:
NXscanH5_FileRecorder
"""""""""""""""""""""
NXscanH5_FileRecorder is a scan recorder which writes the scan data according
to the NXscan `NeXus <http://nexusformat.org>`_ application definition
in the `HDF5 <https://www.hdfgroup.org/solutions/hdf5/>`_ file format.
Sardana scan recorders are instantiated per scan execution and therefore this
recorder opens and closes the HDF5 file for writing when the scan starts
and ends respectively. This may cause file locking issues with reading
applications opened in between the scans. To overcome this issue
the *write session* concept, with optional support of SWMR mode,
was introduced for this particular recorder.
The write session use case scenarios are:
* Manual session control with macros
To start and end the session you can use
`~sardana.macroserver.macros.h5storage.h5_start_session` /
`~sardana.macroserver.macros.h5storage.h5_start_session_path` and
`~sardana.macroserver.macros.h5storage.h5_end_session` /
`~sardana.macroserver.macros.h5storage.h5_end_session_path` macros.
You can list the active sessions with the
`~sardana.macroserver.macros.h5storage.h5_ls_session` macro. A minimal macro
using this approach is sketched after this list.
* Programmatic session control with context manager (for macro developers)
You can use the `~sardana.macroserver.macros.h5storage.h5_write_session`
context manager to ensure that the write session is only active over a
specific part of your macro code.
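One way to use the session macros from a custom macro is sketched below (the
macro itself is hypothetical, session defaults are assumed, and in real code the
context manager mentioned above may be preferable):

::

    from sardana.macroserver.macro import macro, Type

    @macro([["motor", Type.Moveable, None, "motor to scan"]])
    def scans_in_one_session(self, motor):
        """Hypothetical macro running several scans inside one write session."""
        self.execMacro("h5_start_session")      # open the write session
        try:
            for start in (0, 10, 20):
                self.execMacro("ascan", motor, start, start + 5, 10, 0.1)
        finally:
            self.execMacro("h5_end_session")    # always close the session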
.. _sardana-users-scan-snapshot:
Scan snapshots
--------------
Snapshots are used for saving data (or metadata) from elements and devices not
necessarily related to the scan itself. A scan saves only the values of the involved
elements, that is, positions of the motors being moved and values read from experiment
channels in the active measurement group. If you want your scans to include
something more, you can use the snapshot.
The snapshot group can be configured via the :ref:`Experiment Configuration widget <expconf_ui_snapshot_group>`
and the :ref:`prescansnapshot` environment variable. It can include both Sardana
elements and external (Tango) data sources.
The snapshot is saved only once during a scan, at the very beginning. The exact
way the snapshot data is saved depends on the :ref:`recorder <sardana-writing-recorders>`
and scan file format being used.
Scan statistics
---------------
Sardana may automatically calculate some basic statistics over the scan
results e.g., max, mean, FWHM, etc.
In order to enable the statistics calculation you just need to attach
the :class:`~sardana.macroserver.macros.scan.scanstats` macro to the
``post-scan`` hook place (see :ref:`hook documentation <sardana-macros-hooks>`
for more info).
Apart from being printed by the scanstats macro, the statistics are stored in
the door's :ref:`scanstats` environment variable. This way some other macro
can use them, e.g. to:
* move the scanned motor to the position where a given channel
reached the maximum value (:class:`~sardana.macroserver.macros.standard.pic`)
* move the scanned motor to center position of FWHM
(:class:`~sardana.macroserver.macros.standard.cen`)
.. _SEP6: http://www.sardana-controls.org/sep/?SEP6.md
.. _Tango: http://www.tango-controls.org
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/users/scan.rst
| 0.931044 | 0.708836 |
scan.rst
|
pypi
|
.. _sardana-overview:
========
Overview
========
Sardana is the control program initially developed at ALBA_. Our mission
statement:
Produce a modular, high performance, robust, and generic user environment
for control applications in large and small installations.
Make Sardana the generic user environment distributed in the Tango project
and the standard basis of collaborations in control.
Up to now, control applications in large installations have been notoriously
difficult to share. Inspired by the success of the Tango_ collaboration, ALBA_
decided to start the creation of a generic tool to enlarge the scope of the
Tango_ project to include a standard client program - or better a standard
generic user environment. From the beginning our aim has been to involve others
in this process. At this moment in time the user environment consists of a highly
configurable standard graphical user interface, a standard command line
interface understanding SPEC_ commands, and a standard way to compose new
applications either by programming or with a graphical tool. It further
consists of a standard macro executer, standard set of macros, a standard range
of common hardware types (like motors, counters, cameras and so on) and a
configuration editor to set all this up. The Sardana name comes
from a Catalan dance, honoring the region where the ALBA_ synchrotron is built.
The toolkit to build Sardana has been C++, Python_, Qt_ and Tango_. If you like
the tools you will love Sardana.
What do we "sell" to our users
==============================
Let’s start our excursion into the Sardana world with a word of caution. We will
talk a lot about technical possibilities and implementation details. Our users
will judge us on the ease of use of the final GUI, its robustness and the
features it offers. There are millions of ways to arrive at this end result.
Our claim is however that by doing it the *Sardana way* and developing the
application out of *lego* components in a collaborative environment we will
arrive at higher quality software with much higher efficiency.
The following screen shot of an early prototype of a specific beamline
application should serve as a reminder of this final goal.
.. image:: /_static/snapshot01.png
:align: center
:width: 500
Inside this application we have many features common to other beamline control
applications or to some accelerator applications. The following screen shot
shows such a standard application which has been done without programming -
just by configuring the application. This illustrates one of the design
guidelines in Sardana: Always provide a generic interface which can be
specialized for an application.
.. image:: /_static/snapshot02.png
:align: center
:width: 500
Starting a procedure
====================
At the heart of the Sardana system are standard reusable procedures. From past
experiences, the importance of standard procedures has been realized and has
influenced most of the major design decisions. To illustrate this point, please
let me walk you through the different ways of starting such a procedure, without
going into too many details. You might want to think of a *scan* as an example.
One way of starting a procedure is with a command line interface. Users
familiar with SPEC_ will immediately recognize this way. In effect, inside
Sardana most of the standard SPEC_ commands (including many diffractometer
geometries thanks to Frédéric Picca from the SOLEIL_ synchrotron) are provided
as standard procedures and can be invoked in the same way.
.. image:: /_static/snapshot03.png
:align: center
:width: 500
Every procedure can also be started from a GUI. This does not need any
programming or configuration from the user of the system. When a new procedure
is created, it is automatically visible inside the GUI and command line tools.
.. image:: /_static/snapshot04.png
:align: center
:width: 500
This GUI interface will mainly be used for procedures which are rarely used and
where a specialized interface has not yet been developed. An example of how to
use the same procedure in order to carry out energy spread and emittance
measurements is presented in the following picture.
.. image:: /_static/snapshot05.png
:align: center
:width: 500
The standard Qt_ designer can be used to create new graphical elements (widgets)
and connect them to the system for even greater flexibility. The following
screen shot shows the standard qt designer with some fancy widgets developed in
house.
.. image:: /_static/snapshot06.png
:align: center
:width: 500
Taurus as a toolkit for applications
====================================
The GUI toolkit for Sardana is called Taurus_. The graphical user interfaces in
this paper have been created with this toolkit. It can be used in conjunction
or independent from the rest of the system. It can be used to create custom
panels inside the generic GUI or to create stand alone applications. Again,
this approach of *take what you need* has been implemented to foster the widest
range of collaborations. Almost all applications in the ALBA_ machine control
system have been created with this toolkit. Creating the applications out of
standard components has been proven to be extremely powerful. In the
:ref:`sardana-screenshots-guis` chapter you can see some of the graphical user
interfaces used.
How to write your own procedure
===============================
Another example I would like to look into is how to write your own procedure.
The simplest possible way is to use an editor to assemble commands and execute
them. This batch-file type of procedure is useful for automatically running
procedures over night and for similar simple applications. The following screen
shots show the procedure executer with this feature enabled.
.. image:: /_static/snapshot08.png
:align: center
:width: 500
To go further I would like to explain some internal details. All procedures are
executed in a central place (called the macro server). There can be more than
one macro server per system but for the following I assume the common case of a
unique macro server. This macro server holds all the general procedures
centrally. It provides a controlled environment for these procedures. They can
be edited, run and debugged under its supervision. This allows, for example,
automatically rolling back changes made in case of problems, logging access and granting
permissions. The procedures executed in the macro server provided by the
current Sardana system are Python_ functions or classes. Writing a procedure as
a function is more straightforward and recommended for beginners. Writing it
as a class is a way to group the different methods which concern this
procedure. As an example, in some procedures it could be possible to do very
specific things in case the user orders an emergency abort of the procedure.
The following example shows the procedure to move a motor.
::
from sardana.macroserver.macro import macro, Type
@macro([ ["moveable", Type.Moveable, None, "moveable to move"],
["position", Type.Float, None, "absolute position"] ])
def move(self, moveable, position):
"""This macro moves a moveable to the specified position"""
moveable.move(position)
self.output("%s is now at %s", moveable.getName(), moveable.getPosition())
As you can see in the example, the procedure must be documented and the input
parameters described. From this information, the graphical user interface is
constructed. It is also possible now to start the procedure from the command
line interface and use the tab key to automatically complete the input. The
action itself is carried out in the macro body. The motor movement is
started and the procedure waits until it arrives at its destination. The Python_
classes should stay small and very simple. All complicated code can be put into
modules and tested separately from the system.
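Once the macro is loaded, running it from the command line interface could look
like this (an illustrative session with a hypothetical motor name):

::

    Door_lab_1 [1]: move mot01 10.0
    mot01 is now at 10.0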
How to adapt it to your own hardware
====================================
As the system has been conceived from the beginning to be used at different
institutes, no assumptions about the hardware used could be made. There exists
therefore a mechanism to adapt the Sardana system to your own hardware. This
adaptor also has another very important role to play. This is best explained
with the motor as an example. We consider more or less everything which can be
changed in the system as a motor. A better term to describe this concept would
therefore have been *movable*. A motor can be a
temperature of a temperature controller which can be changed, a motor from an
insertion device which needs a highly complicated protocol to be moved, or just
about anything. Sometimes we also consider calculated values like H, K, L, the
height of a table, or the gap of a slit to be motors. All these different
*motors* can be scanned with the same generic procedures without having to
worry about which elements they are working on. You can add one of these pseudo
motors by means of configuration. It is easily possible to add new types of
pseudo motors. This has only to be done once and the Sardana system already
provides a large variety of these types.
Please find in the following an example for adding a completely new type in the
case of a *slit*.
The actual information on how to create a motor of type *slit* is kept in the two
methods calc_physical and calc_pseudo, which do the
transformation between the different coordinate systems or, to say it in the
language of Sardana, between the pseudo motors gap and offset and the real
motors left blade and right blade.
Once again the information in the beginning allows the graphical user interface
to be created automatically once it is loaded into the system.
Symbolic Sketch
===============
I would like to end this summary with a symbolic sketch of the different
subsystems in Sardana.
.. image:: /_static/sardana_sketch.png
:align: center
:width: 500
The user will normally not be concerned with these implementation details. It is
presented here to allow appreciating the modularity of the system.
.. _ALBA: http://www.cells.es/
.. _ANKA: http://ankaweb.fzk.de/
.. _ELETTRA: http://www.elettra.trieste.it/
.. _ESRF: http://www.esrf.eu/
.. _FRMII: http://www.frm2.tum.de/en/index.html
.. _HASYLAB: http://hasylab.desy.de/
.. _MAX-lab: http://www.maxlab.lu.se/maxlab/max4/index.html
.. _SOLEIL: http://www.synchrotron-soleil.fr/
.. _Tango: http://www.tango-controls.org/
.. _PyTango: http://packages.python.org/PyTango/
.. _Taurus: http://packages.python.org/taurus/
.. _QTango: http://www.tango-controls.org/download/index_html#qtango3
.. _`PyTango installation steps`: http://packages.python.org/PyTango/start.html#getting-started
.. _Qt: http://qt.nokia.com/products/
.. _PyQt: http://www.riverbankcomputing.co.uk/software/pyqt/
.. _PyQwt: http://pyqwt.sourceforge.net/
.. _Python: http://www.python.org/
.. _IPython: http://ipython.org/
.. _ATK: http://www.tango-controls.org/Documents/gui/atk/tango-application-toolkit
.. _Qub: http://www.blissgarden.org/projects/qub/
.. _numpy: http://numpy.scipy.org/
.. _SPEC: http://www.certif.com/
.. _EPICS: http://www.aps.anl.gov/epics/
|
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/users/overview.rst
| 0.81335 | 0.765111 |
overview.rst
|
pypi
|
.. _sequencer_ui:
=========
Sequencer
=========
*Sequencer* provides a user-friendly interface to compose and execute
sequences of macros. A sequence of macros allows execution
of an ordered set of macros with just one trigger. Sequencer also supports
the concept of :ref:`hooks<sardana-macros-hooks>` (macros attached to
and executed at defined places of other macros).
The widget is divided into 3 main areas:
* actions bar
* sequence editor
* parameters editor
The `sequence editor` allows you to modify sequences in many ways: appending new
macros, changing macro locations and removing macros.
The graphical `parameters editor` (standard/custom) provides a clear way to
set/modify macro execution settings (parameters).
The `actions bar` provides many features. Once a sequence of macros is
being executed, `Sequencer` informs you about the progress with the Door's state
LED and the macros' progress bars. The user has full control over the sequence with the
action buttons: Start, Stop, Pause, Resume.
If desired, sequences can be permanently stored in a file and later on restored from there.
This functionality is provided thanks to the Save and Open sequence action buttons.
The sequence file can use the XML format or the :ref:`spock syntax<sardana-spock-syntax>`.
The latter is basically a text file that lists macros, one per line,
written exactly the way they would be entered into the Spock CLI.
The Sequencer is able to save only the XML format, but it can load both.
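For illustration, a sequence file using the spock syntax could look like this
(the motor name and macro parameters are made up):

::

    ascan mot01 0 10 10 0.2
    mv mot01 5
    ct 0.1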
.. figure:: /_static/macros/sequencer01.png
:align: center
.. _sequencer_stand-alone:
Sequencer as a stand-alone application
--------------------------------------
You may also use *Sequencer* as a stand-alone application. In this case it appears embedded
in a window and some extra functionalities are provided.
You can launch the stand-alone *Sequencer* with the following command::
sequencer [options] [<macro_executor_dev_name> <door_dev_name>]
The model list is optional and is a space-separated list of two device names: macro server and door.
If not provided at the application startup, the device names can later be selected from the Macro Configuration Dialog.
To get the full list of options type::
sequencer -h
Extra functionalities:
- MacroConfigurationDialog
.. todo::
This chapter is not ready yet... Sorry for the inconvenience.
- CustomEditorsPathDialog
.. todo::
This chapter is not ready yet... Sorry for the inconvenience.
.. _editing_sequence:
Editing sequence
----------------
A sequence is represented as a flat list of ordered macros. In this view each macro is represented as a new line with 4 columns:
Macro (macro name), Parameters (comma-separated parameter values), Progress (macro progress bar) and Pause
(pause point before macro execution - not implemented yet). Macros which contain hooks expand with branched macros.
Macro parameter values can be edited from the `parameters editor`; to do so, select a macro in the sequence editor by clicking on it.
The selected macro becomes highlighted, and the `parameters editor` is populated with its current parameter values.
.. figure:: /_static/macros/sequenceeditor01.png
:align: center
- adding a new macro
First select the macro from the macro combo box and, when you are ready to add it to the sequence, press the '+' button.
To add a macro as a hook of another macro, first select its parent macro in the sequence and then press the '+' button.
If no macro was selected as a parent, the macro will be automatically appended at the end of the list.
.. figure:: /_static/macros/sequenceeditor02.png
:align: center
- reorganizing the sequence
Macros which are already part of a sequence can be freely moved around, either in execution order or in hook place (if the new parent macro accepts hooks).
To move a macro, first select it in the sequence by single-clicking on it (it will become highlighted). Then a set of buttons with arrows
becomes enabled. Clicking on them will change the selected macro's position in the sequence (either vertically - execution order - or horizontally -
parent macro / hook macro relationship).
.. figure:: /_static/macros/sequenceeditor03.png
:align: center
- removing a macro
Macros which are already part of a sequence can be freely removed from it. To do so, first select the macro in the sequence by
single-clicking on it (it will become highlighted). Then the '-' button becomes enabled. Clicking on it removes the selected macro.
.. figure:: /_static/macros/sequenceeditor04.png
:align: center
- configuring the hook execution place
If a macro is embedded as a hook in a parent macro, follow these instructions to configure its hook execution place.
First select the macro in the sequence by single-clicking on it (it will become highlighted).
Then open the context menu with the right mouse button, go to the 'Hook places' sub-menu and select the hook places which interest you
(you can select more than one).
.. figure:: /_static/macros/sequenceeditor05_raw.png
:align: center
Editing macro parameters
------------------------
To obtain information about editing macro parameters, please refer to :ref:`Editing macro parameters <editing_macro_parameters>`.
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/users/taurus/sequencer.rst
#-----------------------------------------------------------------------------
# Needed modules
# Standard library
import re
# Third party
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
PythonTracebackLexer)
from pygments.token import Comment, Generic
from sphinx import highlighting
#-----------------------------------------------------------------------------
# Global constants
line_re = re.compile('.*?\n')
#-----------------------------------------------------------------------------
# Code begins - classes and functions
class IPythonConsoleLexer(Lexer):
"""
For IPython console output or doctests, such as:
.. sourcecode:: ipython
In [1]: a = 'foo'
In [2]: a
Out[2]: 'foo'
In [3]: print(a)
foo
In [4]: 1 / 0
Notes:
- Tracebacks are not currently supported.
- It assumes the default IPython prompts, not customized ones.
"""
name = 'IPython console session'
aliases = ['ipython']
mimetypes = ['text/x-ipython-console']
input_prompt = re.compile("(In \[(?P<N>[0-9]+)\]: )|( \.\.\.+:)")
output_prompt = re.compile("(Out\[(?P<N>[0-9]+)\]: )|( \.\.\.+:)")
continue_prompt = re.compile(" \.\.\.+:")
tb_start = re.compile("\-+")
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
input_prompt = self.input_prompt.match(line)
continue_prompt = self.continue_prompt.match(line.rstrip())
output_prompt = self.output_prompt.match(line)
if line.startswith("#"):
insertions.append((len(curcode),
[(0, Comment, line)]))
elif input_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, input_prompt.group())]))
curcode += line[input_prompt.end():]
elif continue_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, continue_prompt.group())]))
curcode += line[continue_prompt.end():]
elif output_prompt is not None:
# Use the 'error' token for output. We should probably make
                # our own token, but error is typically in a bright color like
# red, so it works fine for our output prompts.
insertions.append((len(curcode),
[(0, Generic.Error, output_prompt.group())]))
curcode += line[output_prompt.end():]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
def setup(app):
"""Setup as a sphinx extension."""
    # This is only a lexer, so registering it directly with pygments (below)
    # appears sufficient. But if somebody knows what the right API usage to do
    # that via sphinx should be, by all means fix it here. At least having this
    # setup() function suppresses the sphinx warning we'd get without it.
pass
#-----------------------------------------------------------------------------
# Register the extension as a valid pygments lexer
highlighting.lexers['ipython'] = IPythonConsoleLexer()
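# --- Usage sketch (not part of the original module) --------------------------
# A hedged example of how a lexer-only extension like this one is typically
# enabled from a Sphinx ``conf.py``. The 'sphinxext' directory name matches
# this file's location in the source tree; the exact path handling below is an
# assumption for illustration, not the actual Sardana build configuration:
#
#     import os
#     import sys
#     sys.path.insert(0, os.path.abspath('sphinxext'))  # make the module importable
#     extensions = ['ipython_console_highlighting']     # imported at build time
#
# Once the module is imported, the 'ipython' alias registered above is known
# to pygments, so ``.. sourcecode:: ipython`` blocks are highlighted.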
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/sphinxext/ipython_console_highlighting.py
Title: Continuous Scan Implementation
SEP: 6
State: ACCEPTED
Date: 2013-07-29
Drivers: Zbigniew Reszela <[email protected]>
URL: http://www.sardana-controls.org/sep/?SEP6.md
License: http://www.jclark.com/xml/copying.txt
Abstract:
Generic Scan Framework requires extension for a new type of scans: continuous scans.
Various types of synchronization between moveable and acquisition elements exist:
software (so called best effort) and hardware (either position or time driven).
The challenge of this SEP is to achieve the maximum generalization and transparency
level between all types of continuous scans (and probably step scans as well). This
design and implementation will require enhancement of the already existing elements of
Sardana: controllers, moveables, experimental channels and measurement group and
probably definition of new elements e.g. triggers. A proof of concept work was already
done at ALBA, and it could be used as a base for the further design and development.
Current situation
=================
In the present situation, step scans can be executed using the software synchronization mode. The software-synchronized continuous scans are already implemented in Sardana as well and are described [here](http://www.sardana-controls.org/en/latest/users/scan.html#continuous-scans). As a proof-of-concept, very limited scan macros that abstract some specifics of the totally hardware-synchronized systems were added to Sardana in 2013. They require generalization and this SEP aims to provide it.
Specification
==========
Contents
-------------
- Transparency
- Generic Scan Framework
- Measurement Group
- Trigger/Gate
- Experimental Channel
- Data collection, merging and storage
- Data transfer
- Motion
Transparency
-------------------
Sardana scans can be executed in the following modes: step, continuous and hybrid. The main difference between them is how the motion and the acquisition phases are executed. The step scans execute motion and acquisition sequentially, accelerating and decelerating the moveables in between each scan point. The acquisition phase is executed while the moveables are stopped. The continuous scans execute all the acquisitions simultaneously with the motion of the moveables from the scan start to the end positions. Acquisitions are synchronized to be executed on the scan intervals commanded by the user. In the hybrid scans the motion to each scan point is accompanied by the acquisition from the previous scan point, so the acquisition is also executed while the moveables are moving; however, they stop in between each scan point.
A step scan can be executed with one of the standard macros, e.g. ascan, d2scan, mesh, while the continuous scans developed in this SEP can be executed with their equivalents terminated with the *ct* suffix. Basic parameters of the continuous scans are the same as in the step scans, and continuous scans add an extra optional parameter latency_time. Also the scan records comprise the same fields as in the step scan. The only difference in the continuous scan records is that the motor positions and the delta times are filled with the nominal values instead of the real ones. The measurement group configuration should allow scanning in step and continuous modes without the need to reconfigure any parameters, provided the hardware allows that. For reference, a different type of continuous scan, terminated with the *c* suffix, is also present in Sardana.
Example of a continuous scan - the absolute equidistant continuous scan:
ascanct <motor> <start_pos> <final_pos> <nr_interv> <integ_time> <latency_time>
Generic Scan Framework (GSF)
------------------------------------
GSF and its components have basically 3 roles:
* It uses the input parameters of the scan together with the already present configuration of the elements involved in the scan and transforms them into more specific parameters. These parameters are set on the involved elements while preparing the scan: e.g. move motors to the correct positions, set the synchronization specification on the Measurement Group
* It controls the scan flow, e.g. starts the involved elements in the correct sequence
* It receives the data chunks, composes the scan records and performs the interpolation if necessary, finally passing all the data to the recorders.
User inputs:
* moveable(s)
* start position(s)
* end position(s)
* number of intervals
* integration time
* latency time (optional) - It may be especially useful for acquisitions involving the software synchronized channels. The software overhead may cause acquisitions to be skipped if the previous acquisition is still in progress while the new synchronization event arrives. The *latency time* parameter, by default, has zero value.
Other inputs:
* acceleration times(s), deceleration time(s) and base rate(s) obtained from the Motor(s)
* latency time obtained from the MeasurementGroup
Calculation outputs:
* pre-start and post-end position(s)
* common acceleration and deceleration time
* velocity(ies)
* master (reference) moveable
* synchronization specification
Formulas:
* pre-start, post-end positions, acceleration time, deceleration time and velocity - see Motion chapter
* Synchronization structure (this example describes an equidistant scan of a single physical motor):
* The maximum of the user input latency time and the active measurement group latency time is used.
Parameter | Time | Position
---------- | ---------- | ------
Offset | acceleration time | velocity * acceleration time / 2
Initial | None | start position
Active | integration time | integration time * velocity
Total | integration time + latency time | (end - start) / number of intervals
Repeats = number of intervals + 1
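As an illustration of the formulas and the table above, a minimal Python sketch computing the synchronization entries for an equidistant scan of a single physical motor is given below (the function and key names are illustrative only, not the actual GSF code):
~~~~
def equidistant_synchronization(start, end, nr_interv, integ_time,
                                user_latency, mg_latency,
                                velocity, accel_time):
    """Illustrative computation of the synchronization entries above."""
    latency = max(user_latency, mg_latency)   # maximum of the two latency times
    return {
        'offset':  {'time': accel_time,
                    'position': velocity * accel_time / 2},
        'initial': {'time': None, 'position': start},
        'active':  {'time': integ_time,
                    'position': integ_time * velocity},
        'total':   {'time': integ_time + latency,
                    'position': (end - start) / nr_interv},
        'repeats': nr_interv + 1,
    }
~~~~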
Measurement Group (MG)
-----------------------------
MeasurementGroup is a group element. It aggregates other elements of types ExpChannel (these are CounterTimer, 0D, 1D and 2D ExperimentalChannels) and TriggerGate. The MeasurementGroup's role is to execute acquisitions using the aggregated elements.
The acquisition is synchronized by the TriggerGate elements or by the software synchronizer. The hardware synchronized ExpChannel controllers are configured with the number of acquisitions to be executed, while the software synchronized ones do not know the number of acquisitions a priori.
Latency time is a new attribute of the measurement group and it introduces an extra dead time in between two consecutive acquisitions. Its value corresponds to the maximum latency time of all the ExpChannel controllers present in the measurement group. It may be especially useful for acquisitions involving software synchronized channels. The software overhead may cause acquisitions to be skipped if the previous acquisition is still in progress while the new synchronization event arrives. The *latency time* parameter, by default, has zero value.
On the MeasurementGroup creation, the software synchronizer is assigned by default to the newly added ExperimentalChannel controllers, so the MeasurementGroup could start measuring without any additional configurations.
The configuration parameters:
- **Configuration** (type: dictionary) - stores static configuration e.g. which synchronizer is associated to which ExperimentalChannel controller and which synchronization type is used (Trigger or Gate)
- **Synchronization** (type: list of dictionaries) - expresses the measurement synchronization; it is composed of groups of equidistant acquisitions described by the initial point and delay, the total and active intervals and the number of repetitions; this information can be expressed in different synchronization domains if necessary (time and/or position)
- **LatencyTime** (type: float, unit: seconds, access: read only): latency time between two consecutive acquisitions
- **Moveable** (type: string) - full name of the master moveable element
Format of the configuration parameter:
~~~~
dict <str, obj> with (at least) keys:
    - 'timer' : the MG master timer channel name / id
    - 'monitor' : the MG master monitor channel name / id
    - 'controllers' : dict <Controller, dict> with one entry per controller:
        - ctrl_object : dict <str, dict> with (at least) keys:
            - 'timer' : the timer channel name / id
            - 'monitor' : the monitor channel name / id
            - 'synchronization' : 'Trigger' / 'Gate'
            - 'synchronizer' : the TriggerGate name / id or 'software' to indicate the software synchronizer
            - 'channels' where value is a dict <str, obj> with (at least) keys:
                - ...
~~~~
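For illustration only, a hedged Python rendering of such a configuration (the controller and channel names are made up; the real dictionary is built and maintained by the MeasurementGroup itself):
~~~~
configuration = {
    'timer': 'ct01',                       # MG master timer
    'monitor': 'ct01',                     # MG master monitor
    'controllers': {
        'ctctrl01': {                      # one entry per ExpChannel controller
            'timer': 'ct01',
            'monitor': 'ct01',
            'synchronization': 'Trigger',  # or 'Gate'
            'synchronizer': 'software',    # or a TriggerGate name / id
            'channels': {
                'ct01': {},                # per-channel configuration (elided)
                'ct02': {},
            },
        },
    },
}
~~~~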
The configuration parameter changed during the SEP6 developments. First of all the [feature request #372](https://sourceforge.net/p/sardana/tickets/372/) was developed and the units level disappeared from the configuration. Furthermore the controller parameter _trigger_type_ was renamed to _synchronization_. In both cases one-way backwards compatibility was maintained. That means that the measurement groups created with the previous versions of Sardana are functional. Once their configuration gets saved again (either after modification or simply by re-applying it), the configuration is no longer backwards compatible. **IMPORTANT: when deploying SEP6, consider backing up the measurement group configurations in case you need to roll back.**
Format of the synchronization parameter:
~~~~
list of dicts <SynchParam, obj> with the following keys:
- SynchParam.Delay : dict <SynchDomain, float> initial delay (relative to start) expressed in Time and Position* domain
- SynchParam.Initial : dict <SynchDomain, float> initial point (absolute) expressed in Time and Position* domain
- SynchParam.Active : dict <SynchDomain, float> active interval (with sign indicating direction) expressed in Time (and Position domain) or Monitor domain
- SynchParam.Total : dict <SynchDomain, float> total interval (with sign indicating direction) expressed in Time and Position* domain
- SynchParam.Repeats: <long> - how many times the group has to be repeated
* Position domain is optional - lack of it implicitly forces synchronization in the Time domain e.g. timescan
~~~~
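A hedged example of such a synchronization description, using the SynchParam/SynchDomain enumerations (the import path shown is an assumption based on the Sardana code base of this period; all values are made up):
~~~~
from sardana.pool.pooldefs import SynchParam, SynchDomain

# One group of equidistant acquisitions: 0.1 s active time, 0.2 s per point,
# repeated 10 times; Position entries are given only where meaningful.
synchronization = [
    {SynchParam.Delay:   {SynchDomain.Time: 0.0},
     SynchParam.Initial: {SynchDomain.Position: 0.0},
     SynchParam.Active:  {SynchDomain.Time: 0.1, SynchDomain.Position: 0.05},
     SynchParam.Total:   {SynchDomain.Time: 0.2, SynchDomain.Position: 0.1},
     SynchParam.Repeats: 10},
]
~~~~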
Trigger/Gate
----------------
TriggerGate is a new Sardana element type and it represents devices with trigger and/or gate generation capabilities. Their main role is to synchronize acquisition of the ExpChannel. Trigger or gate characteristics could be described in either time and/or position configuration domains. In the time domain, elements are configured in time units (seconds) and generation of the synchronization signals is based on passing time. The concept of position domain is based on the relation between the TriggerGate and the Moveable element. In the position domain, elements are configured in distance units of the Moveable element configured as the feedback source (this could be mm, mrad, degrees, etc.). In this case generation of the synchronization signals is based on receiving updates from the source.
Each ExpChannel controller can have one TriggerGate element associated to it. Its role is to control at which moment each single measurement has to start in case of trigger or start and stop in case of gate.
The allowed states for TriggerGate element are:
- On - the element is ready to generate synchronization signals
- Moving - the element is currently generating synchronization signals
- Fault - the device is faulty
**Tango interface**
Each TriggerGate element is represented in the Tango world by one device of TriggerGate Tango class. They implement State and Status Tango attributes.
**TriggerGate controller (plug-in)**
A custom TriggerGate controller must inherit from the TriggerGateController class. The dynamic configuration is accessed via the Synch methods. This configuration has the same format as the MeasurementGroup Synchronization parameter.
TriggerGateController API (**bold** are mandatory):
- AddDevice
- DeleteDevice
- PreStateAll
- PreStateOne
- StateAll
- **StateOne**
- PreStartOne
- PreStartAll
- **StartOne**
- StartAll
- StopOne
- StopAll
- **AbortOne**
- AbortAll
- PreSynchOne
- PreSynchAll
- **SynchOne**
- SynchAll
- SetAxisPar and GetAxisPar
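A minimal, hedged sketch of a custom controller implementing only the mandatory methods of the API listed above (the import paths and constructor signature follow the Sardana plug-in API; the internal bookkeeping is purely illustrative):
~~~~
from sardana import State
from sardana.pool.controller import TriggerGateController


class MyTriggerGateController(TriggerGateController):
    """Example only: it does not drive any real hardware."""

    def __init__(self, inst, props, *args, **kwargs):
        TriggerGateController.__init__(self, inst, props, *args, **kwargs)
        self._active = {}   # axis -> currently generating?
        self._synch = {}    # axis -> synchronization description

    def SynchOne(self, axis, synchronization):
        # Same format as the MeasurementGroup Synchronization parameter; a real
        # controller would program the hardware here, preferring the position
        # domain for triggers when the hardware supports it.
        self._synch[axis] = synchronization

    def StartOne(self, axis):
        self._active[axis] = True   # hardware would start generating here

    def StateOne(self, axis):
        state = State.Moving if self._active.get(axis) else State.On
        return state, 'OK'

    def AbortOne(self, axis):
        self._active[axis] = False
~~~~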
In case the synchronization description contains information in both domains (position and time), the Synch methods should configure the trigger on position and fall back to time only if position is not supported by the hardware. Similarly, the gate duration should be configured on time and only on position if time is not supported by the hardware. These are only recommendations to controller developers. In some special cases it may be necessary to ignore them; in such a case extra axis attributes could be defined in the controller to control the domain selection.
Sardana provides one TriggerGate controller, DummyTriggerGateController, which does not synchronize acquisition and just provides dummy behavior. It imitates the behavior of hardware with trigger and/or gate signal generation capabilities and emulates its state machine: it changes state from On to Moving on start, and from Moving to On based on the configuration parameters or when stopped.
The software synchronizer resides in the core of Sardana and generates software events of type *active* and *passive*. The acquisition action listens to these events and starts, or starts and stops, the acquisition process when they arrive.
In case the MeasurementGroup Synchronization contains position domain characteristics the software synchronizer is added as a listener to the moveable's position attribute. Then the generation of the synchronization events is based on these updates.
**Pool Synchronization action**
PoolSynchronization is the Pool's action in charge of the control of the TriggerGate elements during the generation, which usually takes place in the MeasurementGroup acquisition process.
Its **start_action** method executes the following:
- load dynamic configuration to the hardware by calling Synch methods
- for each controller implied in the generation call PreSynchAll
- for each axis implied in the generation call PreSynchOne and SynchOne
- for each controller implied in the generation call SynchAll
- in case the software synchronizer is in use, add the acquisition action as the listener of the software synchronizer
- in case the position domain synchronization is in use it adds the software synchronizer as the listener of the moveable's position updates
- start the involved axes
- for each controller implied in the generation call PreStartAll
- for each axis implied in the generation call PreStartOne and StartOne
- for each controller implied in the generation call StartAll
- for each TriggerGate element implied in the generation set state to Moving
Its **action_loop** method executes the following:
- while there is at least one axis in Moving state:
- for each controller implied in the generation call PreStateAll
- for each axis implied in the generation call PreStateOne
- for each controller implied in the generation call StateAll
- for each axis implied in the generation call StateOne
- wait some time
- for each TriggerGate element implied in the generation set state to On
The action_loop waits some time between interrogating controllers for their states. The wait time by default is 0.01 [s].
ExpChannel
-----------------------------
When a hardware TriggerGate is associated to an ExpChannel controller, the latter must know how many acquisitions have to be performed. In case of synchronization with triggering signals the controller must also know the integration time. Both of these parameters are configured at the controller level and not at the channel level. During the acquisition process hardware synchronized channels may not report data until the end of the acquisition, or may report data in blocks.
**ExpChannel controller (plug-in)**
Configuration parameters implemented as controller parameters: SetCtrlPar and GetCtrlPar:
- **synchronization** (type: enumeration, options: SoftwareTrigger|SoftwareGate|HardwareTrigger|HardwareGate) - how acquisition will be started or started and stopped
- **timer** (type: integer) - corresponds to the axis number of the timer assigned to this controller *
- **monitor** (type: integer) - corresponds to the axis number of the monitor assigned to this controller *
- **acquisition_mode** (type: enumeration, options: Timer|Monitor) - corresponds to the selected acquisition mode *
Configuration parameters passed in the LoadOne method:
- **value** (type: float, unit: seconds or counts) - integration time or monitor counts of each single acquisition
- **repetitions** (type: long) - number of single acquisitions executed after the start (it is always 1 for software synchronized acquisition)
Defines static characteristics of the hardware, implemented as controller parameters: GetCtrlPar
- **latency_time** (type: float, unit: seconds) - time required to prepare the hardware for the next hardware trigger or gate
The *Read* methods usually implement the data retrieval from the device and return the acquired data. The same method is foreseen for software and hardware synchronized acquisitions, both by trigger and gate. In case access to the data in a device differs depending on the synchronization mode, the *Read* methods would need to implement different cases based on the configured synchronization.
ReadOne method may return data in blocks corresponding to multiple acquisitions or just single values. The following return values are allowed:
* float
* sequence of float e.g. list of floats
* SardanaValue
* sequence of SardanaValue e.g. list of SardanaValues
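To make the LoadOne/ReadOne contract more concrete, a hedged sketch of a hardware-triggered counter/timer controller is shown below (the method signatures follow the parameters described in this SEP; the buffer-reading helper is hypothetical):
~~~~
from sardana.pool.controller import CounterTimerController


class MyHwTriggeredCTController(CounterTimerController):
    """Example only: pretends the hardware buffers one value per trigger."""

    def LoadOne(self, axis, value, repetitions):
        # 'value' is the integration time (or monitor counts), 'repetitions'
        # the number of acquisitions started by a single start.
        self._integ_time = value
        self._repetitions = repetitions

    def ReadOne(self, axis):
        # Hardware synchronized channels may return data in blocks: a list of
        # floats, one per acquisition finished since the previous read.
        return self._read_buffer_from_hardware(axis)   # hypothetical helper
~~~~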
**Acquisition actions**
Several sub-acquisition actions may participate in the global acquisition, depending on the involved experimental channels and their synchronization mode. These include:
* HardwareSynchronizedAcquisition
* SoftwareSynchronizedAcquisition
* 0DAcquisition
**HardwareSynchronizedAcquisition** acts on the ExpChannel synchronized by the hardware TriggerGate controller.
Their synchronization mode, whether trigger or gate, does not affect the flow of the action.
Prior to the action execution the following parameters are loaded to the involved controllers:
- synchronization
- timer
- monitor
- acquisition_mode
Its **start_action** method executes the following:
- load the involved timer/monitors with integration time/monitor counts and repetitions
- for each controller implied in the acquisition call PreLoadAll
- for the timer/monitor axis implied in the acquisition call PreLoadOne and LoadOne
- for each controller implied in the acquisition call LoadAll
- start the involved axes
- for each controller implied in the acquisition call PreStartAll
- for each axis implied in the acquisition call PreStartOne and StartOne
- for each controller implied in the acquisition call StartAll
- for each ExpChannel implied in the acquisition set state to Moving
Its **action_loop** method executes the following:
- while there is at least one axis in Moving state:
- for each controller implied in the acquisition call PreStateAll
- for each axis implied in the acquisition call PreStateOne
- for each controller implied in the acquisition call StateAll
- for each axis implied in the acquisition call StateOne
- wait some time
- every certain number of iterations read new data:
- for each controller implied in the acquisition call PreReadAll
- for each axis implied in the acquisition call PreReadOne
- for each controller implied in the acquisition call ReadAll
- for each axis implied in the acquisition call ReadOne
- for each controller implied in the acquisition call PreReadAll
- for each axis implied in the acquisition call PreReadOne
- for each controller implied in the acquisition call ReadAll
- for each axis implied in the acquisition call ReadOne
- for each ExpChannel implied in the acquisition set state to On
The action_loop waits some time between interrogating controllers for their states. The wait time by default is 0.01 [s] and is configurable with the AcqLoop_SleepTime property (unit: milliseconds) of the Pool Tango Device.
The action_loop reads new data every certain number of state readout iterations. This number is by default 10 and is configurable with the AcqLoop_StatesPerValue property of the PoolTangoDevice.
**SoftwareSynchronizedAcquisition** acts on the ExpChannels synchronized by the software synchronizer. This action is launched on the active event coming from the software synchronizer, and lasts until all the ExpChannels terminate their acquisitions.
This action assigns index to the acquired data (returned by the ReadOne). The index originates from the events generated by the software synchronizer.
Prior to the action execution the following parameters are loaded to the involved controllers:
- synchronization
- timer
- monitor
- acquisition_mode
Its **start_action** method executes the following:
- load the involved timer/monitors with integration time/monitor counts and repetitions
- for each controller implied in the acquisition call PreLoadAll
- for the timer/monitor axis implied in the acquisition call PreLoadOne and LoadOne
- for each controller implied in the acquisition call LoadAll
- start the involved axes
- for each controller implied in the acquisition call PreStartAll
- for each axis implied in the acquisition call PreStartOne and StartOne
- for each controller implied in the acquisition call StartAll
- for each ExpChannel implied in the acquisition set state to Moving
Its **action_loop** method executes the following:
- while there is at least one axis in Moving state:
- for each controller implied in the acquisition call PreStateAll
- for each axis implied in the acquisition call PreStateOne
- for each controller implied in the acquisition call StateAll
- for each axis implied in the acquisition call StateOne
- wait some time
- read data
- for each controller implied in the acquisition call PreReadAll
- for each axis implied in the acquisition call PreReadOne
- for each controller implied in the acquisition call ReadAll
- for each axis implied in the acquisition call ReadOne
- for each ExpChannel implied in the acquisition set state to On
The action_loop waits some time between interrogating controllers for their states. The wait time by default is 0.01 [s] and is configurable with the AcqLoop_SleepTime property (unit: milliseconds) of the Pool Tango Device.
The action_loop reads new data every certain number of state readout iterations. This number is by default 10 and is configurable with the AcqLoop_StatesPerValue property of the PoolTangoDevice.
**0DAcquisition** was not changed in this SEP, it is slave to the SoftwareSynchronizedAcquisition.
**IMPORTANT:** SEP6 sacrifices the intermediate events with the CTExpChannel count updates. These could be re-added in the future.
Data merging
-----------------------------------------------
Every value acquired or read during the continuous scan execution is stamped with an absolute time and the acquisition index. The experimental channels synchronized by hardware (gate or trigger) provide the core part of each scan record.
The software synchronized channels do not guarantee to provide data for each scan record. The RecordList class, part of the GSF, applies the [zero order hold](http://en.wikipedia.org/wiki/Zero-order_hold) ("constant interpolation") method to fill the missing parts of the records. Different interpolation methods could be made available to the user and executed as post-scan processing; however, their implementation is out of the scope of this SEP.
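A hedged sketch of the zero order hold idea applied to scan records (purely illustrative, not the RecordList implementation):
~~~~
def zero_order_hold(records, channel):
    """Fill missing values of 'channel' with the last value seen (sketch)."""
    last = None
    for record in records:              # records ordered by acquisition index
        if record.get(channel) is None:
            record[channel] = last      # repeat the previous reading
        else:
            last = record[channel]
    return records
~~~~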
Data transfer
-------------
The data collected by the MG needs to be transferred back to the GSF for proper organization of scan records, using indexes, by the RecordList, and for storage by the Data Handler and its servants - the recorders. Data are passed with the Tango change events of the Data attribute of the Experimental Channels.
Motion
------
This SEP will deal only with the linear motion. Any combination of Sardana motors and pseudomotors could be used as a scan moveable. The following attributes: acceleration time, velocity and deceleration time are configured, so all the motors reach and leave the constant velocity region at the same time.
**pre-start position** - is calculated for each motor separately:
- start position - (velocity * acceleration time) / 2 (scanning in positive direction)
- start position + (velocity * acceleration time) / 2 (scanning in negative direction)
**acceleration time** - is common to all the motors and is determined by the slowest accelerating motor involved in the scan.
**velocity** - is calculated for each motor separately from the following parameters: the scan range = abs(end position - start position) and the scan time. The scan time is equal to number of intervals * (integration time + latency time).
**deceleration time** - is common to all the motors and is determined by the slowest decelerating motor involved in the scan.
**post-end position** - is calculated for each motor separately:
- end position + (velocity * integration time) + (velocity * deceleration time) / 2 (scanning in positive direction)
- end position - (velocity * integration time) - (velocity * deceleration time) / 2 (scanning in negative direction)
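The formulas above can be summarised in a small hedged sketch (names are illustrative; only a single motor is considered):
~~~~
def motion_parameters(start, end, nr_interv, integ_time, latency_time,
                      accel_time, decel_time):
    """Compute velocity and overshoot positions for one motor (sketch)."""
    scan_time = nr_interv * (integ_time + latency_time)
    velocity = abs(end - start) / scan_time
    direction = 1 if end >= start else -1
    pre_start = start - direction * velocity * accel_time / 2
    post_end = end + direction * (velocity * integ_time
                                  + velocity * decel_time / 2)
    return velocity, pre_start, post_end
~~~~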
Some scans require execution of multiple sub-scans e.g. mesh. In this case a sequence of sub-scans will be executed in a loop, substituting the "Move to end position" action with a "Move to pre-start position" (of the next sub-scan).
More about the motion control could be found in [1](http://accelconf.web.cern.ch/AccelConf/ICALEPCS2013/papers/wecoaab03.pdf).
Out of scope of SEP6
=================
* support software Gate synchronization
* support of different trigger types: pre-, mid- or post-triggers
* ascanct does not support
* pseudocounters
* 0D ExpChannel
* 1D ExpChannel
* 2D ExpChannel
* external channels (Tango attributes)
* merge ascanc and ascanct into one macro
* make the overshoot correction optional
* make interpolated data easily distinguishable from the real data
References
=========
1. [WECOAAB03, "Synchronization of Motion and Detectors and Continuous Scans as the Standard Data Acquisition Technique", D.F.C. Fernández-Carreiras et al.](http://accelconf.web.cern.ch/AccelConf/ICALEPCS2013/papers/wecoaab03.pdf)
Changes
=======
- 2016-11-30 [mrosanes](https://github.com/sagiss) Migrate SEP6 from SF wiki to independent markdown language file.
- 2017-01-01 [reszelaz](https://github.com/reszelaz) Remove last pending TODOs and fix the scope in order to open for final discussions.
- 2017-04-03 [reszelaz](https://github.com/reszelaz) Accept SEP6 after positive votes from DESY, MAXIV, SOLARIS and ALBA.
- 2017-05-25 [reszelaz](https://github.com/reszelaz) Correction: reflect 0D non-support in ascanct (Out of scope of SEP6 section).
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/sep/SEP6.md
Title: Code contribution workflow
SEP: 7
State: OBSOLETE
Reason:
SEP15 obsoletes SEP7. Most of the contribution procedure is
no longer applicable due to the adoption of a workflow based on Pull Requests.
Date: 2013-12-13
Drivers: Carlos Pascual-Izarra <[email protected]>
URL: http://www.sardana-controls.org/sep/?SEP7.md
License: http://www.jclark.com/xml/copying.txt
Abstract:
Define the procedures for contributing code to sardana. It covers git
repository conventions and organization as well as workflows and tools
for reviewing code contributions.
Introduction
============
This is a proposal to define the mechanisms for contributing code to Sardana. It describes the agreed conventions for using the git repository as well as the workflow(s) and tools used for reviewing code prior to its acceptance into the official sardana repository.
This proposal tries to answer the following questions:
- Which conventions (e.g., naming, organization...) are used in the official git repository?
- How should one submit a proposed contribution?
- Who approves/rejects proposed contributions?
- Which tools/workflows are used for reviewing the proposed contributed code?
Goals and constraints
=====================
The following goals and constraints are taken into consideration for this proposal:
General:
- **Open development**: we want to encourage participation and contribution. We want an open development project (not just open source).
- **Code review**: we want sardana to be robust. Contributed code should be reviewed.
- **Integration manager availability**: currently none of the involved people can dedicate 100% of time to coordination tasks. We need to minimize and share the load of coordination/integration.
- **Autonomy for contributions to 3rd party code**: the sardana project also hosts repositories for specific hardware and 3rd party code (currently, Macros and Controllers, see [SEP1]). More flexible policies should apply to contributions to these repositories, giving just a minimum set of rules and giving more freedom to 3rd parties for self-organization.
Specific/technical:
- **Avoid multiplicity of platforms**: we host the code in 3 git repositories hosted in sourceforge.net (see [SEP1]) and favour the tools already provided by SourceForge to the Sardana project.
- **SF account required**: we assume that all contributors already have a sourceforge.net account
- **Minimise platform lock-down**: it should be possible to move the project to a different platform if needed in the future, without data loss and without forcing big workflow changes.
- **Minimise imposed log-ins**: contributors and reviewers should be able to do most (if possible, all) their daily work without needing to log into SourceForge. Workflows of contribution/code review which integrate a mailing-list interface are preferred.
- **Contributions traceability**: We would like to have a way of tracking the status of contributions (e.g., proposed / changes needed / accepted / rejected).
Which conventions (e.g., naming, organization...) are used in the official git repository?
==========================================================================================
Branching model for the core repository of sardana
--------------------------------------------------
The official repository of sardana (from now on also called "origin") is organised following the [gitflow](http://nvie.com/posts/a-successful-git-branching-model/) branching model, in which there are two main long-running branches (*master* and *develop*) and a number of support finite-life branches (feature, release and hotfix branches).
Please refer to http://nvie.com/posts/a-successful-git-branching-model for a full description of the gitflow. The following are notes to complement the gitflow general information with specific details on the implementation of the gitflow model in Sardana:
- The *master* branch reflects the latest official Sardana release. Only the Integration/Release Managers can push to the *master* branch.
- The *develop* branch reflects the latest development changes that have already been integrated for the next release. Only the Integration Managers can push to the *develop* branch.
- New features, bug fixes, etc. must be developed in *feature* branches. They branch off *develop*. Once they are ready and the code passed the review, the feature branch can be merged into *develop* by an Integration Manager. These branches may exist only in local clones of contributors, or in repositories forked from development or, in certain cases, in the official repository (see below).
- The two other types of supporting branches (release branches and hotfix branches) are managed by the Integration/Release managers for the purpose of preparing official releases.
In the Sardana project, we use a special type of *feature* branches called *sepX* branches: unlike other *feature* branches which typically only exist in the contributor's local repository (or maybe in a public fork of the official repository), the *sepX* feature branches are hosted in the official repository. The *sepX* branch may be created if required during the DRAFT or CANDIDATE phases of the *X*th Sardana Enhancement Proposal, and is merged to *develop* if the SEPX is APPROVED. Only the person(s) designated by the SEPX driver -and approved by the Sardana project Admins- can push to the official *sepX* branch. These designated person(s) are considered **"SepX Integration Lieutenants"**.
**Tip**: You can find a set of practical examples on working with the sardana branching model in the [sardana git recipes](http://sf.net/p/sardana/wiki/git-recipes/)
Branching model for the 3rd party code repositories
---------------------------------------------------
The main differences between the core repository and the 3rd party code repositories are:
- The 3rd party code is not subject to the same release schedule as the core code (e.g. the release schedule of a given macro is up to the person responsible for that macro, and not synchronized with the release of other macros).
- The 3rd party code repositories (e.g. the Controllers and Macros repositories) are open for pushing commits by a larger group of contributors (write permissions are granted liberally to people who request them to the Sardana project Administrators).
- Each file (or whole directory) in the 3rd party repositories must provide contact information of the person who assumes responsibility for it (and this person should have write permission). In absence of explicit information in the file headers, the last person who committed to the file is assumed to be the responsible for it.
- There are no appointed Integration Managers for the 3rd party code. The repository is self-organised, and conflicts are avoided by following conventions and discussing in the mailing lists.
Because of these differences, the branching model is much more simple and flexible for the 3rd party repositories:
- There is only one main branch, *master*, which contains code that is deemed to be production-ready by those responsible for each piece of code.
- Feature branches may be created from (and merged into) *master* for developing new features, but the state of *master* should always be kept "production-ready". The decision on when to merge a given feature branch into *master* should be taken by consensus of those responsible for all the pieces of code affected by the merge. If the discussions for this are not held publicly (i.e., in the mailing list), it is considered a nice courtesy to *at least* inform the sardana-devel mailing list of the decision taken.
How should one submit a proposed contribution?
==============================================
In general, code submissions for inclusion in the sardana repositories should take the following into account:
- It must comply with the [**Sardana coding conventions**](http://www.tango-controls.org/static/sardana/latest/doc/html/devel/guide_coding.html).
- The **contributor must be clearly identified** and provide a valid email address which can be used to contact him/her.
- Commit messages should be [properly formatted](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
- The **licensing terms** for the contributed code must be compatible with (and preferably the same as) the license chosen for the Sardana project (at the time of writing this SEP, it is the [LGPL](http://www.gnu.org/licenses/lgpl.html), version 3 *or later*).
Submitting code for the core repository of sardana
---------------------------------------------------
The discussion and public tracking of contributed code is done on the [sardana-devel mailing list](https://lists.sf.net/lists/listinfo/sardana-devel).
Code contributions should be sent to the sardana-devel mailing list either in the form of patches (formatted with **git format-patch**, as explained [here](http://www.git-scm.com/book/en/Distributed-Git-Contributing-to-a-Project#Public-Large-Project)) or as a pull request (formatted with **git request-pull** as explained [here](http://www.git-scm.com/book/en/Distributed-Git-Contributing-to-a-Project#Public-Small-Project)).
Specific notes for contributing via patches:
- The preferred way of sending the patch formatted with *git format-patch* is using *git send-email*
- Please read http://www.git-scm.com/book/en/Distributed-Git-Contributing-to-a-Project#Public-Large-Project (and use it as a guide)
Specific notes for contributing via pull requests:
- Please read http://www.git-scm.com/book/en/Distributed-Git-Contributing-to-a-Project#Public-Small-Project (and use it as a guide)
- Important: prepend the subject of your email to the mailing list with **`[PULL]`**
- If the changes are not too big, consider using the "-p" option to *git request-pull* (it includes the diff info in the body of the email)
**Tip**: You can find a set of practical examples on how to submit code according to the SEP7 specifications in the [sardana git recipes](http://sf.net/p/sardana/wiki/git-recipes/)
Submitting code for the 3rd party code repositories
---------------------------------------------------
No formal review process takes place for code contributed to the 3rd party repositories. Anyone with writing permission is allowed to create a branch in them and push code to it. But note that, before pushing to the master branch, you should seek permission from the people responsible for any files that are affected by the merge.
We also encourage contributors to use the sardana-devel mailing list for discussing and coordinating changes to the 3rd party repositories and, in any case, to at least send an email to the list when a relevant change is made.
Who approves/rejects proposed contributions?
============================================
The Sardana community elects a group of people to act as "Integration Managers".
For a given contribution to be accepted into the *develop* branch of the official **core** repository, it has to be submitted to the sardana-devel mailing list (as explained before) and approved by at least one **Integration Manager**. If the contributor happens to be an Integration Manager, it is considered good practice to get the approval of *another* Integration Manager before accepting it (although this can be relaxed for trivial contributions).
For a given contribution to be accepted into the *sepX* branch of the official **core** repository, it has to be submitted to the sardana-devel mailing list (as explained before) and approved by at least one **SepX Integration Lieutenant**. If the contributor happens to be a SepX Integration Lieutenant, the previous rule can be relaxed, and direct pushes may be allowed. Note that ultimately, the approval of an **Integration Manager** is required once the *sepX* branch is to be merged into the *develop* branch.
In the case of the **3rd party** repositories, no approval is required (see the section about "Submitting code for the 3rd party code repositories").
Which tools/workflows are used for reviewing the proposed contributed code?
===========================================================================
The code review process for contributions to the official sardana **core** repository is as follows:
1- The contributor submits a contribution to the mailing list (see "How should one submit a proposed contribution?" above).
2- The contribution is publicly reviewed in the mailing list (everyone is encouraged to participate in the review).
3- During this phase, the contributor may be asked for further clarifications and/or corrections to the contributed code (in which case a resubmission may be required).
4- Eventually, an Integration Manager (or a SepX Integration Lieutenant if the contribution is for a *sepX* branch) may either accept the contribution and integrate it into the official repository, or reject it. In both cases, he/she posts a message in the mailing list informing of the decision.
**Tip**: You can find a set of practical examples on how to integrate contributed code according to the SEP7 specifications in the [sardana git recipes](http://sf.net/p/sardana/wiki/git-recipes/)
Naming convention for feature branches
--------------------------------------
The integration of contributed code by an Integration Manager (or Lieutenant) usually involves merging some local branch (let's call it *A*) into the branch that tracks the official repository. Although the *A* branch itself stays local, its name appears in the merge commit message (ending up in the official history). Therefore the following naming convention should be used:
- If the contributed code is related to a bug in the ticket tracker, the branch *A* should be called *bug-N*, where *N* is the ticket number.
- If the contributed code is related to a feature-request in the ticket tracker, the branch *A* should be called *feature-N*, where *N* is the ticket number. Note: in some occasions *feat-N* has mistakenly been used instead of *feature-N* for these branch names. *feature-N* is the recommended convention for branch names.
- In the remaining cases, any descriptive name can be used for branch *A* (preferably lower case and reasonably short) provided that it doesn't use any of the reserved names (i.e. *master*, *develop*, *release-\**, *hotfix-\**, *sepX*, *bug-N*, *feature-N*)
Note that those who contribute code via patches do not need to worry about this convention since their local branch names do not affect the official repository history. Nevertheless, it can be a good practice to follow anyway.
Transition phase (obsolete)
===========================
The development of and contribution to Sardana could not be stopped until the approval of this SEP, so a set of transitional rules reflecting the *de-facto* conventions that were in place before its approval was summarised in SEP7. Now that SEP7 is approved, these conventions **no longer apply** and are kept here only for reference (and to help in understanding the commit history of the project).
Code review (Transitional-obsolete)
-----------------------------------
Before the migration to Git, the sardana and taurus SVN repositories were relatively open to unreviewed commits.
During the Sardana Workshop in Barcelona in June 2013, 3 people (Antonio Milan, Teresa Nunez and Carlos Pascual) were appointed to review new code contributions, but the workflow was not established.
Note: Since Zbigniew Reszela took the responsibility for the Sardana coordination in ALBA, he was added to the Project Administrators group and also to the code reviewing team.
Until this or another related SEP is approved, [using *git format-patch* and *git send-email*](http://git-scm.com/book/en/Distributed-Git-Contributing-to-a-Project#Public-Large-Project) for sending patch proposals to the sardana-devel list is the preferred option.
Repository organization (Transitional-obsolete)
-----------------------------------------------
The repository organization as of august 2013 reflects what was inherited from the previous SVN. Until the approval of this or another SEP which changes it, we shall continue working as follows:
- The development is done by committing to the master branch.
- Pushing to the official repository is limited to those with admin permissions in the Sardana project (other people may submit patches to the sardana-devel list)
- Work on feature branches (i.e. the *sepX* branches) is done on *sepX* branches **on separate forked repositories** (the fork can be done, e.g. with https://sourceforge.net/p/sardana/sardana.git/fork ), and when ready, a "merge-request" or a "request-pull" or a patch series is submitted for inclusion in the official repository. The discussion (and code review, if any) for the work on the feature branches, should be done in the sardana-devel list.
- Provisional permissions for pushing code to 3rd party macros and controllers repositories can be requested to the sardana project admins. Alternatively, working on a forked repository and submitting a patch or request-pull on the sardana-devel is also possible.
Links to more details and discussions
=====================================
The main discussions for this SEP take place in the [sardana-devel mailing list](https://sourceforge.net/p/sardana/mailman/).
This SEP uses concepts and nomenclature from [chapter 5 of Pro-Git book](http://git-scm.com/book/en/Distributed-Git)
License
=======
Copyright (c) 2013 Carlos Pascual-Izarra
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Changes
=======
* 2016-11-22: [mrosanes](https://github.com/sagiss/)
Migrate SEP7 from SF wiki to independent file,
modify URL and change SEP state from ACCEPTED to OBSOLETE according SEP15.
* 2015-01-27: [cpascual](https://sourceforge.net/u/cpascual/)
Added note in the naming convention for feature branches (about preference of feature-N over feat-N for branches associated to feature-request tickets)
* 2013-12-13: [cpascual](https://sourceforge.net/u/cpascual/)
Changed state to ACCEPTED, after introducing a modification to the procedure for accepting contributions to *sepX* branches, [as agreed in the sardana-devel mailing list](https://sourceforge.net/p/sardana/mailman/message/31694852/)
* 2013-11-29: [cpascual](https://sourceforge.net/u/cpascual/)
Preparing for passing to ACCEPTED. Transitional notes removed or moved to appendix and links to practical instructions for the workflow added.
* 2013-11-04: [cpascual](https://sourceforge.net/u/cpascual/)
Promoted from DRAFT to CANDIDATE state
* 2013-08-29: [cpascual](https://sourceforge.net/u/cpascual/)
First *complete* draft written after merging all inputs from the sardana-devel mailing list as well as private discussions.
* 2013-08-05: [cpascual](https://sourceforge.net/u/cpascual/)
Initial version written
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/sep/SEP7.md
Title: Compact Read+Write widgets in Taurus
SEP: 9
State: ACCEPTED
Date: 2014-07-02
Drivers: Carlos Pascual-Izarra <[email protected]>
URL: http://www.sardana-controls.org/sep/?SEP9.md
License: http://www.jclark.com/xml/copying.txt
Abstract:
Providing a common pattern for compact widgets which
both display the read value of an attribute and allow
the user to change the write value
Introduction
============
Taurus currently provides a set of widgets for displaying values of attributes (implemented mostly in the taurus.qt.qtgui.display module) and a separate set of widgets allowing to modify such values (implemented mostly in the taurus.qt.qtgui.input module and which inherit from TaurusBaseWritableWidget abstract class).
In practice, these two sets are often used together to provide control of an attribute, and very often this is done via a TaurusForm which puts a display widget next to an input widget for the same attribute (and also typically uses another widget for a label and another for displaying units).
A recurrent feature request from users is to provide a more compact way of viewing and writing to an attribute. This SEP intends to provide a "canonical" way of doing it.
Goals and Constraints
====================
The following goals and constraints are taken into consideration for this proposal:
1. Existing widgets should be used as much as possible (i.e., combine, not rewrite)
2. The resulting compact widgets should work well stand-alone
3. The resulting compact widgets should be integrable with TaurusForms
4. TaurusForms should offer a choice of showing a "compact" or "extended" (aka "traditional") mode. Possibly even allow for system-wide configuration.
5. The aspect of existing GUIs should be kept (i.e., the "compact" mode should not be imposed as default)
6. All the expected features of both display and input widgets should be implemented in the compact version (e.g., a compact viewer+editor of a scalar attribute should allow displaying the quality of the attribute **and** notifying pending operations from the input widget)
7. If possible, a common abstract class (or limited set of abstract classes) should be implemented to help in creating compact widgets for different types of attributes.
Implementation
==============
A sep9 branch in the official repository has been created to host an implementation proposal:
https://sf.net/p/sardana/sardana.git/ci/sep9/tree/
Proposed implementation 1
-------------------------
The compact widget is implemented by adding a "read" widget and a "write" widget to a QStackedLayout, and allowing the user to switch between the two.
A very basic prototype which already fulfilled goals 1, 2, 3 and 6 was submitted to the sardana-devel mailing list:
https://sf.net/p/sardana/mailman/message/31624240/
This prototype was further developed in the [sep9 branch](https://sourceforge.net/p/sardana/sardana.git/ci/sep9/tree/) which, at its [commit f90ed28](https://sourceforge.net/p/sardana/sardana.git/ci/f90ed285c5ccb0295389426dd1eeef1205c0aea1/) already fulfilled all stated goals of this SEP.
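For orientation only, a heavily simplified sketch of that idea (the Taurus widget names and the switching trigger are assumptions made for the example; the actual sep9 implementation lives in the branch linked above):
~~~~~
from taurus.external.qt import Qt
from taurus.qt.qtgui.display import TaurusLabel
from taurus.qt.qtgui.input import TaurusValueLineEdit


class CompactAttrWidget(Qt.QWidget):
    """Show the read widget by default; switch to the write widget on demand."""

    def __init__(self, model, parent=None):
        Qt.QWidget.__init__(self, parent)
        self._layout = Qt.QStackedLayout(self)
        self._read = TaurusLabel()
        self._write = TaurusValueLineEdit()
        self._read.setModel(model)
        self._write.setModel(model)
        self._layout.addWidget(self._read)     # index 0: display widget
        self._layout.addWidget(self._write)    # index 1: input widget

    def setEditMode(self, edit):
        """Switch between the 'read' (False) and 'write' (True) widgets."""
        self._layout.setCurrentIndex(1 if edit else 0)
~~~~~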
Other considerations
--------------------
- Should we go further and include the units in the "display widget"? (e.g., for a Taurus Label, use a suffix).
- pros:
- much more compact
- cons:
- more complicated to integrate (not all display widgets would be able to implement this, so we should allow combinations of "compactness")
- requires new display widgets
- Widgets inheriting from TaurusValue: Sardana currently provides some widgets which inherit from TaurusValue (PoolMotorTV, PoolIORegisterTV,...) and users may have created their own versions as well. Some of these widgets may not currently be compatible with the compact mode (note that at least the compact mode is not used by default). Since the user may switch to compact mode, we propose that, until the widgets support it, they should just reimplement setCompact() to ignore the compact mode request:
~~~~~
def setCompact(*a):
pass
~~~~~
Links to more details and discussions
=====================================
The main discussions for this SEP take place in the [sardana-devel mailing list](https://sourceforge.net/p/sardana/mailman/). See:
- The [initial SEP9 thread](https://sourceforge.net/p/sardana/mailman/message/31709538/).
License
=======
This document is under the Expat License. The following copyright statement and license apply to this document.
Copyright (c) 2013 Carlos Pascual-Izarra
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Changes
-------
* 2016-11-29:
[mrosanes](https://github.com/sagiss) Migrate SEP9 from SF wiki to an independent file.
* 2014-07-02:
[cpascual](https://sourceforge.net/u/cpascual/) Promoting to ACCEPTED
* 2013-12-09:
[cpascual](https://sourceforge.net/u/cpascual/) Promoting to CANDIDATE
* 2013-12-03:
[cpascual](https://sourceforge.net/u/cpascual/) Initial draft written
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/sep/SEP9.md
Title: Introducing Sardana Enhancement Proposals (SEPs)
SEP: 0
State: OBSOLETE
Reason:
SEP15 obsoletes SEP0. https://sourceforge.net/p/sardana/wiki/SEP is no
longer the index for SEPs, nor is it a wiki. The "Creating a SEP section" of
SEP0 is superseded by the one with the same name in SEP15.
Date: 2016-04-07
Drivers: Carlos Pascual-Izarra <[email protected]>
URL: http://www.sardana-controls.org/sep/?SEP0.md
License: http://www.jclark.com/xml/copying.txt
Abstract:
Workflow for managing discussions about improvements to Sardana
and archiving their outcomes.
Introduction
------------
This is a proposal to organize discussions about Sardana enhancements,
reflect their current status and, in particular, to archive their
outcomes, via a new lightweight process based on Sardana Enhancement
Proposals (SEPs). This idea is a shameless copy of the Debian Enhancement Proposal
system with a few adjustments to the Sardana project reality.
Motivation
----------
The main reason for using SEPs is to provide a central index of such
proposals, making it possible to see at a glance which open fronts exist
in Sardana at a given moment and who is taking care of them.
Additionally, the index serves as a storage place for successfully
completed proposals, documenting the outcome of the discussion and the
details of the implementation.
Workflow
--------
A "Sardana enhancement" can be pretty much any change to Sardana,
technical or otherwise. Examples of situations when the SEP process
might be or might have been used include:
* Introducing a new feature in Sardana (e.g. HKL support)
* Introducing/modifying a policy or workflow for the community
The workflow is very simple, and is intended to be quite lightweight:
an enhancement to Sardana is suggested, discussed, implemented, and
becomes accepted practice (or policy, if applicable), in the normal
Sardana way. As the discussion progresses, the enhancement is assigned
certain states, as explained below. During all the process, a single URL
maintained by the proposers can be used to check the status of the
proposal.
The result of all this is:
1. an implementation of the enhancement and
2. a document that can be referred to later on without having to dig
up and read through large discussions.
The actual discussions should happen in the sardana mailing lists (normally sardana-devel, unless the discussion may benefit from getting input from the wider audience of sardana-users). This way, SEPs do not act as yet another forum to be followed.
In the same way, SEPs do not give any extra powers or authority to
anyone: they rely on reaching consensus,
by engaging in discussions on mailing lists, IRC, or real life meetings
as appropriate. In case of dispute, the ultimate decision lies in the Sardana Executive Committee defined in the Sardana MoU.
The person or people who do the suggestion are the "drivers" of the
proposal and have the responsibility of writing the initial draft, and
of updating it during the discussions, see below.
Proposal states
---------------

A given SEP can be in one of the following *states*:
* DRAFT
* CANDIDATE
* ACCEPTED
* REJECTED
* OBSOLETE
The ideal progression of states is DRAFT -> CANDIDATE -> ACCEPTED, but
reality requires a couple of other states and transitions as well.
### DRAFT state: discussion
* every new proposal starts as a DRAFT
* anyone can propose a draft
* each draft has a number (next free one from document index)
* normal discussion and changes to the text happen in this state
* drafts should include *extra* criteria for success (in addition to
having obtained consensus, see below), that is, requirements to
finally become ACCEPTED
#### DRAFT -> CANDIDATE: rough consensus
In order for a SEP to become CANDIDATE, the following condition should
be met:
* consensus exists for *what* should be done, and *how* it should be
done (agreement needs to be expressed by all affected parties, not
just the drivers; silence is not agreement, but unanimity is not
required, either)
### CANDIDATE: implementation + testing
The CANDIDATE state is meant to prove, via a suitable implementation
and its testing, that a given SEP is feasible.
* of course, implementation can start in earlier states
* changes to the text can happen also in this period, primarily based
on feedback from implementation
* this period must be long enough that there is consensus that the
enhancement works (on the basis of implementation evaluation)
* since SEP are not necessarily technical, "implementation" does not
necessarily mean coding
#### CANDIDATE -> ACCEPTED: working implementation
In order for a SEP to become ACCEPTED, the following condition should
be met:
* consensus exists that the implementation has been a success
### ACCEPTED: have fun
Once accepted:
* the final version of the SEP text is archived on the Sardana wiki
* if applicable, the proposed SEP change is integrated into
authoritative texts such as policy, developer's reference, etc.
#### {DRAFT, CANDIDATE} -> REJECTED
A SEP can become REJECTED in the following cases:
* the drivers are no longer interested in pursuing the SEP and
explicitly acknowledge so
* there are no modifications to a SEP in DRAFT state for 6 months or
more
* there is no consensus either on the draft text or on the fact that
the implementation is working satisfactorily
#### ACCEPTED -> OBSOLETE: no longer relevant
A SEP can become OBSOLETE when it is no longer relevant, for example:
* a new SEP gets accepted overriding previous SEPs (in that case the
new SEP should refer to the one it OBSOLETE-s)
* the object of the enhancement is no longer in use
### {REJECTED, OBSOLETE}
In one of these states, no further actions are needed.
It is recommended that SEPs in one of these states carry a reason
describing why they have moved to such a state.
What the drivers should do
--------------------------
The only additional burden of the SEP process falls on the shoulders of its
drivers. They have to take care of all the practical work of writing
and maintaining the text, so that everyone else can just continue
discussing things as before. Driver's burden can be summarized as:
* Write the draft text and update it during discussion.
* Determine when (rough) consensus in discussion has been reached.
* Implement, or find volunteers to implement.
* Determine when consensus on the implementation's success has been reached,
i.e. when the testing of the available implementation has been satisfactory.
* Update the SEP with progress updates at suitable intervals, until the
SEP has been accepted (or rejected).
If the drivers go missing in action, other people may step in and
courteously take over the driving position.
**Note**: the drivers can of course participate in the discussion as
everybody else, but have no special authority to impose their ideas on
others. "SEP gives pencils, not hammers."
Format and content
------------------
A SEP is basically a free-form plain text file, except that it must
start with a paragraph of the following RFC822-style headers:
* Title: the full title of the document
* SEP: the number for this SEP
* State: the current state of this revision
* Date: the date of this revision
* Drivers: a list of drivers (names and e-mail addresses), in RFC822
syntax for the To: header
* URL: during DRAFT state, a link to the wiki place of the draft
(typically probably https://sourceforge.net/p/sardana/wiki/SEPxxx)
* Abstract: a short paragraph describing the SEP
(Additionally, REJECTED SEPs can carry a "Reason:" field describing
why they were rejected.)
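For illustration, the header paragraph of a hypothetical draft could look like the following (all values below are made up for the example):
~~~~~
Title: Support for some new feature in Sardana
SEP: 99
State: DRAFT
Date: 2014-01-15
Drivers: Jane Doe <[email protected]>
URL: https://sourceforge.net/p/sardana/wiki/SEP99
Abstract:
 A short paragraph describing the proposed enhancement and its scope.
~~~~~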
The rest of the file is free form. Since the SEP is kept in a wiki, using
its markup syntax is, of course, a good idea.
Suggested document contents:
* An introduction, giving an overview of the situation and the motivation
for the SEP.
* A plan for implementation, especially indicating what parts of Sardana need
to be changed, and preferably indicating who will do the work.
* Preferably a list of criteria to judge whether the implementation has been
a success.
* Links to mailing list threads, perhaps highlighting particularly important
messages.
License
-------
The SEP must have a license that is DFSG free. You may choose the
license freely, but the "Expat" license is recommended. The
official URL for it is <http://www.jclark.com/xml/copying.txt> and
the license text is:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright (c) <year> <your names>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The justification for this recommendation is that this license is one
of the most permissive of the well-known licenses. By using this
license, it is easy to copy parts of the SEP to other places, such as
documentation for Sardana development or embedded in individual
packages.
Creating a SEP
--------------
The procedure to create a SEP is simple: send an e-mail to
`[email protected]`, stating that you're taking the next
available number, and including the first paragraph of the SEP as
explained above. It is very important to include the list of drivers,
and the URL where the draft will be kept up to date. The next available
SEP number can be obtained by consulting
<https://sourceforge.net/p/sardana/wiki/SEP>.
It is also a very good idea to mention in this mail the place where the
discussion is going to take place, with a pointer to the thread in the
mailing list archives if it has already started.
The actual place where the SEP draft is going to be published is up to the SEP driver (e.g., it can be a plain text file or sphinx file in a code repository) but the sardana project provides infrastructure to host it in its wiki for convenience. If you decide to host the SEP draft in the sardana wiki, just create a new wiki page named <https://sourceforge.net/p/sardana/wiki/SEPxxx>, where xxx is the SEP number.
Independently of where the draft is hosted you should edit the list of SEPs in <https://sourceforge.net/p/sardana/wiki/SEP> to add a link to the new SEP.
Revising an accepted SEP
------------------------
If the feature, or whatever, of the SEP needs further changing later,
the process can start over with the accepted version of the SEP document
as the initial draft. The new draft will get a new SEP number. Once the
new SEP is accepted, the old one should move to OBSOLETE state.
As an exception, **trivial** changes may be done in the same SEP without
requiring a new SEP number as long as:
- the intention to change is communicated by the usual channels, and
- the change is approved by the community, and
- the change gets registered in the document (e.g., in a "Changes"
section of the document)
**Note:** A *trivial change* here is understood as a *small modification* that
*does not alter the intention* of the previous text and simply *corrects*
something that is clearly an *unintended* mistake (e.g., fixing a typo,
fixing a broken link, fixing a formatting mistake). *Format translations* (e.g.
adapting the Markdown formatting to reStructuredText format), can also be considered
trivial changes. In case of doubt or discrepancies, it is always better
to opt for the standard procedure of creating a new SEP that obsoletes
the current one.
License
-------
The following copyright statement and license apply to SEP0 (this
document).
Copyright (c) 2013 Carlos Pascual-Izarra
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Changes
-------
* 2016-11-22: [mrosanes](https://github.com/sagiss/)
Migrate SEP from SF wiki to independent file,
modify URL. Pass from ACCEPTED to OBSOLETE according to SEP15.
* 2016-11-22:
[mrosanes](https://github.com/sagiss/) Create SEP0.md.
* 2016-04-07:
[cpascual](https://sourceforge.net/u/cpascual/) Pass from CANDIDATE to ACCEPTED (it was in candidate for testing its application with several real cases, but its text has been basically unaltered since 2013)
* 2014-05-22:
[cpascual](https://sourceforge.net/u/cpascual/) Minor formatting changes
* 2013-12-09:
[cpascual](https://sourceforge.net/u/cpascual/) Added provision for Trivial Changes in "Revising an accepted SEP" section
* 2013-08-21:
[cpascual](https://sourceforge.net/u/cpascual/) Clarification of the procedure for creating a new SEP
* 2013-06-06:
[cpascual](https://sourceforge.net/u/cpascual/) Initial version written after crude "translation" of the [DEP0](http://dep.debian.net/deps/dep0/)
| /sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/sep/SEP0.md | 0.713232 | 0.718051 | SEP0.md | pypi |
Title: HKL integration in Sardana
SEP: 4
State: ACCEPTED
Date: 2013-06-28
Drivers: Teresa Nunez <[email protected]>
URL: http://www.sardana-controls.org/sep/?SEP4.md
License: http://www.jclark.com/xml/copying.txt
Abstract:
This SEP describes the integration in Sardana of the HKL library developed
by Frederic Picca.
Introduction
============
The integration of the HKL library will allow Sardana to control different types of diffractometers. This document describes how this integration is done and the interface for the user.
Status
======
SEP4 implements diffractometer control inside Sardana using the HKL library developed by
Frederic Picca.
This implementation is used for the full control of the diffractometer in one of the PETRA beamlines at DESY.
Description of the current implementation
=========================================
The diffractometers are introduced in Sardana as controllers of the type PseudoMotor; for that reason, the hkl library binding is used exclusively in the code implementing these diffractometer controllers.
There is a basis diffractometer controller class implementing the common part of the different types of diffractometers, and several other classes, derived from the basis one, which implement what is specific to each diffractometer type. Up to now the only difference between these classes is the number of pseudo motor and motor roles required: hkl library diffractometer types with the same number of motors and pseudo motors are represented by the same Sardana diffractometer controller type.
## Basis Diffractometer Class
It contains the common code for all the diffractometers.
The initialization of the class sets the sample to 'default_crystal', sets the lattice values to defaults, and creates the geometry corresponding to the diffractometer type selected in a class property (see below). The engine is set to 'hkl'.
The functions calc_all_physical and calc_all_pseudos implement the calculation of the positions of the real and pseudo axes associated with the diffractometer. These calculations are used for performing the movements. The different engines are taken into account, and the positions and movements are computed according to the selected one.
The following extra properties and attributes are implemented for this class.
### Class properties
The values of the class properties have to be assigned when the instance of the diffractometer controller is created in the Pool. They are fixed for each diffractometer controller and cannot be changed at runtime. The class properties appear as Tango device properties of the device corresponding to this controller in the Pool process.
* DiffractometerType: the name of the diffractometer type as it appears in the hkl library. This property is used for creating the right geometry and engine and for determining the number of axes (real motors) involved.
### Controller attributes
The controller attributes appear as Tango attributes of the device corresponding to the controller. All the parameters that characterize the current status of the diffractometer are implemented as controller attributes. Since it is not possible to add commands to the controller device, the commands for performing actions, computations, etc. are also implemented as attributes.
* **A**: a parameter of the current lattice.
* **AddCrystal**: add a new sample.
* **AddReflection**: add reflection to current sample.
* **SubstituteReflection**: substitute the reflection with the given index by the given one.
* **AdjustAnglesToReflection**: changes the angles associated with the selected reflection.
* **AffineCrystal**: creates a new sample with '(affine)' attached to the name of the current one and performs the affine. This affine sample is set as the current one.
* **Afit**: fit value of the a parameter of the current lattice.
* **Alpha**: alpha parameter of the current lattice.
* **AlphaFit**: fit value of the alpha parameter of the current lattice.
* **AutoEnergyUpdate**: if set to 1 the energy is read from the device set in the attribute EnergyDevice every time the wavelength attribute is read. The readout of the wavelength attribute is done internally every time the pseudomotor positions are recalculated.
* **B**: b parameter of the current lattice.
* **Beta**: beta parameter of the current lattice.
* **BetaFit**: fit value of the beta parameter of the current lattice.
* **Bfit**: fit value of the b parameter of the current lattice.
* **C**: c parameter of the current lattice.
* **Cfit**: fit value of the c parameter of the current lattice.
* **ComputeHKL**: compute the hkl values corresponding to the angles set in this attribute.
* **ComputeTrajectoriesSim**: computes the list of trajectories for the current engine and engine mode corresponding to the values of the pseudo axes written in this attribute. The number of arguments has to correspond to the number of pseudo axes for the current engine. The computed trajectories are shown in the TrajectoryList attribute.
* **ComputeUB**: computes UB matrix using the reflections corresponding to the indexes given as arguments.
* **Crystal**: sample.
* **CrystalList**: list of samples.
* **DeleteCrystal**: delete the crystal given in the argument.
* **EnergyDevice**: name of the Tango device the energy will be read from. The readout of the energy is done every time the wavelength is required if the attribute AutoEnergyUpdate is set to 1. The name of the attribute the energy is read from is Position.
* **Engine**: selected engine. It is taken into account for computing the physical positions corresponding to a movement of a pseudo axis.
* **EngineList**: list of engines for the diffractometer type corresponding to this controller.
* **EngineMode**: selected mode for the current engine.
* **EngineModeList**: list of the modes corresponding to the current engine.
* **HKLModeList**: list of the modes corresponding to the hkl engine.
* **HKLPseudoMotorList**: list of the hkl motor names.
* **Gamma**: gamma parameter of the current lattice.
* **GammaFit**: fit value of the gamma parameter of the current lattice.
* **LoadCrystal**: loads the crystal name, wavelength, lattice parameters, reflections 0 and 1, engine mode and psi reference vector (when available in the mode) from an ASCII file. It also sets the SaveDirectory attribute if it is empty. These settings will be loaded from the last loaded file when the Pool is started.
* **LoadReflections**: loads reflections for current crystal from ascii file.
* **LatticeReciprocal**: reads the values of the reciprocal lattice.
* **ModeParametersNames**: names of the parameters associated with the current engine mode (if any).
* **ModeParametersValues**: get/set the values of the parameters associated with the current engine mode (if any).
* **MotorList**: names of the physical motors associated to the diffractometer.
* **MotorRoles**: names of the motor roles corresponding to this diffractometer.
* **PsiRefH**: H coordinate of the psi reference vector, for the modes that it applies. -999 if it does not apply.
* **PsiRefK**: K coordinate of the psi reference vector, for the modes that it applies. -999 if it does not apply.
* **PsiRefL**: L coordinate of the psi reference vector, for the modes that it applies. -999 if it does not apply.
* **ReflectionAngles**: angles (computed and theoretical) between the reflections of the current sample.
* **ReflectionList**: list of reflections for current sample.
* **RemoveReflection**: remove reflection with given index.
* **SaveCrystal**: saves the current crystal name, wavelength, lattice parameters, first and second reflections, engine mode, UB matrix, U vector, psi reference vector and the SaveDirectory attribute to two files: one named defaultcrystal.txt and the other named after the current crystal/sample. The files are saved in the directory written in the attribute SaveDirectory.
Example of a saved file:
```
Created at 2015-01-09 11:32
Crystal default_crystal
Wavelength 1.54
A 1.5 B 1.5 C 1.5
Alpha 90.0 Beta 90.0 Gamma 90.0
R0 0 1.0 1.0 1.0 0 1 1.0 63.0 4.0 -44.623 10.0 93.192
R1 1 1.0 1.0 1.0 0 1 1.0 63.0 4.0 -44.623 10.0 93.192
Mode constant_phi_vertical
PsiRef not available in current engine mode
U00 4.189 U01 -0.000 U02 -0.000
U10 0.000 U11 4.189 U12 -0.000
U20 0.000 U21 0.000 U22 4.189
Ux 0.0 Uy 0.0 Uz 0.0
SaveDirectory /home/tnunez
```
* **SaveDirectory**: name of the directory where the files with the crystal information will be saved.
* **SaveReflections**: saves the reflections of the current crystal to an ASCII file. The value written to this attribute is the path to the file; the name of the file is the name of the sample with the extension .ref. If this file already exists, a copy will be created with the current time in seconds appended to the name.
* **SelectedTrajectory**: index of the trajectory you want to take when you perform a movement for a given set of pseudo axes positions.
* **SwapReflections01**: swap primary and secondary reflections.
* **TrajectoryList**: list of trajectories for the current engine and engine mode corresponding to the pseudo axes values written in the ComputeTrajectoriesSim attribute and the engine and engine mode when the calculation was performed. It gives the possibility of checking the trajectories before performing a movement.
* **UBMatrix**: reads current UB matrix values.
* **Ux**: reads/writes current ux value.
* **Uy**: reads/writes current uy value.
* **Uz**: reads/writes current uz value.
* **Wavelength**.
The controller attributes are by default stored and written at initialization. However, we have set most of the attributes to memorized but not written at init. This is because, for the hkl library, the order in which
crystal, reflections, lattice parameters and other geometry settings are set is important, and this
required order cannot be controlled if the attributes are automatically set at init. The best practice is to
load a saved crystal file after the initialization is done, as sketched below.
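Since these controller attributes are plain Tango attributes (see above), loading a saved crystal file after initialization can be done, for example, from a PyTango client. This is only a sketch: the controller device name and the file path below are hypothetical.
```
import PyTango  # in recent versions: `import tango as PyTango`

# Hypothetical name of the diffractometer controller device in the Pool
diffrac = PyTango.DeviceProxy("pool/diffrace6c_ctrl/1")

# Load a previously saved crystal file once the Pool and controller are up
diffrac.write_attribute("LoadCrystal", "/home/user/crystals/defaultcrystal.txt")
```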
## Diffractometer Types
The different diffractometer types covered by the hkl library have been grouped according to the axes and pseudo axes involved. A controller class has been developed for each of these groups. These classes derive from the basis one and only differ in the defined motor and pseudo motor roles. Creating an instance of any of these controller classes requires giving a value to all the associated motor and pseudo motor roles.
Even if the choice of one of these diffractometer classes is already determined by the type of diffractometer we are going to use, the property DiffractometerType described in the basis class is still required, because several library diffractometer types have the same motors and pseudo motors.
The following diffractometer controller classes are implemented:
```
* Diffrac6C (covers diffractometer type "PETRA3 P09 EH2")
pseudo_motor_roles = "h", "k", "l"
motor_roles = "mu", "th", "chi", "phi", "gamma", "delta"
* DiffracE6C (covers diffractometer types "E6C", "SOLEIL SIXS MED2+2")
pseudo_motor_roles = "h", "k", "l","psi","q21","q22","qperqpar1","qperpar2"
motor_roles = "omega", "chi", "phi", "tth"
* DiffracE4C (covers diffractometer types "E4CV", "E4CH", "SOLEIL MARS")
pseudo_motor_roles = "h", "k", "l","psi","q"
motor_roles = "omega", "chi", "phi", "tth"
```
The following diffractometer controller classes were removed from a previous version of the controller, since
they were no longer supported or tested inside the hkl library. We expect they will become available again in the future:
```
* DiffracK6C (covers diffractometer type "K6C")
pseudo_motor_roles = "h", "k", "l","psi","q21","q22","qperqpar1","qperpar2","eulerians1", "eulerians2","eulerians3"
motor_roles = "omega", "chi", "phi", "tth"
* Diffrac4C (default 4 circles diffractometers)
pseudo_motor_roles = "h", "k", "l"
motor_roles = "omega", "chi", "phi", "tth"
* DiffracK4C (covers diffractometer type "K4CV")
pseudo_motor_roles = "h", "k", "l","psi","q","eulerians1", "eulerians2","eulerians3"
motor_roles = "omega", "chi", "phi", "tth"
* Diffrac4CZAXIS (covers diffractometer types "ZAXIS", "SOLEIL SIXS MED1+2")
pseudo_motor_roles = "h", "k", "l","q21","q22","qperqpar1","qperqpar2"
motor_roles = "omega", "chi", "phi", "tth"
* Diffrac2C (default 2 circles diffractometers)
pseudo_motor_roles = "h", "k", "l"
motor_roles = "omega","tth"
```
## Diffractometer GUI
A Graphical User Interface is being developed for controlling the diffractometer.
The GUI is based on PyQt/Taurus and connected to the diffractometer Pool controller device.
Up to now there are three main GUI applications dedicated to hkl scans, diffractometer
alignment and UB matrix/lattice parameters and reflections.
## Diffractometer Macros
A set of macros for controlling the diffractometer and displaying information has been
developed. They try to follow the spec syntax.
## Required HKL Package
The required package containing the hkl calculations is gir1.2-hkl-5.0 and it is
available for Debian _jessie_ (from backports):
<https://packages.debian.org/jessie-backports/gir1.2-hkl-5.0>
If you want to install this on the _jessie_ version, add this line:
`deb http://ftp.debian.org/debian jessie-backports main`
to your sources.list (or add a new file with the ".list" extension to `/etc/apt/sources.list.d/`). You can also find a list of other mirrors at <https://www.debian.org/mirror/list>.
Run `apt-get update`
and then for the python binding
`apt-get source -t jessie-backports gir1.2-hkl-5.0`
You can also install it by rebuilding it on your system.
The steps to do are:
1) add the source of the jessie-backports distribution into `/etc/apt/sources`:
`deb-src http://ftp.debian.org/debian jessie-backports main contrib non-free`
2) `apt-get update`
3) `apt-get build-dep hkl`
4) `cd /tmp && apt-get source -b hkl`
It should build a bunch of .deb files and you could install them with
`dpkg -i *.deb`
To test it:
`python -c "from gi.repository import Hkl"`
Known issues & possible improvements
-------------------------------------
* General
* Documentation is missing.
* Current implementation of the diffractometer controllers is not generic and requires a new controller class for each geometry. Whenever the following Sardana [feature-request](https://github.com/sardana-org/sardana/issues/86) is implemented, the diffractometer controllers should take advantage of it.
* GUIs
* As it is now, the hklscan widget "Display Angles" requires all dimensions (H & K & L) to be specified even if one wants to execute a scan of fewer than 3 dimensions, e.g. to execute an ascan of the H axis, the K & L dimensions need equal start and end positions. This could be improved.
* "ComputeUB" of the ubmatrix widget works only when at least 2 reflections are defined for the current crystal. If one tries to execute it and this requirement is not fulfilled, the widget silently ignores the request. It could inform the user about the missing reflections.
* The diffractometeralignment widget parses the \_diff_scan output. A more generic way of executing a scan and finding a peak in order to send the motor there should be found.
* Controller
* I see that the controller defines many MemorizedNoInit attributes, but the memorized values are never extracted from the database (no calls to get_attribute_memorized_value). We should decide whether we need any of the attributes to be memorized and make proper use of them.
* Many of the attributes are only meant to be written (they substitute commands). It would be a good idea to raise exceptions when someone tries to read them, explaining that they are not meant to be read.
* Attributes' formats could be better documented - now there are comments in the code, which could be transformed into the sphinx documentation of the methods or even into the attribute's description in the attribute's definition.
* There are some places in the code where abnormal conditions are silently ignored, or just logged, instead of raising descriptive exceptions, e.g. when the value written to an attribute does not follow the expected syntax. I suggest using exceptions.
* All the fit attributes (e.g. AFit or GammaFit) should change their type to bool.
* There are other already existing TODOs in the code.
* Macros:
* use taurus instead of PyTango API e.g. read_attribute, write_attribute. The hkl macro module is full of PyTango centric calls.
* use explicit getters to obtain Sardana elements (controller - getController, pseudomotor - getPseudoMotor, ...) instead of using getDevice. However, this getter seems to accept only the element names and not the full names.
* it should not be necessary to implement on_stop methods in the macros in order to stop the moveables. Macro API should provide this kind of emergency stop (if the moveables are correctly reserved with the getMotion method) in case of aborting a macro.
* br and ubr macros require refactoring in order to use events instead of polling to obtain the position updates. See umv macro as an example.
* br and ubr macro parameters: H, K, L should be of type float and not string
* luppsi is not tested
Links to more details and discussions
-------------------------------------
Some discussions about the integration of this SEP in the sardana develop branch:
<https://sourceforge.net/p/sardana/mailman/sardana-devel/thread/5698B4B5.10903%40cells.es/#msg34768418>
License
-------
The following copyright statement and license apply to SEP4 (this
document).
Copyright (c) 2013 Teresa Nunez
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Changes
-------
* 2013-06-28:
[tere29](https://sourceforge.net/u/tere29/) Implementation started in sep4 branch
* 2016-01-21:
State changed from DRAFT to CANDIDATE
* 2016-04-07:
State changed from CANDIDATE to ACCEPTED
* 2016-11-18 by zreszela:
Minor change to add info how to install HKL library from jessie-backports
* 2016-11-29:
[mrosanes](https://github.com/sagiss) Migrate SEP4 from SF wiki to independent file, modify URL, fix formatting and correct links.
| /sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/sep/SEP4.md | 0.433262 | 0.86113 | SEP4.md | pypi |
# sardem
A tool for making Digital Elevation Maps (DEMs) in binary data format (16-bit integers, little endian) for use in interferometric synthetic aperture radar (InSAR) processing (e.g. using [isce2](https://github.com/isce-framework/isce2)).
The `sardem` command creates a cropped (and possibly upsampled) digital elevation map:
```bash
usage: sardem [-h] [--bbox left bottom right top] [--geojson GEOJSON] [--wkt-file WKT_FILE] [--xrate XRATE] [--yrate YRATE] [--output OUTPUT] [--data-source {NASA,NASA_WATER,COP}] [-isce] [--keep-egm] [--shift-rsc]
[left_lon] [top_lat] [dlon] [dlat]
```
## Setup and installation
Using conda:
```bash
conda install -c conda-forge sardem
# Or, if mamba is installed, mamba install -c conda-forge sardem
```
Using pip:
```bash
pip install sardem
```
This creates the command line executable `sardem`
Alternatively, you can clone to build/install:
```bash
git clone https://github.com/scottstanie/sardem
cd sardem
# Install requirements using either pip or conda
# conda install -c conda-forge --file environment.yml
# pip install -r requirements.txt
# the conda environment.yml is more complete, as GDAL is required for some of the functionality
pip install -e .
```
which will install the package and create the `sardem` command line script.
## Data sources
The default data source, `--data-source NASA`, uses the SRTM 1 arcsecond data. You can also use the newer [Copernicus Digital Surface Model (DSM)](https://registry.opendata.aws/copernicus-dem/).
To see a comparison of the two, see the [srtm_copernicus_comparison](notebooks/srtm_copernicus_comparison.ipynb) notebook.
**Note:** To convert the elevation values to heights above the WGS84 ellipsoid (which is the default), or to use the Copernicus data, **GDAL is required**.
For the Copernicus data, the minimum required GDAL version is 3.4.2; versions earlier than 3.4.0 seem to hang upon using `gdalwarp` on the global VRT, and <3.4.2 have an internal bug https://github.com/isce-framework/isce2/issues/556 .
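To quickly check which GDAL version is available in your Python environment, you can use the snippet below (a minimal sketch; it assumes the `osgeo` bindings are installed):
```python
from osgeo import gdal

# Prints e.g. "3.4.2"; anything older than 3.4.2 may hang or fail on the Copernicus VRT
print(gdal.VersionInfo("RELEASE_NAME"))
```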
## Bounding box convention
`sardem` uses the gdal convention ("pixel is area") where `--bbox` points to the *edges* of the [left, bottom, right, top] pixels.
I.e. (left, bottom) refers to the lower left corner of the lower left pixel.
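The positional `left_lon top_lat dlon dlat` form describes the same box by its top-left corner and its width/height in degrees. Roughly, the two forms relate as in this small sketch (using the example values from the usage section below):
```python
# --bbox gives pixel edges: [left, bottom, right, top]
left, bottom, right, top = -156.0, 18.8, -154.7, 20.3

# Positional form: left_lon, top_lat, dlon (width, degrees), dlat (height, degrees)
left_lon, top_lat = left, top
dlon, dlat = right - left, top - bottom
print(left_lon, top_lat, round(dlon, 2), round(dlat, 2))  # -156.0 20.3 1.3 1.5
```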
### Converting to WGS84 ellipsoidal heights from EGM96/EGM2008 geoid heights
GDAL is required for the conversion, which is installed when using `conda install -c conda-forge sardem`.
If you already are using an existing environment, make sure that the GDAL version is >=3.4.2.
```bash
conda install -c conda-forge "gdal>=3.4.2"
# or
# conda install -c conda-forge mamba
# mamba install -c conda-forge "gdal>=3.4.2"
```
## Command Line Interface
The full options for the command line tool in `sardem/cli.py` can be found using
```
$ sardem -h
usage: sardem [-h] [--bbox left bottom right top] [--geojson GEOJSON] [--wkt-file WKT_FILE] [--xrate XRATE] [--yrate YRATE] [--output OUTPUT] [--data-source {NASA,NASA_WATER,COP}] [-isce] [--keep-egm] [--shift-rsc]
[--cache-dir CACHE_DIR]
[left_lon] [top_lat] [dlon] [dlat]
Stiches SRTM .hgt files to make (upsampled) DEM
Pick a lat/lon bounding box for a DEM, and it will download
the necessary SRTM1 tiles, stitch together, then upsample.
The `--bbox` convention points to the *edges* of the [left, bottom, right, top]
pixels, following the "pixel is area" convention as used in gdal.
I.e. (left, bottom) refers to the lower left corner of the lower left pixel.
Usage Examples:
sardem --bbox -156 18.8 -154.7 20.3 # bounding box: [left bottom right top]
sardem -156.0 20.2 1 2 --xrate 2 --yrate 2 # Makes a box 1 degree wide, 2 deg high
sardem --bbox -156 18.8 -154.7 20.3 --data-source COP # Copernicus DEM
sardem --geojson dem_area.geojson -x 11 -y 3 # Use geojson file to define area
sardem --bbox -156 18.8 -154.7 20.3 --data-source NASA_WATER -o my_watermask.wbd # Water mask
sardem --bbox -156 18.8 -154.7 20.3 --data COP -isce # Generate .isce XML files as well
Default out is elevation.dem for the final upsampled DEM.
Also creates elevation.dem.rsc with start lat/lon, stride, and other info.
positional arguments:
left_lon Left (western) most longitude of DEM box (degrees, west=negative)
top_lat Top (northern) most latitude of DEM box (degrees)
dlon Width of DEM box (degrees)
dlat Height of DEM box (degrees)
options:
-h, --help show this help message and exit
--bbox left bottom right top
Bounding box of area of interest (e.g. --bbox -106.1 30.1 -103.1 33.1 ).
--bbox points to the *edges* of the pixels,
following the 'pixel is area' convention as used in gdal.
--geojson GEOJSON, -g GEOJSON
Alternate to corner/dlon/dlat box specification:
File containing the geojson object for DEM bounds
--wkt-file WKT_FILE Alternate to corner/dlon/dlat box specification:
File containing the WKT string for DEM bounds
--xrate XRATE, -x XRATE
Rate in x dir to upsample DEM (default=1, no upsampling)
--yrate YRATE, -y YRATE
Rate in y dir to upsample DEM (default=1, no upsampling)
--output OUTPUT, -o OUTPUT
Name of output dem file (default=elevation.dem for DEM, watermask.wbd for water mask)
--data-source {NASA,NASA_WATER,COP}, -d {NASA,NASA_WATER,COP}
Source of DEM data (default NASA). See README for more.
-isce, --make-isce-xml
Make an isce2 XML file for the DEM.
--keep-egm Keep the DEM heights as geoid heights above EGM96 or EGM2008. Default is to convert to WGS84 for InSAR processing.
--shift-rsc Shift the .rsc file by half a pixel so that X_FIRST and Y_FIRST are at the pixel center (instead of GDAL's convention of the top left edge). Default is GDAL's top-left edge convention.
--cache-dir CACHE_DIR
Location to save downloaded files (Default = /Users/staniewi/.cache/sardem)
```
## NASA SRTM Data access
The default data source is NASA's Shuttle Radar Topography Mission (SRTM) version 3 global 1 degree data.
See https://lpdaac.usgs.gov/dataset_discovery/measures/measures_products_table/srtmgl3s_v003 .
The data is valid outside of the polar regions (-60 to 60 degrees latitude), and is zero over open ocean.
This data requires a username and password from here:
https://urs.earthdata.nasa.gov/users/new
You will be prompted for a username and password when running with NASA data.
It will save into your ~/.netrc file for future use, which means you will not have to enter a username and password any subsequent times.
The entry will look like this:
```
machine urs.earthdata.nasa.gov
login USERNAME
password PASSWORD
```
| /sardem-0.11.3.tar.gz/sardem-0.11.3/README.md | 0.715722 | 0.929087 | README.md | pypi |
from typing import Any, Callable, Optional, ParamSpec, TypeVar
from ..handlers.sender import Number, NumericElement, Sender
from ..utils import alias_param, get_snap_deadline, lerp
from ..scheduler import AsyncRunner
from dataclasses import dataclass
from ..base import BaseHandler
from functools import wraps
__all__ = ("Player",)
P = ParamSpec("P")
T = TypeVar("T")
def for_(n: int) -> Callable[[Callable[P, T]], Callable[P, T]]:
"""Allows to play a swimming function x times. It swims for_ n iterations."""
def decorator(func: Callable[P, T]) -> Callable[P, T]:
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
nonlocal n
n -= 1
if n >= 0:
return func(*args, **kwargs)
return wrapper
return decorator
@dataclass
class PatternInformation:
sender: Sender
send_method: Callable[P, T]
args: tuple[Any]
kwargs: dict[str, Any]
period: NumericElement
iterator: Optional[Number]
iterator_span: NumericElement
iterator_limit: NumericElement
divisor: NumericElement
rate: NumericElement
snap: Number
timespan: Optional[float]
until: Optional[int]
class Player(BaseHandler):
"""
Players are holders used to support one-line specialised swimming functions. Many
instances of 'Player' are injected in globals() at boot time as a way to provide a
quick interface for the user to output musical and data patterns. Players are han-
dling the whole lifetime of a pattern, from its initial entry in the scheduler to
its death when the silence() or panic() method is called.
"""
def __init__(self, name: str):
super().__init__()
self._name = name
self.runner = AsyncRunner(name=name)
self.iterator: Number = 0
self._iteration_span: Number = 1
self._iteration_limit: Optional[Number] = None
self._period: int | float = 1.0
@property
def name(self) -> str:
return self._name
def fit_period_to_timespan(self, period: NumericElement, timespan: float):
"""
Fit a given period to a certain timespan (forcing a pattern to have a fixed
duration). This feature can be useful for preventing users from creating loops
that will phase out too easily.
"""
if isinstance(period, (int, float)):
return lerp(period, 0, period, 0, timespan)
period = self.env.parser.parse(period)
period = list(map(lambda x: lerp(x, 0, sum(period), 0, timespan), period))
return period
@staticmethod
@alias_param(name="period", alias="p")
@alias_param(name="iterator_span", alias="i")
@alias_param(name="iterator_limit", alias="l")
@alias_param(name="divisor", alias="d")
@alias_param(name="rate", alias="r")
@alias_param(name="timespan", alias="span")
def _play_factory(
sender: Sender,
send_method: Callable[P, T],
*args: P.args,
timespan: Optional[float] = None,
until: Optional[int] = None,
period: NumericElement = 1,
iterator: Optional[Number] = None,
iterator_span: Optional[Number] = 1,
iterator_limit: Optional[Number] = None,
divisor: NumericElement = 1,
rate: NumericElement = 1,
snap: Number = 0,
**kwargs: P.kwargs,
):
"""Entry point of a pattern into the Player"""
# iteration_span = kwargs.pop("iteration_span", 1)
# iteration_limit = kwargs.pop("iteration_limit", 1)
return PatternInformation(
sender,
send_method,
args,
kwargs,
period,
iterator,
iterator_span,
iterator_limit,
divisor,
rate,
snap,
timespan,
until,
)
def __rshift__(self, pattern: Optional[PatternInformation]) -> None:
"""
This method acts as a cosmetic disguise for feeding PatternInformation into a
given player. Its syntax is inspired by FoxDot (Ryan Kirkbride), another very
popular live coding library.
"""
if pattern is not None and pattern.timespan is not None:
pattern.period = self.fit_period_to_timespan(
pattern.period, pattern.timespan
)
self.push(pattern)
def __mul__(self, pattern: Optional[PatternInformation]) -> None:
"""
This method acts as a cosmetic disguise for feeding PatternInformation into a
given player. Its syntax is inspired by FoxDot (Ryan Kirkbride), another very
popular live coding library.
"""
if pattern is not None and pattern.timespan is not None:
pattern.period = self.fit_period_to_timespan(
pattern.period, pattern.timespan
)
self.push(pattern)
def get_new_period(self, pattern: PatternInformation) -> Number:
"""Get period value for the current cycle"""
for message in pattern.sender.pattern_reduce(
{"period": pattern.period},
self.iterator,
pattern.divisor,
pattern.rate,
# use_divisor_to_skip=False,
# TODO: why was this untoggled?
use_divisor_to_skip=True,
):
return message["period"]
return 1
def func(
self,
pattern: PatternInformation,
p: NumericElement = 1, # pylint: disable=invalid-name,unused-argument
) -> None:
"""Central swimming function defined by the player"""
self._iterator_span = pattern.iterator_span
self._iterator_limit = pattern.iterator_limit
if pattern.iterator is not None:
self.iterator = pattern.iterator
pattern.iterator = None
dur = pattern.send_method(
*pattern.args,
**pattern.kwargs,
iterator=self.iterator,
divisor=pattern.divisor,
rate=pattern.rate,
)
# Reset the iterator when it reaches a certain ceiling
if self._iterator_limit:
if self.iterator >= self._iterator_limit:
self.iterator = 0
# Moving the iterator up
self.iterator += self._iterator_span
period = self.get_new_period(pattern)
if not dur:
self.again(pattern=pattern, p=period)
else:
self.again(pattern=pattern, p=dur)
def stop(self):
"""Stop the player by removing the Player"""
self.env.scheduler.stop_runner(self.runner)
def push(self, pattern: Optional[PatternInformation]):
"""
Managing lifetime of the pattern, similar to managing a swimming function
manually. If PatternInformation is hot-swapped by None, the Player will stop
scheduling its internal function, defined in self.func.
"""
# This is a local equivalent to the silence() function.
if pattern is None:
return self.env.scheduler.stop_runner(self.runner)
elif not self.runner.is_running():
# Assume we are queuing the first state
self.iterator = 0
self.runner.reset_states()
# Forcibly reset the interval shift back to 0 to make sure
# the new pattern can be synchronized
self.runner.interval_shift = 0.0
period = self.get_new_period(pattern)
deadline = get_snap_deadline(self.env.clock, pattern.snap)
self.runner.push_deferred(
deadline,
for_(pattern.until)(self.func) if pattern.until else self.func,
pattern=pattern,
p=period,
)
self.env.scheduler.start_runner(self.runner)
self.runner.reload()
def again(self, *args, **kwargs):
self.runner.update_state(*args, **kwargs)
self.runner.swim()
| /sardine_system-0.4.0-py3-none-any.whl/sardine_core/handlers/player.py | 0.898668 | 0.36832 | player.py | pypi |
from typing import Any, Callable, Optional, Union
from osc4py3.as_eventloop import *
from osc4py3.oscchannel import TransportChannel, get_channel
from osc4py3.oscmethod import *
from ..logger import print
from ..base.handler import BaseHandler
from .osc_loop import OSCLoop
__all__ = ("OSCInHandler",)
def flatten(l):
if isinstance(l, (list, tuple)):
if len(l) > 1:
return [l[0]] + flatten(l[1:])
else:
return l[0]
else:
return [l]
class OSCInHandler(BaseHandler):
def __init__(
self,
loop: OSCLoop,
ip: str = "127.0.0.1",
port: int = 11223,
name: str = "OSCIn",
):
super().__init__()
self.loop = loop
self._ip, self._port, self._name = ip, port, name
self._watched_values = {}
loop.add_child(self, setup=True)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self._name} ip={self._ip!r} port={self._port}>"
# Handler methods
def setup(self):
osc_udp_server(self._ip, self._port, self._name)
def teardown(self):
channel: Optional[TransportChannel] = get_channel(self._name)
if channel is not None:
channel.terminate()
# Interface
def _generic_store(self, address) -> None:
"""Generic storage function to attach to a given address"""
def generic_value_tracker(*args, **kwargs):
"""Generic value tracker to be attached to an address"""
self._watched_values[address] = {"args": flatten(args), "kwargs": kwargs}
return (args, kwargs)
osc_method(address, generic_value_tracker, argscheme=OSCARG_DATA)
def watch(self, address: str):
"""
Watch the value of a given OSC address. Will be recorded in memory
in the self._watched_values dictionary accessible through the get()
method
"""
print(f"[yellow]Watching address [red]{address}[/red].[/yellow]")
self._generic_store(address)
def attach(self, address: str, function: Callable, watch: bool = False):
"""
Attach a callback to a given address. You can also toggle the watch
boolean value to tell if the value should be tracked by the receiver.
It allows returning values from the callback to be retrieved later in
through the get(address) method.
"""
print(
f"[yellow]Attaching function [red]{function.__name__}[/red] to address [red]{address}[/red][/yellow]"
)
osc_method(address, function)
if watch:
self.watch(address)
def remote(self, address: str):
"""
Remote for controlling Sardine from an external client by talking directly to
the fish_bowl dispatch system. If the address matches an internal function de-
clared by some handler, the dispatch function will be called and *args will be
forwarded as well.
address: address matching to a dispatch function (like 'pause', 'stop', etc..)
"""
print("Attaching address to matching incoming message")
def event_dispatcher(address, *args) -> None:
print(f"Event Name: {address}")
self.env.dispatch(address, *args)
osc_method(address, event_dispatcher, argscheme=OSCARG_DATA)
def get(self, address: str) -> Union[Any, None]:
"""Get a watched value. Return None if not found"""
try:
return self._watched_values[address]
except KeyError:
return None
| /sardine_system-0.4.0-py3-none-any.whl/sardine_core/handlers/osc_in.py | 0.885117 | 0.183703 | osc_in.py | pypi |
import asyncio
from math import floor
from random import random
from typing import Callable, Generator, ParamSpec, TypeVar, Union, Optional
from ..base import BaseHandler
from ..utils import maybe_coro
from ..sequences import euclid
__all__ = ("Sender",)
P = ParamSpec("P")
T = TypeVar("T")
Number = Union[float, int]
ReducedElement = TypeVar("ReducedElement")
RecursiveElement = Union[ReducedElement, list] # assume list is list[RecursiveElement]
ParsableElement = Union[RecursiveElement, str]
# Sub-types of ParsableElement
NumericElement = Union[Number, list, str]
StringElement = Union[str, list] # assume list is list[StringElement]
Pattern = dict[str, list[ParsableElement]]
ReducedPattern = dict[str, ReducedElement]
def _maybe_index(val: RecursiveElement, i: int) -> RecursiveElement:
if not isinstance(val, list):
return val
length = len(val)
return val[i % length]
def _maybe_length(val: RecursiveElement) -> int:
if isinstance(val, list):
return len(val)
return 0
class Sender(BaseHandler):
"""
Handlers can inherit from 'Sender' if they are in charge of some output operation.
Output operations in Sardine generally involve some amount of pattern parsing and
monophonic/polyphonic message composition. This class implements most of the inter-
nal behavior necessary for patterning. Each handler rely on these methods in the
final 'send' method called by the user.
pattern_element: return the right index number for the pattern.
reduce_polyphonic_message: turn any dict pattern into a list of patterns.
pattern_reduce: reduce a pattern to a dictionary of values corresponding to iterator
index.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._timed_tasks: set[asyncio.Task] = set()
def call_timed(
self,
deadline: float,
func: Callable[P, T],
*args: P.args,
**kwargs: P.kwargs,
) -> None:
"""Schedules the given (a)synchronous function to be called.
Senders should always use this method to properly account for time shift.
"""
async def scheduled_func():
await self.env.sleeper.sleep_until(deadline)
await maybe_coro(func, *args, **kwargs)
task = asyncio.create_task(scheduled_func())
self._timed_tasks.add(task)
task.add_done_callback(self._timed_tasks.discard)
@staticmethod
def pattern_element(
val: RecursiveElement,
iterator: Number,
divisor: Number,
rate: Number,
) -> RecursiveElement:
"""Joseph Enguehard's algorithm for solving iteration speed"""
# For simplicity, we're allowing non-sequences to be passed through
if not isinstance(val, list):
return val
length = len(val)
if length > 0:
i = floor(iterator * rate / divisor) % length
return val[i]
raise ValueError(f"Cannot pattern an empty sequence: {val!r}")
def pattern_reduce(
self,
pattern: Pattern,
iterator: Number,
divisor: NumericElement = 1,
rate: NumericElement = 1,
*,
use_divisor_to_skip: bool = True,
) -> Generator[ReducedPattern, None, None]:
"""Reduces a pattern to an iterator yielding subpatterns.
First, any string values are parsed using the fish bowl's parser.
Afterwards, if the pattern is a dictionary where none of its values
are lists, the pattern is wrapped in a list and returned, ignoring
the iterator/divisor/rate parameters. For example::
>>> pat = {"note": 60, "velocity": 100}
>>> list(sender.pattern_reduce(pat, 0, 1, 1))
[{'note': 60, 'velocity': 100}]
If it is a monophonic pattern, i.e. a dictionary where one or more
of its values are lists, the corresponding element of those lists
are indexed using the `pattern_element()` method which implements
Joseph Enguehard's algorithm::
>>> pat = {"note": [60, 70, 80, 90], "velocity": 100}
>>> for i in range(1, 4):
... list(sender.pattern_reduce(pat, i, 2, 3))
[{'note': 70, 'velocity': 100}]
[{'note': 90, 'velocity': 100}]
[{'note': 60, 'velocity': 100}]
If it is a polyphonic pattern, i.e. a dictionary where one or more
of the values indexed by the above algorithm are also lists, the
elements of each list are paired together into several reduced patterns.
The number of messages is determined by the length of the longest list.
Any lists that are shorter than the longest list will repeat its
elements from the start to match the length of the longest list.
Any values that are not lists are simply repeated.
When `use_divisor_to_skip` is True and the `divisor` is a number
other than 1, patterns are only generated if the iterator is
divisible by the divisor, and will otherwise yield zero messages.
"""
# TODO: more examples for pattern_reduce()
# TODO: document pattern_reduce() arguments
def maybe_parse(val: ParsableElement) -> RecursiveElement:
if isinstance(val, str):
return self.env.parser.parse(val)
if isinstance(val, list) and all(isinstance(item, str) for item in val):
val = " ".join(val)
return self.env.parser.parse(val)
return val
if any(isinstance(n, (list, str)) for n in (divisor, rate)):
divisor, rate = next(
self.pattern_reduce({"divisor": divisor, "rate": rate}, iterator)
).values()
if use_divisor_to_skip and iterator % divisor != 0:
return
pattern = {k: maybe_parse(v) for k, v in pattern.items()}
for k, v in pattern.items():
pattern[k] = self.pattern_element(v, iterator, divisor, rate)
if not any(isinstance(v, list) for v in pattern.values()):
# Base case where we have a monophonic message
yield pattern
# For polyphonic messages, recursively reduce them
# to a list of monophonic messages
max_length = max(_maybe_length(v) for v in pattern.values())
for i in range(max_length):
sub_pattern = {k: _maybe_index(v, i) for k, v in pattern.items()}
yield from self.pattern_reduce(sub_pattern, iterator, divisor, rate)
def cycle_loaf(self, loaf: Optional[int], on: Optional[tuple | int]) -> bool:
"""
Will slice time in group of bars of size "loaf". Will
check if the current bar matches with one of the selected
bars in the sliced group (e.g. for a slice of 5, select
when we are on bar 1 and 3).
"""
def mod_cycles(on: int | tuple) -> bool:
"""
Modulo operator working on bar numbers. This function will
be used with the "on" operator if no "loaf" argument is used
by the pattern.
"""
on = on[0] if isinstance(on, tuple) else on
return self.env.clock.bar % on == 0
if loaf is None and on is None:
return True
if loaf is None:
return mod_cycles(on=on)
measure = self.env.clock.bar
elapsed_bars = measure // loaf
bar_in_current_group = measure - (elapsed_bars * loaf)
if isinstance(on, tuple):
return bar_in_current_group in tuple(x - 1 for x in on)
return bar_in_current_group == (on - 1)
def euclid_bars(
self,
steps: int,
pulses: int,
rotation: Optional[int] = None,
negative: bool = False,
):
"""
Euclidian rhythm but on the measure level!
"""
if rotation is None:
rotation = 0
euclidian_pattern = euclid(steps, pulses, rotation)
if negative:
euclidian_pattern = list(map(lambda x: x ^ 1, euclidian_pattern))
to_bars, len_in_bars = [], len(euclidian_pattern)
for count, value in enumerate(euclidian_pattern):
if value == 1:
to_bars.append(count + 1)
return self.cycle_loaf(loaf=len_in_bars, on=tuple(to_bars))
def binary_bars(self, binary_pattern: list):
"""
Euclidian rhythm but on the measure level!
"""
# We can't tolerate any other thing than 1 and 0
if not all(e in [1, 0] for e in binary_pattern):
return False
to_bars, len_in_bars = [], len(binary_pattern)
for count, value in enumerate(binary_pattern):
if value == 1:
to_bars.append(count + 1)
return self.cycle_loaf(loaf=len_in_bars, on=tuple(to_bars))
def chance_operation(self, frequency: str):
"""
Port of the TidalCycles sometimes family of functions:
- always: 100%
- almostAlways: 90%
- often: 75%
- sometimes: 50%
- rarely: 25%
- AlmostNever: 10%
- never: 0%
These functions represent a likelihood for an event to be played.
"""
chance = {
"always": True,
"almostAlways": random() <= 0.90,
"often": random() <= 0.75,
"sometimes": random() <= 0.5,
"rarely": random() <= 0.25,
"almostNever": random() <= 0.10,
"never": False,
}
return chance.get(frequency, False)
def key_deleter(self, dictionary: dict, list_of_keys: list[str]):
"""
Remove multiple keys from one dictionary in one-go
while taking care of possible index errors.
"""
for key in list_of_keys:
try:
del dictionary[key]
except KeyError:
pass
def apply_conditional_mask_to_bars(self, pattern: ParsableElement) -> bool:
boolean_masks = []
# Cycle loaf
boolean_masks.append(
self.cycle_loaf(loaf=pattern.get("loaf", None), on=pattern.get("on", None))
)
# Euclidian
        if (
            pattern.get("euclid", None) is not None
            or pattern.get("eu", None) is not None
        ):
            euclid_args = pattern.get("euclid") or pattern.get("eu")
            steps, pulses = euclid_args[0:2]
            try:
                rotation = euclid_args[2]
            except IndexError:
                rotation = None
            boolean_masks.append(self.euclid_bars(steps, pulses, rotation))
# Negative euclidian
        if (
            pattern.get("neuclid", None) is not None
            or pattern.get("neu", None) is not None
        ):
            neuclid_args = pattern.get("neuclid") or pattern.get("neu")
            steps, pulses = neuclid_args[0:2]
            try:
                rotation = neuclid_args[2]
            except IndexError:
                rotation = None
            boolean_masks.append(
                self.euclid_bars(steps, pulses, rotation, negative=True)
            )
# Binary pattern
if pattern.get("binary", None) is not None:
boolean_masks.append(self.binary_bars(binary_pattern=pattern["binary"]))
# Chance operation
if pattern.get("chance", None) is not None:
boolean_masks.append(self.chance_operation(frequency=pattern["chance"]))
# Cleaning up the messy keys
self.key_deleter(
dictionary=pattern,
            list_of_keys=["euclid", "eu", "neuclid", "neu", "on", "loaf", "binary", "chance"],
)
# Returning if one False in the boolean masks
return False in boolean_masks
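    # Illustrative example (not part of the library): for a pattern such as
    # {"sound": "bd", "euclid": (8, 3), "chance": "often"}, the euclidean bar
    # mask and the 75% chance operation are both evaluated, the "euclid" and
    # "chance" keys are stripped from the pattern, and the method returns True
    # as soon as any mask is False (presumably signalling the caller to skip
    # the event for this bar).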
| /sardine_system-0.4.0-py3-none-any.whl/sardine_core/handlers/sender.py | 0.847858 | 0.27513 | sender.py | pypi |
import asyncio
import heapq
from collections import deque
from typing import Optional, Union
from exceptiongroup import BaseExceptionGroup
from ...base import BaseHandler
from .time_handle import *
__all__ = ("SleepHandler", "TimeHandle")
NUMBER = Union[float, int]
class SleepHandler(BaseHandler):
"""The primary interface for other components to sleep.
Args:
delta_record_size (int):
The maximum number of recordings to store when averaging the
delta for anti-drift. Set to 0 to disable drift correction.
WARNING: this is an experimental setting and may severely degrade
sleep accuracy when enabled.
poll_interval (float):
The polling interval to use when the current clock does not
support its own method of sleep.
"""
def __init__(
self,
delta_record_size: int = 0,
poll_interval: float = 0.001,
):
super().__init__()
self.poll_interval = poll_interval
self._poll_task: Optional[asyncio.Task] = None
self._interrupt_event = asyncio.Event()
self._wake_event = asyncio.Event()
self._time_handles: list[TimeHandle] = []
self._previous_deltas: deque[float] = deque(maxlen=delta_record_size)
def __repr__(self) -> str:
return f"<{type(self).__name__} interval={self.poll_interval}>"
# Public methods
async def sleep(self, duration: NUMBER):
"""Sleeps for the specified duration."""
deadline = self.env.clock.time + duration
return await self.sleep_until(deadline)
async def sleep_until(self, deadline: NUMBER) -> None:
"""Sleeps until the given time has been reached.
The deadline is based on the fish bowl clock's time.
"""
if self.env is None:
raise ValueError("SleepHandler must be added to a fish bowl")
elif not self.env.is_running():
raise RuntimeError("cannot use sleep until fish bowl has started")
elif self.env.clock.time >= deadline:
await self._wake_event.wait()
return
clock = self.env.clock
while True:
# Handle stop/pauses before proceeding
if self._is_terminated():
asyncio.current_task().cancel()
await self._wake_event.wait()
corrected_deadline = deadline - self._get_avg_delta()
# Use clock sleep if available, else polling implementation
if clock.can_sleep():
sleep_task = asyncio.create_task(
clock.sleep(corrected_deadline - clock.time)
)
else:
sleep_task = asyncio.create_task(self._sleep_until(corrected_deadline))
# Wait until sleep completes or interruption
intrp_task = asyncio.create_task(self._interrupt_event.wait())
tasks = (sleep_task, intrp_task)
done, pending = await asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED
)
delta = clock.time - corrected_deadline
for t in pending:
t.cancel()
exceptions = (t.exception() for t in done)
exceptions = [exc for exc in exceptions if exc is not None]
if exceptions:
raise BaseExceptionGroup(
f"Error occurred while sleeping until {deadline = }", exceptions
)
if sleep_task in done:
self._previous_deltas.append(delta)
return
# Internal methods
def _get_avg_delta(self) -> float:
if self._previous_deltas:
return sum(self._previous_deltas) / len(self._previous_deltas)
return 0.0
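    # Illustrative example of the anti-drift correction: with delta_record_size=2
    # and recorded deltas of 0.002 and 0.004 seconds (sleeps that overshot their
    # deadline), _get_avg_delta() returns 0.003, so the next sleep targets
    # deadline - 0.003 to compensate for the expected overshoot.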
def _check_running(self):
if self._time_handles and not self._is_polling():
self._poll_task = asyncio.create_task(self._run_poll())
elif not self._time_handles and self._is_polling():
self._poll_task.cancel()
def _create_handle(self, deadline: NUMBER) -> TimeHandle:
handle = TimeHandle(deadline)
if self.env.clock.time >= deadline:
handle.fut.set_result(None)
else:
heapq.heappush(self._time_handles, handle)
self._check_running()
return handle
def _is_terminated(self) -> bool:
# This might be called after teardown, in which case `env` is None
return self.env is None or not self.env.is_running()
def _is_polling(self) -> bool:
return self._poll_task is not None and not self._poll_task.done()
async def _run_poll(self):
"""Continuously polls the clock's time until all TimeHandles resolve.
TimeHandles will resolve when their deadline is reached,
or they are cancelled.
Note that when a pause/stop occurs, all `sleep_until()` calls
cancel the `_sleep_until()` task, which should indirectly
cancel the handle being awaited on.
"""
# this is implemented very similarly to asyncio.BaseEventLoop
while self._time_handles:
while self._time_handles:
handle = self._time_handles[0]
if handle.cancelled():
heapq.heappop(self._time_handles)
elif self.env.clock.time >= handle.when:
handle.fut.set_result(None)
heapq.heappop(self._time_handles)
else:
# all handles afterwards are either still waiting or cancelled
break
await asyncio.sleep(self.poll_interval)
async def _sleep_until(self, deadline: NUMBER):
await self._create_handle(deadline)
# Handler hooks
def setup(self):
for event in ("start", "pause", "resume", "stop"):
self.register(event)
def teardown(self):
self._interrupt_event.set()
self._wake_event.set() # just in case
def hook(self, event: str, *args):
if event in ("start", "resume"):
self._wake_event.set()
self._interrupt_event.clear()
if event == "pause":
self._interrupt_event.set()
self._wake_event.clear()
elif event == "stop":
self.teardown()
| /sardine_system-0.4.0-py3-none-any.whl/sardine_core/handlers/sleep_handler/__init__.py | 0.912646 | 0.202838 | __init__.py | pypi |
import sys
from .pattern import *
# Create functions for making control patterns (patterns of dictionaries)
generic_params = [
("s", "s", "sound"),
("s", "toArg", "for internal sound routing"),
# ("f", "from", "for internal sound routing"), <- TODO - 'from' is a reserved word in python..
("f", "to", "for internal sound routing"),
(
"f",
"accelerate",
"a pattern of numbers that speed up (or slow down) samples while they play.",
),
("f", "amp", "like @gain@, but linear."),
(
"f",
"attack",
"a pattern of numbers to specify the attack time (in seconds) of an envelope applied to each sample.",
),
(
"f",
"bandf",
"a pattern of numbers from 0 to 1. Sets the center frequency of the band-pass filter.",
),
(
"f",
"bandq",
"a pattern of anumbers from 0 to 1. Sets the q-factor of the band-pass filter.",
),
(
"f",
"begin",
"a pattern of numbers from 0 to 1. Skips the beginning of each sample, e.g. `0.25` to cut off the first quarter from each sample.",
),
("f", "legato", "controls the amount of overlap between two adjacent sounds"),
("f", "clhatdecay", ""),
(
"f",
"crush",
"bit crushing, a pattern of numbers from 1 (for drastic reduction in bit-depth) to 16 (for barely no reduction).",
),
(
"f",
"coarse",
"fake-resampling, a pattern of numbers for lowering the sample rate, i.e. 1 for original 2 for half, 3 for a third and so on.",
),
("i", "channel", "choose the channel the pattern is sent to in superdirt"),
(
"i",
"cut",
"In the style of classic drum-machines, `cut` will stop a playing sample as soon as another samples with in same cutgroup is to be played. An example would be an open hi-hat followed by a closed one, essentially muting the open.",
),
(
"f",
"cutoff",
"a pattern of numbers from 0 to 1. Applies the cutoff frequency of the low-pass filter.",
),
("f", "cutoffegint", ""),
("f", "decay", ""),
(
"f",
"delay",
"a pattern of numbers from 0 to 1. Sets the level of the delay signal.",
),
(
"f",
"delayfeedback",
"a pattern of numbers from 0 to 1. Sets the amount of delay feedback.",
),
(
"f",
"delaytime",
"a pattern of numbers from 0 to 1. Sets the length of the delay.",
),
("f", "detune", ""),
("f", "djf", "DJ filter, below 0.5 is low pass filter, above is high pass filter."),
(
"f",
"dry",
"when set to `1` will disable all reverb for this pattern. See `room` and `size` for more information about reverb.",
),
(
"f",
"end",
"the same as `begin`, but cuts the end off samples, shortening them; e.g. `0.75` to cut off the last quarter of each sample.",
),
(
"f",
"fadeTime",
"Used when using begin/end or chop/striate and friends, to change the fade out time of the 'grain' envelope.",
),
(
"f",
"fadeInTime",
"As with fadeTime, but controls the fade in time of the grain envelope. Not used if the grain begins at position 0 in the sample.",
),
("f", "freq", ""),
(
"f",
"gain",
"a pattern of numbers that specify volume. Values less than 1 make the sound quieter. Values greater than 1 make the sound louder. For the linear equivalent, see @amp@.",
),
("f", "gate", ""),
("f", "hatgrain", ""),
(
"f",
"hcutoff",
"a pattern of numbers from 0 to 1. Applies the cutoff frequency of the high-pass filter. Also has alias @hpf@",
),
(
"f",
"hold",
"a pattern of numbers to specify the hold time (in seconds) of an envelope applied to each sample. Only takes effect if `attack` and `release` are also specified.",
),
(
"f",
"hresonance",
"a pattern of numbers from 0 to 1. Applies the resonance of the high-pass filter. Has alias @hpq@",
),
("f", "lagogo", ""),
("f", "lclap", ""),
("f", "lclaves", ""),
("f", "lclhat", ""),
("f", "lcrash", ""),
("f", "leslie", ""),
("f", "lrate", ""),
("f", "lsize", ""),
("f", "lfo", ""),
("f", "lfocutoffint", ""),
("f", "lfodelay", ""),
("f", "lfoint", ""),
("f", "lfopitchint", ""),
("f", "lfoshape", ""),
("f", "lfosync", ""),
("f", "lhitom", ""),
("f", "lkick", ""),
("f", "llotom", ""),
(
"f",
"lock",
"A pattern of numbers. Specifies whether delaytime is calculated relative to cps. When set to 1, delaytime is a direct multiple of a cycle.",
),
(
"f",
"loop",
"loops the sample (from `begin` to `end`) the specified number of times.",
),
("f", "lophat", ""),
("f", "lsnare", ""),
("f", "n", "The note or sample number to choose for a synth or sampleset"),
("f", "note", "The note or pitch to play a sound or synth with"),
("f", "degree", ""),
("f", "mtranspose", ""),
("f", "ctranspose", ""),
("f", "harmonic", ""),
("f", "stepsPerOctave", ""),
("f", "octaveR", ""),
(
"f",
"nudge",
"Nudges events into the future by the specified number of seconds. Negative numbers work up to a point as well (due to internal latency)",
),
("i", "octave", ""),
("f", "offset", ""),
("f", "ophatdecay", ""),
(
"i",
"orbit",
"a pattern of numbers. An `orbit` is a global parameter context for patterns. Patterns with the same orbit will share hardware output bus offset and global effects, e.g. reverb and delay. The maximum number of orbits is specified in the superdirt startup, numbers higher than maximum will wrap around.",
),
("f", "overgain", ""),
("f", "overshape", ""),
(
"f",
"pan",
"a pattern of numbers between 0 and 1, from left to right (assuming stereo), once round a circle (assuming multichannel)",
),
(
"f",
"panspan",
"a pattern of numbers between -inf and inf, which controls how much multichannel output is fanned out (negative is backwards ordering)",
),
(
"f",
"pansplay",
"a pattern of numbers between 0.0 and 1.0, which controls the multichannel spread range (multichannel only)",
),
(
"f",
"panwidth",
"a pattern of numbers between 0.0 and inf, which controls how much each channel is distributed over neighbours (multichannel only)",
),
(
"f",
"panorient",
"a pattern of numbers between -1.0 and 1.0, which controls the relative position of the centre pan in a pair of adjacent speakers (multichannel only)",
),
("f", "pitch1", ""),
("f", "pitch2", ""),
("f", "pitch3", ""),
("f", "portamento", ""),
("f", "rate", "used in SuperDirt softsynths as a control rate or 'speed'"),
(
"f",
"release",
"a pattern of numbers to specify the release time (in seconds) of an envelope applied to each sample.",
),
(
"f",
"resonance",
"a pattern of numbers from 0 to 1. Specifies the resonance of the low-pass filter.",
),
("f", "room", "a pattern of numbers from 0 to 1. Sets the level of reverb."),
("f", "sagogo", ""),
("f", "sclap", ""),
("f", "sclaves", ""),
("f", "scrash", ""),
("f", "semitone", ""),
(
"f",
"shape",
"wave shaping distortion, a pattern of numbers from 0 for no distortion up to 1 for loads of distortion.",
),
(
"f",
"size",
"a pattern of numbers from 0 to 1. Sets the perceptual size (reverb time) of the `room` to be used in reverb.",
),
("f", "slide", ""),
(
"f",
"speed",
"a pattern of numbers which changes the speed of sample playback, i.e. a cheap way of changing pitch. Negative values will play the sample backwards!",
),
("f", "squiz", ""),
("f", "stutterdepth", ""),
("f", "stuttertime", ""),
("f", "sustain", ""),
("f", "timescale", ""),
("f", "timescalewin", ""),
("f", "tomdecay", ""),
(
"s",
"unit",
'used in conjunction with `speed`, accepts values of "r" (rate, default behavior), "c" (cycles), or "s" (seconds). Using `unit "c"` means `speed` will be interpreted in units of cycles, e.g. `speed "1"` means samples will be stretched to fill a cycle. Using `unit "s"` means the playback speed will be adjusted so that the duration is the number of seconds specified by `speed`.',
),
("f", "velocity", ""),
("f", "vcfegint", ""),
("f", "vcoegint", ""),
("f", "voice", ""),
(
"s",
"vowel",
"formant filter to make things sound like vowels, a pattern of either `a`, `e`, `i`, `o` or `u`. Use a rest (`~`) for no effect.",
),
("f", "waveloss", ""),
("f", "dur", ""),
("f", "modwheel", ""),
("f", "expression", ""),
("f", "sustainpedal", ""),
(
"f",
"tremolodepth",
"Tremolo Audio DSP effect | params are 'tremolorate' and 'tremolodepth'",
),
(
"f",
"tremolorate",
"Tremolo Audio DSP effect | params are 'tremolorate' and 'tremolodepth'",
),
(
"f",
"phaserdepth",
"Phaser Audio DSP effect | params are 'phaserrate' and 'phaserdepth'",
),
(
"f",
"phaserrate",
"Phaser Audio DSP effect | params are 'phaserrate' and 'phaserdepth'",
),
("f", "fshift", "frequency shifter"),
("f", "fshiftnote", "frequency shifter"),
("f", "fshiftphase", "frequency shifter"),
("f", "triode", "tube distortion"),
("f", "krush", "shape/bass enhancer"),
("f", "kcutoff", ""),
("f", "octer", "octaver effect"),
("f", "octersub", "octaver effect"),
("f", "octersubsub", "octaver effect"),
("f", "ring", "ring modulation"),
("f", "ringf", "ring modulation"),
("f", "ringdf", "ring modulation"),
("f", "distort", "noisy fuzzy distortion"),
("f", "freeze", "Spectral freeze"),
("f", "xsdelay", ""),
("f", "tsdelay", ""),
("f", "real", "Spectral conform"),
("f", "imag", ""),
("f", "enhance", "Spectral enhance"),
("f", "partials", ""),
("f", "comb", "Spectral comb"),
("f", "smear", "Spectral smear"),
("f", "scram", "Spectral scramble"),
("f", "binshift", "Spectral binshift"),
("f", "hbrick", "High pass sort of spectral filter"),
("f", "lbrick", "Low pass sort of spectral filter"),
("f", "midichan", ""),
("f", "control", ""),
("f", "ccn", ""),
("f", "ccv", ""),
("f", "polyTouch", ""),
("f", "midibend", ""),
("f", "miditouch", ""),
("f", "ctlNum", ""),
("f", "frameRate", ""),
("f", "frames", ""),
("f", "hours", ""),
("s", "midicmd", ""),
("f", "minutes", ""),
("f", "progNum", ""),
("f", "seconds", ""),
("f", "songPtr", ""),
("f", "uid", ""),
("f", "val", ""),
("f", "cps", ""),
]
controls = []
module_obj = sys.modules[__name__]
# This had to go in its own function, for weird scoping reasons..
def make_control(name):
def ctrl(*args):
return sequence(*[reify(arg) for arg in args]).fmap(lambda v: {name: v})
def ctrl_pattern(self, *args):
return self >> sequence(*[reify(arg) for arg in args]).fmap(lambda v: {name: v})
# setattr(Pattern, name, lambda pat: Pattern(reify(pat).fmap(lambda v: {name: v}).query))
setattr(module_obj, name, ctrl)
setattr(Pattern, name, ctrl_pattern)
return ctrl
for t, name, desc in generic_params:
make_control(name)
def create_param(name):
"""Creates a new control function with the given name"""
return make_control(name)
def create_params(names):
"""Creates a new control functions from the given list of names"""
return [make_control(name) for name in names]
sound = s
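# Illustrative usage sketch (not part of the module; assumes sequence/reify/fmap
# from .pattern behave like their Vortex/TidalCycles counterparts, turning plain
# values into patterns):
#
#   s("bd")        # a pattern of {"s": "bd"} dictionaries
#   s("bd").n(2)   # the generated Pattern method form, merging {"n": 2} into it
#
# Every entry of generic_params becomes both a module-level function and a
# Pattern method with the same name; `sound` is kept as an alias of `s`.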
| /sardine_system-0.4.0-py3-none-any.whl/sardine_core/sequences/tidal_parser/control.py | 0.486819 | 0.608187 | control.py | pypi |
from osc4py3 import oscbuildparse
from osc4py3.as_eventloop import osc_send, osc_udp_client
from abc import ABC
from typing import Dict, Any, Optional
from .pattern import *
from time import time
class BaseStream(ABC):
"""
A class for playing control pattern events
It should be subscribed to a LinkClock instance.
Parameters
----------
name: Optional[str]
Name of the stream instance
"""
    def __init__(self, name: Optional[str] = None):
self.name = name
self.pattern = None
def notify_tick(
self,
clock,
cycle: tuple,
cycles_per_second: float,
beats_per_cycle: int,
now: int | float,
):
"""Called by a Clock every time it ticks, when subscribed to it"""
if not self.pattern:
return
# Querying the pattern using time information
cycle_from, cycle_to = cycle
es = self.pattern.onsets_only().query(TimeSpan(cycle_from, cycle_to))
# Processing individual events
for e in es:
cycle_on, cycle_off = e.whole.begin, e.whole.end
on = clock.timeAtBeat(cycle_on * beats_per_cycle)
off = clock.timeAtBeat(cycle_off * beats_per_cycle)
delta_secs = off - on
link_secs = clock.shifted_time + clock._tidal_nudge
nudge = e.value.get("nudge", 0)
ts = (on) + self.latency + nudge
self.notify_event(
e.value,
timestamp=ts,
cps=float(cycles_per_second),
cycle=float(on),
delta=float(delta_secs),
)
def notify_event(
self,
event: Dict[str, Any],
timestamp: float,
cps: float,
cycle: float,
delta: float,
):
"""Called by `notify_tick` with the event and timestamp that should be played"""
raise NotImplementedError
def __repr__(self):
pattern_repr = " \n" + repr(self.pattern) if self.pattern else ""
return f"<{self.__class__.__name__} {repr(self.name)}{pattern_repr}>"
class TidalStream(BaseStream):
def __init__(self, osc_client, data_only: bool, latency=0.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data_only = data_only
self.latency = latency
self.name = "vortex"
self._osc_client = osc_client
self._last_value: Optional[list] = None
def get(self) -> Optional[dict]:
"""Return a dictionary of the last message played by the stream"""
        try:
            msg = self._last_value
            return dict(zip(msg[::2], msg[1::2]))
except Exception:
return None
def notify_event(
self,
event: Dict[str, Any],
timestamp: float,
cps: float,
cycle: float,
delta: float,
):
msg = []
for key, val in event.items():
if isinstance(val, Fraction):
val = float(val)
msg.append(key)
msg.append(val)
# This second parsing mechanism is needed to get rid
        # of the faulty messages generated by the Vortex mini
# notation. TODO: remove when correcting the notation.
correct_msg = []
for _ in msg:
if isinstance(_, dict):
correct_msg = [
                    *correct_msg,
*list(sum([(i, v) for (i, v) in _.items()], ())),
]
else:
correct_msg.append(_)
correct_msg.extend(["cps", cps, "cycle", cycle, "delta", delta])
# We need to remove a rogue ['s'] if using a sample with index
# This operation can fail so we need to use a try-except block
if "n" in correct_msg:
try:
correct_msg.remove(["s"])
except ValueError:
pass
self._last_value = correct_msg
if not self.data_only:
self._osc_client._send_timed_message(
address="/dirt/play",
message=correct_msg,
)
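    # Illustrative example (not part of the library): an event such as
    # {"s": "bd", "n": 2} with cps=0.5, cycle=1.0 and delta=0.25 is flattened to
    # ["s", "bd", "n", 2, "cps", 0.5, "cycle", 1.0, "delta", 0.25] and, unless
    # data_only is set, sent as a timed OSC message to the "/dirt/play" address.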
| /sardine_system-0.4.0-py3-none-any.whl/sardine_core/sequences/tidal_parser/stream.py | 0.801742 | 0.301973 | stream.py | pypi |