| code (stringlengths 114–1.05M) | path (stringlengths 3–312) | quality_prob (float64 0.5–0.99) | learning_prob (float64 0.2–1) | filename (stringlengths 3–168) | kind (stringclasses 1 value) |
|---|---|---|---|---|---|
import keras
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.layers import Conv2D, concatenate, BatchNormalization, Lambda, Input, multiply, add, ZeroPadding2D, Activation, Layer, MaxPooling2D, Dropout
from keras import regularizers
import keras.backend as K
import tensorflow as tf
import numpy as np
RESIZE_FACTOR = 2
def resize_bilinear(x):
return tf.image.resize_bilinear(x, size=[K.shape(x)[1]*RESIZE_FACTOR, K.shape(x)[2]*RESIZE_FACTOR])
def resize_output_shape(input_shape):
shape = list(input_shape)
assert len(shape) == 4
shape[1] *= RESIZE_FACTOR
shape[2] *= RESIZE_FACTOR
return tuple(shape)
class EAST_model:
def __init__(self, input_size=512):
input_image = Input(shape=(None, None, 3), name='input_image')
overly_small_text_region_training_mask = Input(shape=(None, None, 1), name='overly_small_text_region_training_mask')
text_region_boundary_training_mask = Input(shape=(None, None, 1), name='text_region_boundary_training_mask')
target_score_map = Input(shape=(None, None, 1), name='target_score_map')
resnet = ResNet50(input_tensor=input_image, weights='imagenet', include_top=False, pooling=None)
x = resnet.get_layer('activation_49').output
x = Lambda(resize_bilinear, name='resize_1')(x)
x = concatenate([x, resnet.get_layer('activation_40').output], axis=3)
x = Conv2D(128, (1, 1), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
x = Activation('relu')(x)
x = Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
x = Activation('relu')(x)
x = Lambda(resize_bilinear, name='resize_2')(x)
x = concatenate([x, resnet.get_layer('activation_22').output], axis=3)
x = Conv2D(64, (1, 1), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
x = Activation('relu')(x)
x = Lambda(resize_bilinear, name='resize_3')(x)
x = concatenate([x, ZeroPadding2D(((1, 0),(1, 0)))(resnet.get_layer('activation_10').output)], axis=3)
x = Conv2D(32, (1, 1), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
x = Activation('relu')(x)
x = Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
x = Activation('relu')(x)
x = Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
x = Activation('relu')(x)
pred_score_map = Conv2D(1, (1, 1), activation=tf.nn.sigmoid, name='pred_score_map')(x)
rbox_geo_map = Conv2D(4, (1, 1), activation=tf.nn.sigmoid, name='rbox_geo_map')(x)
rbox_geo_map = Lambda(lambda x: x * input_size)(rbox_geo_map)
angle_map = Conv2D(1, (1, 1), activation=tf.nn.sigmoid, name='rbox_angle_map')(x)
angle_map = Lambda(lambda x: (x - 0.5) * np.pi / 2)(angle_map)
pred_geo_map = concatenate([rbox_geo_map, angle_map], axis=3, name='pred_geo_map')
model = Model(inputs=[input_image, overly_small_text_region_training_mask, text_region_boundary_training_mask, target_score_map], outputs=[pred_score_map, pred_geo_map])
self.model = model
self.input_image = input_image
self.overly_small_text_region_training_mask = overly_small_text_region_training_mask
self.text_region_boundary_training_mask = text_region_boundary_training_mask
self.target_score_map = target_score_map
self.pred_score_map = pred_score_map
self.pred_geo_map = pred_geo_map
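
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how this class might be instantiated and inspected.
# It assumes a TF 1.x / standalone-Keras environment (the code above relies on
# tf.image.resize_bilinear and version-dependent ResNet50 layer names such as
# 'activation_49') plus network access for the 'imagenet' weights download.
if __name__ == "__main__":
    east = EAST_model(input_size=512)
    # The underlying Keras Model is exposed on .model; its four inputs (image,
    # two training masks, target score map) feed the two outputs
    # (pred_score_map and pred_geo_map).
    east.model.summary()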
| /scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/detector/EAST/model.py | 0.921344 | 0.558207 | model.py | pypi |
from __future__ import annotations
import json
import logging
import os
import posixpath
import re
import shlex
import tempfile
from platform import system
from typing import Literal, Optional, TypedDict, Union, cast
from ffmpeg_progress_yield import FfmpegProgress
from tqdm import tqdm
IS_WIN = system() in ["Windows", "cli"]
logger = logging.getLogger("scenecut-extractor")
def win_path_check(path: str) -> str:
"""
Format a file path correctly for Windows
Args:
path (str): The path to format
Returns:
str: The formatted path
"""
if IS_WIN:
# inside filters, we need to escape the colon twice
return path.replace("\\", "/").replace(":", "\\\\:")
return path
class ScenecutInfo(TypedDict):
frame: int
"""The frame number"""
pts: int
"""The PTS of the frame"""
pts_time: float
"""The PTS in wall clock time of the frame"""
score: float
"""The scenecut detection score"""
class ScenecutExtractor:
DEFAULT_THRESHOLD: float = 0.3
def __init__(self, input_file: str) -> None:
"""
Create a new ScenecutExtractor instance.
Args:
input_file (str): the input file
"""
self.scenecuts: Optional[list[ScenecutInfo]] = None
self.input_file = input_file
def get_as_csv(self) -> str:
"""
Return the scene cuts as CSV.
Returns:
str: the scene cuts as CSV
Raises:
RuntimeError: if no scene cuts have been calculated yet
"""
if self.scenecuts is None:
raise RuntimeError("No scene cuts calculated yet")
ret = ",".join(self.scenecuts[0].keys()) + "\n"
ret += "\n".join(
[",".join([str(r) for r in row.values()]) for row in self.scenecuts]
)
return ret
def get_as_json(self) -> str:
"""
Return the scene cuts as JSON.
Returns:
str: the scene cuts as JSON
Raises:
RuntimeError: if no scene cuts have been calculated yet
"""
if self.scenecuts is None:
raise RuntimeError("No scene cuts calculated yet")
return json.dumps(self.scenecuts, indent=2)
def get_scenecuts(self) -> list[ScenecutInfo]:
"""
Get the scene cuts.
Returns:
list[ScenecutInfo]: the scene cuts
Raises:
RuntimeError: if no scene cuts have been calculated yet
"""
if self.scenecuts is None:
raise RuntimeError("No scene cuts calculated yet")
return self.scenecuts
def calculate_scenecuts(
self, threshold: float = DEFAULT_THRESHOLD, progress: bool = False
) -> None:
"""
Calculate scene cuts with ffmpeg.
Args:
threshold (float): Threshold (between 0 and 1)
progress (bool): Show a progress bar on stderr
"""
if not (0 <= threshold <= 1):
raise RuntimeError("Threshold must be between 0 and 1")
temp_dir = tempfile.mkdtemp()
temp_file_name = posixpath.join(
temp_dir, "scenecut-extractor-" + os.path.basename(self.input_file) + ".txt"
)
logger.debug("Writing to temp file: " + temp_file_name)
try:
cmd = [
"ffmpeg",
"-nostdin",
"-loglevel",
"error",
"-y",
"-i",
self.input_file,
"-vf",
"select=gte(scene\,0),metadata=print:file="
+ win_path_check(temp_file_name),
"-an",
"-f",
"null",
os.devnull,
]
logger.debug(
"Running ffmpeg command: " + " ".join([shlex.quote(c) for c in cmd])
)
ff = FfmpegProgress(cmd)
if progress:
with tqdm(total=100, position=1) as pbar:
for p in ff.run_command_with_progress():
pbar.update(p - pbar.n)
else:
for _ in ff.run_command_with_progress():
pass
lines: list[str] = []
if os.path.isfile(temp_file_name):
with open(temp_file_name, "r") as out_f:
lines = out_f.readlines()
frames: list[ScenecutInfo] = []
last_frame_info: dict = {}
for line in lines:
line = line.strip()
if line.startswith("frame"):
if ret := re.match(
r"frame:(?P<frame>\d+)\s+pts:(?P<pts>[\d\.]+)\s+pts_time:(?P<pts_time>[\d\.]+)",
line,
):
ret_matches = ret.groupdict()
last_frame_info["frame"] = int(ret_matches["frame"])
last_frame_info["pts"] = float(ret_matches["pts"])
last_frame_info["pts_time"] = float(ret_matches["pts_time"])
else:
raise RuntimeError("Wrongly formatted line: " + line)
continue
if line.startswith("lavfi.scene_score") and (splits := line.split("=")):
if len(splits) == 2:
last_frame_info["score"] = float(splits[1])
else:
raise RuntimeError("Wrongly formatted line: " + line)
frames.append(cast(ScenecutInfo, last_frame_info))
last_frame_info = {}
self.scenecuts = [f for f in frames if f["score"] >= threshold]
except Exception as e:
raise e
finally:
if os.path.isfile(temp_file_name):
logger.debug("Removing temp file: " + temp_file_name)
os.remove(temp_file_name)
def extract_scenes(
self,
output_directory: str,
no_copy: bool = False,
progress: bool = False,
):
"""
Extract all scenes to individual files.
Args:
output_directory (str): Output directory.
no_copy (bool, optional): Do not copy the streams, reencode them. Defaults to False.
progress (bool, optional): Show progress bar. Defaults to False.
"""
if self.scenecuts is None:
raise RuntimeError("No scene cuts calculated yet")
# insert one at the beginning
scenecuts = self.scenecuts[:]
scenecuts.insert(0, {"pts_time": 0, "pts": 0, "frame": 0, "score": 0})
if not os.path.exists(output_directory):
os.makedirs(output_directory, exist_ok=True)
for scene, next_scene in zip(scenecuts, scenecuts[1:]):
self.cut_part_from_file(
self.input_file,
output_directory,
scene["pts_time"],
next_scene["pts_time"],
no_copy,
progress,
)
@staticmethod
def cut_part_from_file(
input_file: str,
output_directory: str,
start: Union[float, None] = None,
end: Union[float, None, Literal[""]] = None,
no_copy: bool = False,
progress: bool = False,
):
"""
Cut a part of a video.
Args:
input_file (str): Input file.
output_directory (str): Output directory.
start (Union[float, None], optional): Start time. Defaults to None.
end (Union[float, None, Literal[""]], optional): End time. Defaults to None.
no_copy (bool, optional): Do not copy the streams, reencode them. Defaults to False.
progress (bool, optional): Show progress bar. Defaults to False.
FIXME: This has been copy-pasted from ffmpeg-black-split.
"""
if start is None:
start = 0
if end is not None and end != "":
to_args = ["-t", str(end - start)]
else:
end = ""
to_args = []
if no_copy:
codec_args = ["-c:v", "libx264", "-c:a", "aac"]
else:
codec_args = ["-c", "copy"]
suffix = f"{start:.3f}-{end:.3f}.mkv"
prefix = os.path.splitext(os.path.basename(input_file))[0]
output_file = os.path.join(output_directory, f"{prefix}_{suffix}")
cmd = [
"ffmpeg",
"-hide_banner",
"-y",
"-ss",
str(start),
"-i",
input_file,
*to_args,
*codec_args,
"-map",
"0",
output_file,
]
cmd_q = " ".join([shlex.quote(c) for c in cmd])
logger.debug("Running ffmpeg command: {}".format(cmd_q))
ff = FfmpegProgress(cmd)
if progress:
with tqdm(total=100, position=1) as pbar:
for p in ff.run_command_with_progress():
pbar.update(p - pbar.n)
else:
for _ in ff.run_command_with_progress():
pass
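
# --- Usage sketch (not part of the original module) ---
# A minimal example of driving the class above. It assumes `ffmpeg` is on PATH
# and "input.mp4" is a placeholder path for a real video file.
if __name__ == "__main__":
    extractor = ScenecutExtractor("input.mp4")
    extractor.calculate_scenecuts(threshold=0.3, progress=True)
    print(extractor.get_as_json())
    # Optionally split the video at the detected cuts (stream copy by default):
    # extractor.extract_scenes("scenes_out")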
| /scenecut_extractor-0.6.2.tar.gz/scenecut_extractor-0.6.2/scenecut_extractor/_scenecut_extractor.py | 0.805938 | 0.23699 | _scenecut_extractor.py | pypi |
# scenedataset
PyTorch dataset which uses PySceneDetect to split videos into scenes.
This dataset is useful when you have a large video dataset and you want to train a model on each scene of each video.
Instead of splitting the videos into scenes manually, this dataset uses PySceneDetect to automatically split the videos into scenes.
Decord is used to load video frames on the fly so that not all videos are loaded into memory, keeping the dataset lightweight.
The basic pipeline is the following:
1. Use PySceneDetect to split the videos into scenes. During this step, each video is processed with PySceneDetect and the scene information (Scene 1 is composed of frames 0 to 100, Scene 2 is composed of frames 101 to 200, etc.) is saved in a JSON file.
2. [Optional] It can be useful to remove duplicate frames inside a scene, as well as scenes that do not provide meaningful information (when all frames are identical). When this option is enabled, the dataset will go through each scene and remove duplicate frames.
3. [Optional] Since scenes can be of varying length, it can be useful for a model to receive scenes of roughly the same length rather than, say, one scene of 100 frames and another of 200 frames. To solve this, the dataset can split each scene into multiple scenes of a fixed length (see the sketch after this list). For example, if a scene is composed of 100 frames and the fixed length is 50, the dataset will split it into two scenes of 50 frames each.
4. The dataset is created by loading the JSON file created in step 1 (or in step 2). When the dataset is loaded, the frames of each scene are loaded on the fly using Decord.
Note that the time-consuming steps 1 and 2 are only done once: the JSON file is saved and can be reused for future training runs. When the parameters of the dataset are changed, the JSON file is automatically regenerated under a different name.
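As a rough illustration of step 3 (a minimal sketch, not the library's actual implementation — `split_scene` and its arguments are hypothetical names), splitting a scene's frame range into fixed-length chunks could look like this:
```python
def split_scene(start: int, end: int, min_len: int, max_len: int) -> list[tuple[int, int]]:
    """Split the frame range [start, end) into chunks of at most max_len frames,
    dropping a trailing chunk shorter than min_len."""
    chunks = []
    frame = start
    while frame < end:
        chunk_end = min(frame + max_len, end)
        if chunk_end - frame >= min_len:
            chunks.append((frame, chunk_end))
        frame = chunk_end
    return chunks

# A 100-frame scene with min_max_len=(15, 25) becomes four 25-frame scenes.
print(split_scene(0, 100, 15, 25))  # [(0, 25), (25, 50), (50, 75), (75, 100)]
```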
## Installation
```bash
$ pip install scenedataset
```
## Usage
A practical example can be found on the [TorchGANime](https://github.com/Kurokabe/TorchGANime/blob/master/torchganime/data/dataloader/video.py) project
```python
from scenedataset import SceneDataset
from torchvision import transforms
from torchvision.transforms import _transforms_video as video_transforms
transformations = transforms.Compose(
[
# Example of transformations for data augmentation
video_transforms.RandomCropVideo(224),
video_transforms.RandomHorizontalFlipVideo(),
video_transforms.ToTensorVideo(),
]
)
dataset = SceneDataset(
# The paths can be a directory or a list of files. The wildcard * can be used to select multiple files.
paths=["/path/to/video1.mp4", "/path/to/specific/directory/*.mp4"],
transform=transformations, # Transformations will be applied before the scene is returned
recursive=True, # Whether or not the directory should be searched recursively
show_progress=True, # Whether or not to show the progress when scenes are detected with PySceneDetect
min_max_len=(15, 25), # When specified, long scenes will be split into shorter scenes whose length is between min_max_len
duplicate_threshold=0.01, # When specified, consecutive frames in a scene whose difference is below this threshold will be removed. For instance, two identical frames have a difference of 0.0, which is below the threshold of 0.01
duplicate_metric="mse", # The metric used to compute the difference between two frames. Can be "mse" or "mae" or "lpips"
device="cpu", # The device used to compute the difference between two frames. Can be "cpu" or "gpu"
initial_shuffle=True, # Whether or not the dataset is shuffled once before the first epoch. Can be useful for the validation dataset to contain scenes from different videos (if you take only the first N scenes, for instance)
root_dir="/path/to/my/cache/folder", # The folder where the JSON file will be saved. If not specified, the JSON file will be saved in ~/.scene_dataset
detector="content", # The detector used by PySceneDetect. Can be "content" or "threshold" or "adaptive"
# Additional arguments passed to the PySceneDetect detector
threshold=30, # The threshold used by the threshold detector
min_scene_len=15, # The minimum length of a scene
)
first_scene = dataset[0]
number_of_scenes = len(dataset)
```
For the `duplicate_metric` parameter, the following metrics are available (the first two are sketched just after this list):
* `mse`: Mean Squared Error
* `mae`: Mean Absolute Error
* `lpips`: [Perceptual Similarity](https://github.com/richzhang/PerceptualSimilarity). This metric is slower than the other two; running it with `device` set to `gpu` is recommended.
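As an illustration of what the `mse` and `mae` differences amount to (a minimal sketch with plain NumPy, assumed rather than taken from the package source — the frame normalization is an assumption):
```python
import numpy as np

def frame_difference(a: np.ndarray, b: np.ndarray, metric: str = "mse") -> float:
    """Mean squared / absolute error between two frames scaled to [0, 1]."""
    a = a.astype(np.float32) / 255.0
    b = b.astype(np.float32) / 255.0
    if metric == "mse":
        return float(np.mean((a - b) ** 2))
    if metric == "mae":
        return float(np.mean(np.abs(a - b)))
    raise ValueError(f"Unsupported metric: {metric}")

# Two identical frames give 0.0, which falls below a duplicate_threshold of 0.01.
frame = np.zeros((224, 224, 3), dtype=np.uint8)
print(frame_difference(frame, frame.copy(), "mse"))  # 0.0
```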
More information about the PySceneDetect detectors and parameters can be found [in the PySceneDetect documentation](http://scenedetect.com/projects/Manual/en/latest/api/detectors.html).
## Contributing
Interested in contributing? Check out the contributing guidelines. Please note that this project is released with a Code of Conduct. By contributing to this project, you agree to abide by its terms.
## License
`scenedataset` was created by Farid Abdalla. It is licensed under the terms of the BSD 3-Clause license.
## Credits
`scenedataset` was created with [`cookiecutter`](https://cookiecutter.readthedocs.io/en/latest/) and the `py-pkgs-cookiecutter` [template](https://github.com/py-pkgs/py-pkgs-cookiecutter).
| /scenedataset-1.0.2.tar.gz/scenedataset-1.0.2/README.md | 0.871966 | 0.989957 | README.md | pypi |
__author__ = 'Dirk Meulenbelt'
__date__ = '10.05.22'
import datetime
import json
import logging
import random
import requests
import urllib3
from .jwt_decode import validate_jwt_token
from .logger import configure_logger
from .nodesequencer_header_schema import nodesequencer_header_schema
from .scenemark_schema import scenemark_schema
from .spec import (
EventType,
NICEItemType,
ProcessingStatus,
DataType,
MediaFormat
)
from .utils import (
get_my_version_number,
extract_node_datatype_mode,
get_regions_of_interest,
get_latest_scenedata_version_number
)
from .validators import (
ValidationError,
request_json_validator
)
logger = logging.getLogger(__name__)
logger = configure_logger(logger, debug=True)
# Disable warning for local development
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class SceneMark:
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-instance-attributes
"""
This class loads a SceneMark and contains various methods to update the
SceneMark in the course of Node Processing. It provides various methods
for retrieving information from, and adding information to, the SceneMark
data structure.
:param request: Incoming request including a SceneMark and a NodeSequencerAddress
:param node_id: Environment variable assigned to the Node through the Developer Portal.
The Node ID is the unique identifier of the Node.
:type node_id: string
:param disable_token_verification: Allows you to turn off the token validation.
:type disable_token_verification: bool
:param disable_linter: Allows you to turn off schema validation of the incoming
NodeSequencerHeader and SceneMark.
:type disable_linter: bool
"""
def __init__ (
self,
request,
node_id : str,
disable_token_verification: bool = False,
disable_linter: bool = False
):
# --- Validation
self.nodesequencer_header = request.json['NodeSequencerHeader']
if not disable_token_verification:
validate_jwt_token(self.nodesequencer_header['NodeToken'])
self.scenemark = request.json['SceneMark']
self.disable_linter = disable_linter
if not self.disable_linter:
request_json_validator(
self.nodesequencer_header,
nodesequencer_header_schema,
"NodeSequencer Header"
)
# Verify SceneMark input to match the Spec
request_json_validator(
self.scenemark,
scenemark_schema,
"SceneMark"
)
logger.info(f"Processing SceneMark: {self.scenemark['SceneMarkID']}")
# --- Set Node Parameters
self.node_id = node_id
self.device_id = self.scenemark['SceneMarkID'][41:45]
# --- Version Control
# Get the number of the current node in the NodeSequence
self.my_version_number = get_my_version_number(self.scenemark)
# Update the version control with the NodeID & TimeStamp
self.my_timestamp = self.get_current_utc_timestamp()
# Automatically add a Version Control item to the Node
self.add_version_control_item()
# --- Node Objectives
# Get the DataType the Node works on
self.node_datatype_mode = extract_node_datatype_mode(self.nodesequencer_header)
# Get the polygon if there is one.
self.regions_of_interest = get_regions_of_interest(self.nodesequencer_header)
# Find the latest scenedata additions
self.latest_sd_version = get_latest_scenedata_version_number(self.scenemark)
# Get the targets to work on
self.targets = self.get_scenedata_uri_list()
logger.info(f"Working on these items: {self.targets}")
def save_request(self, request_type : str, name : str):
"""
Used for development purposes to manually check the request.
Saves the request as a json to file
:param request_type: 'SM' for SceneMark, 'NSH' for the NodeSequencerHeader
:type request_type: string
:param name: The name you want to save the json as. Defaults to "scenemark" or
"nodesequencer_header" based on request_type setting
:type name: string
"""
assert request_type in ("SM", "NSH")
if request_type == "SM":
logger.info(f"Saving SceneMark as '{name}.json'")
if not name:
name = "scenemark"
with open(f"{name}.json", 'w', encoding="utf-8") as json_file:
json.dump(self.scenemark, json_file)
elif request_type == "NSH":
logger.info(f"Saving NodeSequener Header as '{name}.json'")
if not name:
name = "nodesequencer_header"
with open(f"{name}.json", 'w', encoding="utf-8") as json_file:
json.dump(self.nodesequencer_header, json_file)
def get_scenedata_uri_list(self):
"""
Creates a list that contains all the URIs
like so:
:Example:
[\n
https://scenedatauri.example.com/1234_still.jpg,\n
https://scenedatauri.example.com/1234_still2.jpg\n
]
:return: List of target SceneData URIs
:rtype: list
"""
return [scenedata_item['SceneDataURI'] \
for scenedata_item in self.scenemark['SceneDataList'] \
if (scenedata_item['DataType'] == self.node_datatype_mode) and \
(scenedata_item['VersionNumber'] == self.latest_sd_version)]
def get_scenedata_id_uri_dict(self, targets_only = True):
"""
Creates a dictionary that has the SceneDataID as key, and the SceneDataURI as the value.
like so:
:Example:
{\n
'SDT_4f2b308f-851a-43ae-819a-0a255dc194a0_dd37_73ac01':\n
'https://scenedatauri.example.com/1234_thumb.jpg',\n
'SDT_4f2b308f-851a-43ae-819a-0a255dc194a0_dd37_73ac02':\n
'https://scenedatauri.example.com/1234_still.jpg',\n
'SDT_4f2b308f-851a-43ae-819a-0a255dc194a0_dd37_73ac03':\n
'https://scenedatauri.example.com/1234_vid.mp4',\n
}
:return: dictionary of {scenedata_id -> scenedata_uri}
:rtype: dict
"""
if targets_only:
return {scenedata_item['SceneDataID']:scenedata_item['SceneDataURI'] \
for scenedata_item in self.scenemark['SceneDataList'] \
if (scenedata_item['DataType'] == self.node_datatype_mode) and \
(scenedata_item['VersionNumber'] == self.latest_sd_version)}
return {scenedata_item['SceneDataID']:scenedata_item['SceneDataURI'] \
for scenedata_item in self.scenemark['SceneDataList']}
def get_uri_scenedata_id_dict(self, targets_only = True):
"""
Creates a dictionary that has the SceneDataURI as the key, and the SceneDataID as the value.
like so:
{\n
'https://scenedatauri.example.com/1234_thumb.jpg':\n
'SDT_4f2b308f-851a-43ae-819a-0a255dc194a0_dd37_73ac01',\n
'https://scenedatauri.example.com/1234_still.jpg':\n
'SDT_4f2b308f-851a-43ae-819a-0a255dc194a0_dd37_73ac02',\n
'https://scenedatauri.example.com/1234_vid.mp4':\n
'SDT_4f2b308f-851a-43ae-819a-0a255dc194a0_dd37_73ac03',\n
}
:return: dictionary of {scenedata_uri -> scenedata_id}
:rtype: dict
"""
if targets_only:
return {scenedata_item['SceneDataURI']:scenedata_item['SceneDataID'] \
for scenedata_item in self.scenemark['SceneDataList'] \
if (scenedata_item['DataType'] == self.node_datatype_mode) and \
(scenedata_item['VersionNumber'] == self.latest_sd_version)}
return {scenedata_item['SceneDataURI']:scenedata_item['SceneDataID'] \
for scenedata_item in self.scenemark['SceneDataList']}
def get_id_from_uri(self, uri : str):
"""
Gets the SceneDataID of the SceneData piece relating to the URI you put in.
:param scenedata_uri: Uri to access SceneData
:type scenedata_uri: string
:return: SceneDataID corresponding to the URI
:rtype: string
"""
for scenedata in self.scenemark['SceneDataList']:
if scenedata['SceneDataURI'] == uri:
return scenedata['SceneDataID']
logger.warning("No SceneData associated with the SceneMark")
return "No SceneDataID found!"
def get_uri_from_id(self, scenedata_id : str):
"""
Gets the SceneDataURI of the SceneDataID you put in.
:param scenedata_id: SceneDataID
:type scenedata_id: string
:return: SceneDataURI corresponding to the ID
:rtype: string
:raises ValidationError: "No match" if there isn't a match
"""
for scenedata in self.scenemark['SceneDataList']:
if scenedata['SceneDataID'] == scenedata_id:
return scenedata['SceneDataURI']
error = "No match found"
logger.exception(error)
raise ValidationError(error)
def get_detected_objects_from_sd_id(self, scenedata_id):
"""
Creates a list of DetectedObjects that have the passed
SceneDataID listed as RelatedSceneData.
[\n
{\n
{\n
"NICEItemType": "Human",\n
"CustomItemType": "",\n
"ItemID": "",\n
"ItemTypeCount": 1,\n
"Probability": 0.95,\n
"Attributes": [\n
],\n
"BoundingBox": {\n
"XCoordinate": 0.0244,\n
"YCoordinate": 0.4522,\n
"Height": 0.873,\n
"Width": 0.566,\n
},\n
"RelatedSceneData": "SDT_3c84184e-7a50-449e-af06-dcdf415bebce_c88e_360302"\n
}\n
}\n
]
:return: list of DetectedObjects
:rtype: list
"""
det_objects = []
for analysis_list_item in self.scenemark['AnalysisList']:
for det_object_item in analysis_list_item['DetectedObjects']:
if det_object_item['RelatedSceneData'] == scenedata_id:
det_objects.append(det_object_item)
return det_objects
def get_detected_objects_from_sd_uri(self, scenedata_uri):
"""
Creates a list of DetectedObjects that have the passed
SceneDataURI's associated ID listed as RelatedSceneData.
:return: list of DetectedObjects
:rtype: list
"""
id = self.get_id_from_uri(scenedata_uri)
return self.get_detected_objects_from_sd_id(id)
def generate_scenedata_id(self):
"""
Generates a SceneDataID using the Node ID
:Example:
SDT_9cdff73f-5db1-4e64-9656-ef83bdfeeb90_0001_e4041246
:return: SceneDataID (see example)
:rtype: string
"""
return f"SDT_{self.node_id}_{self.device_id}_{self.generate_random_id(6)}"
@staticmethod
def generate_bounding_box(
x_c : float,
y_c : float,
height : float,
width: float,
):
"""
Generates a Bounding Box using coordinates (from e.g. YOLO)
Can take both pixels and relative coordinates. The latter is preferred.
:Example:
{\n
"XCoordinate": 0.12,\n
"YCoordinate": 0.3,\n
"Height": 0.53,\n
"Width": 0.39\n
}
:param x_c: Top-left x coordinate
:type x_c: float
:param y_c: Top-left y coordinate
:type y_c: float
:param height: Height of the bounding box
:type height: float
:param width: Width of the bounding box
:type width: float
:return: A dictionary bounding box object, see example
:rtype: dict
"""
assert (isinstance(x_c, float) \
and isinstance(y_c, float) \
and isinstance(height, float) \
and isinstance(width, float)), \
logger.exception("Arguments need to be integers")
bounding_box_item = {}
bounding_box_item['XCoordinate'] = x_c
bounding_box_item['YCoordinate'] = y_c
bounding_box_item['Height'] = height
bounding_box_item['Width'] = width
return bounding_box_item
@staticmethod
def generate_directional_movement_item(id : str, uri : str = None):
"""
Generate a directional movement item dictionary with specified ID and URI.
This method creates a dictionary with keys 'ID' and 'URI' and assigns the
provided values to these keys. This dictionary represents a directional
movement item.
:param id: The ID to assign to the 'ID' key in the resulting dictionary
:type id: str
:param uri: The URI to assign to the 'URI' key in the resulting dictionary
:type uri: str
:return: The resulting dictionary representing a directional movement item.
:rtype: dict
Example
-------
>>> generate_directional_movement_item('123', 'http://example.com')
{'ID': '123', 'URI': 'http://example.com'}
"""
dm_item = {}
dm_item['ID'] = id
dm_item['URI'] = uri
logger.info(f"DirectionalMovement of ID: {id} & URI: {uri} created")
return dm_item
def generate_attribute_item(
self,
attribute : str,
value : str,
probability_of_attribute : float = 1.0,
):
"""
Generates an Attribute list item, to Specify Attributes found
associated with the Detected Object.
:Example:
{\n
"VersionNumber": 1.0,\n
"Attribute": "Mood",\n
"Value": "Anger",\n
"ProbabilityOfAttribute": 0.8\n
}
:param attribute: Name of the attribute
:type attribute: string
:param value: Value of the attribute
:type value: string
:param probability_of_attribute: Confidence of the attribute found,
optional, defaulted to 1.0 == 100%
:type probability_of_attribute: float
:return: attribute item
:rtype: dict
"""
attribute_item = {}
attribute_item['VersionNumber'] = self.my_version_number
attribute_item['Attribute'] = attribute
attribute_item['Value'] = value
attribute_item['ProbabilityOfAttribute'] = probability_of_attribute
logger.info(f"Attribute item of {attribute}:{value} created")
return attribute_item
@staticmethod
def generate_detected_object_item(
nice_item_type : str,
related_scenedata_id : str = "None",
custom_item_type : str = "",
item_id : str = "",
item_type_count : int = 1,
probability : float = 1.0,
frame : int = 0,
timestamp : str = "",
directional_movement : dict = None,
attributes : list = [],
bounding_box : dict = None,
):
# pylint: disable=dangerous-default-value
"""
Generates a detected object item
:Example:
{\n
"NICEItemType": "Human",\n
"CustomItemType": "",\n
"ItemID": "Chris",\n
"ItemTypeCount": 1,\n
"Probability": 0.93,\n
"DirectionalMovement": {\n
"ID": "123-track1-123",\n
"URI": "mystorage.com/123-track-123?acessTOKEN"\n
},\n
"Frame": 10,
"TimeStamp": "",
"DirectionalMovement": {
"ID": "123",
"URI": "https://example.com"
},
"Attributes": [\n
{\n
"VersionNumber": 1.0,\n
"Attribute": "Mood",\n
"Value": "Anger",\n
"ProbabilityOfAttribute": 0.8\n
}\n
],\n
"BoundingBox": {\n
"XCoordinate": 10,\n
"YCoordinate": 30,\n
"Height": 10,\n
"Width": 10\n
},\n
"RelatedSceneData": "SDT_9cdff73f-5db1-4e64-9656-ef83bdfeeb90_0001_e4041246"\n
}
:param nice_item_type: Indicating the NICEITemType found
:type nice_item_type: string
:param related_scenedata_id: Indication of what item the algorithm ran on
:type related_scenedata_id: string
:param custom_item_type: Allows specifying of the custom NICEItemType,
defaults to "". Optional.
:type custom_item_type: string
:param item_id: Indicating an ID on the object.
E.g. the name of the person. Defaults to "". Optional.
:type item_id: string
:param item_type_count: Counting the amount of the stated NICEItemType,
optional, defaults to 1.
:type item_type_count: int
:param probability: Indicating the confidence on the item. Optional, defaults to 1.0.
:type probability: float
:param frame: Frame number where the item is detected. Defaults to 0.
:type frame: int
:param timestamp: Timestamp when the item is detected. Defaults to "".
:type timestamp: str
:param directional_movement: Contains the directional movement item (track ID & URI), defaults to None
:type directional_movement: dict
:param attributes: Contains attribute items, defaults to []
:type attributes: list
:param bounding_box: Contains the bounding box, defaults to None
:type bounding_box: dict
:return: dictionary containing a DetectedObject item, see example.
:rtype: dict
"""
assert nice_item_type in NICEItemType, \
logger.exception("This Item Type is not part of the Spec.")
detected_object = {}
detected_object['NICEItemType'] = nice_item_type
detected_object['RelatedSceneData'] = related_scenedata_id
detected_object['CustomItemType'] = custom_item_type
detected_object['ItemID'] = item_id
detected_object['ItemTypeCount'] = item_type_count
detected_object['Probability'] = probability
detected_object['Frame'] = frame
detected_object['TimeStamp'] = timestamp
detected_object['DirectionalMovement'] = directional_movement
detected_object['Attributes'] = attributes
detected_object['BoundingBox'] = bounding_box
logger.info(f"DetectedObjects item of NICEItemType '{nice_item_type}' generated")
return detected_object
def add_analysis_list_item(
self,
processing_status : str,
event_type : str,
custom_event_type : str = "",
analysis_description : str = "",
analysis_id : str = "",
total_item_count : int = 0,
error_message : str = "",
detected_objects : list = [],
):
# pylint: disable=dangerous-default-value
"""
Updates the SceneMark state with the unique analysis list item that is added by an AI Node.
This could be considered the main event of the Node SDKs. Updates the SceneMark in place.
:Example:
{\n
"VersionNumber": 1.0,\n
"AnalysisID": "9cdff73f-5db1-4e64-9656-ef83bdfeeb90",\n
"AnalysisDescription": "Loitering detection",\n
"EventType": "Loitering",\n
"CustomEventType": "",\n
"ProcessingStatus": "Detected",\n
"ErrorMessage": "",\n
"TotalItemCount": 4,\n
"DetectedObjects": [ .. ]\n
}
:param processing_status: One of the following values: 'CustomAnalysis', 'Motion', 'Detected',
'Recognized', 'Characterized', 'Undetected', 'Failed', 'Error'
:type processing_status: string
:param event_type: Environment variable assigned to the Node through the Developer Portal.
The main thing this Node is 'interested in'. Can take the following range of values from
the Specification: 'Custom', 'ItemPresence', 'Loitering', 'Intrusion', 'Falldown', 'Violence',
'Fire', 'Abandonment', 'SpeedGate', 'Xray', 'Facility'
:type event_type: string
:param custom_event_type: Set when EventType is set to 'Custom', defaults to "". Optional
:type custom_event_type: string
:param analysis_description: string, default "", env variable assigned to the Node
through the Developer Portal. Used to describe what the analysis is about,
what it is 'doing'. By default set to an empty string. Optional.
:type analysis_description: string
:param analysis_id: Environment variable assigned to the Node through the Developer Portal.
Should be a unique identifier referring to the particular algorithm
used within the node. Defaults to "". Optional.
:type analysis_id: string
:param total_item_count: Total amount of items detected in the scene, defaults to 0
:type total_item_count: int
:param error_message: Used to propagate errors, optional, defaults to ""
:type error_message: string
:param detected_objects: Holds detected objects, defaults to an empty list
:type detected_objects: list
:raises AssertionError: When the ProcessingStatus is not recognized as part of the Spec.
:raises AssertionError: When the EventType is not recognized as part of the Spec.
"""
assert event_type in EventType, logger.exception("EventType given not in Spec")
analysis_list_item = {}
analysis_list_item['VersionNumber'] = self.my_version_number
assert processing_status in ProcessingStatus, \
logger.exception("This Processing Status is not part of the Spec.")
analysis_list_item['ProcessingStatus'] = processing_status
analysis_list_item['EventType'] = event_type
analysis_list_item['CustomEventType'] = custom_event_type
analysis_list_item['AnalysisID'] = analysis_id
analysis_list_item['AnalysisDescription'] = analysis_description
analysis_list_item['ErrorMessage'] = str(error_message)
analysis_list_item['TotalItemCount'] = total_item_count
analysis_list_item['DetectedObjects'] = detected_objects
self.scenemark['AnalysisList'].append(analysis_list_item)
logger.info(f"AnalysisList item of EventType '{event_type}' added")
def add_thumbnail_list_item(self, scenedata_id : str):
"""
Adds a new thumbnail list item to the thumbnail list in the SceneMark
Use this method when you want to instruct the app to use a different
image as the thumbnail to be displayed. Changes the SceneMark in place.
:Example:
{\n
"VersionNumber": 1.0,\n
"SceneDataID": "SDT_83d6a043-00d9-49aa-a295-86a041fff6d8_d3e7_8a7d01"\n
}
:param scenedata_id: SceneDataID of the thumbnail
:type scenedata_id: string
:Note:
Use the Thumbnail id of any existing SceneData, such as a SceneData item
that you have added yourself.
"""
thumbnail_list_item = {}
thumbnail_list_item['VersionNumber'] = self.my_version_number
thumbnail_list_item['SceneDataID'] = scenedata_id
self.scenemark['ThumbnailList'].append(thumbnail_list_item)
logger.info(f"Thumbnail set to: {scenedata_id}")
def add_scenedata_item(
self,
scenedata_uri : str,
datatype : str,
source_node_description : str = "",
timestamp : str = "",
duration : str = "",
media_format : str = "",
encryption : dict = {},
embedded_scenedata : str = "",
):
# pylint: disable=dangerous-default-value
"""
Adds a SceneData item to the SceneMark in place.
:Example:
{\n
"VersionNumber": 2.0,\n
"SceneDataID": "SDT_83d6a043-00d9-49aa-a295-86a041fff6d8_d3e7_123456",\n
"TimeStamp": "2021-10-29T21:12:17.245Z",\n
"SourceNodeID": "83d6a043-00d9-49aa-a295-86a041fff6d8_d3e7",\n
"SourceNodeDescription": "Scenera Bridge",\n
"Duration": "30",\n
"DataType": "RGBVideo",\n
"Status": "Upload in Progress",\n
"MediaFormat": "H.264",\n
"SceneDataURI": "https://sduri.example.com/vid.mp4",\n
"Resolution": {\n
"Height": 100,\n
"Width": 100\n
},\n
"EmbeddedSceneData": None,\n
"Encryption": False\n
}
:param scenedata_uri: The URI where we find the image
:type scenedata_uri: string
:param datatype: DataType of the SceneData item, e.g. 'RGBStill' or 'Thumbnail'
:type datatype: string
:param source_node_description: Description of the Source Node, defaults to ""
:type source_node_description: string
:param timestamp: Timestamp of event, defaults to ""
:type timestamp: string
:param duration: Duration of a videoclip, defaults to ""
:type duration: string
:param media_format: Format, e.g. H.264
:type media_format: string
:param encryption: Encryption object (e.g. {'EncryptionOn': False}), defaults to {}
:type encryption: dict
:param embedded_scenedata: SceneData embedded directly in the SceneMark, encoded as Base64, defaults to ""
:type embedded_scenedata: string
:raises AssertionError: "No SceneData URI is present."
:raises AssertionError: "This DataType is not part of the Spec."
:raises AssertionError: "This Media Format is not part of the Spec."
"""
scenedata_list_item = {}
#First, we generate a new id for this new entry
scenedata_list_item['VersionNumber'] = self.my_version_number
scenedata_list_item['SceneDataID'] = self.generate_scenedata_id()
# We update the new item with the URI that you have to provide
assert scenedata_uri, \
logger.exception("No SceneData URI is present.")
scenedata_list_item['SceneDataURI'] = scenedata_uri
scenedata_list_item['Status'] = "Available at Provided URI"
# Update the DataType
assert datatype in DataType, \
logger.exception("This DataType is not part of the Spec.")
scenedata_list_item['DataType'] = datatype
# If the DataType is a thumbnail, we update the ThumbnailList
if datatype == 'Thumbnail':
self.add_thumbnail_list_item(scenedata_list_item['SceneDataID'])
# You are allowed to set your own timestamp but will otherwise take the default
scenedata_list_item['TimeStamp'] = timestamp if timestamp else self.my_timestamp
assert media_format in MediaFormat, \
logger.exception("This Media Format is not part of the Spec.")
scenedata_list_item['MediaFormat'] = media_format if media_format else 'UNSPECIFIED'
# This equals the Node ID that is assigned to your node
scenedata_list_item['SourceNodeID'] = self.node_id
scenedata_list_item['Encryption'] = encryption
# The following parameters are all left out unless you specify them.
scenedata_list_item['SourceNodeDescription'] = source_node_description
scenedata_list_item['Duration'] = duration
scenedata_list_item['EmbeddedSceneData'] = embedded_scenedata
self.scenemark['SceneDataList'].append(scenedata_list_item)
logger.info(f"SceneData item '{scenedata_list_item['SceneDataID']}' added")
def update_scenedata_item(self, scenedata_id, key, value):
"""
Updates existing SceneData pieces, to for example update its VersionNumber
:param scenedata_id: SceneDataID of the item you want to change
:param key: the key that needs changing
:param value: the value that this key should take
"""
try:
for sd_item in self.scenemark['SceneDataList']:
if sd_item['SceneDataID'] == scenedata_id:
# Raises KeyError if the key does not already exist on this item
_ = sd_item[key]
sd_item[key] = value
logger.info(f"SceneData item '{scenedata_id}' updated: '{key}' set to '{value}'")
break
except KeyError as _e:
error = "Can't update the SceneData item"
logger.exception(error)
raise KeyError(error) from _e
def add_version_control_item(self):
"""
Adds a Version Control item to the SceneMark. Uses already existing
data and is called automatically by the __init__ method
:Example:
{\n
"VersionNumber": 2.0,\n
"DateTimeStamp": "2021-07-19T16:25:21.647Z",\n
"NodeID": "NodeID"\n
}
"""
version_list_item = {}
version_list_item['VersionNumber'] = self.my_version_number
version_list_item['DateTimeStamp'] = self.my_timestamp
version_list_item['NodeID'] = self.node_id
self.scenemark['VersionControl']['VersionList'].append(version_list_item)
def add_custom_notification_message(self, message : str):
"""
Adds a custom notification message to the SceneMark to display in the notification
:param message: Text for the body of the push notification, capped at 200 characters
:type message: string
"""
assert len(str(message)) <= 200, logger.exception("Custom message exceeds 200 chars")
self.scenemark['NotificationMessage'] = str(message)
logger.info("Custom push notififation message added")
def return_scenemark_to_ns(self, test = False, load_test = False):
# pylint: disable=inconsistent-return-statements
"""
Returns the SceneMark to the NodeSequencer with an HTTP call using the received address
:param test: If set to True this returns the scenemark
straight to the caller so you can test the node from Postman or
some other app, defaults to False
:type test: bool
:param load_test: If set to True, the resulting SceneMark is discarded and an
empty response is returned, defaults to False
:type load_test: bool
"""
# Update our original request with the updated SceneMark
if not self.disable_linter:
request_json_validator(self.scenemark, scenemark_schema, "SceneMark schema")
scenemark = json.dumps(self.scenemark)
if test:
logger.info("Sending the SceneMark back directly")
return scenemark
if load_test:
logger.info("Load Test: Doing nothing with the resulting SceneMark")
return {}, 200
# We add the token to the HTTP header.
ns_header = {'Authorization': 'Bearer ' + self.nodesequencer_header['Token'],
'Accept': 'application/json',
'Content-Type': 'application/json'}
verify = self.nodesequencer_header['Ingress'].startswith("https")
# Call NodeSequencer with an updated SceneMark
answer = requests.post(
self.nodesequencer_header['Ingress'],
data=scenemark,
headers=ns_header,
verify=verify,
stream=False)
logger.info(f"Returned SceneMark to NodeSequencer: {answer}")
try:
print(json.dumps(answer.json(), indent=3))
except ValueError:
# The NodeSequencer did not return a JSON body
pass
# Helper Functions
@staticmethod
def get_current_utc_timestamp():
"""
Helper function to create a UTC timestamp in the required format.
:Example:
'2022-03-14T15:43:04.010Z'
"""
time = str(datetime.datetime.utcnow())
time = time[:-3]
time = time.replace(" ","T")
time = time + "Z"
return time
@staticmethod
def generate_random_id(length):
"""
Helper function to create a random ID
"""
return ''.join([random.choice('0123456789abcdefghijklmnopqrstuvwxyz') \
for _ in range(length)])
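
# --- Usage sketch (not part of the original module) ---
# A hedged, comment-only illustration of how a Node might use this class. The
# incoming `request` is assumed to be a Flask-style request whose .json payload
# carries a spec-conformant 'NodeSequencerHeader' and 'SceneMark'; all values
# below are placeholders.
#
#   scenemark = SceneMark(
#       request,
#       node_id="9cdff73f-5db1-4e64-9656-ef83bdfeeb90",
#       disable_token_verification=True,  # skip JWT validation during local testing
#   )
#   detected = scenemark.generate_detected_object_item(
#       nice_item_type="Human",
#       related_scenedata_id=scenemark.get_id_from_uri(scenemark.targets[0]),
#       probability=0.93,
#       bounding_box=scenemark.generate_bounding_box(0.1, 0.2, 0.5, 0.4),
#   )
#   scenemark.add_analysis_list_item(
#       processing_status="Detected",
#       event_type="ItemPresence",
#       total_item_count=1,
#       detected_objects=[detected],
#   )
#   return scenemark.return_scenemark_to_ns()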
| /scenera.node-0.3.19.tar.gz/scenera.node-0.3.19/scenera/node/scenemark.py | 0.498291 | 0.209955 | scenemark.py | pypi |
nodesequencer_header_schema = {
"$schema": "http://json-schema.org/draft-06/schema#",
"$ref": "#/definitions/Welcome6",
"definitions": {
"Welcome6": {
"type": "object",
"additionalProperties": False,
"properties": {
"Ingress": {
"type": "string",
"format": "uri",
"qt-uri-protocols": [
"http"
]
},
"Token": {
"type": "string"
},
"NodeToken": {
"type": "string"
},
"NodeInput": {
"$ref": "#/definitions/NodeInput"
}
},
"required": [
"Ingress",
"Token"
],
"title": "Welcome6"
},
"NodeInput": {
"type": ["object","null"],
"additionalProperties": False,
"properties": {
"DataTypeMode": {
"type": ["string", "null"]
},
"RegionsOfInterest": {
"type": ["array", "null"],
"items": {
"$ref": "#/definitions/RegionsOfInterest"
}
}
},
"title": "NodeInput"
},
"RegionsOfInterest": {
"type": "object",
"additionalProperties": False,
"properties": {
"Polygon": {
"type": "array",
"items": {
"$ref": "#/definitions/Polygon"
}
}
},
"required": [
"Polygon"
],
"title": "RegionsOfInterest"
},
"Polygon": {
"type": "object",
"additionalProperties": False,
"properties": {
"XCoord": {
"type": "number"
},
"YCoord": {
"type": "number"
}
},
"required": [
"XCoord",
"YCoord"
],
"title": "Polygon"
}
}
}
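
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of validating a NodeSequencerHeader payload against
# the schema above with the third-party `jsonschema` package. The scenera.node
# package routes validation through its own request_json_validator helper (not
# shown here), so this is an illustration rather than the package's actual code path.
if __name__ == "__main__":
    import jsonschema

    example_header = {
        "Ingress": "http://nodesequencer.example.com/ingress",
        "Token": "example-token",
        "NodeInput": {
            "DataTypeMode": "RGBStill",
            "RegionsOfInterest": None,
        },
    }
    # Raises jsonschema.ValidationError if the payload does not match the schema.
    jsonschema.validate(instance=example_header, schema=nodesequencer_header_schema)
    print("NodeSequencerHeader is valid")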
| /scenera.node-0.3.19.tar.gz/scenera.node-0.3.19/scenera/node/nodesequencer_header_schema.py | 0.583797 | 0.437583 | nodesequencer_header_schema.py | pypi |
scenemark_schema = {
"$schema": "http://json-schema.org/draft-06/schema#",
"definitions": {
"Encryption": {
"type": ["object", "null"],
"required": [],
"oneOf": [
{
"type": "object",
"properties": {
"EncryptionOn": {
"type": "boolean",
"enum": [False]
}
},
"required": ["EncryptionOn"]
},
{
"type": "object",
"properties": {
"EncryptionOn": {
"type": "boolean",
"enum": [True]
},
"SceneEncryptionKeyID": {
"type": "string",
"description": "Unique Key Identifier that enables the key used to encrypt the data. If EncryptionOn is False this value will be ignored."
},
"SceneMarkEncryption": {
"type": "object",
"properties": {
"JWEAlg": {
"type": "string"
},
"JWEEnc": {
"type": "string"
}
}
},
"SceneDataEncryption": {
"type": "string"
},
"PrivacyServerEndPoint": {
"type": "object",
"properties": {
"AppEndPoint": {
"ApplicationEndPointSpecifier": {
"type": "object",
"properties": {
"APIVersion": {
"type": "string",
"enum": ["1.0"]
},
"EndPointID": {
"type": "string",
"description": "The NICE Identifier for the Application that is ultimatley the end point for messages."
},
"X.509Certificate": {
"type": "array",
"items": {
"type": "string"
}
},
"AccessToken": {
"type": "string",
"description": "This token is used by the receiving NICE entity. It shall always comply ot the JWT (RFC 7519) format"
}
},
"required": ["APIVersion", "EndPointID"]
}
},
"NetEndPoint": {
"NetworkEndPointSpecifier": {
"type": "object",
"properties": {
"APIVersion": {
"type": "string",
"enum": ["1.0"]
},
"EndPointID": {
"type": "string"
},
"NodeID": {
"type": "string"
},
"PortID": {
"type": "string"
},
"Scheme": {
"type": "array",
"uniqueItems": True,
"items": {
"anyOf": [
{
"MQTTScheme": {
"type": "object",
"title": "Network end point specifier for MQTT",
"properties": {
"Protocol": {
"type": "string",
},
"Authority": {
"type": "string"
},
"Username": {
"type": "string"
},
"Password": {
"type": "string",
"description": "Network AccessToken."
},
"ClientID": {
"type": "string"
},
"QoS": {
"type": "integer",
"enum": [0, 1, 2]
}
},
"required": [
"Protocol",
"Authority",
"Username",
"Password",
"ClientID"
]
}
},
{
"WebAPIScheme": {
"type": "object",
"title": "Network end point specifier for WebAPI",
"properties": {
"Protocol": {
"type": "string",
},
"Authority": {
"type": "string"
},
"AccessToken": {
"type": "string"
},
"Role": {
"type": "string",
"description": "If set to Client, the port shall initiate GET or SET data requests. If Server then the port shall act as a server. ",
"enum": ["Client", "Server"]
}
},
"required": ["Protocol", "Authority"]
}
},
{
"WebRTCScheme": {
"type": "object",
"title": "Network end point specifier for WebRTC",
"properties": {
"Protocol": {
"type": "string",
},
"IceServers": {
"type": "array",
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"urls": {
"type": "array",
"uniqueItems": True,
"items": {
"type": "string",
"description": "STUN/TURN server URL. e.g. turn:turnserver.example.org"
}
},
"username": {
"type": "string"
},
"credential": {
"type": "string"
}
},
"required": ["urls"]
}
}
},
"required": ["Protocol"]
}
},
{
"type": "object",
"title": "Network end point specifier for local connection",
"properties": {
"Protocol": {
"type": "string",
}
},
"required": ["Protocol"]
}
]
}
}
},
"required": ["APIVersion", "EndPointID", "Scheme"]
}
}
},
"required": ["NetEndPoint"]
}
},
"required": ["EncryptionOn", "SceneEncryptionKeyID"]
}
]
}
},
"type": "object",
"title": "SceneMark",
"description": "The SceneMark contains data describing what has been captured in SceneData and either contains references to SceneData or contains the SceneData itself.",
"properties": {
"Version": {
"type": "string",
"enum": ["1.0"]
},
"TimeStamp": {
"type": ["string", "null"],
"description": "Time stamp for when the SceneMark is first generated."
},
"SceneMarkID": {
"type": "string",
"description": "Unique ID for a SceneMark. This ID is unique across the NICE ecosystem."
},
"DestinationID": {
"type": ["array", "null"],
"description": "DataService or App ID initiated the request for this SceneMark",
"items": {
"type": "string"
}
},
"SceneMarkStatus": {
"type": "string",
"enum": ["Removed", "Active", "Processed"]
},
"NodeID": {
"type": "string"
},
"VersionControl": {
"type": "object",
"properties": {
"DataPipelineInstanceID": {
"type": "string"
},
"VersionList": {
"type": "array",
"items": {
"type": "object",
"properties": {
"VersionNumber": {
"type": "number"
},
"DateTimeStamp": {
"type": ["string", "null"]
},
"NodeID": {
"type": "string"
}
},
"required": ["VersionNumber", "NodeID"]
}
}
}
},
"ThumbnailList": {
"type": ["array", "null"],
"items": {
"type": "object",
"properties": {
"VersionNumber": {
"type": "number"
},
"SceneDataID": {
"type": "string",
"description": "SceneDataID should appear in the SceneDataList that is inclulded in the SeneMark."
}
},
"required": ["VersionNumber", "SceneDataID"]
}
},
"AnalysisList": {
"type": "array",
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"VersionNumber": {
"type": "number"
},
"AnalysisID": {
"type": ["string", "null"],
"description": "Each algorithm and set of weights has a unique ID that is defined by NICE. This value shall be carried in this record."
},
"EventType": {
"type": "string"
},
"CustomEventType": {
"type": ["string", "null"]
},
"AnalysisDescription": {
"type": ["string", "null"]
},
"ProcessingStatus": {
"type": "string",
"enum": [
"Motion",
"Detected",
"Recognized",
"Characterized",
"Undetected",
"Failed",
"Error",
"CustomAnalysis"
]
},
"ErrorMessage": {
"type": ["string", "null"]
},
"TotalItemCount": {
"type": ["number", "null"]
},
"DetectedObjects": {
"type": ["array", "null"],
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"NICEItemType": {
"type": "string",
"description": "NICE defines diferent DeviceModes which target specific types of data associated with the DeviceMode.",
"enum": [
"Motion",
"Face",
"Human",
"Vehicle",
"Label",
"TextLogoQRCode",
"Animal",
"Custom",
"Scene",
"Fire",
"Furniture",
"Bag",
"Accessory",
"Weapon",
"Undefined",
"Test"
]
},
"CustomItemType": {
"type": ["string", "null"],
"description": "Devices may have proprietary AI algorithms embedded in the device or processing node. If this algorithm was used, the label generated by the algorithm shall be carreid in this field."
},
"ItemID": {
"type": ["string", "null"],
"description": "Unique ID that is associated with this instance of object. This is an optional field that may be used to track objects across different scenemarks."
},
"Probability": {
"type": ["number", "null"],
"description": "Certainty of the Attribute According to the Algorithm"
},
"Frame": {
"type": ["integer", "null"],
},
"TimeStamp": {
"type": ["string", "null"],
},
"DirectionalMovement": {
"type": ["object", "null"],
"properties": {
"ID": {
"type": "string"
},
"URI": {
"type": ["string", "null"]
}
}
},
"Attributes": {
"type": ["array", "null"],
"description": "Different AI algorithms are capable of identifying different attribute of objects that have been identified. For example if a face is detected the attribute may be \"smiling\". These attributes depend on the AI algorithm used and are not specified.",
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"Attribute": {
"type": "string",
"description": "Attribute of face recognized - mood etc"
},
"Value": {
"type": "string"
},
"ProbabilityofAttribute": {
"type": ["number", "null"],
"description": "Degree of certainty of the attribute"
},
"VersionNumber": {
"type": "number",
"description": "Unique ID for the algorithm."
}
},
"required": []
}
},
"BoundingBox": {
"type": ["object", "null"],
"properties": {
"XCoordinate": {
"type": "number"
},
"YCoordinate": {
"type": "number"
},
"Height": {
"type": "number"
},
"Width": {
"type": "number"
}
},
"required": ["Height", "Width", "XCoordinate", "YCoordinate"]
},
"RelatedSceneData": {
"type": "string"
},
"ItemTypeCount": {
"type": ["number", "null"]
}
},
"required": []
}
}
},
"required": []
}
},
"ParentSceneMarks": {
"type": ["array", "null"],
"items": {
"type": "object",
"properties": {
"VersionNumber": {
"type": "number"
},
"SceneMarkID": {
"type": "string"
}
}
}
},
"ChildSceneMarks": {
"type": ["array", "null"],
"items": {
"type": "object",
"properties": {
"VersionNumber": {
"type": "number"
},
"SceneMarkID": {
"type": "string"
}
}
}
},
"SceneDataList": {
"type": "array",
"items": {
"type": "object",
"description": "For a particular SceneMark there may be several SceneData objects. This array contains one or more SceneData objects.",
"properties": {
"VersionNumber": {
"type": "number"
},
"TimeStamp": {
"type": ["string", "null"]
},
"SourceNodeID": {
"type": "string"
},
"SourceNodeDescription": {
"type": ["string", "null"]
},
"Duration": {
"type": ["string", "integer", "number", "null"]
},
"DataType": {
"type": "string",
"description": "Types of data that is in the SceneData object.",
"enum": [
"RGBStill",
"IRStill",
"DepthStill",
"RGBStereoStill",
"ThermalStill",
"RGBVideo",
"IRVideo",
"DepthVideo",
"RGBStereoVideo",
"ThermalVideo",
"Audio",
"Temperature",
"Humidity",
"PIR",
"CarbonMonoxide",
"AudioTranscript",
"IRDetection",
"Pressure",
"Proximity",
"LiquidLevel",
"Acceleration",
"Rotation",
"Thumbnail",
"Other"
]
},
"Status": {
"type": "string",
"enum": ["Available at Provided URI", "Upload in Progress"]
},
"MediaFormat": {
"type": ["string"],
"enum": [
"UNSPECIFIED",
"JPEG",
"PNG",
"H.264",
"H.265",
"RAW",
"JSON"
]
},
"Encryption": {
"$ref": "#/definitions/Encryption"
},
"Resolution": {
"type": ["object", "null"],
"properties": {
"Height": {
"type": "integer"
},
"Width": {
"type": "integer"
}
}
},
"SceneDataID": {
"type": "string",
"description": "Unique Identifier for the SceneData referenced by this data structure."
},
"SceneDataURI": {
"type": "string",
"description": "This is URI to an external data object."
},
"EmbeddedSceneData": {
"type": ["string", "null"],
"description": "Data may be directly embedded in the SceneMark. The Data is encoded as Base64."
}
},
"required": ["TimeStamp", "Encryption", "SceneDataID"]
}
},
"SceneModeConfig": {
"type": ["array", "null"],
"description": "This defines the depth of analysis performed and whether a resut of an output can be used to drive a subsequent capture of frames.",
"uniqueItems": True,
"items": {
"type": "object",
"description": "If this value is set to 20s the node should not generaate another SceneMark for 20s after the first SceneMark was generated.",
"properties": {
"Analysis": {
"type": "string"
},
"AnalysisStage": {
"type": "string",
"enum": [
"CustomAnalysis",
"Motion",
"Detect",
"Recognize",
"Characterize"
]
},
"CustomAnalysisID": {
"type": ["string", "null"],
"description": "Each algorithm and set of weights has a unique ID that is defined by NICE. This value shall be carried in this record."
},
"AnalysisDescription": {
"type": ["string", "null"],
"description": "Description of algorithm."
},
"CustomAnalysisStage": {
"type": ["string", "null"],
"description": "This defines analysis stages that are proprietary."
},
"ExecuteOnPipeline": {
"type": "boolean"
},
"LabelRefDataList": {
"type": ["array", "null"],
"description": "For a specific label the following are reference data such as images for the particular label. The Node shall process these images to create the appropriate reference vector and store which RefDataIDs have been used to create the vector. If new RefDataIDs are detected in the SceneMode object the vector shall be regenerated with the listed RefData.",
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"LabelName": {
"type": "string",
"description": "Label name for example for facial recognition this would be the name or id of an individual."
},
"RefDataList": {
"type": ["array", "null"],
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"RefDataID": {
"type": "string"
},
"RefDataEndPoint": {
"NetEndPoint": {
"NetworkEndPointSpecifier": {
"type": "object",
"properties": {
"APIVersion": {
"type": "string",
"enum": ["1.0"]
},
"EndPointID": {
"type": "string"
},
"NodeID": {
"type": "string"
},
"PortID": {
"type": "string"
},
"Scheme": {
"type": "array",
"uniqueItems": True,
"items": {
"anyOf": [
{
"MQTTScheme": {
"type": "object",
"title": "Network end point specifier for MQTT",
"properties": {
"Protocol": {
"type": "string",
},
"Authority": {
"type": "string"
},
"Username": {
"type": "string"
},
"Password": {
"type": "string",
"description": "Network AccessToken."
},
"ClientID": {
"type": "string"
},
"QoS": {
"type": "integer",
"enum": [0, 1, 2]
}
},
"required": [
"Protocol",
"Authority",
"Username",
"Password",
"ClientID"
]
}
},
{
"WebAPIScheme": {
"type": "object",
"title": "Network end point specifier for WebAPI",
"properties": {
"Protocol": {
"type": "string",
},
"Authority": {
"type": "string"
},
"AccessToken": {
"type": "string"
},
"Role": {
"type": "string",
"description": "If set to Client, the port shall initiate GET or SET data requests. If Server then the port shall act as a server. ",
"enum": ["Client", "Server"]
}
},
"required": ["Protocol", "Authority"]
}
},
{
"WebRTCScheme": {
"type": "object",
"title": "Network end point specifier for WebRTC",
"properties": {
"Protocol": {
"type": "string",
},
"IceServers": {
"type": "array",
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"urls": {
"type": "array",
"uniqueItems": True,
"items": {
"type": "string",
"description": "STUN/TURN server URL. e.g. turn:turnserver.example.org"
}
},
"username": {
"type": "string"
},
"credential": {
"type": "string"
}
},
"required": ["urls"]
}
}
},
"required": ["Protocol"]
}
},
{
"type": "object",
"title": "Network end point specifier for local connection",
"properties": {
"Protocol": {
"type": "string",
"enum": ["Local"]
}
},
"required": ["Protocol"]
}
]
}
}
},
"required": ["APIVersion", "EndPointID", "Scheme"]
}
}
}
},
"required": ["RefDataID"]
}
},
"RefData": {
"type": ["array", "null"],
"uniqueItems": True,
"items": {
"type": "object",
"properties": {
"RefDataID": {
"type": "string"
},
"RefData": {
"type": "string",
"description": "Reference data encoded in Base64. For example an image of a persons face."
},
"Encryption": {
"$ref": "#/definitions/Encryption"
}
},
"required": ["RefDataID", "RefData", "Encryption"]
}
},
"ProcessingStage": {
"type": "string",
"description": "This indicates which analysis stage should use the reference data.",
"enum": [
"CustomAnalysis",
"Motion",
"Detect",
"Recognize",
"Characterize"
]
}
},
"required": ["ProcessingStage", "LabelName"]
}
},
"AnalysisThreshold": {
"type": ["number", "null"],
"description": "The output of the analysis should be greater than this value to trigger the Capture Sequence."
},
"AnalysisSampleRate": {
"type": ["number", "null"]
},
"AnalysisRegion": {
"type": ["object", "null"],
"properties": {
"ROIType": {
"type": "string",
"enum": [
"MultiPolygon",
"MultiLine",
"SingleLine",
"SinglePolygon"
]
},
"ROICoords": {
"type": "array",
"items": {
"type": "object",
"properties": {
"Severity": {
"type": "string",
"enum": ["None", "Warning", "Critical"]
},
"Coords": {
"type": "array",
"items": {
"type": "object",
"properties": {
"XCoord": {
"type": "number"
},
"YCoord": {
"type": "number"
}
}
}
}
}
}
}
}
},
"IgnoreObjectDetection": {
"type": ["object", "null"],
"properties": {
"ObjectLargerThan": {
"type": ["number", "null"],
"description": "If object is larger than this fraction of screen Area, ignore inferencing or Motion Detection"
},
"ObjectSmallerThan": {
"type": ["number", "null"],
"description": "if smaller than this value (fraction of screen), ignore inferencing or Motion Detection"
}
}
},
"Scheduling": {
"type": ["array", "null"],
"items": {
"type": "object",
"properties": {
"SchedulingType": {
"type": "string",
"enum": [
"Default",
"ScheduledOnce",
"ScheduledHourly",
"ScheduledDaily",
"ScheduledWeekDay",
"ScheduledWeekEnd",
"ScheduledWeekly",
"ScheduledMonthly",
"ScheduledAnnually",
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Holiday"
]
},
"StartTime": {
"type": "string"
},
"EndTime": {
"type": "string"
}
}
}
},
"Encryption": {
"$ref": "#/definitions/Encryption"
},
"Filters": {
"type": ["object", "null"],
"description": "These items are used to either explicitly trigger a SceneMark or should be ignored when they are triggered.",
"properties": {
"IgnoreTheseDetectedItems": {
"type": ["array", "null"],
"description": "If the algorithm detects any items in this list, these should items should be ignored.",
"items": {
"type": ["string", "null"]
}
},
"TriggerOnTheseDetectedItems": {
"type": ["array", "null"],
"items": {
"type": ["string", "null"],
"description": "The SceneMarks should only be triggered if one of the items in the list are detected."
}
}
}
},
"MinimumSceneData": {
"type": ["array", "null"],
"items": {
"type": "object",
"properties": {
"DataType": {
"type": "string",
"enum": ["RGBStill", "RGBVideo"]
},
"Count": {
"type": "integer"
},
"Required": {
"type": "boolean"
}
}
}
},
"AnalysisParams": {
"type": ["array", "null"],
"items": [
{
"type": "object",
"properties": {
"ParamName": {
"type": "string"
},
"ParamValue": {
"type": "string"
}
}
}
]
},
"StartTimeRelTrigger": {
"type": ["number", "null"]
},
"EndTimeRelTrigger": {
"type": ["number", "null"]
},
"SceneMarkWindow": {
"type": ["number", "null"],
"description": "The period of time during which after a first SceneMark is generate a second SceneMark is not generated. For example if set to 10 no new SceneMark should be sent for 10 seconds."
},
"SceneMarkFrequency": {
"type": ["number", "null"],
"description": "If \"Analysis\" is \"Continuous\" this is the period for generating each new SceneMark."
},
"AIServer": {
"type": ["object", "null"],
"properties": {
"Protocol": {
"type": "string"
},
"Authority": {
"type": "string"
},
"ID": {
"type": "string"
},
"Pass": {
"type": "string"
}
}
},
"Blurring": {
"type": ["object", "null"],
"properties": {
"Blurring": {
"type": "object",
"properties": {
"Blur": {
"type": "array",
"items": {
"type": "string"
}
},
"ExecuteOnPipeline": {
"type": "boolean"
}
},
"required": ["Blur", "ExecuteOnPipeline"]
}
}
},
"DrawBoundingBoxes": {
"type": ["object", "null"],
"description": "If True draw bounding box on detected items.",
"properties": {
"Draw": {
"type": "boolean"
},
"ExecuteOnPipeline": {
"type": "boolean"
}
}
},
"Resolution": {
"type": ["object", "null"],
"properties": {
"Height": {
"type": "integer"
},
"Width": {
"type": "integer"
}
}
}
},
"required": []
}
}
},
"required": ["SceneMarkID", "Version", "SceneMarkStatus"]
}
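# Illustrative (non-normative) example of a single SceneModeConfig entry that
# satisfies the schema fragment above; the field values are assumptions chosen
# purely for illustration:
# {
#     "Analysis": "ObjectDetection",
#     "AnalysisStage": "Detect",
#     "AnalysisThreshold": 0.5,
#     "SceneMarkWindow": 10,
#     "SceneMarkFrequency": 30
# }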
|
/scenera.node-0.3.19.tar.gz/scenera.node-0.3.19/scenera/node/scenemark_schema.py
| 0.629775 | 0.48182 |
scenemark_schema.py
|
pypi
|
import logging
from .logger import configure_logger
from .validators import ValidationError
from .spec import DataType
logger = logging.getLogger(__name__)
logger = configure_logger(logger, debug=True)
def get_my_version_number(scenemark):
"""
Used internally to infer the Node's VersionNumber in the node
sequence, which is the highest VersionNumber in the list plus one.
:param scenemark: the SceneMark
:type scenemark: dictionary
:return: The Version Number of the Node.
:rtype: float
:raises ValidationError: VersionControl is missing or malformed
"""
try:
return max([vc_item['VersionNumber'] \
for vc_item in scenemark['VersionControl']['VersionList']]) + 1.0
except ValidationError as _e:
error = "The VersionControl item is missing or malformed"
logger.exception(error)
raise ValidationError(error) from _e
def extract_node_datatype_mode(nodesequencer_header):
"""
Used internally to extract the DataType the Node should work on.
:param nodesequencer_header: NodeSequencer header structure
:type nodesequencer_header: dictionary
:return: DataType, defaults to RGBStill
:rtype: string
"""
try:
datatype_mode = nodesequencer_header['NodeInput']['DataTypeMode']
# We default to using the RGBStill image in case it is not defined
except Exception as _e:
logger.warning(f"NodeInput and/or DataTypeMode missing, setting default to RGBStill. ({_e})")
datatype_mode = "RGBStill"
logger.info(f"DataTypeMode: {datatype_mode}")
return datatype_mode
def get_regions_of_interest(nodesequencer_header):
"""
Extracts the polygon from the node input object in a list of lists as follows:
:Example:
[ [ (1, 2), (3, 4), (5, 6) ], [ (7, 8), (9, 10), (11, 12), (13, 14), (15, 16) ] ]
:param nodesequencer_header: NodeSequencer header structure
:type nodesequencer_header: dictionary
:return: regions of interest coordinates
:rtype: a list of lists, containing tuples
"""
try:
regions = []
for polygon in nodesequencer_header['NodeInput']['RegionsOfInterest']:
region = [(coord['XCoord'],coord['YCoord']) for coord in polygon['Polygon']]
if len(region) >= 3:
regions.append(region)
else:
logger.warning(
"There is a Region of Interest with fewer than 3 coordinates. Discarding.")
logger.info(f"Region(s) of Interest: {regions}")
return regions
except Exception as _e:
logger.info(f"Region of Interest missing. Setting to an empty list. ({_e})")
return []
def get_latest_scenedata_version_number(scenemark):
"""
Get latest SceneData VersionNumber. This is what the Node should run on.
:param scenemark: SceneMark structure
:type scenemark: dictionary
:return: VersionNumber
:rtype: float
"""
try:
return max([sd_item['VersionNumber'] for sd_item in scenemark['SceneDataList']])
except ValueError:
logger.warning("There is no SceneData attached to this SceneMark.")
return 0.0
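# Illustrative usage with assumed inputs (not part of the package):
# scenemark = {
#     "VersionControl": {"VersionList": [{"VersionNumber": 1.0}, {"VersionNumber": 2.0}]},
#     "SceneDataList": [{"VersionNumber": 1.0}],
# }
# get_my_version_number(scenemark)                # -> 3.0
# get_latest_scenedata_version_number(scenemark)  # -> 1.0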
|
/scenera.node-0.3.19.tar.gz/scenera.node-0.3.19/scenera/node/utils.py
| 0.712632 | 0.401072 |
utils.py
|
pypi
|
*Please note:* at the moment this package is being actively developed and might not always be stable.
# SCEPIA - Single Cell Epigenome-based Inference of Activity
SCEPIA predicts transcription factor motif activity from single cell RNA-seq data. It uses computationally inferred epigenomes of single cells to identify transcription factors that determine cellular states. The regulatory inference is based on a two-step process:
1) Single cells are matched to a combination of (bulk) reference H3K27ac ChIP-seq or ATAC-seq profiles.
2) Using the H3K27ac ChIP-seq or ATAC-seq signal in enhancers associated with hypervariable genes the TF motif activity is inferred.
Currently five different references are available, three for human and two for mouse. Different
data sets may give different results, based on a) the type of data (H3K27ac
ChIP-seq or ATAC-seq) and b) the different cell types being represented. While
SCEPIA does not require exact matching cell types to give good results, it does
work best when relatively similar cell types are in the reference.
The following references can be used:
* `ENCODE.H3K27ac.human` - All H3K27ac experiments from ENCODE. Includes cell
lines and tissues.
* `BLUEPRINT.H3K27ac.human` - All H3K27ac cell types from BLUEPRINT (mostly
hematopoietic cell types)
* `Domcke.ATAC.fetal.human` - Fetal single cell-based ATAC-seq clusters from
15 different organs ([Domcke et al 2020](http://dx.doi.org/10.1126/science.aba7612)).
* `Cusanovich.ATAC.mouse` - ATAC-seq data of single cell-based clusters from 13
adult mouse tissues ([Cusanovich et al, 2018](http://dx.doi.org/doi:10.1016/j.cell.2018.06.052)).
* `ENCODE.H3K27ac.mouse` - All H3K27ac experiments from mouse ENCODE.
So sorry, but only human and mouse are supported for now. However, if you have data from other species you can try it if gene names tend to match. Make sure you use gene names as identifiers, and `scepia` will run fine. In our (very limited) experience this *can* yield good results, but there are a lot of assumptions on conservation of regulatory interactions. If you have a large collection of ATAC-seq or ChIP-seq reference experiments available you can also create your own reference with `ScepiaDataset.create()`. This is not well-documented at the moment, let us know if you need help to do so.
## Requirements and installation
You will need [conda](https://docs.continuum.io/anaconda/) using the [bioconda](https://bioconda.github.io/) channel.
Make sure you have conda installed. If you have not used bioconda before, first set up the necessary channels (in this order!). You only have to do this once.
```
$ conda config --add channels defaults
$ conda config --add channels bioconda
$ conda config --add channels conda-forge
```
Now you can create an environment for scepia:
```
conda create -n scepia "scepia>=0.5.0"
# Note: if you want to use scepia in a Jupyter notebook, you also have to install the following packages: `ipywidgets nb_conda`.
conda activate scepia
```
## Usage
### Before using SCEPIA
You have to install genomes that scepia uses through [genomepy](https://github.com/vanheeringen-lab/genomepy). The genomes that are used include `hg38`, `hg19`, `mm10` and `mm9`, depending on the reference. For example, to install `hg38`:
```
$ conda activate scepia
$ genomepy install hg38
```
You only need to do this once for each genome.
**Note: this is independent of which genome / annotation you used for your
single cell RNA-seq!**
### Command line
Remember to activate the environment before using it
```
conda activate scepia
```
The command line script `scepia infer_motifs` works on any file that is supported by [`scanpy.read()`](https://scanpy.readthedocs.io/en/stable/api/scanpy.read.html). We recommend to process your data, including QC, filtering, normalization and clustering, using scanpy. If you save the results to an `.h5ad` file, `scepia` can continue from your analysis to infer motif activity. However, the command line tool also works on formats such as CSV files or tab-separated files. In that case, `scepia` will run some basic pre-processing steps. To run `scepia`:
```
scepia infer_motifs <input_file> <output_dir>
```
### Jupyter notebook tutorial
A tutorial on how to use `scepia` interactively in Jupyter can be found [here](tutorials/scepia_tutorial.ipynb).
Single cell data should be loaded in an [AnnData](https://anndata.readthedocs.io/en/latest/anndata.AnnData.html) object.
Make sure of the following:
* Gene names are used in `adata.var_names`, not Ensembl identifiers or any other gene identifiers.
* `adata.raw` stores the raw, log-transformed single cell expression data.
* The main `adata` object is filtered to contain only hypervariable genes.
* Louvain or Leiden clustering has been run.
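A minimal scanpy preprocessing sketch that satisfies these requirements could look as follows. The function calls are standard scanpy API, but the QC thresholds and the choice of Leiden clustering are illustrative assumptions rather than SCEPIA recommendations:
```
import scanpy as sc

adata = sc.read_h5ad("my_counts.h5ad")       # any format supported by scanpy.read()
sc.pp.filter_cells(adata, min_genes=200)     # basic QC; thresholds are illustrative
sc.pp.filter_genes(adata, min_cells=3)
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.log1p(adata)
adata.raw = adata                            # raw, log-transformed data goes in adata.raw
sc.pp.highly_variable_genes(adata)
adata = adata[:, adata.var.highly_variable]  # keep only hypervariable genes
sc.pp.pca(adata)
sc.pp.neighbors(adata)
sc.tl.leiden(adata)                          # Leiden clustering
```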
Once these preprocessing steps are met, `infer_motifs()` can be run to infer the TF motif activity. The first time it is run, the reference data will be downloaded, so this will take somewhat longer.
```
from scepia.sc import infer_motifs
# load and preprocess single-cell data using scanpy
adata = infer_motifs(adata, dataset="ENCODE.H3K27ac.human")
```
The resulting `AnnData` object can be saved with the `.write()` method to a `h5ad` file. However, due to some difficulties with storing the motif annotation in the correct format, the file cannot be loaded with the `scanpy` load() method. Instead, use the `read()` method from the scepia package:
```
from scepia.sc import read
adata = read("my_saved_data.h5ad")
```
The resulting object can now be treated as a normal `AnnData` object.
### Determine enhancer-based regulatory potential
The enhancer-based regulatory potential (ERP) score per gene is based on the approach developed by [Wang et al., 2016](https://dx.doi.org/10.1101%2Fgr.201574.115). There is one difference: in this approach the score is calculated based only on the H3K27ac signal in enhancers. We use log-transformed, z-score normalized H3K27ac read counts in 2kb windows centered at enhancer locations. The ERP score is used to match single cell RNA-seq data to the reference H3K27ac profiles.
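As a rough illustration of that normalization step (a simplified sketch, not SCEPIA's actual implementation; the `counts` matrix is a made-up stand-in for the windowed read counts):
```
import numpy as np

# counts: (n_enhancers, n_reference_profiles) H3K27ac read counts in 2kb windows
counts = np.random.poisson(5, size=(1000, 20)).astype(float)
log_counts = np.log1p(counts)                # log-transform
erp_input = (log_counts - log_counts.mean(axis=0)) / log_counts.std(axis=0)  # z-score
```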
To use, an H3K27ac BAM file is needed (mapped to hg38). The `-N` argument
specifies the number of threads to use.
```
scepia area27 <bamfile> <outfile> -N 12
```
|
/scepia-0.5.0.tar.gz/scepia-0.5.0/README.md
| 0.92427 | 0.947235 |
README.md
|
pypi
|
from six import string_types
from sceptre.hooks import Hook
from sceptre.exceptions import InvalidHookArgumentTypeError
from sceptre.exceptions import InvalidHookArgumentSyntaxError
from sceptre.exceptions import InvalidHookArgumentValueError
class ASGScalingProcesses(Hook):
"""
Resumes or suspends autoscaling group scaling processes. This is
useful as scheduled actions must be suspended when updating stacks with
autoscaling groups.
"""
def __init__(self, *args, **kwargs):
super(ASGScalingProcesses, self).__init__(*args, **kwargs)
def run(self):
"""
Either suspends or resumes any scaling processes on all autoscaling
groups within the current stack.
:raises: InvalidHookArgumentSyntaxError, when syntax is not using "::".
:raises: InvalidHookArgumentTypeError, if argument is not a string.
:raises: InvalidHookArgumentValueError, if not using resume or suspend.
"""
if not isinstance(self.argument, string_types):
raise InvalidHookArgumentTypeError(
'The argument "{0}" is the wrong type - asg_scaling_processes '
'hooks require arguments of type string.'.format(self.argument)
)
if "::" not in str(self.argument):
raise InvalidHookArgumentSyntaxError(
'Wrong syntax for the argument "{0}" - asg_scaling_processes '
'hooks use: '
'- !asg_scaling_processes <suspend|resume>::<process-name>'
.format(self.argument)
)
action, scaling_processes = self.argument.split("::")
if action not in ["resume", "suspend"]:
raise InvalidHookArgumentValueError(
'The argument "{0}" is invalid - valid arguments for '
'asg_scaling_processes hooks are "resume" or "suspend".'
.format(action)
)
action += "_processes"
autoscaling_group_names = self._find_autoscaling_groups()
for autoscaling_group in autoscaling_group_names:
self.stack.connection_manager.call(
service="autoscaling",
command=action,
kwargs={
"AutoScalingGroupName": autoscaling_group,
"ScalingProcesses": [scaling_processes]
}
)
def _get_stack_resources(self):
"""
Retrieves all resources in stack.
:return: list
"""
response = self.stack.connection_manager.call(
service="cloudformation",
command="describe_stack_resources",
kwargs={"StackName": self.stack.external_name}
)
return response.get("StackResources", [])
def _find_autoscaling_groups(self):
"""
Retrieves all the autoscaling groups
:return: list [str]
"""
asg_names = []
resources = self._get_stack_resources()
resource_type = "AWS::AutoScaling::AutoScalingGroup"
for resource in resources:
if resource.get("ResourceType", False) == resource_type:
asg_names.append(resource["PhysicalResourceId"])
return asg_names
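# Hypothetical usage sketch (illustration only, not part of this package):
# in a Sceptre stack config the hook is typically attached to a hook point, e.g.
#   before_update:
#     - !asg_scaling_processes suspend::ScheduledActions
#   after_update:
#     - !asg_scaling_processes resume::ScheduledActions
# Sceptre instantiates the hook with `argument` and `stack` and calls run(), which
# suspends or resumes the named scaling process on every ASG in the stack.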
|
/sceptre-aws-asg-scaling-processes-hook-1.0.1.tar.gz/sceptre-aws-asg-scaling-processes-hook-1.0.1/hook/asg_scaling_processes.py
| 0.76769 | 0.230454 |
asg_scaling_processes.py
|
pypi
|
import abc
import six
import logging
import shlex
from botocore.exceptions import ClientError
from sceptre.resolvers import Resolver
from sceptre.exceptions import DependencyStackMissingOutputError
from sceptre.exceptions import StackDoesNotExistError
TEMPLATE_EXTENSION = ".yaml"
@six.add_metaclass(abc.ABCMeta)
class StackOutputBase(Resolver):
"""
An abstract base class which provides methods for getting Stack outputs.
"""
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(__name__)
super(StackOutputBase, self).__init__(*args, **kwargs)
def _get_output_value(self, stack_name, output_key, profile=None, region=None):
"""
Attempts to get the Stack output named by ``output_key``
:param stack_name: Name of the Stack to collect output for.
:type stack_name: str
:param output_key: The name of the Stack output to return.
:type output_key: str
:returns: Stack output value.
:rtype: str
:raises: sceptre.exceptions.DependencyStackMissingOutputError
"""
outputs = self._get_stack_outputs(stack_name, profile, region)
try:
return outputs[output_key]
except KeyError:
raise DependencyStackMissingOutputError(
"The Stack '{0}' does not have an output named '{1}'".format(
stack_name, output_key
)
)
def _get_stack_outputs(self, stack_name, profile=None, region=None):
"""
Communicates with AWS CloudFormation to fetch outputs from a specific
Stack.
:param stack_name: Name of the Stack to collect output for.
:type stack_name: str
:returns: A formatted version of the Stack outputs.
:rtype: dict
:raises: sceptre.exceptions.StackDoesNotExistError
"""
self.logger.debug("Collecting outputs from '{0}'...".format(
stack_name
))
connection_manager = self.stack.connection_manager
try:
response = connection_manager.call(
service="cloudformation",
command="describe_stacks",
kwargs={"StackName": stack_name},
profile=profile,
region=region,
stack_name=stack_name
)
except ClientError as e:
if "does not exist" in e.response["Error"]["Message"]:
raise StackDoesNotExistError(e.response["Error"]["Message"])
else:
raise e
else:
outputs = response["Stacks"][0].get("Outputs", {})
self.logger.debug("Outputs: {0}".format(outputs))
formatted_outputs = dict(
(output["OutputKey"], output["OutputValue"])
for output in outputs
)
return formatted_outputs
class StackOutputExternal(StackOutputBase):
"""
Resolver for retrieving the value of an output of any Stack within the
current Sceptre stack_group's account and region.
:param argument: The Stack name and output name to get.
:type argument: str in the format ``"<full stack name>::<output key>"``
"""
def __init__(self, *args, **kwargs):
super(StackOutputExternal, self).__init__(*args, **kwargs)
def resolve(self):
"""
Retrieves the value of CloudFormation output of the external Stack
:returns: The value of the Stack output.
:rtype: str
"""
self.logger.debug(
"Resolving external Stack output: {0}".format(self.argument)
)
profile = None
arguments = shlex.split(self.argument)
stack_argument = arguments[0]
if len(arguments) > 1:
profile = arguments[1]
dependency_stack_name, output_key = stack_argument.split("::")
return self._get_output_value(
dependency_stack_name, output_key, profile
)
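# Hypothetical usage sketch (illustration only, not part of this package):
# in a stack config the resolver argument is "<full stack name>::<output key>",
# optionally followed by an AWS profile, e.g.
#   parameters:
#     VpcId: !stack_output_external prod-network-vpc::VpcId prod-profile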
|
/sceptre-aws-stackoutput-external-resolver-1.0.0.tar.gz/sceptre-aws-stackoutput-external-resolver-1.0.0/resolver/stack_output_external.py
| 0.760651 | 0.235988 |
stack_output_external.py
|
pypi
|
import abc
import six
import logging
from botocore.exceptions import ClientError
from sceptre.resolvers import Resolver
from sceptre.helpers import normalise_path, sceptreise_path
from sceptre.exceptions import DependencyStackMissingOutputError
from sceptre.exceptions import StackDoesNotExistError
TEMPLATE_EXTENSION = ".yaml"
@six.add_metaclass(abc.ABCMeta)
class StackOutputBase(Resolver):
"""
An abstract base class which provides methods for getting Stack outputs.
"""
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(__name__)
super(StackOutputBase, self).__init__(*args, **kwargs)
def _get_output_value(self, stack_name, output_key, profile=None, region=None):
"""
Attempts to get the Stack output named by ``output_key``
:param stack_name: Name of the Stack to collect output for.
:type stack_name: str
:param output_key: The name of the Stack output to return.
:type output_key: str
:returns: Stack output value.
:rtype: str
:raises: sceptre.exceptions.DependencyStackMissingOutputError
"""
outputs = self._get_stack_outputs(stack_name, profile, region)
try:
return outputs[output_key]
except KeyError:
raise DependencyStackMissingOutputError(
"The Stack '{0}' does not have an output named '{1}'".format(
stack_name, output_key
)
)
def _get_stack_outputs(self, stack_name, profile=None, region=None):
"""
Communicates with AWS CloudFormation to fetch outputs from a specific
Stack.
:param stack_name: Name of the Stack to collect output for.
:type stack_name: str
:returns: A formatted version of the Stack outputs.
:rtype: dict
:raises: sceptre.exceptions.StackDoesNotExistError
"""
self.logger.debug("Collecting outputs from '{0}'...".format(
stack_name
))
connection_manager = self.stack.connection_manager
try:
response = connection_manager.call(
service="cloudformation",
command="describe_stacks",
kwargs={"StackName": stack_name},
profile=profile,
region=region,
stack_name=stack_name
)
except ClientError as e:
if "does not exist" in e.response["Error"]["Message"]:
raise StackDoesNotExistError(e.response["Error"]["Message"])
else:
raise e
else:
outputs = response["Stacks"][0].get("Outputs", {})
self.logger.debug("Outputs: {0}".format(outputs))
formatted_outputs = dict(
(output["OutputKey"], output["OutputValue"])
for output in outputs
)
return formatted_outputs
class StackOutput(StackOutputBase):
"""
Resolver for retrieving the value of a Stack output within the current
Sceptre StackGroup. Adds the target Stack to the dependencies of the
Stack using the Resolver.
:param argument: The Stack name and output name to get.
:type argument: str in the format ``"<stack name>::<output key>"``
"""
def __init__(self, *args, **kwargs):
super(StackOutput, self).__init__(*args, **kwargs)
def setup(self):
"""
Adds dependency to a Stack.
"""
dep_stack_name, self.output_key = self.argument.split("::")
self.dependency_stack_name = sceptreise_path(normalise_path(dep_stack_name))
self.stack.dependencies.append(self.dependency_stack_name)
def resolve(self):
"""
Retrieves the value of an output of an internal Stack.
:returns: The value of the Stack output.
:rtype: str
"""
self.logger.debug("Resolving Stack output: {0}".format(self.argument))
friendly_stack_name = self.dependency_stack_name.replace(TEMPLATE_EXTENSION, "")
stack = next(
stack for stack in self.stack.dependencies if stack.name == friendly_stack_name
)
stack_name = "-".join([stack.project_code, friendly_stack_name.replace("/", "-")])
return self._get_output_value(stack_name, self.output_key,
profile=stack.profile, region=stack.region)
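# Hypothetical usage sketch (illustration only, not part of this package):
# in a stack config the resolver argument is "<stack name>::<output key>", e.g.
#   parameters:
#     VpcId: !stack_output network/vpc.yaml::VpcId
# setup() records network/vpc.yaml as a dependency and resolve() reads the VpcId
# output from the corresponding CloudFormation stack.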
|
/sceptre-aws-stackoutput-resolver-1.0.0.tar.gz/sceptre-aws-stackoutput-resolver-1.0.0/resolver/stack_output.py
| 0.776623 | 0.254631 |
stack_output.py
|
pypi
|
import importlib.machinery
import importlib.util
import sys
from pathlib import Path
from typing import Type
from sceptre import exceptions
from sceptre_cdk_handler.cdk_builder import SceptreCdkStack
class ClassImporter:
def import_class(self, template_path: Path, class_name: str) -> Type[SceptreCdkStack]:
"""
Import the CDK Python template module.
Args:
template_path: The path of the CDK Template
class_name: The name of the class
Returns:
The CDK Stack class loaded from the imported CDK Python module
Raises:
SceptreException: Template File not found
SceptreException: importlib general exception
"""
template_module_name = self._enable_import_hierarchy(template_path)
loader = importlib.machinery.SourceFileLoader(template_module_name, str(template_path))
spec = importlib.util.spec_from_loader(template_module_name, loader)
template_module = importlib.util.module_from_spec(spec)
loader.exec_module(template_module)
try:
return getattr(template_module, class_name)
except AttributeError:
raise exceptions.SceptreException(
f"No class named {class_name} on template at {template_path}"
)
def _enable_import_hierarchy(self, template_path: Path) -> str:
resolved_template_path = template_path.resolve()
cwd = Path.cwd()
# If the template path we're importing isn't somewhere in the CWD, we can't know how far up
# to go with adding directories to the PATH, so we're not going to try. That could get kinda
# screwy and cause unintended consequences.
if cwd not in resolved_template_path.parents:
# We'll consider the file name (without the stem) to be the module name.
return template_path.stem
module_path_segments = [template_path.stem]
in_package_structure = True
# We're going to climb up the hierarchy and add the whole directory structure to the PATH.
# This would theoretically allow for imports from any level of the hierarchy. It's not ideal
# but it's really the only way we can know how high up the import chain goes. However, we do
# require each directory to have an __init__.py to consider it a part of the importable
# hierarchy.
for parent in resolved_template_path.parents:
sys.path.append(str(parent))
# If the parent directory is a valid python package in name and structure, we'll add it
# to the module name segments and keep climbing
if in_package_structure and (parent / '__init__.py').exists() and parent.name.isidentifier():
module_path_segments.insert(0, parent.name)
# But if the parent directory isn't a valid python package in name and structure, we'll
# stop building out the module path.
elif in_package_structure:
in_package_structure = False
# If we've climbed all the way up to the CWD.
if parent == cwd:
break
# We'll make the full module path by joining all the segments together.
return '.'.join(module_path_segments)
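# Illustrative example (assumed project layout, not part of this package):
# with cwd=/project, template_path=/project/templates/networking/vpc_stack.py, and
# __init__.py files in templates/ and templates/networking/ (but not in /project
# itself), _enable_import_hierarchy() returns "templates.networking.vpc_stack" and
# appends /project/templates/networking, /project/templates and /project to
# sys.path, so import_class() can load the stack class with its package-relative
# imports intact.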
|
/sceptre-cdk-handler-2.1.0.tar.gz/sceptre-cdk-handler-2.1.0/sceptre_cdk_handler/class_importer.py
| 0.575349 | 0.230075 |
class_importer.py
|
pypi
|
import functools
import logging
import threading
import time
import boto3
from os import environ
from botocore.exceptions import ClientError
from .helpers import mask_key
from .exceptions import InvalidAWSCredentialsError, RetryLimitExceededError
def _retry_boto_call(func):
"""
Retries a Boto3 call up to 30 times if request rate limits are hit.
The time waited between retries increases linearly. If rate limits are
hit 30 times, _retry_boto_call raises a
sceptre.exceptions.RetryLimitExceededError.
:param func: A function that uses boto calls
:type func: function
:returns: The decorated function.
:rtype: function
:raises: sceptre.exceptions.RetryLimitExceededError
"""
logger = logging.getLogger(__name__)
@functools.wraps(func)
def decorated(*args, **kwargs):
max_retries = 30
attempts = 1
while attempts < max_retries:
try:
return func(*args, **kwargs)
except ClientError as e:
if e.response["Error"]["Code"] == "Throttling":
logger.error("Request limit exceeded, pausing...")
time.sleep(attempts)
attempts += 1
else:
raise
raise RetryLimitExceededError(
"Exceeded request limit {0} times. Aborting.".format(
max_retries
)
)
return decorated
class ConnectionManager(object):
"""
The Connection Manager is used to create boto3 clients for
the various AWS services that Sceptre needs to interact with.
:param profile: The AWS credentials profile that should be used.
:type profile: str
:param stack_name: The CloudFormation stack name for this connection.
:type stack_name: str
:param region: The region to use.
:type region: str
"""
_session_lock = threading.Lock()
_client_lock = threading.Lock()
_boto_sessions = {}
_clients = {}
_stack_keys = {}
def __init__(self, region, profile=None, stack_name=None):
self.logger = logging.getLogger(__name__)
self.region = region
self.profile = profile
self.stack_name = stack_name
if stack_name:
self._stack_keys[stack_name] = (region, profile)
def __repr__(self):
return (
"sceptre.connection_manager.ConnectionManager(region='{0}', "
"profile='{1}', stack_name='{2}')".format(
self.region, self.profile, self.stack_name
)
)
def _get_session(self, profile, region=None):
"""
Returns a boto session in the target account.
If a ``profile`` is specified in ConnectionManager's initialiser,
then the profile is used to generate temporary credentials to create
the Boto session. If ``profile`` is not specified then the default
profile is assumed to create the boto session.
:returns: The Boto3 session.
:rtype: boto3.session.Session
:raises: botocore.exceptions.ClientError
"""
with self._session_lock:
self.logger.debug("Getting Boto3 session")
key = (region, profile)
if self._boto_sessions.get(key) is None:
self.logger.debug("No Boto3 session found, creating one...")
self.logger.debug("Using cli credentials...")
# Credentials from env take priority over profile
config = {
"profile_name": profile,
"region_name": region,
"aws_access_key_id": environ.get("AWS_ACCESS_KEY_ID"),
"aws_secret_access_key": environ.get(
"AWS_SECRET_ACCESS_KEY"
),
"aws_session_token": environ.get("AWS_SESSION_TOKEN")
}
session = boto3.session.Session(**config)
self._boto_sessions[key] = session
if session.get_credentials() is None:
raise InvalidAWSCredentialsError(
"Session credentials were not found. Profile: {0}. Region: {1}.".format(
config["profile_name"], config["region_name"]
)
)
self.logger.debug(
"Using credential set from %s: %s",
session.get_credentials().method,
{
"AccessKeyId": mask_key(
session.get_credentials().access_key
),
"SecretAccessKey": mask_key(
session.get_credentials().secret_key
),
"Region": session.region_name
}
)
self.logger.debug("Boto3 session created")
return self._boto_sessions[key]
def _get_client(self, service, region, profile, stack_name):
"""
Returns the Boto3 client associated with <service>.
Equivalent to calling Boto3.client(<service>). Gets the client using
``boto_session``.
:param service: The Boto3 service to return a client for.
:type service: str
:returns: The Boto3 client.
:rtype: boto3.client.Client
"""
with self._client_lock:
key = (service, region, profile, stack_name)
if self._clients.get(key) is None:
self.logger.debug(
"No %s client found, creating one...", service
)
self._clients[key] = self._get_session(
profile, region
).client(service)
return self._clients[key]
@_retry_boto_call
def call(
self, service, command, kwargs=None, profile=None, region=None,
stack_name=None
):
"""
Makes a thread-safe Boto3 client call.
Equivalent to ``boto3.client(<service>).<command>(**kwargs)``.
:param service: The Boto3 service to return a client for.
:type service: str
:param command: The Boto3 command to call.
:type command: str
:param kwargs: The keyword arguments to supply to <command>.
:type kwargs: dict
:returns: The response from the Boto3 call.
:rtype: dict
"""
if region is None and profile is None:
if stack_name and stack_name in self._stack_keys:
region, profile = self._stack_keys[stack_name]
else:
region = self.region
profile = self.profile
if kwargs is None: # pragma: no cover
kwargs = {}
client = self._get_client(service, region, profile, stack_name)
return getattr(client, command)(**kwargs)
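# Hypothetical usage sketch (illustration only, not part of this package):
# cm = ConnectionManager(region="eu-west-1", profile="dev")
# response = cm.call(service="s3", command="list_buckets")
# bucket_names = [b["Name"] for b in response["Buckets"]]
# The underlying session and client are cached and reused for subsequent calls
# with the same (service, region, profile, stack_name) key.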
|
/sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/connection_manager.py
| 0.723993 | 0.162347 |
connection_manager.py
|
pypi
|
from os import path
from sceptre.helpers import normalise_path
class SceptreContext(object):
"""
SceptreContext is a place that holds data that is relevant to the
project, including references to the project paths such as the path to your
Sceptre project, templates path, config path, and the default names for
your configuration files.
:param project_path: Absolute path to the base sceptre project folder
:type project_path: str
:param command_path: The relative path to either StackGroup or Stack.
:type command_path: str
:param user_variables: Used to replace the value of any item in a Config\
file with a value defined by the CLI flag or in a YAML variable\
file
:type user_variables: dict
:param options: The options specified in by the CLI command
:type options: dict
:param output_format: Specify the output format. Available formats:\
[yaml, json]
:type output_format: str
:param no_colour: Specify whether colouring should be used in the CLI\
output
:type no_colour: bool
"""
def __init__(self, project_path, command_path,
user_variables=None, options=None, output_format=None,
no_colour=False, ignore_dependencies=False):
# project_path: absolute path to the base sceptre project folder
# e.g. absolute_path/to/sceptre_directory
self.project_path = normalise_path(project_path)
# config_path: holds the project stack_groups
# e.g {project_path}/config
self.config_path = "config" # user definable later in v2
# command_path path to either stack group or stack
# e.g. {project_path/config_path}/command_path
self.command_path = normalise_path(command_path)
self.normal_command_path = normalise_path(command_path)
# config_file: stack group config. User definable later in v2
# e.g. {project_path/config/command_path}/config_file
self.config_file = "config.yaml"
# templates_path: holds templates. User definable later in v2
# e.g. {project_path/}templates
self.templates_path = "templates"
self.user_variables = user_variables if user_variables is not None else {}
self.options = options if options else {}
self.output_format = output_format if output_format else ""
self.no_colour = no_colour if no_colour is True else False
self.ignore_dependencies = ignore_dependencies if ignore_dependencies is True else False
def __repr__(self):
return ("sceptre.context.SceptreContext("
"project_path='{project_path}', "
"command_path='{command_path}', "
"user_variables={user_variables}, "
"options={options}, "
"output_format='{output_format}', "
"no_colour={no_colour}, "
"ignore_dependencies={ignore_dependencies})".format(
project_path=self.project_path,
command_path=self.command_path,
user_variables=self.user_variables,
options=self.options,
output_format=self.output_format,
no_colour=self.no_colour,
ignore_dependencies=self.ignore_dependencies
))
def __eq__(self, context):
return (
self.project_path == context.project_path
and self.config_path == context.config_path
and self.command_path == context.command_path
and self.normal_command_path == context.normal_command_path
and self.config_file == context.config_file
and self.templates_path == context.templates_path
and self.user_variables == context.user_variables
and self.options == context.options
and self.output_format == context.output_format
and self.no_colour == context.no_colour
and self.ignore_dependencies == context.ignore_dependencies
)
def __hash__(self):
return hash(str(self))
def full_config_path(self):
"""
Returns the config path in the format: ``project_path/config_path``.
:returns: The absolute path to the config directory
:rtype: str
"""
return path.join(self.project_path, self.config_path)
def full_command_path(self):
"""
Returns the command path in the format:
``project_path/config_path/command_path``.
:returns: The absolute path to the path that will be executed
:rtype: str
"""
return path.join(self.project_path, self.config_path,
self.command_path)
def full_templates_path(self):
"""
Returns the templates path in the format: project_path/templates_path.
:returns: The absolute path to the templates directory
:rtype: str
"""
return path.join(self.project_path, self.templates_path)
def command_path_is_stack(self):
"""
Returns True if the command path is a file.
:returns: True if the command path is a file
:rtype: bool
"""
return path.isfile(
path.join(
self.project_path,
self.config_path,
self.command_path
)
)
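# Hypothetical usage sketch (illustration only, not part of this package;
# paths shown for a POSIX system):
# context = SceptreContext(
#     project_path="/home/user/my-project",
#     command_path="dev/vpc.yaml",
# )
# context.full_config_path()   # -> "/home/user/my-project/config"
# context.full_command_path()  # -> "/home/user/my-project/config/dev/vpc.yaml"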
|
/sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/context.py
| 0.81468 | 0.301041 |
context.py
|
pypi
|
import logging
from typing import Mapping, Sequence
from sceptre.connection_manager import ConnectionManager
from sceptre.helpers import get_external_stack_name
from sceptre.helpers import sceptreise_path
from sceptre.hooks import HookProperty
from sceptre.resolvers import ResolvableProperty
from sceptre.template import Template
class Stack(object):
"""
Stack stores information about a particular CloudFormation Stack.
:param name: The name of the Stack.
:type name: str
:param project_code: A code which is prepended to the Stack names\
of all Stacks built by Sceptre.
:type project_code: str
:param template_path: The relative path to the CloudFormation, Jinja2\
or Python template to build the Stack from.
:type template_path: str
:param region: The AWS region to build Stacks in.
:type region: str
:param template_bucket_name: The name of the S3 bucket the Template is uploaded to.
:type template_bucket_name: str
:param template_key_prefix: A prefix to the key used to store templates uploaded to S3
:type template_key_prefix: str
:param required_version: A PEP 440 compatible version specifier. If the Sceptre version does\
not fall within the given version requirement it will abort.
:type required_version: str
:param parameters: The keys must match up with the name of the parameter.\
The value must be of the type as defined in the template.
:type parameters: dict
:param sceptre_user_data: Data passed into\
`sceptre_handler(sceptre_user_data)` function in Python templates\
or accessible under `sceptre_user_data` variable within Jinja2\
templates.
:type sceptre_user_data: dict
:param hooks: A list of arbitrary shell or python commands or scripts to\
run.
:type hooks: sceptre.hooks.Hook
:param s3_details:
:type s3_details: dict
:param dependencies: The relative paths, including the file extension, of the\
Stacks that this Stack depends on.
:type dependencies: list
:param role_arn: The ARN of a CloudFormation Service Role that is assumed\
by CloudFormation to create, update or delete resources.
:type role_arn: str
:param protected: Stack protection against execution.
:type protected: bool
:param tags: CloudFormation Tags to be applied to the Stack.
:type tags: dict
:param external_name:
:type external_name: str
:param notifications: SNS topic ARNs to publish Stack related events to.\
A maximum of 5 ARNs can be specified per Stack.
:type notifications: list
:param on_failure: This parameter describes the action taken by\
CloudFormation when a Stack fails to create.
:type on_failure: str
:param profile: The name of the profile as defined in ~/.aws/config and\
~/.aws/credentials.
:type profile: str
:param stack_timeout: A timeout in minutes before considering the Stack\
deployment as failed. After the specified timeout, the Stack will\
be rolled back. Specifying zero, as well as omitting the field,\
will result in no timeout. Supports only positive integer value.
:type stack_timeout: int
:param stack_group_config: The StackGroup config for the Stack
:type stack_group_config: dict
"""
parameters = ResolvableProperty("parameters")
_sceptre_user_data = ResolvableProperty("_sceptre_user_data")
notifications = ResolvableProperty("notifications")
hooks = HookProperty("hooks")
def __init__(
self, name, project_code, template_path, region, template_bucket_name=None,
template_key_prefix=None, required_version=None, parameters=None,
sceptre_user_data=None, hooks=None, s3_details=None,
dependencies=None, role_arn=None, protected=False, tags=None,
external_name=None, notifications=None, on_failure=None, profile=None,
stack_timeout=0, stack_group_config=None
):
self.logger = logging.getLogger(__name__)
self.name = sceptreise_path(name)
self.project_code = project_code
self.region = region
self.template_bucket_name = template_bucket_name
self.template_key_prefix = template_key_prefix
self.required_version = required_version
self.external_name = external_name or get_external_stack_name(self.project_code, self.name)
self.template_path = template_path
self.s3_details = s3_details
self._template = None
self._connection_manager = None
self.protected = protected
self.role_arn = role_arn
self.on_failure = on_failure
self.dependencies = dependencies or []
self.tags = tags or {}
self.stack_timeout = stack_timeout
self.profile = profile
self.hooks = hooks or {}
self.parameters = parameters or {}
self._sceptre_user_data = sceptre_user_data or {}
self._sceptre_user_data_is_resolved = False
self.notifications = notifications or []
self.stack_group_config = stack_group_config or {}
def __repr__(self):
return (
"sceptre.stack.Stack("
"name='{name}', "
"project_code={project_code}, "
"template_path={template_path}, "
"region={region}, "
"template_bucket_name={template_bucket_name}, "
"template_key_prefix={template_key_prefix}, "
"required_version={required_version}, "
"profile={profile}, "
"sceptre_user_data={sceptre_user_data}, "
"parameters={parameters}, "
"hooks={hooks}, "
"s3_details={s3_details}, "
"dependencies={dependencies}, "
"role_arn={role_arn}, "
"protected={protected}, "
"tags={tags}, "
"external_name={external_name}, "
"notifications={notifications}, "
"on_failure={on_failure}, "
"stack_timeout={stack_timeout}, "
"stack_group_config={stack_group_config}"
")".format(
name=self.name,
project_code=self.project_code,
template_path=self.template_path,
region=self.region,
template_bucket_name=self.template_bucket_name,
template_key_prefix=self.template_key_prefix,
required_version=self.required_version,
profile=self.profile,
sceptre_user_data=self.sceptre_user_data,
parameters=self.parameters,
hooks=self.hooks,
s3_details=self.s3_details,
dependencies=self.dependencies,
role_arn=self.role_arn,
protected=self.protected,
tags=self.tags,
external_name=self.external_name,
notifications=self.notifications,
on_failure=self.on_failure,
stack_timeout=self.stack_timeout,
stack_group_config=self.stack_group_config
)
)
def __str__(self):
return self.name
def __eq__(self, stack):
return (
self.name == stack.name and
self.project_code == stack.project_code and
self.template_path == stack.template_path and
self.region == stack.region and
self.template_bucket_name == stack.template_bucket_name and
self.template_key_prefix == stack.template_key_prefix and
self.required_version == stack.required_version and
self.profile == stack.profile and
self.sceptre_user_data == stack.sceptre_user_data and
self.parameters == stack.parameters and
self.hooks == stack.hooks and
self.s3_details == stack.s3_details and
self.dependencies == stack.dependencies and
self.role_arn == stack.role_arn and
self.protected == stack.protected and
self.tags == stack.tags and
self.external_name == stack.external_name and
self.notifications == stack.notifications and
self.on_failure == stack.on_failure and
self.stack_timeout == stack.stack_timeout and
self.stack_group_config == stack.stack_group_config
)
def __hash__(self):
return hash(str(self))
@property
def connection_manager(self):
"""
Returns ConnectionManager.
:returns: ConnectionManager.
:rtype: ConnectionManager
"""
if self._connection_manager is None:
self._connection_manager = ConnectionManager(
self.region, self.profile, self.external_name
)
return self._connection_manager
@property
def sceptre_user_data(self):
"""Returns sceptre_user_data after ensuring that it is fully resolved.
:rtype: dict or list or None
"""
if not self._sceptre_user_data_is_resolved:
self._sceptre_user_data_is_resolved = True
self._resolve_sceptre_user_data()
return self._sceptre_user_data
@property
def template(self):
"""
Returns the CloudFormation Template used to create the Stack.
:returns: The Stack's template.
:rtype: str
"""
if self._template is None:
self._template = Template(
path=self.template_path,
sceptre_user_data=self.sceptre_user_data,
s3_details=self.s3_details,
connection_manager=self.connection_manager
)
return self._template
def _resolve_sceptre_user_data(self):
data = self._sceptre_user_data
if isinstance(data, Mapping):
iterator = data.values()
elif isinstance(data, Sequence):
iterator = data
else:
return
for value in iterator:
if isinstance(value, ResolvableProperty.ResolveLater):
value()
|
/sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/stack.py
| 0.909689 | 0.384017 |
stack.py
|
pypi
|
from os import sep
from functools import partial
import six
import yaml
from sceptre.exceptions import PathConversionError
def get_external_stack_name(project_code, stack_name):
"""
Returns the name given to a stack in CloudFormation.
:param project_code: The project code, as defined in config.yaml.
:type project_code: str
:param stack_name: The name of the stack.
:type stack_name: str
:returns: The name given to the stack in CloudFormation.
:rtype: str
"""
return "-".join([
project_code,
stack_name.replace("/", "-")
])
def mask_key(key):
"""
Returns a masked version of ``key``.
In the returned version, all but the last four characters are replaced with
the character "*".
:param key: The string to mask.
:type key: str
:returns: A masked version of the key
:rtype: str
"""
num_mask_chars = len(key) - 4
return "".join([
"*" if i < num_mask_chars else c
for i, c in enumerate(key)
])
def _call_func_on_values(func, attr, cls):
"""
Searches through dictionary or list for objects of type `cls` and calls the
supplied function `func`. Supports nested dictionaries and lists.
Does not detect objects used as keys in dictionaries.
:param attr: A dictionary or list to search through.
:type attr: dict or list
:return: The dictionary or list structure.
:rtype: dict or list
"""
def func_on_instance(key):
if isinstance(value, cls):
func(attr, key, value)
elif isinstance(value, list) or isinstance(value, dict):
_call_func_on_values(func, value, cls)
if isinstance(attr, dict):
for key, value in attr.items():
func_on_instance(key)
elif isinstance(attr, list):
for index, value in enumerate(attr):
func_on_instance(index)
return attr
def normalise_path(path):
"""
Converts a path to use correct path separator.
Raises a PathConversionError if the path has a
trailing slash.
:param path: A directory path
:type path: str
:raises: sceptre.exceptions.PathConversionError
:returns: A normalised path using the operating system's path separator.
:rtype: str
"""
if sep == '/':
path = path.replace('\\', '/')
elif sep == '\\':
path = path.replace('/', '\\')
if path.endswith("/") or path.endswith("\\"):
raise PathConversionError(
"'{0}' is an invalid path string. Paths should "
"not have trailing slashes.".format(path)
)
return path
def sceptreise_path(path):
"""
Converts a path to use correct sceptre path separator.
Raises a PathConversionError if the path has a
trailing slash.
:param path: A directory path
:type path: str
:raises: sceptre.exceptions.PathConversionError
:returns: A normalised path with forward slashes.
:rtype: str
"""
path = path.replace('\\', '/')
if path.endswith("/") or path.endswith("\\"):
raise PathConversionError(
"'{0}' is an invalid path string. Paths should "
"not have trailing slashes.".format(path)
)
return path
CFN_FNS = [
'And',
'Base64',
'Cidr',
'Equals',
'FindInMap',
'GetAtt',
'GetAZs',
'If',
'ImportValue',
'Join',
'Not',
'Or',
'Select',
'Split',
'Sub',
'Transform',
]
CFN_TAGS = [
'Condition',
'Ref',
]
def _getatt_constructor(loader, node):
if isinstance(node.value, six.text_type):
return node.value.split('.', 1)
elif isinstance(node.value, list):
seq = loader.construct_sequence(node)
for item in seq:
if not isinstance(item, six.text_type):
raise ValueError(
"Fn::GetAtt does not support complex datastructures")
return seq
else:
raise ValueError("Fn::GetAtt only supports string or list values")
def _tag_constructor(loader, tag_suffix, node):
if tag_suffix not in CFN_FNS and tag_suffix not in CFN_TAGS:
raise ValueError("Bad tag: !{tag_suffix}. Supported tags are: "
"{supported_tags}".format(
tag_suffix=tag_suffix,
supported_tags=", ".join(sorted(CFN_TAGS + CFN_FNS))
))
if tag_suffix in CFN_FNS:
tag_suffix = "Fn::{tag_suffix}".format(tag_suffix=tag_suffix)
data = {}
yield data
if tag_suffix == 'Fn::GetAtt':
constructor = partial(_getatt_constructor, loader)
elif isinstance(node, yaml.ScalarNode):
constructor = loader.construct_scalar
elif isinstance(node, yaml.SequenceNode):
constructor = loader.construct_sequence
elif isinstance(node, yaml.MappingNode):
constructor = loader.construct_mapping
data[tag_suffix] = constructor(node)
class CfnYamlLoader(yaml.SafeLoader):
pass
CfnYamlLoader.add_multi_constructor("!", _tag_constructor)
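# Illustrative examples derived from the helpers above (not part of this package):
# get_external_stack_name("proj", "dev/vpc")  # -> "proj-dev-vpc"
# mask_key("AKIAIOSFODNN7EXAMPLE")            # -> "****************MPLE"
# normalise_path("config/dev/")               # raises PathConversionError
# yaml.load("Foo: !GetAtt Bar.Arn", Loader=CfnYamlLoader)
#                                             # -> {"Foo": {"Fn::GetAtt": ["Bar", "Arn"]}}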
|
/sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/helpers.py
| 0.844409 | 0.360911 |
helpers.py
|
pypi
|
import imp
import logging
import os
import sys
import threading
import botocore
import jinja2
from .exceptions import UnsupportedTemplateFileTypeError
from .exceptions import TemplateSceptreHandlerError
class Template(object):
"""
Template represents an AWS CloudFormation template. It is responsible for
loading, storing and optionally uploading local templates for use by
CloudFormation.
:param path: The absolute path to the file which stores the CloudFormation\
template.
:type path: str
:param sceptre_user_data: A dictionary of arbitrary data to be passed to\
a handler function in an external Python script.
:type sceptre_user_data: dict
:param connection_manager:
:type connection_manager: sceptre.connection_manager.ConnectionManager
:param s3_details:
:type s3_details: dict
"""
_boto_s3_lock = threading.Lock()
def __init__(
self, path, sceptre_user_data, connection_manager=None, s3_details=None
):
self.logger = logging.getLogger(__name__)
self.path = path
self.sceptre_user_data = sceptre_user_data
self.connection_manager = connection_manager
self.s3_details = s3_details
self.name = os.path.basename(path).split(".")[0]
self._body = None
def __repr__(self):
return (
"sceptre.template.Template(name='{0}', path='{1}', "
"sceptre_user_data={2}, s3_details={3})".format(
self.name, self.path, self.sceptre_user_data, self.s3_details
)
)
@property
def body(self):
"""
Represents body of the CloudFormation template.
:returns: The body of the CloudFormation template.
:rtype: str
"""
if self._body is None:
file_extension = os.path.splitext(self.path)[1]
if file_extension in {".json", ".yaml", ".template"}:
with open(self.path) as template_file:
self._body = template_file.read()
elif file_extension == ".j2":
self._body = self._render_jinja_template(
os.path.dirname(self.path),
os.path.basename(self.path),
{"sceptre_user_data": self.sceptre_user_data}
)
elif file_extension == ".py":
self._body = self._call_sceptre_handler()
else:
raise UnsupportedTemplateFileTypeError(
"Template has file extension %s. Only .py, .yaml, "
".template, .json and .j2 are supported.",
os.path.splitext(self.path)[1]
)
return self._body
def _call_sceptre_handler(self):
"""
Calls the function `sceptre_handler` within templates that are python
scripts.
:returns: The string returned from sceptre_handler in the template.
:rtype: str
:raises: IOError
:raises: TemplateSceptreHandlerError
"""
# Get relative path as list between current working directory and where
# the template is
# NB: this is a horrible hack...
relpath = os.path.relpath(self.path, os.getcwd()).split(os.path.sep)
relpaths_to_add = [
os.path.sep.join(relpath[:i+1])
for i in range(len(relpath[:-1]))
]
# Add any directory between the current working directory and where
# the template is to the python path
for directory in relpaths_to_add:
sys.path.append(os.path.join(os.getcwd(), directory))
self.logger.debug(
"%s - Getting CloudFormation from %s", self.name, self.path
)
if not os.path.isfile(self.path):
raise IOError("No such file or directory: '%s'", self.path)
module = imp.load_source(self.name, self.path)
try:
body = module.sceptre_handler(self.sceptre_user_data)
except AttributeError as e:
if 'sceptre_handler' in str(e):
raise TemplateSceptreHandlerError(
"The template does not have the required "
"'sceptre_handler(sceptre_user_data)' function."
)
else:
raise e
for directory in relpaths_to_add:
sys.path.remove(os.path.join(os.getcwd(), directory))
return body
def upload_to_s3(self):
"""
Uploads the template to ``bucket_name`` and returns its URL.
The Template is uploaded with the ``bucket_key``.
:returns: The URL of the Template object in S3.
:rtype: str
:raises: botocore.exceptions.ClientError
"""
self.logger.debug("%s - Uploading template to S3...", self.name)
with self._boto_s3_lock:
if not self._bucket_exists():
self._create_bucket()
# Remove any leading or trailing slashes the user may have added.
bucket_name = self.s3_details["bucket_name"]
bucket_key = self.s3_details["bucket_key"]
bucket_region = self.s3_details["bucket_region"]
self.logger.debug(
"%s - Uploading template to: 's3://%s/%s'",
self.name, bucket_name, bucket_key
)
self.connection_manager.call(
service="s3",
command="put_object",
kwargs={
"Bucket": bucket_name,
"Key": bucket_key,
"Body": self.body,
"ServerSideEncryption": "AES256"
}
)
china_regions = ["cn-north-1", "cn-northwest-1"]
if bucket_region in china_regions:
url = "https://{0}.s3.{1}.amazonaws.com.cn/{2}".format(
bucket_name, bucket_region, bucket_key
)
else:
url = "https://{0}.s3.amazonaws.com/{1}".format(
bucket_name, bucket_key
)
self.logger.debug("%s - Template URL: '%s'", self.name, url)
return url
def _bucket_exists(self):
"""
Checks if the bucket ``bucket_name`` exists.
:returns: Boolean whether the bucket exists
:rtype: bool
:raises: botocore.exception.ClientError
"""
bucket_name = self.s3_details["bucket_name"]
self.logger.debug(
"%s - Attempting to find template bucket '%s'",
self.name, bucket_name
)
try:
self.connection_manager.call(
service="s3",
command="head_bucket",
kwargs={"Bucket": bucket_name}
)
except botocore.exceptions.ClientError as exp:
if exp.response["Error"]["Message"] == "Not Found":
self.logger.debug(
"%s - %s bucket not found.", self.name, bucket_name
)
return False
else:
raise
self.logger.debug(
"%s - Found template bucket '%s'", self.name, bucket_name
)
return True
def _create_bucket(self):
"""
Create the s3 bucket ``bucket_name``.
:raises: botocore.exception.ClientError
"""
bucket_name = self.s3_details["bucket_name"]
self.logger.debug(
"%s - Creating new bucket '%s'", self.name, bucket_name
)
if self.connection_manager.region == "us-east-1":
self.connection_manager.call(
service="s3",
command="create_bucket",
kwargs={"Bucket": bucket_name}
)
else:
self.connection_manager.call(
service="s3",
command="create_bucket",
kwargs={
"Bucket": bucket_name,
"CreateBucketConfiguration": {
"LocationConstraint": self.connection_manager.region
}
}
)
def get_boto_call_parameter(self):
"""
Returns the CloudFormation template location.
Uploads the template to S3 and returns the object's URL, or returns
the template itself.
:returns: The boto call parameter for the template.
:rtype: dict
"""
if self.s3_details:
url = self.upload_to_s3()
return {"TemplateURL": url}
else:
return {"TemplateBody": self.body}
@staticmethod
def _render_jinja_template(template_dir, filename, jinja_vars):
"""
Renders a jinja template.
Sceptre supports passing sceptre_user_data to JSON and YAML
CloudFormation templates using Jinja2 templating.
:param template_dir: The directory containing the template.
:type template_dir: str
:param filename: The name of the template file.
:type filename: str
:param jinja_vars: Dict of variables to render into the template.
:type jinja_vars: dict
:returns: The body of the CloudFormation template.
:rtype: str
"""
logger = logging.getLogger(__name__)
logger.debug("%s Rendering CloudFormation template", filename)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir),
undefined=jinja2.StrictUndefined
)
template = env.get_template(filename)
body = template.render(**jinja_vars)
return body
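# Hypothetical usage sketch (illustration only, not part of this package):
# template = Template(path="templates/vpc.py", sceptre_user_data={"cidr": "10.0.0.0/16"})
# template.body  # imports templates/vpc.py and returns sceptre_handler(sceptre_user_data)
# A matching Python template would define, for example:
#     def sceptre_handler(sceptre_user_data):
#         return '{"Resources": {}}'  # the CloudFormation template body as a string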
|
/sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/template.py
| 0.672762 | 0.150247 |
template.py
|
pypi
|
class SceptreException(Exception):
"""
Base class for all Sceptre errors
"""
pass
class ProjectAlreadyExistsError(SceptreException):
"""
Error raised when Sceptre project already exists.
"""
pass
class InvalidSceptreDirectoryError(SceptreException):
"""
Error raised if a sceptre directory is invalid.
"""
pass
class UnsupportedTemplateFileTypeError(SceptreException):
"""
Error raised if an unsupported template file type is used.
"""
pass
class TemplateSceptreHandlerError(SceptreException):
"""
Error raised if sceptre_handler() is not defined correctly in the template.
"""
pass
class DependencyStackNotLaunchedError(SceptreException):
"""
Error raised when a dependency stack has not been launched
"""
pass
class DependencyStackMissingOutputError(SceptreException):
"""
Error raised if a dependency stack does not have the correct outputs.
"""
pass
class CircularDependenciesError(SceptreException):
"""
Error raised if there are circular dependencies
"""
pass
class UnknownStackStatusError(SceptreException):
"""
Error raised if an unknown stack status is received.
"""
pass
class RetryLimitExceededError(SceptreException):
"""
Error raised if the request limit is exceeded.
"""
pass
class UnknownHookTypeError(SceptreException):
"""
Error raised if an unrecognised hook type is received.
"""
class VersionIncompatibleError(SceptreException):
"""
Error raised if configuration incompatible with running version.
"""
pass
class ProtectedStackError(SceptreException):
"""
Error raised upon execution of an action under active protection
"""
pass
class UnknownStackChangeSetStatusError(SceptreException):
"""
Error raised if an unknown stack change set status is received.
"""
pass
class InvalidHookArgumentTypeError(SceptreException):
"""
Error raised if a hook's argument type is invalid.
"""
pass
class InvalidHookArgumentSyntaxError(SceptreException):
"""
Error raised if a hook's argument syntax is invalid.
"""
pass
class InvalidHookArgumentValueError(SceptreException):
"""
Error raised if a hook's argument value is invalid.
"""
pass
class CannotUpdateFailedStackError(SceptreException):
"""
Error raised when a failed stack is updated.
"""
pass
class StackDoesNotExistError(SceptreException):
"""
Error raised when a stack does not exist.
"""
pass
class ConfigFileNotFoundError(SceptreException):
"""
Error raised when a config file does not exist.
"""
pass
class InvalidConfigFileError(SceptreException):
"""
Error raised when a config file lacks mandatory keys.
"""
pass
class PathConversionError(SceptreException):
"""
Error raised when a path is unable to be converted.
"""
pass
class InvalidAWSCredentialsError(SceptreException):
"""
Error raised when AWS credentials are invalid.
"""
pass
| /sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/exceptions.py | 0.780955 | 0.158337 | exceptions.py | pypi |
import abc
import logging
from functools import wraps
from sceptre.context import SceptreContext
from sceptre.helpers import _call_func_on_values
class HookData(object):
def __init__(self, context):
if isinstance(context, SceptreContext):
self.context = context
class Hook(HookData):
"""
Hook is an abstract base class that should be inherited by all hooks.
:param argument: The argument of the hook.
:type argument: str
:param stack: The associated stack of the hook.
:type stack: sceptre.stack.Stack
"""
__metaclass__ = abc.ABCMeta
def __init__(self, argument=None, stack=None):
self.logger = logging.getLogger(__name__)
self.argument = argument
self.stack = stack
def setup(self):
"""
        setup is a method that may be overwritten by inheriting classes. Allows
        hooks to run initialisation steps when config is first read.
"""
pass # pragma: no cover
@abc.abstractmethod
def run(self):
"""
run is an abstract method which must be overwritten by all
inheriting classes. Run should execute the logic of the hook.
"""
pass # pragma: no cover
class HookProperty(object):
"""
    This is a descriptor class used to store an attribute that may contain
    Hook objects. Used to set up Hooks when they are added as an attribute.
    Supports nested dictionaries and lists.
:param name: Attribute suffix used to store the property in the instance.
:type name: str
"""
def __init__(self, name):
self.name = "_" + name
self.logger = logging.getLogger(__name__)
def __get__(self, instance, type):
"""
Attribute getter for Hook containing data structure.
:return: The attribute stored with the suffix ``name`` in the instance.
:rtype: dict or list
"""
return getattr(instance, self.name)
def __set__(self, instance, value):
"""
Attribute setter which adds a stack reference to any hooks in the
data structure `value` and calls the setup method.
"""
def setup(attr, key, value):
value.stack = instance
value.setup()
_call_func_on_values(setup, value, Hook)
setattr(instance, self.name, value)
def execute_hooks(hooks):
"""
    Runs each Hook object found in a list of hooks. Non-Hook items and
    non-list inputs are ignored.
    :param hooks: A list of Hook objects to run.
    :type hooks: list
"""
if isinstance(hooks, list):
for hook in hooks:
if isinstance(hook, Hook):
hook.run()
def add_stack_hooks(func):
"""
A function decorator to trigger the before and after hooks, relative
to the decorated function's name.
:param func: a function that operates on a stack
:type func: function
"""
@wraps(func)
def decorated(self, *args, **kwargs):
execute_hooks(self.stack.hooks.get("before_" + func.__name__))
response = func(self, *args, **kwargs)
execute_hooks(self.stack.hooks.get("after_" + func.__name__))
return response
return decorated
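# Illustrative sketch, not part of the original module: a minimal Hook subclass
# and fake stack/action objects showing how ``add_stack_hooks`` looks up hook
# lists by the decorated method's name ("before_create" / "after_create").
if __name__ == "__main__":  # pragma: no cover
    class EchoHook(Hook):
        def run(self):
            print("hook ran with argument: {}".format(self.argument))

    class FakeStack(object):
        hooks = {
            "before_create": [EchoHook("starting")],
            "after_create": [EchoHook("finished")],
        }

    class FakeActions(object):
        stack = FakeStack()

        @add_stack_hooks
        def create(self):
            print("creating stack")

    # prints the "starting" hook, then "creating stack", then the "finished" hook
    FakeActions().create()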
| /sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/hooks/__init__.py | 0.790126 | 0.255164 | __init__.py | pypi |
import logging
import networkx as nx
from sceptre.exceptions import CircularDependenciesError
class StackGraph(object):
"""
A Directed Acyclic Graph representing the relationship between a Stack
    and its dependencies. Responsible for initialising the graph based on a set
of Stacks.
"""
def __init__(self, stacks):
"""
Initialises a StackGraph based on a `set` of Stacks.
:param stacks: A set of Stacks.
:type stacks: set
"""
self.logger = logging.getLogger(__name__)
self.graph = nx.DiGraph()
self._generate_graph(stacks)
def __repr__(self):
return str(nx.convert.to_dict_of_lists(self.graph))
def __iter__(self):
return self.graph.__iter__()
def filtered(self, source_stacks, reverse=False):
graph = (nx.reverse if reverse else nx.DiGraph)(self.graph)
relevant = set(source_stacks)
for stack in source_stacks:
relevant |= nx.algorithms.dag.ancestors(graph, stack)
graph.remove_nodes_from({stack for stack in graph if stack not in relevant})
filtered = StackGraph(set())
filtered.graph = graph
return filtered
def count_dependencies(self, stack):
"""
        Returns the number of incoming edges a given Stack has in the
        StackGraph. The number of incoming edges also represents the number
        of Stacks that the given Stack depends on.
"""
return self.graph.in_degree(stack)
def remove_stack(self, stack):
"""
Removes a Stack from the StackGraph. This operation will also remove
        all adjacent edges that represent a 'depends on' relationship with
other Stacks.
"""
return self.graph.remove_node(stack)
def _generate_graph(self, stacks):
"""
Generates the graph for the StackGraph object.
:param stacks: A set of Stacks
:type stacks: set
"""
for stack in stacks:
self._generate_edges(stack, stack.dependencies)
self.graph.remove_edges_from(nx.selfloop_edges(self.graph))
def _generate_edges(self, stack, dependencies):
"""
        Adds edges to the graph based on a list of dependencies that are
        generated from the initial stack config. Each path in the
        dependencies list is a dependency that the initial Stack config
        depends on.
:param stack: A Sceptre Stack
:type stack: sceptre.stack.Stack
:param dependencies: a collection of dependency paths
:type dependencies: list
"""
self.logger.debug(
"Generate dependencies for stack {0}".format(stack)
)
for dependency in dependencies:
self.graph.add_edge(dependency, stack)
if not nx.is_directed_acyclic_graph(self.graph):
raise CircularDependenciesError(
"Dependency cycle detected: {} {}".format(stack,
dependency))
self.logger.debug(" Added dependency: {}".format(dependency))
if not dependencies:
self.graph.add_node(stack)
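# Illustrative sketch, not part of the original module: building a StackGraph
# from minimal fake stacks (only a ``dependencies`` attribute is required).
# Requires networkx to be installed, as does the class above.
if __name__ == "__main__":  # pragma: no cover
    class FakeStack(object):
        def __init__(self, name, dependencies=()):
            self.name = name
            self.dependencies = list(dependencies)

        def __repr__(self):
            return self.name

    vpc = FakeStack("vpc")
    subnets = FakeStack("subnets", [vpc])
    app = FakeStack("app", [subnets])

    graph = StackGraph({vpc, subnets, app})
    print(graph)                           # e.g. {vpc: [subnets], subnets: [app], app: []}
    print(graph.count_dependencies(app))   # 1 -> app depends on subnets
    print(graph.count_dependencies(vpc))   # 0 -> vpc can be launched first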
| /sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/config/graph.py | 0.875121 | 0.458106 | graph.py | pypi |
import collections
import datetime
import fnmatch
import logging
from os import environ, path, walk
from pkg_resources import iter_entry_points
import yaml
import jinja2
from packaging.specifiers import SpecifierSet
from packaging.version import Version
from sceptre import __version__
from sceptre.exceptions import InvalidConfigFileError
from sceptre.exceptions import InvalidSceptreDirectoryError
from sceptre.exceptions import VersionIncompatibleError
from sceptre.exceptions import ConfigFileNotFoundError
from sceptre.helpers import sceptreise_path
from sceptre.stack import Stack
from sceptre.config import strategies
ConfigAttributes = collections.namedtuple("Attributes", "required optional")
CONFIG_MERGE_STRATEGIES = {
'dependencies': strategies.list_join,
'hooks': strategies.child_wins,
'notifications': strategies.child_wins,
'on_failure': strategies.child_wins,
'parameters': strategies.child_wins,
'profile': strategies.child_wins,
'project_code': strategies.child_wins,
'protect': strategies.child_wins,
'region': strategies.child_wins,
'required_version': strategies.child_wins,
'role_arn': strategies.child_wins,
'sceptre_user_data': strategies.child_wins,
'stack_name': strategies.child_wins,
'stack_tags': strategies.child_wins,
'stack_timeout': strategies.child_wins,
'template_bucket_name': strategies.child_wins,
'template_key_value': strategies.child_wins,
'template_path': strategies.child_wins
}
STACK_GROUP_CONFIG_ATTRIBUTES = ConfigAttributes(
{
"project_code",
"region"
},
{
"template_bucket_name",
"template_key_prefix",
"required_version"
}
)
STACK_CONFIG_ATTRIBUTES = ConfigAttributes(
{
"template_path"
},
{
"dependencies",
"hooks",
"notifications",
"on_failure",
"parameters",
"profile",
"protect",
"role_arn",
"sceptre_user_data",
"stack_name",
"stack_tags",
"stack_timeout"
}
)
INTERNAL_CONFIG_ATTRIBUTES = ConfigAttributes(
{
"project_path",
"stack_group_path",
},
{
}
)
REQUIRED_KEYS = STACK_GROUP_CONFIG_ATTRIBUTES.required.union(
STACK_CONFIG_ATTRIBUTES.required
)
class ConfigReader(object):
"""
Parses YAML configuration files and produces Stack objects.
Responsible for loading Resolvers and Hook classes and adding them as
constructors to the PyYAML parser.
:param context: A SceptreContext.
    :type context: sceptre.context.SceptreContext
"""
def __init__(self, context):
self.logger = logging.getLogger(__name__)
self.context = context
self.full_config_path = self.context.full_config_path()
if not self.context.user_variables:
self.context.user_variables = {}
self.templating_vars = {"var": self.context.user_variables}
        # Check this is a valid sceptre project folder
self._check_valid_project_path(self.full_config_path)
# Load plugins
self._hooks = self._load_entry_points('sceptre.hooks')
self._resolvers = self._load_entry_points('sceptre.resolvers')
# Add Resolver and Hook classes to PyYAML loader
self._add_yaml_constructors([self._hooks, self._resolvers])
def _load_entry_points(self, entry_point_name):
points = {}
for entry_point in iter_entry_points(entry_point_name):
points[entry_point.name] = entry_point.load()
return points
def _add_yaml_constructors(self, entry_points):
"""
        Adds PyYAML constructor functions for all classes registered at
        the given entry point groups. Each class is registered under a node
        tag equal to its entry point name.
:param entry_points: Names of entry point groups.
:type entry_points: list
"""
self.logger.debug(
"Adding yaml constructors for the entry point groups {0}".format(
entry_points
)
)
def constructor_factory(node_class):
"""
Returns constructor that will initialise objects from a
given node class.
:param node_class: Class representing the node.
:type node_class: class
:returns: Class initialiser.
:rtype: func
"""
            # This function signature is required by PyYAML
def class_constructor(loader, node):
return node_class(
loader.construct_scalar(node)
) # pragma: no cover
return class_constructor
for entry_point in entry_points:
for entry_point_name in entry_point:
node_tag = u'!' + entry_point_name
node_class = entry_point.get(entry_point_name)
node_class.context = self.context
yaml.SafeLoader.add_constructor(
node_tag, constructor_factory(node_class)
)
self.logger.debug(
"Added constructor for %s with node tag %s",
str(node_class), node_tag
)
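    # Illustrative only (the resolver name below is an example, it is not
    # defined in this module): if an entry point named "stack_output" is
    # installed, the loop above registers a "!stack_output" tag, so a stack
    # config line such as
    #   VpcId: !stack_output network/vpc.yaml::VpcId
    # is loaded by yaml.safe_load as StackOutput("network/vpc.yaml::VpcId").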
def construct_stacks(self):
"""
Traverses the files under the command path.
For each file encountered, a Stack is constructed
using the correct config. Dependencies are traversed
and a final set of Stacks is returned.
:returns: A set of Stacks.
:rtype: set
"""
stack_map = {}
command_stacks = set()
if self.context.ignore_dependencies:
root = self.context.full_command_path()
else:
root = self.context.full_config_path()
if path.isfile(root):
todo = {root}
else:
todo = set()
for directory_name, sub_directories, files in walk(root, followlinks=True):
for filename in fnmatch.filter(files, '*.yaml'):
if filename.startswith('config.'):
continue
todo.add(path.join(directory_name, filename))
stack_group_configs = {}
while todo:
abs_path = todo.pop()
rel_path = path.relpath(
abs_path, start=self.context.full_config_path())
directory, filename = path.split(rel_path)
if directory in stack_group_configs:
stack_group_config = stack_group_configs[directory]
else:
stack_group_config = stack_group_configs[directory] = \
self.read(path.join(directory, self.context.config_file))
stack = self._construct_stack(rel_path, stack_group_config)
stack_map[sceptreise_path(rel_path)] = stack
if abs_path.startswith(self.context.full_command_path()):
command_stacks.add(stack)
stacks = set()
for stack in stack_map.values():
if not self.context.ignore_dependencies:
stack.dependencies = [
stack_map[sceptreise_path(dep)]
for dep in stack.dependencies
]
else:
stack.dependencies = []
stacks.add(stack)
return stacks, command_stacks
def read(self, rel_path, base_config=None):
"""
Reads in configuration from one or more YAML files
within the Sceptre project folder.
:param rel_path: Relative path to config to read.
:type rel_path: str
:param base_config: Base config to provide defaults.
:type base_config: dict
:returns: Config read from config files.
:rtype: dict
"""
self.logger.debug("Reading in '%s' files...", rel_path)
directory_path, filename = path.split(rel_path)
abs_path = path.join(self.full_config_path, rel_path)
# Adding properties from class
config = {
"project_path": self.context.project_path,
"stack_group_path": directory_path
}
# Adding defaults from base config.
if base_config:
config.update(base_config)
        # Check if file exists, but ignore config.yaml as it can be inherited.
if not path.isfile(abs_path)\
and not filename.endswith(self.context.config_file):
raise ConfigFileNotFoundError(
"Config file \"{0}\" not found.".format(rel_path)
)
# Parse and read in the config files.
this_config = self._recursive_read(directory_path, filename, config)
if "dependencies" in config or "dependencies" in this_config:
this_config['dependencies'] = \
CONFIG_MERGE_STRATEGIES['dependencies'](
this_config.get("dependencies"),
config.get("dependencies")
)
config.update(this_config)
self._check_version(config)
self.logger.debug("Config: %s", config)
return config
def _recursive_read(self, directory_path, filename, stack_group_config):
"""
Traverses the directory_path, from top to bottom, reading in all
relevant config files. If config attributes are encountered further
down the StackGroup they are merged with the parent as defined in the
`CONFIG_MERGE_STRATEGIES` dict.
:param directory_path: Relative directory path to config to read.
:type directory_path: str
:param filename: File name for the config to read.
        :type filename: str
:param stack_group_config: The loaded config file for the StackGroup
:type stack_group_config: dict
:returns: Representation of inherited config.
:rtype: dict
"""
parent_directory = path.split(directory_path)[0]
# Base condition for recursion
config = {}
if directory_path:
config = self._recursive_read(parent_directory, filename, stack_group_config)
# Read config file and overwrite inherited properties
child_config = self._render(directory_path, filename, stack_group_config) or {}
for config_key, strategy in CONFIG_MERGE_STRATEGIES.items():
value = strategy(
config.get(config_key), child_config.get(config_key)
)
if value:
child_config[config_key] = value
config.update(child_config)
return config
def _render(self, directory_path, basename, stack_group_config):
"""
Reads a configuration file, loads the config file as a template
and returns config loaded from the file.
:param directory_path: Relative directory path to config to read.
:type directory_path: str
:param basename: The filename of the config file
:type basename: str
:param stack_group_config: The loaded config file for the StackGroup
:type stack_group_config: dict
:returns: rendered template of config file.
:rtype: dict
"""
config = {}
abs_directory_path = path.join(self.full_config_path, directory_path)
if path.isfile(path.join(abs_directory_path, basename)):
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(abs_directory_path),
undefined=jinja2.StrictUndefined
)
template = jinja_env.get_template(basename)
self.templating_vars.update(stack_group_config)
rendered_template = template.render(
self.templating_vars,
command_path=self.context.command_path.split(path.sep),
environment_variable=environ
)
config = yaml.safe_load(rendered_template)
return config
@staticmethod
def _check_valid_project_path(config_path):
"""
        Raises an InvalidSceptreDirectoryError if ``config_path`` is not a
        directory.
        :param config_path: A config directory path.
        :type config_path: str
:raises: sceptre.exceptions.InvalidSceptreDirectoryError
"""
if not path.isdir(config_path):
raise InvalidSceptreDirectoryError(
"Check '{0}' exists.".format(config_path)
)
def _check_version(self, config):
"""
        Raises a VersionIncompatibleError when the current Sceptre version
        does not comply with the configured version requirement.
        :raises: sceptre.exceptions.VersionIncompatibleError
"""
sceptre_version = __version__
if 'required_version' in config:
required_version = config['required_version']
if Version(sceptre_version) not in SpecifierSet(required_version, True):
raise VersionIncompatibleError(
"Current sceptre version ({0}) does not meet version "
"requirements: {1}".format(
sceptre_version, required_version
)
)
@staticmethod
def _collect_s3_details(stack_name, config):
"""
Collects and constructs details for where to store the Template in S3.
:param stack_name: Stack name.
:type stack_name: str
:param config: Config with details.
:type config: dict
:returns: S3 details.
:rtype: dict
"""
s3_details = None
if "template_bucket_name" in config:
template_key = "/".join([
sceptreise_path(stack_name), "{time_stamp}.json".format(
time_stamp=datetime.datetime.utcnow().strftime(
"%Y-%m-%d-%H-%M-%S-%fZ"
)
)
])
bucket_region = config.get("region", None)
if "template_key_prefix" in config:
prefix = config["template_key_prefix"]
template_key = "/".join([prefix.strip("/"), template_key])
s3_details = {
"bucket_name": config["template_bucket_name"],
"bucket_key": template_key,
"bucket_region": bucket_region
}
return s3_details
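    # Illustrative only: for stack_name "prod/vpc" and a config containing
    # template_bucket_name "my-bucket", template_key_prefix "infra" and
    # region "eu-west-1", this returns roughly
    #   {"bucket_name": "my-bucket",
    #    "bucket_key": "infra/prod/vpc/<timestamp>.json",
    #    "bucket_region": "eu-west-1"}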
def _construct_stack(self, rel_path, stack_group_config=None):
"""
Constructs an individual Stack object from a config path and a
base config.
:param rel_path: A relative config file path.
:type rel_path: str
:param stack_group_config: The Stack group config to use as defaults.
:type stack_group_config: dict
:returns: Stack object
:rtype: sceptre.stack.Stack
"""
directory, filename = path.split(rel_path)
if filename == self.context.config_file:
pass
self.templating_vars["stack_group_config"] = stack_group_config
parsed_stack_group_config = self._parsed_stack_group_config(stack_group_config)
config = self.read(rel_path, stack_group_config)
stack_name = path.splitext(rel_path)[0]
# Check for missing mandatory attributes
for required_key in REQUIRED_KEYS:
if required_key not in config:
raise InvalidConfigFileError(
"Required attribute '{0}' not found in configuration of '{1}'.".format(
required_key, stack_name
)
)
abs_template_path = path.join(
self.context.project_path, self.context.templates_path,
sceptreise_path(config["template_path"])
)
s3_details = self._collect_s3_details(
stack_name, config
)
stack = Stack(
name=stack_name,
project_code=config["project_code"],
template_path=abs_template_path,
region=config["region"],
template_bucket_name=config.get("template_bucket_name"),
template_key_prefix=config.get("template_key_prefix"),
required_version=config.get("required_version"),
profile=config.get("profile"),
parameters=config.get("parameters", {}),
sceptre_user_data=config.get("sceptre_user_data", {}),
hooks=config.get("hooks", {}),
s3_details=s3_details,
dependencies=config.get("dependencies", []),
role_arn=config.get("role_arn"),
protected=config.get("protect", False),
tags=config.get("stack_tags", {}),
external_name=config.get("stack_name"),
notifications=config.get("notifications"),
on_failure=config.get("on_failure"),
stack_timeout=config.get("stack_timeout", 0),
stack_group_config=parsed_stack_group_config
)
del self.templating_vars["stack_group_config"]
return stack
def _parsed_stack_group_config(self, stack_group_config):
"""
        Removes all config items that are managed by Sceptre, along with the
        `project_path` and `stack_group_path` keys added by `read()`.
        Returns a dictionary containing only user-specified config items.
"""
parsed_config = {
key: stack_group_config[key]
for key in
set(stack_group_config) - set(CONFIG_MERGE_STRATEGIES)
}
parsed_config.pop("project_path")
parsed_config.pop("stack_group_path")
return parsed_config
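    # Illustrative only: for a stack group config such as
    #   {"project_code": "app", "region": "eu-west-1", "custom_tag": "blue",
    #    "project_path": "/project", "stack_group_path": "prod"}
    # this returns {"custom_tag": "blue"}: Sceptre-managed keys and the two
    # paths added by ``read()`` are stripped, leaving only user-defined items.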
| /sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/config/reader.py | 0.573678 | 0.178454 | reader.py | pypi |
import abc
import logging
from contextlib import contextmanager
import six
from sceptre.context import SceptreContext
from sceptre.helpers import _call_func_on_values
class RecursiveGet(Exception):
pass
class ResolverData(object):
def __init__(self, context):
if isinstance(context, SceptreContext):
self.context = context
@six.add_metaclass(abc.ABCMeta)
class Resolver(ResolverData):
"""
Resolver is an abstract base class that should be inherited by all
Resolvers.
:param argument: The argument of the resolver.
:type argument: str
:param stack: The associated stack of the resolver.
:type stack: sceptre.stack.Stack
"""
__metaclass__ = abc.ABCMeta
def __init__(self, argument=None, stack=None):
self.logger = logging.getLogger(__name__)
self.argument = argument
self.stack = stack
def setup(self):
"""
        This method is called during stack initialisation.
Implementation of this method in subclasses can be used to do any
initial setup of the object.
"""
pass # pragma: no cover
@abc.abstractmethod
def resolve(self):
"""
An abstract method which must be overwritten by all inheriting classes.
This method is called to retrieve the final desired value.
Implementation of this method in subclasses must return a suitable
object or primitive type.
"""
pass # pragma: no cover
class ResolvableProperty(object):
"""
This is a descriptor class used to store an attribute that may contain
    Resolver objects. When retrieving the dictionary or list, any Resolver
    objects contained as a value or within a list are resolved to a primitive
    type. Supports nested dictionaries and lists.
:param name: Attribute suffix used to store the property in the instance.
:type name: str
"""
def __init__(self, name):
self.name = "_" + name
self.logger = logging.getLogger(__name__)
self._get_in_progress = False
def __get__(self, instance, type):
"""
Attribute getter which resolves any Resolver object contained in the
complex data structure.
:return: The attribute stored with the suffix ``name`` in the instance.
:rtype: dict or list
"""
with self._no_recursive_get():
def resolve(attr, key, value):
try:
attr[key] = value.resolve()
except RecursiveGet:
attr[key] = self.ResolveLater(instance, self.name, key,
lambda: value.resolve())
if hasattr(instance, self.name):
retval = _call_func_on_values(
resolve, getattr(instance, self.name), Resolver
)
return retval
def __set__(self, instance, value):
"""
Attribute setter which adds a stack reference to any resolvers in the
data structure `value` and calls the setup method.
"""
def setup(attr, key, value):
value.stack = instance
value.setup()
_call_func_on_values(setup, value, Resolver)
setattr(instance, self.name, value)
class ResolveLater(object):
"""
Represents a value that could not yet be resolved but can be
resolved in the future.
"""
def __init__(self, instance, name, key, resolution_function):
self._instance = instance
self._name = name
self._key = key
self._resolution_function = resolution_function
def __call__(self):
"""Resolve the value."""
attr = getattr(self._instance, self._name)
attr[self._key] = self._resolution_function()
@contextmanager
def _no_recursive_get(self):
if self._get_in_progress:
raise RecursiveGet()
self._get_in_progress = True
try:
yield
finally:
self._get_in_progress = False
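# Illustrative sketch, not part of the original module: a trivial Resolver and
# a fake stack class showing how ResolvableProperty resolves values on access.
# It relies on sceptre.helpers._call_func_on_values walking dicts/lists as the
# descriptor above assumes.
if __name__ == "__main__":  # pragma: no cover
    class StaticResolver(Resolver):
        def resolve(self):
            return "resolved:{}".format(self.argument)

    class FakeStack(object):
        parameters = ResolvableProperty("parameters")

    stack = FakeStack()
    stack.parameters = {"VpcId": StaticResolver("vpc-123")}
    print(stack.parameters)  # {'VpcId': 'resolved:vpc-123'}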
| /sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/resolvers/__init__.py | 0.781414 | 0.200949 | __init__.py | pypi |
from os import path, walk
from sceptre.exceptions import ConfigFileNotFoundError
from sceptre.config.graph import StackGraph
from sceptre.config.reader import ConfigReader
from sceptre.plan.executor import SceptrePlanExecutor
from sceptre.helpers import sceptreise_path
class SceptrePlan(object):
def __init__(self, context):
"""
        Initialises a SceptrePlan and generates the Stacks, StackGraph and
        launch order required.
        :param context: A SceptreContext
        :type context: sceptre.context.SceptreContext
"""
self.context = context
self.command = None
self.reverse = None
self.launch_order = None
config_reader = ConfigReader(context)
all_stacks, command_stacks = config_reader.construct_stacks()
self.graph = StackGraph(all_stacks)
self.command_stacks = command_stacks
def _execute(self, *args):
executor = SceptrePlanExecutor(self.command, self.launch_order)
return executor.execute(*args)
def _generate_launch_order(self, reverse=False):
if self.context.ignore_dependencies:
return [self.command_stacks]
graph = self.graph.filtered(self.command_stacks, reverse)
launch_order = []
while graph.graph:
batch = set()
for stack in graph:
if graph.count_dependencies(stack) == 0:
batch.add(stack)
launch_order.append(batch)
for stack in batch:
graph.remove_stack(stack)
if not launch_order:
raise ConfigFileNotFoundError(
"No stacks detected from the given path '{}'. Valid stack paths are: {}"
.format(sceptreise_path(self.context.command_path), self._valid_stack_paths())
)
return launch_order
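    # Illustrative only: if app depends on subnets and subnets depends on vpc,
    # the returned launch order is [{vpc}, {subnets}, {app}]. Stacks with no
    # remaining dependencies share a batch and can be actioned concurrently by
    # the executor; each batch is removed from the graph before the next one
    # is computed.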
def resolve(self, command, reverse=False):
if command == self.command and reverse == self.reverse:
return
self.command = command
self.reverse = reverse
self.launch_order = self._generate_launch_order(reverse)
def template(self, *args):
"""
Returns the CloudFormation Template used to create the Stack.
:returns: A dictionary of Stacks and their templates.
:rtype: dict
"""
self.resolve(command=self.template.__name__)
return self._execute(*args)
def create(self, *args):
"""
Creates the Stack.
:returns: A dictionary of Stacks and their status.
:rtype: dict
"""
self.resolve(command=self.create.__name__)
return self._execute(*args)
def update(self, *args):
"""
Updates the Stack.
:returns: A dictionary of Stacks and their status.
:rtype: dict
"""
self.resolve(command=self.update.__name__)
return self._execute(*args)
def cancel_stack_update(self, *args):
"""
Cancels a Stack update.
:returns: A dictionary of Stacks and their cancelled statuses.
:rtype: dict
"""
self.resolve(command=self.cancel_stack_update.__name__)
return self._execute(*args)
def launch(self, *args):
"""
Launches the Stack.
If the Stack status is create_failed or rollback_complete, the
Stack is deleted. Launch then tries to create or update the Stack,
depending if it already exists. If there are no updates to be
performed, launch exits gracefully.
:returns: A dictionary of Stacks and their status.
:rtype: dict
"""
self.resolve(command=self.launch.__name__)
return self._execute(*args)
def delete(self, *args):
"""
Deletes the Stack.
:returns: A dictionary of Stacks and their status.
:rtype: dict
"""
self.resolve(command=self.delete.__name__, reverse=True)
return self._execute(*args)
def lock(self, *args):
"""
Locks the Stack by applying a deny all updates Stack policy.
:returns: A dictionary of Stacks
:rtype: dict
"""
self.resolve(command=self.lock.__name__)
return self._execute(*args)
def unlock(self, *args):
"""
Unlocks the Stack by applying an allow all updates Stack policy.
:returns: A dictionary of Stacks
:rtype: dict
"""
self.resolve(command=self.unlock.__name__)
return self._execute(*args)
def describe(self, *args):
"""
        Returns a description of the Stack.
:returns: A dictionary of Stacks and their description.
:rtype: dict
"""
self.resolve(command=self.describe.__name__)
return self._execute(*args)
def describe_events(self, *args):
"""
        Returns a dictionary containing the Stack events.
:returns: A dictionary of Stacks and their CloudFormation events.
:rtype: dict
"""
self.resolve(command=self.describe_events.__name__)
return self._execute(*args)
def describe_resources(self, *args):
"""
Returns the logical and physical resource IDs of the Stack's resources.
:returns: A dictionary of Stacks and their resources.
:rtype: dict
"""
self.resolve(command=self.describe_resources.__name__)
return self._execute(*args)
def describe_outputs(self, *args):
"""
Returns a list of Stack outputs.
:returns: A dictionary of Stacks and their outputs.
:rtype: dict
"""
self.resolve(command=self.describe_outputs.__name__)
return self._execute(*args)
def continue_update_rollback(self, *args):
"""
Rolls back a Stack in the UPDATE_ROLLBACK_FAILED state to
UPDATE_ROLLBACK_COMPLETE.
:returns: A dictionary of Stacks
:rtype: dict
"""
self.resolve(command=self.continue_update_rollback.__name__)
return self._execute(*args)
def set_policy(self, *args):
"""
Applies a Stack policy.
        :param policy_path: The path of a JSON file containing an AWS policy.
:type policy_path: str
:returns: A dictionary of Stacks
:rtype: dict
"""
self.resolve(command=self.set_policy.__name__)
return self._execute(*args)
def get_policy(self, *args):
"""
Returns a Stack's policy.
:returns: A dictionary of Stacks and their Stack policy.
:rtype: dict
"""
self.resolve(command=self.get_policy.__name__)
return self._execute(*args)
def create_change_set(self, *args):
"""
Creates a Change Set with the name ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: A dictionary of Stacks
:rtype: dict
"""
self.resolve(command=self.create_change_set.__name__)
return self._execute(*args)
def delete_change_set(self, *args):
"""
Deletes the Change Set ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: A dictionary of Stacks
:rtype: dict
"""
self.resolve(command=self.delete_change_set.__name__)
return self._execute(*args)
def describe_change_set(self, *args):
"""
Describes the Change Set ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: A dictionary of Stacks and their Change Set description.
:rtype: dict
"""
self.resolve(command=self.describe_change_set.__name__)
return self._execute(*args)
def execute_change_set(self, *args):
"""
Executes the Change Set ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: A dictionary of Stacks and their status.
:rtype: dict
"""
self.resolve(command=self.execute_change_set.__name__)
return self._execute(*args)
def list_change_sets(self, *args):
"""
Lists the Stack's Change Sets.
        :returns: A dictionary of Stacks and their Change Sets.
:rtype: dict
"""
self.resolve(command=self.list_change_sets.__name__)
return self._execute(*args)
def get_status(self, *args):
"""
Returns the Stack's status.
:returns: A dictionary of Stacks and their status.
:rtype: dict
:raises: sceptre.exceptions.StackDoesNotExistError
"""
self.resolve(command=self.get_status.__name__)
return self._execute(*args)
def wait_for_cs_completion(self, *args):
"""
Waits while the Stack Change Set status is "pending".
:param change_set_name: The name of the Change Set.
:type change_set_name: str
        :returns: A dictionary of Stacks and their Change Set statuses.
        :rtype: dict
"""
self.resolve(command=self.wait_for_cs_completion.__name__)
return self._execute(*args)
def validate(self, *args):
"""
Validates the Stack's CloudFormation template.
Raises an error if the Template is invalid.
:returns: A dictionary of Stacks and their template validation information.
:rtype: dict
:raises: botocore.exceptions.ClientError
"""
self.resolve(command=self.validate.__name__)
return self._execute(*args)
def estimate_cost(self, *args):
"""
Estimates a Stack's cost.
:returns: A dictionary of Stacks and their estimated costs.
:rtype: dict
:raises: botocore.exceptions.ClientError
"""
self.resolve(command=self.estimate_cost.__name__)
return self._execute(*args)
def generate(self, *args):
"""
Returns a generated Template for a given Stack
:returns: A dictionary of Stacks and their template body.
:rtype: dict
"""
self.resolve(command=self.generate.__name__)
return self._execute(*args)
def _valid_stack_paths(self):
return [
sceptreise_path(path.relpath(path.join(dirpath, f), self.context.config_path))
for dirpath, dirnames, files in walk(self.context.config_path)
for f in files
if not f.endswith(self.context.config_file)
]
| /sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/plan/plan.py | 0.77907 | 0.270334 | plan.py | pypi |
import logging
import time
from os import path
from datetime import datetime, timedelta
import botocore
import json
from dateutil.tz import tzutc
from sceptre.connection_manager import ConnectionManager
from sceptre.hooks import add_stack_hooks
from sceptre.stack_status import StackStatus
from sceptre.stack_status import StackChangeSetStatus
from sceptre.exceptions import CannotUpdateFailedStackError
from sceptre.exceptions import UnknownStackStatusError
from sceptre.exceptions import UnknownStackChangeSetStatusError
from sceptre.exceptions import StackDoesNotExistError
from sceptre.exceptions import ProtectedStackError
class StackActions(object):
"""
StackActions stores the operations a Stack can take, such as creating or
deleting the Stack.
:param stack: A Stack object
:type stack: sceptre.stack.Stack
"""
def __init__(self, stack):
self.stack = stack
self.name = self.stack.name
self.logger = logging.getLogger(__name__)
self.connection_manager = ConnectionManager(
self.stack.region, self.stack.profile, self.stack.external_name
)
@add_stack_hooks
def create(self):
"""
Creates a Stack.
:returns: The Stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Creating Stack", self.stack.name)
create_stack_kwargs = {
"StackName": self.stack.external_name,
"Parameters": self._format_parameters(self.stack.parameters),
"Capabilities": ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND'],
"NotificationARNs": self.stack.notifications,
"Tags": [
{"Key": str(k), "Value": str(v)}
for k, v in self.stack.tags.items()
]
}
if self.stack.on_failure:
create_stack_kwargs.update({"OnFailure": self.stack.on_failure})
create_stack_kwargs.update(
self.stack.template.get_boto_call_parameter())
create_stack_kwargs.update(self._get_role_arn())
create_stack_kwargs.update(self._get_stack_timeout())
try:
response = self.connection_manager.call(
service="cloudformation",
command="create_stack",
kwargs=create_stack_kwargs
)
self.logger.debug(
"%s - Create stack response: %s", self.stack.name, response
)
status = self._wait_for_completion()
except botocore.exceptions.ClientError as exp:
if exp.response["Error"]["Code"] == "AlreadyExistsException":
self.logger.info(
"%s - Stack already exists", self.stack.name
)
status = "COMPLETE"
else:
raise
return status
@add_stack_hooks
def update(self):
"""
Updates the Stack.
:returns: The Stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Updating Stack", self.stack.name)
try:
update_stack_kwargs = {
"StackName": self.stack.external_name,
"Parameters": self._format_parameters(self.stack.parameters),
"Capabilities": [
'CAPABILITY_IAM',
'CAPABILITY_NAMED_IAM',
'CAPABILITY_AUTO_EXPAND'
],
"NotificationARNs": self.stack.notifications,
"Tags": [
{"Key": str(k), "Value": str(v)}
for k, v in self.stack.tags.items()
]
}
update_stack_kwargs.update(
self.stack.template.get_boto_call_parameter())
update_stack_kwargs.update(self._get_role_arn())
response = self.connection_manager.call(
service="cloudformation",
command="update_stack",
kwargs=update_stack_kwargs
)
status = self._wait_for_completion(self.stack.stack_timeout)
self.logger.debug(
"%s - Update Stack response: %s", self.stack.name, response
)
# Cancel update after timeout
if status == StackStatus.IN_PROGRESS:
status = self.cancel_stack_update()
return status
except botocore.exceptions.ClientError as exp:
error_message = exp.response["Error"]["Message"]
if error_message == "No updates are to be performed.":
self.logger.info(
"%s - No updates to perform.", self.stack.name
)
return StackStatus.COMPLETE
else:
raise
def cancel_stack_update(self):
"""
Cancels a Stack update.
:returns: The cancelled Stack status.
:rtype: sceptre.stack_status.StackStatus
"""
self.logger.warning(
"%s - Update Stack time exceeded the specified timeout",
self.stack.name
)
response = self.connection_manager.call(
service="cloudformation",
command="cancel_update_stack",
kwargs={"StackName": self.stack.external_name}
)
self.logger.debug(
"%s - Cancel update Stack response: %s", self.stack.name, response
)
return self._wait_for_completion()
def launch(self):
"""
Launches the Stack.
If the Stack status is create_failed or rollback_complete, the
Stack is deleted. Launch then tries to create or update the Stack,
depending if it already exists. If there are no updates to be
performed, launch exits gracefully.
:returns: The Stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Launching Stack", self.stack.name)
try:
existing_status = self._get_status()
except StackDoesNotExistError:
existing_status = "PENDING"
self.logger.info(
"%s - Stack is in the %s state", self.stack.name, existing_status
)
if existing_status == "PENDING":
status = self.create()
elif existing_status in ["CREATE_FAILED", "ROLLBACK_COMPLETE"]:
self.delete()
status = self.create()
elif existing_status.endswith("COMPLETE"):
status = self.update()
elif existing_status.endswith("IN_PROGRESS"):
self.logger.info(
"%s - Stack action is already in progress state and cannot "
"be updated", self.stack.name
)
status = StackStatus.IN_PROGRESS
elif existing_status.endswith("FAILED"):
status = StackStatus.FAILED
raise CannotUpdateFailedStackError(
"'{0}' is in a the state '{1}' and cannot be updated".format(
self.stack.name, existing_status
)
)
else:
raise UnknownStackStatusError(
"{0} is unknown".format(existing_status)
)
return status
@add_stack_hooks
def delete(self):
"""
Deletes the Stack.
:returns: The Stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Deleting stack", self.stack.name)
try:
status = self._get_status()
except StackDoesNotExistError:
self.logger.info("%s - Does not exist.", self.stack.name)
status = StackStatus.COMPLETE
return status
delete_stack_kwargs = {"StackName": self.stack.external_name}
delete_stack_kwargs.update(self._get_role_arn())
self.connection_manager.call(
service="cloudformation",
command="delete_stack",
kwargs=delete_stack_kwargs
)
try:
status = self._wait_for_completion()
except StackDoesNotExistError:
status = StackStatus.COMPLETE
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Message"].endswith("does not exist"):
status = StackStatus.COMPLETE
else:
raise
self.logger.info("%s - delete %s", self.stack.name, status)
return status
def lock(self):
"""
Locks the Stack by applying a deny-all updates Stack Policy.
"""
policy_path = path.join(
# need to get to the base install path. __file__ will take us into
# sceptre/actions so need to walk up the path.
path.abspath(path.join(__file__, "..", "..")),
"stack_policies/lock.json"
)
self.set_policy(policy_path)
self.logger.info("%s - Successfully locked Stack", self.stack.name)
def unlock(self):
"""
Unlocks the Stack by applying an allow-all updates Stack Policy.
"""
policy_path = path.join(
# need to get to the base install path. __file__ will take us into
# sceptre/actions so need to walk up the path.
path.abspath(path.join(__file__, "..", "..")),
"stack_policies/unlock.json"
)
self.set_policy(policy_path)
self.logger.info("%s - Successfully unlocked Stack", self.stack.name)
def describe(self):
"""
        Returns a description of the Stack.
:returns: A Stack description.
:rtype: dict
"""
try:
return self.connection_manager.call(
service="cloudformation",
command="describe_stacks",
kwargs={"StackName": self.stack.external_name}
)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"].endswith("does not exist"):
return
raise
def describe_events(self):
"""
Returns the CloudFormation events for a Stack.
:returns: CloudFormation events for a Stack.
:rtype: dict
"""
return self.connection_manager.call(
service="cloudformation",
command="describe_stack_events",
kwargs={"StackName": self.stack.external_name}
)
def describe_resources(self):
"""
Returns the logical and physical resource IDs of the Stack's resources.
:returns: Information about the Stack's resources.
:rtype: dict
"""
self.logger.debug("%s - Describing stack resources", self.stack.name)
try:
response = self.connection_manager.call(
service="cloudformation",
command="describe_stack_resources",
kwargs={"StackName": self.stack.external_name}
)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"].endswith("does not exist"):
return {self.stack.name: []}
raise
self.logger.debug(
"%s - Describe Stack resource response: %s",
self.stack.name,
response
)
desired_properties = ["LogicalResourceId", "PhysicalResourceId"]
formatted_response = {self.stack.name: [
{k: v for k, v in item.items() if k in desired_properties}
for item in response["StackResources"]
]}
return formatted_response
def describe_outputs(self):
"""
Returns the Stack's outputs.
:returns: The Stack's outputs.
:rtype: list
"""
self.logger.debug("%s - Describing stack outputs", self.stack.name)
try:
response = self._describe()
except botocore.exceptions.ClientError:
return []
return {self.stack.name: response["Stacks"][0].get("Outputs", [])}
def continue_update_rollback(self):
"""
Rolls back a Stack in the UPDATE_ROLLBACK_FAILED state to
UPDATE_ROLLBACK_COMPLETE.
"""
self.logger.debug("%s - Continuing update rollback", self.stack.name)
continue_update_rollback_kwargs = {
"StackName": self.stack.external_name
}
continue_update_rollback_kwargs.update(self._get_role_arn())
self.connection_manager.call(
service="cloudformation",
command="continue_update_rollback",
kwargs=continue_update_rollback_kwargs
)
self.logger.info(
"%s - Successfully initiated continuation of update rollback",
self.stack.name
)
def set_policy(self, policy_path):
"""
Applies a Stack Policy.
:param policy_path: The relative path of JSON file containing\
the AWS Policy to apply.
:type policy_path: str
"""
with open(policy_path) as f:
policy = f.read()
self.logger.debug(
"%s - Setting Stack policy: \n%s",
self.stack.name,
policy
)
self.connection_manager.call(
service="cloudformation",
command="set_stack_policy",
kwargs={
"StackName": self.stack.external_name,
"StackPolicyBody": policy
}
)
self.logger.info("%s - Successfully set Stack Policy", self.stack.name)
def get_policy(self):
"""
Returns a Stack's Policy.
:returns: The Stack's Stack Policy.
:rtype: str
"""
self.logger.debug("%s - Getting Stack Policy", self.stack.name)
response = self.connection_manager.call(
service="cloudformation",
command="get_stack_policy",
kwargs={
"StackName": self.stack.external_name
}
)
json_formatting = json.loads(response.get(
"StackPolicyBody", json.dumps("No Policy Information")))
return {self.stack.name: json_formatting}
def create_change_set(self, change_set_name):
"""
Creates a Change Set with the name ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
"""
create_change_set_kwargs = {
"StackName": self.stack.external_name,
"Parameters": self._format_parameters(self.stack.parameters),
"Capabilities": ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND'],
"ChangeSetName": change_set_name,
"NotificationARNs": self.stack.notifications,
"Tags": [
{"Key": str(k), "Value": str(v)}
for k, v in self.stack.tags.items()
]
}
create_change_set_kwargs.update(
self.stack.template.get_boto_call_parameter()
)
create_change_set_kwargs.update(self._get_role_arn())
self.logger.debug(
"%s - Creating Change Set '%s'", self.stack.name, change_set_name
)
self.connection_manager.call(
service="cloudformation",
command="create_change_set",
kwargs=create_change_set_kwargs
)
# After the call successfully completes, AWS CloudFormation
# starts creating the Change Set.
self.logger.info(
"%s - Successfully initiated creation of Change Set '%s'",
self.stack.name, change_set_name
)
def delete_change_set(self, change_set_name):
"""
Deletes the Change Set ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
"""
self.logger.debug(
"%s - Deleting Change Set '%s'", self.stack.name, change_set_name
)
self.connection_manager.call(
service="cloudformation",
command="delete_change_set",
kwargs={
"ChangeSetName": change_set_name,
"StackName": self.stack.external_name
}
)
# If the call successfully completes, AWS CloudFormation
# successfully deleted the Change Set.
self.logger.info(
"%s - Successfully deleted Change Set '%s'",
self.stack.name, change_set_name
)
def describe_change_set(self, change_set_name):
"""
Describes the Change Set ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: The description of the Change Set.
:rtype: dict
"""
self.logger.debug(
"%s - Describing Change Set '%s'", self.stack.name, change_set_name
)
return self.connection_manager.call(
service="cloudformation",
command="describe_change_set",
kwargs={
"ChangeSetName": change_set_name,
"StackName": self.stack.external_name
}
)
def execute_change_set(self, change_set_name):
"""
Executes the Change Set ``change_set_name``.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: The Stack status
:rtype: str
"""
self._protect_execution()
self.logger.debug(
"%s - Executing Change Set '%s'", self.stack.name, change_set_name
)
self.connection_manager.call(
service="cloudformation",
command="execute_change_set",
kwargs={
"ChangeSetName": change_set_name,
"StackName": self.stack.external_name
}
)
status = self._wait_for_completion()
return status
def list_change_sets(self):
"""
Lists the Stack's Change Sets.
:returns: The Stack's Change Sets.
:rtype: dict or list
"""
self.logger.debug("%s - Listing change sets", self.stack.name)
try:
response = self.connection_manager.call(
service="cloudformation",
command="list_change_sets",
kwargs={
"StackName": self.stack.external_name
}
)
return {self.stack.name: response.get("Summaries", [])}
except botocore.exceptions.ClientError:
return []
def generate(self):
"""
Returns the Template for the Stack
"""
return self.stack.template.body
def validate(self):
"""
Validates the Stack's CloudFormation Template.
Raises an error if the Template is invalid.
:returns: Validation information about the Template.
:rtype: dict
:raises: botocore.exceptions.ClientError
"""
self.logger.debug("%s - Validating Template", self.stack.name)
response = self.connection_manager.call(
service="cloudformation",
command="validate_template",
kwargs=self.stack.template.get_boto_call_parameter()
)
self.logger.debug(
"%s - Validate Template response: %s", self.stack.name, response
)
return response
def estimate_cost(self):
"""
Estimates a Stack's cost.
:returns: An estimate of the Stack's cost.
:rtype: dict
:raises: botocore.exceptions.ClientError
"""
self.logger.debug("%s - Estimating template cost", self.stack.name)
parameters = [
{'ParameterKey': key, 'ParameterValue': value}
for key, value in self.stack.parameters.items()
]
kwargs = self.stack.template.get_boto_call_parameter()
kwargs.update({'Parameters': parameters})
response = self.connection_manager.call(
service="cloudformation",
command="estimate_template_cost",
kwargs=kwargs
)
self.logger.debug(
"%s - Estimate Stack cost response: %s", self.stack.name, response
)
return response
def get_status(self):
"""
Returns the Stack's status.
:returns: The Stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
try:
return self._get_status()
except StackDoesNotExistError:
return "PENDING"
def _format_parameters(self, parameters):
"""
Converts CloudFormation parameters to the format used by Boto3.
:param parameters: A dictionary of parameters.
:type parameters: dict
:returns: A list of the formatted parameters.
:rtype: list
"""
formatted_parameters = []
for name, value in parameters.items():
if value is None:
continue
if isinstance(value, list):
value = ",".join(value)
formatted_parameters.append({
"ParameterKey": name,
"ParameterValue": value
})
return formatted_parameters
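    # Illustrative only:
    #   {"SubnetIds": ["subnet-1", "subnet-2"], "Env": "prod", "Unused": None}
    # is formatted as
    #   [{"ParameterKey": "SubnetIds", "ParameterValue": "subnet-1,subnet-2"},
    #    {"ParameterKey": "Env", "ParameterValue": "prod"}]
    # (None values are skipped and lists are joined with commas).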
def _get_role_arn(self):
"""
Returns the Role ARN assumed by CloudFormation when building a Stack.
Returns an empty dict if no Role is to be assumed.
        :returns: The Role ARN.
:rtype: dict
"""
if self.stack.role_arn:
return {
"RoleARN": self.stack.role_arn
}
else:
return {}
def _get_stack_timeout(self):
"""
Return the timeout before considering the Stack to be failing.
Returns an empty dict if no timeout is set.
:returns: the creation/update timeout
:rtype: dict
"""
if self.stack.stack_timeout:
return {
"TimeoutInMinutes": self.stack.stack_timeout
}
else:
return {}
def _protect_execution(self):
"""
Raises a ProtectedStackError if protect == True.
:raises: sceptre.exceptions.ProtectedStackError
"""
if self.stack.protected:
raise ProtectedStackError(
"Cannot perform action on '{0}': Stack protection is "
"currently enabled".format(self.stack.name)
)
def _wait_for_completion(self, timeout=0):
"""
Waits for a Stack operation to finish. Prints CloudFormation events
while it waits.
:param timeout: Timeout before returning, in minutes.
:returns: The final Stack status.
:rtype: sceptre.stack_status.StackStatus
"""
timeout = 60 * timeout
def timed_out(elapsed):
return elapsed >= timeout if timeout else False
status = StackStatus.IN_PROGRESS
self.most_recent_event_datetime = (
datetime.now(tzutc()) - timedelta(seconds=3)
)
elapsed = 0
while status == StackStatus.IN_PROGRESS and not timed_out(elapsed):
status = self._get_simplified_status(self._get_status())
self._log_new_events()
time.sleep(4)
elapsed += 4
return status
def _describe(self):
return self.connection_manager.call(
service="cloudformation",
command="describe_stacks",
kwargs={"StackName": self.stack.external_name}
)
def _get_status(self):
try:
status = self._describe()["Stacks"][0]["StackStatus"]
except botocore.exceptions.ClientError as exp:
if exp.response["Error"]["Message"].endswith("does not exist"):
raise StackDoesNotExistError(exp.response["Error"]["Message"])
else:
raise exp
return status
@staticmethod
def _get_simplified_status(status):
"""
Returns the simplified Stack Status.
The simplified Stack status is represented by the struct
``sceptre.StackStatus()`` and can take one of the following options:
* complete
* in_progress
* failed
:param status: The CloudFormation Stack status to simplify.
:type status: str
:returns: The Stack's simplified status
:rtype: sceptre.stack_status.StackStatus
"""
if status.endswith("ROLLBACK_COMPLETE"):
return StackStatus.FAILED
elif status.endswith("_COMPLETE"):
return StackStatus.COMPLETE
elif status.endswith("_IN_PROGRESS"):
return StackStatus.IN_PROGRESS
elif status.endswith("_FAILED"):
return StackStatus.FAILED
else:
raise UnknownStackStatusError(
"{0} is unknown".format(status)
)
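    # Illustrative only: "CREATE_COMPLETE" and "UPDATE_COMPLETE" map to
    # StackStatus.COMPLETE, "UPDATE_IN_PROGRESS" to StackStatus.IN_PROGRESS,
    # while "UPDATE_ROLLBACK_COMPLETE" is treated as StackStatus.FAILED because
    # the ROLLBACK_COMPLETE check runs before the generic _COMPLETE check.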
def _log_new_events(self):
"""
Log the latest Stack events while the Stack is being built.
"""
events = self.describe_events()["StackEvents"]
events.reverse()
new_events = [
event for event in events
if event["Timestamp"] > self.most_recent_event_datetime
]
for event in new_events:
self.logger.info(" ".join([
self.stack.name,
event["LogicalResourceId"],
event["ResourceType"],
event["ResourceStatus"],
event.get("ResourceStatusReason", "")
]))
self.most_recent_event_datetime = event["Timestamp"]
def wait_for_cs_completion(self, change_set_name):
"""
Waits while the Stack Change Set status is "pending".
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: The Change Set's status.
:rtype: sceptre.stack_status.StackChangeSetStatus
"""
while True:
status = self._get_cs_status(change_set_name)
if status != StackChangeSetStatus.PENDING:
break
time.sleep(2)
return status
def _get_cs_status(self, change_set_name):
"""
Returns the status of a Change Set.
:param change_set_name: The name of the Change Set.
:type change_set_name: str
:returns: The Change Set's status.
:rtype: sceptre.stack_status.StackChangeSetStatus
"""
cs_description = self.describe_change_set(change_set_name)
cs_status = cs_description["Status"]
cs_exec_status = cs_description["ExecutionStatus"]
possible_statuses = [
"CREATE_PENDING", "CREATE_IN_PROGRESS",
"CREATE_COMPLETE", "DELETE_COMPLETE", "FAILED"
]
possible_execution_statuses = [
"UNAVAILABLE", "AVAILABLE", "EXECUTE_IN_PROGRESS",
"EXECUTE_COMPLETE", "EXECUTE_FAILED", "OBSOLETE"
]
if cs_status not in possible_statuses:
raise UnknownStackChangeSetStatusError(
"Status {0} is unknown".format(cs_status)
)
if cs_exec_status not in possible_execution_statuses:
raise UnknownStackChangeSetStatusError(
"ExecutionStatus {0} is unknown".format(cs_status)
)
if (
cs_status == "CREATE_COMPLETE" and
cs_exec_status == "AVAILABLE"
):
return StackChangeSetStatus.READY
elif (
cs_status in [
"CREATE_PENDING", "CREATE_IN_PROGRESS", "CREATE_COMPLETE"
] and
cs_exec_status in ["UNAVAILABLE", "AVAILABLE"]
):
return StackChangeSetStatus.PENDING
elif (
cs_status in ["DELETE_COMPLETE", "FAILED"] or
cs_exec_status in [
"EXECUTE_IN_PROGRESS", "EXECUTE_COMPLETE",
"EXECUTE_FAILED", "OBSOLETE"
]
):
return StackChangeSetStatus.DEFUNCT
else: # pragma: no cover
raise Exception("This else should not be reachable.")
| /sceptre-core-0.0.2.tar.gz/sceptre-core-0.0.2/sceptre/plan/actions.py | 0.579876 | 0.233095 | actions.py | pypi |
import inspect
import json
from datetime import datetime
from sceptre.hooks import Hook
from sceptre.plan.actions import StackActions
from hook.constants import GIT_BRANCH_NAME, JOB_ID, AWS_REGION, AWS_ACCOUNT_ID, APP_VPC, LAST_GIT_COMMIT_MESSAGE
class CustomHook(Hook):
def __init__(self, *args, **kwargs):
super(CustomHook, self).__init__(*args, **kwargs)
self._function_name = \
f"arn:aws:lambda:{AWS_REGION}:{AWS_ACCOUNT_ID}:function:sceptre-lifecycle-provider-{APP_VPC}"
def run(self):
"""
run is the method called by Sceptre. It should carry out the work
intended by this hook.
"""
        # self.argument is either "deploy_start" or "deploy_end"
try:
stack = self._get_stack()
self.lambda_handler(self.argument, stack)
except AssertionError:
raise
except Exception as e:
print(e)
# just ignore all other errors for now
pass
def lambda_handler(self, method: str, stack) -> None:
payload = {
"method": method,
"git_commit_message": LAST_GIT_COMMIT_MESSAGE,
"git_branch_name": GIT_BRANCH_NAME,
"stack_name": stack.name,
"ci_job_id": JOB_ID,
"time": datetime.utcnow().isoformat()
}
self._invoke_lambda(payload)
def _invoke_lambda(self, payload: dict) -> None:
self.stack.connection_manager.call(
"lambda",
"invoke",
kwargs={
"FunctionName": self._function_name,
"InvocationType": "RequestResponse",
"Payload": json.dumps(payload)
},
region=AWS_REGION
)
@staticmethod
def _get_stack():
# Get reference to 'decorated' function in call stack. This is where sceptre hooks are applied.
# Moreover, the 'decorated' function has a reference to StackActions containing the correct Stack-instance.
# The 'self.stack' in this object is not necessarily the right Stack.
fr = next(stack for stack in inspect.stack() if stack.function == 'decorated')[0]
args, _, _, value_dict = inspect.getargvalues(fr)
instance = value_dict['self'] if len(args) and args[0] == 'self' else None
return instance.stack if isinstance(instance, StackActions) else None
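# Illustrative only (the "!custom_hook" tag name is an assumption; it depends
# on how this package registers its sceptre.hooks entry point): the hook is
# typically wired into a stack config as
#   hooks:
#     before_launch:
#       - !custom_hook deploy_start
#     after_launch:
#       - !custom_hook deploy_end
# so ``self.argument`` arrives as "deploy_start" or "deploy_end" and the Lambda
# payload above records which phase of the deployment fired.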
| /sceptre-deployment-hook-0.0.16.tar.gz/sceptre-deployment-hook-0.0.16/hook/hook.py | 0.428114 | 0.16529 | hook.py | pypi |
from argparse import ArgumentParser
from shlex import split as args_split
from sceptre.resolvers import Resolver
from kubernetes import client
from kubernetes import config
__author__ = "Gustavo Pantuza <[email protected]>"
class EksLbUri(Resolver):
"""
Resolver for getting Load Balancer DNS on a Kubernetes resource of type
Service LoadBalancer
:param argument: -n | --namespace The namespace where the service lives in
:type argument: str
:param argument: -s | --service-name The service name. It must be of type LoadBalancer
:type argument: str
Examples Yaml usage:
LBArn: !eks_lb_uri --namespace {{ var.namespace }} --service-name {{ var.service }}
LBArn: !eks_lb_uri -n {{ var.namespace }} -s {{ var.service }}
LBArn: !eks_lb_uri -s {{ var.service }} # Assumes default namespace
LBArn: !eks_lb_uri -s "my_api_service_name"
LBArn: !eks_lb_uri -n backend -s data_api
Notes:
    Be aware that the shell from which sceptre will be called MUST be
authenticated on AWS and your kubeconfig properly updated to point to
your AWS EKS remote cluster.
You can refer to the following links for either AWS and Kubernetes
authentication:
. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html
. https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
"""
# Service type to be verified on k8s services
# https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
SERVICE_TYPE = "LoadBalancer"
def __init__(self, *args, **kwargs):
""" Configures the class to be able to properly communicate with
kubernetes remote cluster
"""
super(EksLbUri, self).__init__(*args, **kwargs)
self.load_args()
config.load_kube_config()
self.k8s = client.CoreV1Api()
def load_args(self):
""" Loads command line arguments from shell and turn it into class member """
parser = ArgumentParser()
parser.add_argument("-n", "--namespace", type=str, default="default")
parser.add_argument("-s", "--service-name", required=True, type=str)
args = parser.parse_args(args_split(self.argument))
self.namespace = args.namespace
self.service = args.service_name
def resolve(self):
""" Is called by Sceptre in order to retrieve k8s resource.
This method uses kubernetes client to connect on the remote cluster
and retrieve data of the given service name from inside the given
namespace.
Example Yaml usage:
LBArn: !eks_lb_uri --namespace {{ var.namespace }} --service-name {{ var.service }}
"""
services = self.k8s.list_service_for_all_namespaces_with_http_info()
service = None
for srv in services[0].items:
# Check if the service is the one we are looking for
if srv.metadata.namespace == self.namespace and srv.metadata.name == self.service:
service = srv
if not service:
raise ValueError("Could not find Service {0}".format(self.service))
if service.spec.type != self.SERVICE_TYPE:
raise ValueError("Only Services of type Load Balancer are accepted")
try:
# Returns the Load Balancer DNS URI
return service.status.load_balancer.ingress[0].hostname
except AttributeError:
raise
except Exception as e:
raise Exception("Could not get LB DNS. Error: {0}".format(str(e)))
|
/sceptre_eks_lb_resolver-0.2.1-py3-none-any.whl/resolver/eks_lb_uri.py
| 0.866669 | 0.211824 |
eks_lb_uri.py
|
pypi
|
import abc
import base64
import six
import logging
from botocore.exceptions import ClientError
from sceptre.resolvers import Resolver
from resolver.exceptions import ParameterNotFoundError
TEMPLATE_EXTENSION = ".yaml"
@six.add_metaclass(abc.ABCMeta)
class KmsBase(Resolver):
"""
    An abstract base class which provides methods for getting KMS parameters.
"""
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(__name__)
super(KmsBase, self).__init__(*args, **kwargs)
def _get_decoded_value(self, param, profile=None, region=None):
"""
        Attempts to decrypt the KMS ciphertext given by ``param``
        :param param: The base64-encoded ciphertext to decrypt.
        :type param: str
        :returns: The decrypted plaintext value.
        :rtype: str
:raises: KeyError
"""
response = self._request_kms_value(param, profile, region)
try:
binary_value = response[u'Plaintext']
decoded_value = binary_value.decode()
return decoded_value
except KeyError:
self.logger.error("%s - Invalid response looking for: %s",
self.stack.name, param)
raise
def _request_kms_value(self, param, profile=None, region=None):
"""
        Communicates with AWS KMS to decrypt the given ciphertext.
        :returns: The response from the KMS ``decrypt`` call
:rtype: dict
:raises: resolver.exceptions.ParameterNotFoundError
"""
connection_manager = self.stack.connection_manager
ciphertext = param
ciphertext_blob = base64.b64decode(ciphertext)
try:
response = connection_manager.call(
service="kms",
command="decrypt",
kwargs={"CiphertextBlob": ciphertext_blob},
profile=profile,
region=region,
)
except TypeError as e:
raise e
except ClientError as e:
if "ParameterNotFound" in e.response["Error"]["Code"]:
self.logger.error("%s - ParameterNotFound: %s",
self.stack.name, param)
raise ParameterNotFoundError(e.response["Error"]["Message"])
else:
raise e
else:
return response
class KmsResolver(KmsBase):
"""
    Resolver for retrieving the decrypted value of a KMS-encrypted ciphertext.
    :param argument: The base64-encoded ciphertext to decrypt.
:type argument: str
"""
def __init__(self, *args, **kwargs):
super(KmsResolver, self).__init__(*args, **kwargs)
def resolve(self):
"""
        Decrypts the KMS ciphertext given as the resolver argument
        :returns: The decrypted value of the KMS ciphertext
:rtype: str
"""
self.logger.debug(
"Resolving KMS parameter: {0}".format(self.argument)
)
value = None
profile = self.stack.profile
region = self.stack.region
if self.argument:
param = self.argument
value = self._get_decoded_value(param, profile, region)
return value
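# Usage note (hypothetical sketch): the resolver argument is expected to be a
# base64-encoded KMS ciphertext. It is base64-decoded, passed to the "kms decrypt"
# call above, and the decoded "Plaintext" field of the response is returned as a
# string, using the stack's profile and region.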
|
/sceptre-kms-resolver-1.0.2.tar.gz/sceptre-kms-resolver-1.0.2/resolver/kms.py
| 0.768733 | 0.160661 |
kms.py
|
pypi
|
import jsonschema
import yaml
import zipfile
import urllib.request
import urllib.error
import tempfile
import shutil
from os import path
import sceptre.template_handlers.file
from sceptre.template_handlers import TemplateHandler
from sceptre.helpers import normalise_path
from sceptre.exceptions import SceptreException
REPO_SCHEMA = {
"type": "object",
"properties": {
"name": {"type": "string"},
"base_url": {"type": "string"},
"template_zip_url_format": {"type": "string"},
},
"required": ["name", "base_url"],
}
MANIFEST_SCHEMA = {
"type": "object",
"properties": {"entrypoint": {"type": "string"}},
"required": ["entrypoint"],
}
def path_is_parent(parent_path, child_path):
parent_path = path.abspath(parent_path)
child_path = path.abspath(child_path)
return path.commonpath([parent_path]) == path.commonpath([parent_path, child_path])
class PackageTemplateError(SceptreException):
pass
class ValidatedObject(object):
def __init__(self, schema, spec, defaults={}):
jsonschema.validate(spec, schema)
self._spec = spec
self._defaults = defaults
def __getattr__(self, attr):
if attr in self._spec:
return self._spec[attr]
return self._default(attr)
def _default(self, attr):
return self._defaults.get(attr)
class PackageRepository(ValidatedObject):
def __init__(self, spec):
super(PackageRepository, self).__init__(
REPO_SCHEMA,
spec,
defaults={
"template_zip_url_format": "{repo.base_url}/releases/download/r{release}/{package_name}-{release}.zip"
},
)
def template_zip_url(self, package_name, release):
return self.template_zip_url_format.format(
package_name=package_name, release=release, repo=self
)
class Manifest(ValidatedObject):
def __init__(self, spec):
super(Manifest, self).__init__(MANIFEST_SCHEMA, spec)
class PackageTemplateHandler(TemplateHandler):
"""
The following instance attributes are inherited from the parent class TemplateHandler.
Parameters
----------
name: str
The name of the template. Corresponds to the name of the Stack this template belongs to.
handler_config: dict
Configuration of the template handler. All properties except for `type` are available.
sceptre_user_data: dict
Sceptre user data defined in the Stack config
connection_manager: sceptre.connection_manager.ConnectionManager
Connection manager that can be used to call AWS APIs
"""
def __init__(self, *args, **kwargs):
super(PackageTemplateHandler, self).__init__(*args, **kwargs)
# print(yaml.dump({"args": args, "kwargs": kwargs}))
self._args = args
self._kwargs = kwargs
def schema(self):
"""
Return a JSON schema of the properties that this template handler requires.
For help filling this, see https://github.com/Julian/jsonschema
"""
return {
"type": "object",
"properties": {
"name": {"type": "string"},
"release": {"type": ["number", "string"]},
"repository": REPO_SCHEMA,
},
"required": ["name", "release", "repository"],
}
def handle(self):
"""
`handle` should return a CloudFormation template string or bytes. If the return
value is a byte array, UTF-8 encoding is assumed.
To use instance attribute self.<attribute_name>. See the class-level docs for a
list of attributes that are inherited.
Returns
-------
str|bytes
CloudFormation template
"""
repo = PackageRepository(self.arguments["repository"])
pkg_name = self.arguments["name"]
pkg_release = str(self.arguments["release"])
templates_path = path.join(self.stack_group_config["project_path"], "templates")
pkg_dir = path.join(
templates_path, normalise_path(f"{repo.name}/{pkg_name}-{pkg_release}")
)
if not path.exists(pkg_dir):
self.download(repo, pkg_name, pkg_release, pkg_dir)
manifest_path = path.join(pkg_dir, "manifest.yaml")
if not path.exists(manifest_path):
raise PackageTemplateError(f"package manifest not found: {manifest_path}")
with open(manifest_path, "r") as fp:
manifest = Manifest(yaml.safe_load(fp))
template_path = path.join(pkg_dir, manifest.entrypoint)
if not path_is_parent(pkg_dir, template_path):
raise PackageTemplateError(f"Invalid entrypoint: {manifest.entrypoint}")
return sceptre.template_handlers.file.File(
*self._args,
**{
**self._kwargs,
"arguments": {
"path": path.relpath(template_path, start=templates_path)
},
},
).handle()
def download(self, repo, pkg_name, pkg_release, pkg_dir):
zip_url = repo.template_zip_url(pkg_name, pkg_release)
req = urllib.request.Request(zip_url)
self.logger.info("Downloading %s", zip_url)
try:
with urllib.request.urlopen(req) as resp:
with tempfile.NamedTemporaryFile("w+b") as fp:
shutil.copyfileobj(resp, fp)
fp.seek(0)
zipfile.ZipFile(fp).extractall(pkg_dir)
        except urllib.error.HTTPError as e:
self.logger.fatal("Error downloading template %s: %s", zip_url, e)
|
/sceptre_package_template_handler-0.1.1.tar.gz/sceptre_package_template_handler-0.1.1/package_template_handler.py
| 0.562177 | 0.159054 |
package_template_handler.py
|
pypi
|
import requests
from validator_collection import checkers
from sceptre.resolvers import Resolver
from sceptre.exceptions import SceptreException
from jsonschema import validate
from requests.auth import HTTPBasicAuth
VALID_AUTH_TYPES = ["basic"]
RESOLVER_ARGS_SCHEMA = {
"type": "object",
"properties": {
"url": {"type": "string"},
"auth": {"enum": VALID_AUTH_TYPES},
"user": {"type": "string"},
"password": {"type": "string"},
},
"required": ["url"],
"if": {"properties": {"auth": {"const": "basic"}}, "required": ["auth"]},
"then": {"required": ["user", "password"]},
}
class InvalidResolverArgumentValueError(SceptreException):
"""
Error raised if a resolver's argument value is invalid.
"""
pass
class Request(Resolver):
"""
Resolve data from a REST API endpoint.
"""
def _make_request(self, url, auth=None):
"""
Make a request to a REST API endpoint
:param url: The url endpoint reference
"""
content = None
if auth:
response = requests.get(url, auth=auth)
else:
response = requests.get(url)
response.raise_for_status()
content = response.text
return content
def resolve(self):
"""
This method is invoked by Sceptre
:returns: Response from the request
:rtype: str
"""
args = self.argument
url = args
if isinstance(args, dict):
validate(instance=args, schema=RESOLVER_ARGS_SCHEMA)
url = args["url"]
if not checkers.is_url(url):
raise InvalidResolverArgumentValueError(f"Invalid argument: {url}")
auth = None
if isinstance(args, dict) and "auth" in args:
auth_type = args["auth"].lower()
if auth_type == "basic":
user = args["user"]
password = args["password"]
auth = HTTPBasicAuth(user, password)
response = self._make_request(url, auth=auth)
return response
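# Usage sketch (hypothetical values): the resolver argument may be either a plain
# URL string or a dict matching RESOLVER_ARGS_SCHEMA above, e.g.
#   "https://api.example.com/value"
#   {"url": "https://api.example.com/value", "auth": "basic",
#    "user": "me", "password": "secret"}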
|
/sceptre_request_resolver-0.4.0.tar.gz/sceptre_request_resolver-0.4.0/resolver/request.py
| 0.664867 | 0.191384 |
request.py
|
pypi
|
# README
## What is this?
`sceptre-sam-handler` is a TemplateHandler for Sceptre (versions 2.7 and up) that lets you use an
AWS SAM template (and its associated project) as a stack's template.
This template handler will run `sam build` and then `sam package` from the indicated SAM Template's
directory in order to compile and generate a CloudFormation-ready template. Additionally, if you
define your SAM template with a `.j2` extension, you can utilize [Jinja2 templating syntax and logic](
https://jinja.palletsprojects.com/en/3.1.x/templates/) to render the SAM template prior to the build.
**By using the SAM Handler, you are letting SAM build your application, compile a SAM template, and
upload artifacts to S3, and then using Sceptre to actually do the deployment of the template to a stack.**
In other words, by using this handler with Sceptre, _you skip ever using `sam deploy`; It's not needed_.
You also shouldn't need a sam config file with deployment defaults, since you'll be using Sceptre to
deploy rather than SAM.
By using this handler, you can now use SAM templates with all your favorite Sceptre commands, like
`launch`, `validate`, `generate`, and `diff` (along with all the rest)!
## How to install sceptre-sam-handler
Simply `pip install sceptre-sam-handler`. **Additionally, you need SAM CLI installed and accessible** on
the PATH for the handler to invoke as a subprocess.
There are three main ways you can install SAM CLI:
* You can follow [AWS's own documentation](
https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html)
on how to install SAM for your operating system. (Note: at least on Linux, this requires the ability
to use `sudo`; If you need to install SAM where permissions escalation is not possible, this won't
work.)
* You can use [pipx](https://github.com/pypa/pipx) to install `aws-sam-cli` into an isolated virtual
environment where it can have exclusive claim to its dependency versions. This can be done without
privilege escalations.
* If you want to install `aws-sam-cli` along with this handler using `pip`, you can use the "extra"
like `pip install sceptre-sam-handler[sam]`. However, **using pip to install SAM is generally not
recommended, according to SAM's own documentation.** This can lead to dependency conflicts, since
SAM CLI is particular about dependency versions.
## How to use sceptre-sam-handler
The template "type" for this handler is `sam`. There are two file extensions supported by this
handler:
* `.yaml`: Use this for a "normal" SAM template according to the SAM specification. This template
will be directly sent to the SAM CLI for building and packaging.
* `.j2`: Use this if you need to use [Jinja2 templating syntax and logic](
https://jinja.palletsprojects.com/en/3.1.x/templates/) in order to render a SAM template, such as
to interpolate values into the template prior to building it. See the section below on Jinja SAM
templates for more details.
This handler takes several arguments, two of which are required.
### Arguments:
* `path` (string, required): The path **from the current working directory** (NOT the
project path) to the SAM Template. The path _must_ end in either ".yaml" or ".j2".
* `artifact_bucket_name` (string, required): The bucket name where artifacts should be uploaded to
on S3 during the packaging process. If your project has a `template_bucket_name`, you can set this
to `{{ template_bucket_name }}`.
* `artifact_prefix` (string, optional): The prefix to apply to artifacts uploaded to S3. This can be
the project's `{{ template_key_prefix }}`.
* `build_args` (dict, optional): Additional key/value pairs to supply to `sam build`. For
flag-type arguments that have no value, set the value to "True".
* `package_args` (dict, optional): Additional key/value pairs to apply to `sam package`. The
same is true here as for `build_args` for flag-type arguments.
* `skip_jinja_cleanup` (bool): Can be set to True to retain the jinja file after processing. This
can be used to troubleshoot issues.
### How does this handler work?
When using _only_ sam CLI (not Sceptre) to deploy using `sam deploy`, SAM CLI effectively performs
3 steps:
1. SAM CLI builds all the various special SAM resources, resolving dependencies. These would
include Lambda functions and Lambda layers. It copies any locally-referenced files and resolves any
dependencies into a directory called `.aws-sam`. This is the same behavior as running `sam build`.
2. SAM CLI then transforms all SAM template URIs that reference local filepaths into S3 keys (among other
transformations it applies), uploads any built artifacts to those S3 keys, and saves the transformed
template. This is the same behavior as running `sam package`.
3. SAM CLI finally takes that transformed template (along with a local sam config and any other CLI
arguments) and performs CloudFormation stack create/update with them.
When you use Sceptre with this handler, the SAM handler performs steps 1-2 above to create a template
that Sceptre can use, **but it does not use SAM to deploy it!** Instead, Sceptre can use that template
produced in step 2 above (via `sam package`) to perform all its usual commands with all its usual
magic!
In other words, using this handler lets you use resolvers, put your SAM stack into StackGroups, name
your stack according to Sceptre's naming conventions, `validate`, `diff`, and more! Basically,
the SAM stack can be managed using Sceptre just like any other.
### Default behavior
SAM commands are invoked using the system shell in a subprocess, with stdout redirected to stderr.
Artifacts will be uploaded using the `artifact_bucket_name` and `artifact_prefix` arguments, the
`project_code`, and the Sceptre stack name.
For example, given an `artifact_bucket_name` of "bucket", `artifact_prefix` of "prefix", a
`project_code` of "project" and a stack config located at "config/indigo/sam-application.yaml", SAM
artifacts will be uploaded to:
`s3://bucket/prefix/project/indigo/sam-application/sam_artifacts/`
By default, these will be the sam commands that are run _from the template's directory_:
```shell
sam build --cached --template-file [path as absolute path]
sam package \
--s3-bucket [artifact_bucket_name argument] \
--region [the stack region] \
--s3-prefix [the prefix described above] \
--template-file [path as absolute path]
```
If any additional arguments need to be passed to SAM, you can specify those with dicts for
the `build_args` and `package_args` template handler arguments. These key/value pairs will
override the defaults. For any flag-type arguments, set the value to `True`. If you want to remove
a default argument (such as the `--cached` flag for `sam build`), set the value to `None`.
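As a hypothetical sketch of overriding these defaults in a Stack Config (in YAML, `null` parses to
Python `None`; the `--force-upload` flag is shown purely as an illustrative extra argument):
```yaml
template:
  type: sam
  path: path/from/my/cwd/template.yaml
  artifact_bucket_name: !stack_attr template_bucket_name
  build_args:
    cached: null         # drops the default --cached flag
    use-container: True  # adds the --use-container flag
  package_args:
    force-upload: True   # adds the --force-upload flag
```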
### IAM and authentication
This handler uses the stack's connection information to generate AWS environment variables and sets
those on the sam process, ensuring that the AWS authentication configuration on the stack config and
project is carried over to SAM without any need for additional arguments.
If you desire to use a different profile or region when invoking `sam package` than what is set on
the stack, you should specify "profile" and/or "region" values for "package_args".
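For example (hypothetical profile name), inside the template handler arguments:
```yaml
  package_args:
    profile: my-packaging-profile
    region: eu-west-1
```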
**Important:** SAM creates CloudFormation-ready templates via `sam package`, which uploads built
artifacts to S3 in the process. This means that Sceptre commands that do not normally require S3
actions (such as `generate`, `validate`, `diff`, and others) will require them when using this
handler. You will need to ensure that any user or role executing these commands has proper
permissions for these operations. For more information on required permissions, see the
[documentation for SAM permissions](
https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-permissions.html).
### Jinja SAM Templates
The SAM Handler supports using SAM templates that have Jinja logic in them. These Jinja templates
will have access to the `sceptre_user_data` just like Jinja templates via the normal file handler
do. This can be useful for implementing additional template logic (such as loops and other actions)
in the template.
If you need to pass variables into the Jinja template for reference via Jinja syntax,
you should pass those variables via `sceptre_user_data`. Remember, resolvers can be used with
`sceptre_user_data`, so this can be a powerful tool to pre-render your templates or reference values
in areas that a SAM Template cannot use parameters (such as in Transforms).
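For illustration, here is a minimal sketch of such a `.j2` SAM template, assuming a hypothetical
`function_names` list supplied via `sceptre_user_data`:
```yaml
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Resources:
{% for name in sceptre_user_data.function_names %}
  {{ name }}Function:
    Type: AWS::Serverless::Function
    Properties:
      Handler: app.handler
      Runtime: python3.9
      CodeUri: src/
{% endfor %}
```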
### Resolvers in the SAM Handler parameters
It's likely that you'll want to use your template_bucket_name as your artifact_bucket_name, so you
don't need to have a separate bucket for your sam artifacts. However, since template_bucket_name is
technically a resolvable property that could be set via `!stack_output` on your StackGroup config,
you cannot directly reference it in your Stack Config with `{{ template_bucket_name }}` if you defined
it on the StackGroup Config using a resolver. For more information on _why_ you can't do this, you
should read about the [Resolution order of values](
https://docs.sceptre-project.org/3.1.0/docs/stack_config.html#resolution-order-of-values) for Sceptre
Stack Configs on Sceptre's documentation.
Nonetheless, the bottom line is this: `{{ template_bucket_name }}` may or may not actually work for
you, but `!stack_attr template_bucket_name` will **always** work for you, no matter how you've set
up your project.
### Example Stack Config (simple .yaml)
```yaml
# By using the SAM handler, you let SAM build and package the template and upload artifacts to S3
# and Sceptre will use the packaged template to create the CloudFormation stack, using the stack
# config.
template:
type: sam
path: path/from/my/cwd/template.yaml
# If your template_bucket_name is inherited from your StackGroup as a resolver, you cannot use
# {{ template_bucket_name }} to reference it, so you need to use the !stack_attr resolver. But
# using !stack_attr will always work for you anyway, so you might as well configure the SAM
# handler that way.
artifact_bucket_name: !stack_attr template_bucket_name
# It makes the most sense to use the same prefix as your template_key_prefix so that your SAM
# artifacts are foldered similar to your other templates... but it's not strictly necessary.
artifact_prefix: !stack_attr template_key_prefix
build_args:
use-container: True
# You can use resolvers to pass parameters, just like any other Sceptre stack!
parameters:
long_parameter: !file my/file/path
my_template_parameter: !stack_output some/other/stack.yaml::SomeOutput
# The SAM Handler will work with all the other stack parameters you might want to use too!
profile: my_profile
iam_role: arn:aws:iam::1111111111:role/My-Deployment-Role
region: us-east-1
stack_tags:
SomeTag: SomeValue
```
### Example Stack Config (Using Jinja2 .j2 template)
```yaml
template:
type: sam
path: path/from/my/cwd/template.j2
artifact_bucket_name: !stack_attr template_bucket_name
artifact_prefix: !stack_attr template_key_prefix
# Remember, Jinja logic cannot access parameter values; Those are accessed via CloudFormation functions
# like !Ref and !Sub when the stack is being deployed. If you need values to use with your Jinja logic,
# use sceptre_user_data instead.
parameters:
my_template_parameter: !stack_output some/other/stack.yaml::SomeOutput
# sceptre_user_data is resolved PRIOR TO building and deploying the template and it is passed to
# Jinja. So you can use sceptre_user_data to control Jinja logic or render values into the template.
# And because sceptre_user_data is resolvable, you can use resolvers to pass values and even whole
# template segments to render into the final SAM template before SAM build is ever invoked.
sceptre_user_data:
  template_segment: !file my/template/segment
```
|
/sceptre-sam-handler-1.0.0.tar.gz/sceptre-sam-handler-1.0.0/README.md
| 0.477067 | 0.768473 |
README.md
|
pypi
|
import posixpath
import subprocess
import sys
import tempfile
from pathlib import Path
from sceptre.connection_manager import ConnectionManager
from sceptre.exceptions import UnsupportedTemplateFileTypeError
from sceptre.template_handlers import TemplateHandler, helper
class SamInvoker:
def __init__(
self,
connection_manager: ConnectionManager,
sam_directory: Path,
*,
run_subprocess=subprocess.run
):
"""A utility for invoking SAM commands using subprocess
Args:
connection_manager: The TemplateHandler's ConnectionManager instance to use for obtaining
session environment variables
sam_directory: The directory of the SAM template to use as the CWD when invoking SAM
run_subprocess: The function to use for invoking subprocesses, matching the signature of
subprocess.run
"""
self.connection_manager = connection_manager
self.sam_directory = sam_directory
self.run_subprocess = run_subprocess
def invoke(self, command_name: str, args_dict: dict) -> None:
"""Invokes a SAM Command using the passed dict of arguments.
Args:
command_name: The name of the sam command to invoke (i.e. "build" or "package")
args_dict: The dictionary of arguments to pass to the command
"""
command_args = self._create_args(args_dict)
command = f'sam {command_name}'
if command_args.strip() != '':
command += f' {command_args}'
return self._invoke_sam_command(command)
def _create_args(self, parameters: dict) -> str:
"""Creates a CLI argument string by combining two dictionaries and then formatting them as
options.
How the dict will be converted to cli args:
* Keys with a value of None will be omitted, since they have no value
* Keys with a value of True will be converted to --flag type of arguments
* All other key/value pairs will be converted to --key "value" pairs
Args:
parameters: The default dictionary of arguments
Returns:
The CLI argument string
"""
args = []
for arg_name, arg_value in parameters.items():
if arg_value is None:
# It's an option with no value, so let's skip it
continue
argline = f'--{arg_name}'
if arg_value is not True:
# If the value is True, it's a flag, so we don't want a value
argline += f' "{arg_value}"'
args.append(argline)
return ' '.join(args)
def _invoke_sam_command(self, command: str) -> None:
environment_variables = self.connection_manager.create_session_environment_variables()
self.run_subprocess(
command,
shell=True,
cwd=self.sam_directory,
check=True,
# Redirect stdout to stderr so it doesn't combine with stdout that we might want
# to capture.
stdout=sys.stderr,
env=environment_variables
)
class SAM(TemplateHandler):
"""A template handler for AWS SAM templates. Using this will allow Sceptre to work with SAM to
build and package a SAM template and deploy it with Sceptre.
"""
SAM_ARTIFACT_DIRECTORY = 'sam_artifacts'
standard_template_extensions = ['.yaml']
jinja_template_extensions = ['.j2']
supported_template_extensions = standard_template_extensions + jinja_template_extensions
def __init__(
self,
name,
arguments=None,
sceptre_user_data=None,
connection_manager=None,
stack_group_config=None,
*,
invoker_class=SamInvoker,
get_temp_dir=tempfile.gettempdir,
render_jinja_template=helper.render_jinja_template
):
super().__init__(name, arguments, sceptre_user_data, connection_manager, stack_group_config)
self.invoker_class = invoker_class
self.get_temp_dir = get_temp_dir
self.render_jinja_template = render_jinja_template
def schema(self) -> dict:
"""This is the json schema of the template handler. It is required by Sceptre to define
template handler parameters.
"""
return {
"type": "object",
"properties": {
"path": {"type": "string"},
"artifact_prefix": {"type": "string"},
"artifact_bucket_name": {"type": "string"},
"build_args": {
"type": "object",
},
"package_args": {
"type": "object",
},
"skip_jinja_cleanup": {
"type": "boolean"
}
},
"required": [
"path",
"artifact_bucket_name",
]
}
def handle(self) -> str:
invoker = self.invoker_class(
connection_manager=self.connection_manager,
sam_directory=self.sam_directory
)
self._create_generation_destination()
template_path = self._prepare_template()
self._build(invoker, template_path)
skip_jinja_cleanup = self.arguments.get('skip_jinja_cleanup', False)
if not skip_jinja_cleanup and template_path != self.sam_template_path:
# We created a temporary file for the build, so let's remove it now.
template_path.unlink()
self._package(invoker)
return self.destination_template_path.read_text()
@property
def sam_template_path(self) -> Path:
return Path(self.arguments['path']).absolute()
@property
def sam_directory(self) -> Path:
return self.sam_template_path.parent
@property
def destination_template_path(self) -> Path:
suffix = self.sam_template_path.suffix
path_segments = self.name.split('/')
path_segments[-1] += suffix
return Path(self.get_temp_dir()).joinpath(*path_segments).absolute()
@property
def destination_template_directory(self) -> Path:
return self.destination_template_path.parent
@property
def artifact_key_prefix(self) -> str:
"""Returns the key prefix that should be passed to SAM CLI for uploading the packaged
artifacts.
"""
prefix_segments = [self.name, self.SAM_ARTIFACT_DIRECTORY]
sam_package_prefix = self.arguments.get('artifact_prefix')
if sam_package_prefix:
prefix_segments.insert(0, sam_package_prefix)
prefix = posixpath.join(*prefix_segments)
return prefix
@property
def artifact_bucket_name(self) -> str:
"""Returns the S3 bucket name that should be passed to SAM CLI for uploading the packaged
artifacts.
"""
return self.arguments['artifact_bucket_name']
def _create_generation_destination(self):
"""Creates the destination_template_directory, if it doesn't exist."""
self.destination_template_directory.mkdir(parents=True, exist_ok=True)
def _prepare_template(self) -> Path:
if self.sam_template_path.suffix not in self.supported_template_extensions:
raise UnsupportedTemplateFileTypeError(
f"Template has file extension {self.sam_template_path}. Only "
f"{self.supported_template_extensions} are supported."
)
if self.sam_template_path.suffix in self.standard_template_extensions:
return self.sam_template_path
elif self.sam_template_path.suffix in self.jinja_template_extensions:
return self._compile_jinja_template()
def _compile_jinja_template(self) -> Path:
self.logger.info("Compiling Jinja template...")
template_body = self.render_jinja_template(
str(self.sam_template_path),
{'sceptre_user_data': self.sceptre_user_data},
self.stack_group_config.get('j2_environment', {})
)
compiled_path = self.sam_template_path.parent / f'{self.sam_template_path.stem}.compiled'
compiled_path.write_text(template_body)
return compiled_path
def _build(self, invoker: SamInvoker, template_path: Path):
default_args = {
'cached': True,
'template-file': str(template_path)
}
build_args = {**default_args, **self.arguments.get('build_args', {})}
invoker.invoke('build', build_args)
def _package(self, invoker: SamInvoker):
default_args = {
's3-bucket': self.artifact_bucket_name,
'region': self.connection_manager.region,
's3-prefix': self.artifact_key_prefix,
'output-template-file': self.destination_template_path,
}
package_args = {**default_args, **self.arguments.get('package_args', {})}
invoker.invoke('package', package_args)
|
/sceptre-sam-handler-1.0.0.tar.gz/sceptre-sam-handler-1.0.0/sam_handler/handler.py
| 0.640636 | 0.188436 |
handler.py
|
pypi
|
import abc
import six
import logging
from botocore.exceptions import ClientError
from sceptre.resolvers import Resolver
from resolver.exceptions import ParameterNotFoundError
TEMPLATE_EXTENSION = ".yaml"
@six.add_metaclass(abc.ABCMeta)
class SsmBase(Resolver):
"""
    An abstract base class which provides methods for getting SSM parameters.
"""
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(__name__)
super(SsmBase, self).__init__(*args, **kwargs)
def _get_parameter_value(self, param, region, profile=None):
"""
Attempts to get the SSM parameter named by ``param``
:param param: The name of the SSM parameter in which to return.
:type param: str
:returns: SSM parameter value.
:rtype: str
:raises: KeyError
"""
response = self._request_parameter(param, region, profile)
try:
return response['Parameter']['Value']
except KeyError:
self.logger.error("%s - Invalid response looking for: %s",
self.stack.name, param)
raise
def _request_parameter(self, param, region, profile=None):
"""
        Communicates with AWS SSM to fetch the requested parameter.
        :returns: The response from the SSM ``get_parameter`` call
:rtype: dict
:raises: resolver.exceptions.ParameterNotFoundError
"""
connection_manager = self.stack.connection_manager
try:
response = connection_manager.call(
service="ssm",
command="get_parameter",
kwargs={"Name": param,
"WithDecryption": True},
region=region,
profile=profile
)
except ClientError as e:
if "ParameterNotFound" in e.response["Error"]["Code"]:
self.logger.error("%s - ParameterNotFound: %s",
self.stack.name, param)
raise ParameterNotFoundError(e.response["Error"]["Message"])
else:
raise e
else:
return response
class SSM(SsmBase):
"""
Resolver for retrieving the value of an SSM parameter.
:param argument: The parameter name to get.
:type argument: str
"""
def __init__(self, *args, **kwargs):
super(SSM, self).__init__(*args, **kwargs)
def resolve(self):
"""
Retrieves the value of SSM parameter
:returns: The decoded value of the SSM parameter
:rtype: str
"""
args = self.argument
if not args:
raise ValueError("Missing SSM parameter name")
value = None
self.logger.debug(
"Resolving SSM parameter: {0}".format(args)
)
name = self.argument
region = self.stack.region
profile = self.stack.profile
if isinstance(args, dict):
if 'name' in args:
name = args['name']
else:
raise ValueError("Missing SSM parameter name")
profile = args.get('profile', profile)
region = args.get('region', region)
value = self._get_parameter_value(name, region, profile)
return value
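# Usage sketch (hypothetical values): the resolver argument may be either a plain
# parameter name or a dict with optional "region" and "profile" overrides, e.g.
#   "/my/app/db_password"
#   {"name": "/my/app/db_password", "region": "us-east-1", "profile": "prod"}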
|
/sceptre-ssm-resolver-1.2.2.tar.gz/sceptre-ssm-resolver-1.2.2/resolver/ssm.py
| 0.75985 | 0.196518 |
ssm.py
|
pypi
|
import abc
import six
import logging
from botocore.exceptions import ClientError
from sceptre.hooks import Hook
@six.add_metaclass(abc.ABCMeta)
class StackTerminationProtection(Hook):
"""
Hook for cloudformation stack protection.
:param argument: The stack termination protection setting
:type argument: str
"""
ALLOWED_ARG_VALUES = ['enabled', 'disabled']
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(__name__)
super(StackTerminationProtection, self).__init__(*args, **kwargs)
def run(self):
"""
        Updates the CloudFormation stack termination protection setting.
"""
if not self.argument:
return
argument = (self.argument if self.argument else '').lower()
assert argument in self.ALLOWED_ARG_VALUES, \
"Invalid argument for !stack_termination_protection, " \
"please choose one of {0}".format(self.ALLOWED_ARG_VALUES)
enable_termination_protection = False
if argument == 'enabled':
enable_termination_protection = True
connection_manager = self.stack.connection_manager
try:
connection_manager.call(
service="cloudformation",
command="describe_stacks",
kwargs={"StackName": self.stack.external_name}
)
except ClientError:
self.logger.info(
"%s - stack not found. Skipping termination protection",
self.stack.name)
return
try:
connection_manager.call(
service="cloudformation",
command="update_termination_protection",
kwargs={"StackName": self.stack.external_name,
"EnableTerminationProtection":
enable_termination_protection},
profile=self.stack.profile,
region=self.stack.region,
stack_name=self.stack.name
)
self.logger.info(
"%s - termination protection set to '%s'",
self.stack.name, argument)
except ClientError as e:
raise e
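# Usage sketch (hypothetical hook placement), using the tag referenced in the
# assertion message above:
#   hooks:
#     before_create:
#       - !stack_termination_protection enabled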
|
/sceptre-stack-termination-protection-hook-1.1.0.tar.gz/sceptre-stack-termination-protection-hook-1.1.0/hook/stack_termination_protection.py
| 0.550124 | 0.18508 |
stack_termination_protection.py
|
pypi
|
# Sceptre
[](https://app.circleci.com/pipelines/github/Sceptre)
[](https://hub.docker.com/r/sceptreorg/sceptre)
[](https://pypi.org/project/sceptre/)
[](https://pypi.org/project/sceptre/)
[](https://pypi.org/project/sceptre/)
[](https://pypi.org/project/sceptre/)
[](https://github.com/Sceptre/sceptre/blob/main/LICENSE)
## About
Sceptre is a tool to drive
[AWS CloudFormation](https://aws.amazon.com/cloudformation). It automates the
mundane, repetitive and error-prone tasks, enabling you to concentrate on
building better infrastructure.
## Features
- Code reuse by separating a Stack's template and its configuration
- Support for templates written in JSON, YAML, Jinja2 or Python DSLs such as
Troposphere
- Dependency resolution by passing of Stack outputs to parameters of dependent
Stacks
- Stack Group support by bundling related Stacks into logical groups (e.g. dev
and prod)
- Stack Group-level commands, such as creating multiple Stacks with a single
command
- Fast, highly parallelised builds
- Built in support for working with Stacks in multiple AWS accounts and regions
- Infrastructure visibility with meta-operations such as Stack querying
protection
- Support for inserting dynamic values in templates via customisable Resolvers
- Support for running arbitrary code as Hooks before/after Stack builds
## Benefits
- Utilises cloud-native Infrastructure as Code engines (CloudFormation)
- You do not need to manage state
- Simple templates using popular templating syntax - Yaml & Jinja
- Powerful flexibility using a mature programming language - Python
- Easy to integrate as part of a CI/CD pipeline by using Hooks
- Simple CLI and API
- Unopinionated - Sceptre does not force a specific project structure
## Install
### Using pip
`$ pip install sceptre`
More information on installing sceptre can be found in our
[Installation Guide](https://docs.sceptre-project.org/latest/docs/install.html)
### Using Docker Image
View our [Docker repository](https://hub.docker.com/repositories/sceptreorg).
Images available from version 2.0.0 onward.
To use our Docker image follow these instructions:
1. Pull the image `docker pull sceptreorg/sceptre:[SCEPTRE_VERSION_NUMBER]` e.g.
`docker pull sceptreorg/sceptre:2.5.0`. Leave out the version number if you
wish to run `latest` or run `docker pull sceptreorg/sceptre:latest`.
2. Run the image. You will need to mount the working directory where your
project resides to a directory called `project`. You will also need to mount
a volume with your AWS config to your docker container. E.g.
`docker run -v $(pwd):/project -v /Users/me/.aws/:/root/.aws/:ro sceptreorg/sceptre:latest --help`
If you want to use a custom ENTRYPOINT simply amend the Docker command:
`docker run -ti --entrypoint='' sceptreorg/sceptre:latest sh`
The above command will enter you into the shell of the Docker container where
you can execute sceptre commands - useful for development.
If you have any other environment variables in your non-docker shell you will
need to pass these in on the Docker CLI using the `-e` flag. See Docker
documentation on how to achieve this.
## Example
Sceptre organises Stacks into "Stack Groups". Each Stack is represented by a
YAML configuration file stored in a directory which represents the Stack Group.
Here, we have two Stacks, `vpc` and `subnets`, in a Stack Group named `dev`:
```sh
$ tree
.
├── config
│ └── dev
│ ├── config.yaml
│ ├── subnets.yaml
│ └── vpc.yaml
└── templates
├── subnets.py
└── vpc.py
```
We can create a Stack with the `create` command. This `vpc` Stack contains a
VPC.
```sh
$ sceptre create dev/vpc.yaml
dev/vpc - Creating stack dev/vpc
VirtualPrivateCloud AWS::EC2::VPC CREATE_IN_PROGRESS
dev/vpc VirtualPrivateCloud AWS::EC2::VPC CREATE_COMPLETE
dev/vpc sceptre-demo-dev-vpc AWS::CloudFormation::Stack CREATE_COMPLETE
```
The `subnets` Stack contains a subnet which must be created in the VPC. To do
this, we need to pass the VPC ID, which is exposed as a Stack output of the
`vpc` Stack, to a parameter of the `subnets` Stack. Sceptre automatically
resolves this dependency for us.
```sh
$ sceptre create dev/subnets.yaml
dev/subnets - Creating stack
dev/subnets Subnet AWS::EC2::Subnet CREATE_IN_PROGRESS
dev/subnets Subnet AWS::EC2::Subnet CREATE_COMPLETE
dev/subnets sceptre-demo-dev-subnets AWS::CloudFormation::Stack CREATE_COMPLETE
```
Sceptre implements meta-operations, which allow us to find out information about
our Stacks:
```sh
$ sceptre list resources dev/subnets.yaml
- LogicalResourceId: Subnet
PhysicalResourceId: subnet-445e6e32
dev/vpc:
- LogicalResourceId: VirtualPrivateCloud
PhysicalResourceId: vpc-c4715da0
```
Sceptre provides Stack Group level commands. This one deletes the whole `dev`
Stack Group. The subnet exists within the vpc, so it must be deleted first.
Sceptre handles this automatically:
```sh
$ sceptre delete dev
Deleting stack
dev/subnets Subnet AWS::EC2::Subnet DELETE_IN_PROGRESS
dev/subnets - Stack deleted
dev/vpc Deleting stack
dev/vpc VirtualPrivateCloud AWS::EC2::VPC DELETE_IN_PROGRESS
dev/vpc - Stack deleted
```
> Note: Deleting Stacks will _only_ delete a given Stack, or the Stacks that are
> directly in a given StackGroup. By default Stack dependencies that are
> external to the StackGroup are not deleted.
Sceptre can also handle cross Stack Group dependencies, take the following
example project:
```sh
$ tree
.
├── config
│ ├── dev
│ │ ├── network
│ │ │ └── vpc.yaml
│ │ ├── users
│ │ │ └── iam.yaml
│ │ ├── compute
│ │ │ └── ec2.yaml
│ │ └── config.yaml
│ └── staging
│ └── eu
│ ├── config.yaml
│ └── stack.yaml
├── hooks
│ └── stack.py
├── templates
│ ├── network.json
│ ├── iam.json
│ ├── ec2.json
│ └── stack.json
└── vars
├── dev.yaml
└── staging.yaml
```
In this project `staging/eu/stack.yaml` has a dependency on the output of
`dev/users/iam.yaml`. If you wanted to create the Stack `staging/eu/stack.yaml`,
Sceptre will resolve all of its dependencies, including `dev/users/iam.yaml`,
before attempting to create the Stack.
## Usage
Sceptre can be used from the CLI, or imported as a Python package.
## CLI
```text
Usage: sceptre [OPTIONS] COMMAND [ARGS]...
Sceptre is a tool to manage your cloud native infrastructure deployments.
Options:
--version Show the version and exit.
--debug Turn on debug logging.
--dir TEXT Specify sceptre directory.
--output [text|yaml|json] The formatting style for command output.
--no-colour Turn off output colouring.
--var TEXT A variable to replace the value of an item in
config file.
--var-file FILENAME A YAML file of variables to replace the values
of items in config files.
--ignore-dependencies Ignore dependencies when executing command.
--merge-vars Merge variables from successive --vars and var
files.
--help Show this message and exit.
Commands:
create Creates a stack or a change set.
delete Deletes a stack or a change set.
describe Commands for describing attributes of stacks.
estimate-cost Estimates the cost of the template.
execute Executes a Change Set.
generate Prints the template.
launch Launch a Stack or StackGroup.
list Commands for listing attributes of stacks.
new Commands for initialising Sceptre projects.
set-policy Sets Stack policy.
status Print status of stack or stack_group.
update Update a stack.
validate Validates the template.
```
## Python
Using Sceptre as a Python module is very straightforward. You need to create a
SceptreContext, which tells Sceptre where your project path is and which path
you want to execute on, we call this the "command path".
After you have created a SceptreContext you need to pass this into a
SceptrePlan. On instantiation the SceptrePlan will handle all the required steps
to make sure the action you wish to take on the command path are resolved.
After you have instantiated a SceptrePlan you can access all the actions you can
take on a Stack, such as `validate()`, `launch()`, `list()` and `delete()`.
```python
from sceptre.context import SceptreContext
from sceptre.plan.plan import SceptrePlan
context = SceptreContext("/path/to/project", "command_path")
plan = SceptrePlan(context)
plan.launch()
```
Full API reference documentation can be found in the
[Documentation](https://docs.sceptre-project.org/)
## Tutorial and Documentation
- [Get Started](https://docs.sceptre-project.org/latest/docs/get_started.html)
- [Documentation](https://docs.sceptre-project.org/)
## Communication
Sceptre community discussions happen in the #sceptre channel in the
[og-aws Slack](https://github.com/open-guides/og-aws). To join click
on <http://slackhatesthe.cloud/> to create an account and join the
#sceptre channel.
Follow the [SceptreOrg Twitter account](https://twitter.com/SceptreOrg) to get announcements on the latest releases.
## Contributing
See our [Contributing Guide](CONTRIBUTING.md)
## Sponsors
[](https://sagebionetworks.org)
[](https://www.godaddy.com)
[](https://www.cloudreach.com)
|
/sceptre-4.2.2.tar.gz/sceptre-4.2.2/README.md
| 0.593963 | 0.925129 |
README.md
|
pypi
|
import re
# The regex captures any path information before the {proxy+}. This is to support paths that have other params in
# them. Otherwise the regex will match the first { with the last +} giving incorrect results.
# Example: /id/{id}/user/{proxy+}. g<1> = '/id/{id}/user/' g<2> = 'proxy'
PROXY_PATH_PARAMS_ESCAPED = r"(.*/){(.*)\+}"
# The regex replaces what was captured with PROXY_PATH_PARAMS_ESCAPED to construct the full path with the {proxy+}
# replaces. The first group is anything before {proxy+}, while the second group is the name given to the proxy.
# Example: /id/{id}/user/{resource+}; g<1> = '/id/{id}/user/'; g<2> = 'resource'
FLASK_CAPTURE_ALL_PATH = r"\g<1><path:\g<2>>"
# The regex will replace the first group from FLASK_CAPTURE_ALL_PATH_REGEX into the proxy name part of the APIGW path.
# Example: /<path:resource>; g<1> = 'resource'; output = /{resource+}
PROXY_PATH_PARAMS = r"/{\g<1>+}"
# The regex will capture the name of the path for the APIGW Proxy path.
# Example: /<path:resource> is equivalent to the APIGW path /{resource+}
FLASK_CAPTURE_ALL_PATH_REGEX = r"/<path:(.*)>"
LEFT_BRACKET = "{"
RIGHT_BRACKET = "}"
LEFT_ANGLE_BRACKET = "<"
RIGHT_ANGLE_BRACKET = ">"
APIGW_TO_FLASK_REGEX = re.compile(PROXY_PATH_PARAMS_ESCAPED)
FLASK_TO_APIGW_REGEX = re.compile(FLASK_CAPTURE_ALL_PATH_REGEX)
class PathConverter(object):
@staticmethod
def convert_path_to_flask(path):
"""
Converts a Path from an Api Gateway defined path to one that is accepted by Flask
Examples:
'/id/{id}' => '/id/<id>'
'/{proxy+}' => '/<path:proxy>'
:param str path: Path to convert to Flask defined path
:return str: Path representing a Flask path
"""
proxy_sub_path = APIGW_TO_FLASK_REGEX.sub(FLASK_CAPTURE_ALL_PATH, path)
# Replace the '{' and '}' with '<' and '>' respectively
return proxy_sub_path.replace(LEFT_BRACKET, LEFT_ANGLE_BRACKET).replace(RIGHT_BRACKET, RIGHT_ANGLE_BRACKET)
@staticmethod
def convert_path_to_api_gateway(path):
"""
Converts a Path from a Flask defined path to one that is accepted by Api Gateway
Examples:
'/id/<id>' => '/id/{id}'
'/<path:proxy>' => '/{proxy+}'
:param str path: Path to convert to Api Gateway defined path
:return str: Path representing an Api Gateway path
"""
proxy_sub_path = FLASK_TO_APIGW_REGEX.sub(PROXY_PATH_PARAMS, path)
# Replace the '<' and '>' with '{' and '}' respectively
return proxy_sub_path.replace(LEFT_ANGLE_BRACKET, LEFT_BRACKET).replace(RIGHT_ANGLE_BRACKET, RIGHT_BRACKET)
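# Usage sketch (illustrative round trip):
#   PathConverter.convert_path_to_flask("/id/{id}/user/{proxy+}")
#     -> "/id/<id>/user/<path:proxy>"
#   PathConverter.convert_path_to_api_gateway("/id/<id>/user/<path:proxy>")
#     -> "/id/{id}/user/{proxy+}"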
|
/scf-0.2.8.tar.gz/scf-0.2.8/tcfcli/cmds/local/libs/apigw/path_converter.py
| 0.742702 | 0.226121 |
path_converter.py
|
pypi
|
from .tcsam_macro import TcSamMacro as macro
from .tcsam_macro import TriggerMacro as trmacro
apigw_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "tcsam.ns.func.event.apigw",
"type": "object",
"properties": {
macro.Type: {"const": macro.TrApiGw},
macro.Properties: {
"type": "object",
"properties": {
trmacro.StageName: {
"type": "string",
"enum": ["test", "prepub", "release"]
},
trmacro.HttpMethod: {
"type": "string",
"enum": ["ANY", "GET", "POST", "PUT", "DELETE", "HEAD"]
},
trmacro.IntegratedResp: {
"type": "boolean",
},
trmacro.Enable: {
"enum": ["OPEN", "CLOSE", True, False]
}
}
}
},
"required": [macro.Type, macro.Properties],
"additionalProperties": False
}
timer_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "tcsam.ns.func.event.timer",
"type": "object",
"properties": {
macro.Type: {"const": macro.TrTimer},
macro.Properties: {
"type": "object",
"properties": {
trmacro.CronExp: {"type": "string"},
trmacro.Enable: {
"enum": ["OPEN", "CLOSE", True, False]
},
trmacro.Message: {
"type": "string"
}
},
"required": [trmacro.CronExp],
},
},
"required": [macro.Type, macro.Properties],
"additionalProperties": False,
}
cmq_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "tcsam.ns.func.event.cmq",
"type": "object",
"properties": {
macro.Type: {"const": macro.TrCMQ},
macro.Properties: {
"type": ["object", "null"],
"properties": {
"Name": {"type": "string"},
trmacro.Enable: {
"enum": ["OPEN", "CLOSE", True, False]
}
}
}
},
"required": [macro.Type, macro.Properties],
"additionalProperties": False
}
cos_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "tcsam.ns.func.event.cos",
"type": "object",
"properties": {
macro.Type: {"const": macro.TrCOS},
macro.Properties: {
"type": "object",
"properties": {
trmacro.Filter: {
"type": "object",
"properties": {
trmacro.Prefix: {"type": "string"},
trmacro.Suffix: {"type": "string"}
}
},
trmacro.Bucket: {"type": "string"},
macro.Events: {"type": "string"},
trmacro.Enable: {
"enum": ["OPEN", "CLOSE", True, False]
}
},
"required": [macro.Events]
}
},
"required": [macro.Type, macro.Properties],
"additionalProperties": False
}
ckafka_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "tcsam.ns.func.event.ckafka",
"type": "object",
"properties": {
macro.Type: {"const": macro.TrCKafka},
macro.Properties: {
"type": "object",
"properties": {
trmacro.Name: {"type": "string"},
trmacro.Topic: {"type": "string"},
trmacro.MaxMsgNum: {"type": "integer"},
trmacro.Offset: {"type": "string"},
trmacro.Enable: {
"enum": ["OPEN", "CLOSE", True, False]
}
},
"required": [trmacro.Name, trmacro.Topic, trmacro.MaxMsgNum]
}
},
"required": [macro.Type, macro.Properties],
"additionalProperties": False
}
|
/scf-0.2.8.tar.gz/scf-0.2.8/tcfcli/common/tcsam/event.py
| 0.408631 | 0.343452 |
event.py
|
pypi
|
from .event import apigw_schema
from .event import cmq_schema
from .event import timer_schema
from .event import cos_schema
from .event import ckafka_schema
from .tcsam_macro import TcSamMacro as macro
func_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "tcsam.resource.ns.func",
"type": "object",
"properties": {
macro.Type: {"const": "TencentCloud::Serverless::Function"},
macro.Properties: {
"type": "object",
"properties": {
macro.CodeUri: {"type": ["string", "object"]},
macro.Desc: {"type": "string"},
macro.Role: {"type": "string"},
macro.Envi: {
"type": "object",
"properties": {
macro.Vari: {
"type": ["object", "null"],
"properties": {},
"additionalProperties": {"type": "string"}
}
},
"required": [macro.Vari],
"additionalProperties": False
},
macro.Type: {"type": "string",
"enum": ["Event", "HTTP", "Service"]
},
macro.Handler: {"type": "string"},
macro.MemSize: {"type": "integer", "exclusiveMinimum": 0},
macro.Runtime: {
"type": "string",
"enum": ["Python2.7", "Python3.6", "Nodejs6.10", "Nodejs8.9", "Nodejs10.15", "Nodejs12.16",
"Php5", "Php7", "Go1", "Java8", "python2.7", "python3.6",
"nodejs6.10", "nodejs8.9", "nodejs10.15", "nodejs12.16", "php5", "php7", "go1", "java8"] #, "Nodejs8.9-service"]
},
macro.Timeout: {"type": "integer", "exclusiveMinimum": 0},
macro.Events: {
"type": ["object", "null"],
"properties": {},
"additionalProperties": {
"type": "object",
"oneOf": [apigw_schema, cos_schema, timer_schema, cmq_schema, ckafka_schema]
}
},
macro.VpcConfig: {
"type": "object",
"properties":{
macro.VpcId: {"type": "string"},
macro.SubnetId: {"type": "string"}
},
"additionalProperties": False
},
macro.LocalZipFile: {"type": "string"}
},
"required": [macro.Handler, macro.Runtime, macro.CodeUri],
"additionalProperties": False
}
},
"required": [macro.Type, macro.Properties],
"additionalProperties": False
}
|
/scf-0.2.8.tar.gz/scf-0.2.8/tcfcli/common/tcsam/function.py
| 0.406273 | 0.207716 |
function.py
|
pypi
|
from datetime import datetime
import json
import os
import random
from warnings import warn
class LocalDataManager:
_runinfofile = 'runinfo.json'
_samplesheetsfile = 'samplesheets.json'
_solexarunsfile = 'solexaruns.json'
_solexaflowcellsfile = 'solexaflowcells.json'
_pipelinerunsfile = 'pipelineruns.json'
_laneresultsfile = 'laneresults.json'
_mapperresultsfile = 'mapperresults.json'
_testdatadir = '../testdata'
def __init__(self):
self._runinfo = {}
self._samplesheets = {}
self._solexaruns = {}
self._solexaflowcells = {}
self._pipelineruns = {}
self._laneresults = {}
self._mapperresults = {}
self._loadall()
def getruninfo(self, run=None):
return self._runinfo.get(run)
def getsamplesheet(self, run=None, lane=None):
run = self._samplesheets.get(run)
if lane:
lane = str(lane)
return run.get(lane)
else:
return run
def showsolexarun(self, id=None):
return self._solexaruns.get(id)
def showsolexaflowcell(self, id=None):
return self._solexaflowcells.get(id)
def showpipelinerun(self, id=None):
"""
Args : id - Pipeline Run ID
"""
return self._pipelineruns.get(id)
def showlaneresult(self, id=None):
"""
Args : id - a Solexa Lane Result ID. For example, given the run 141117_MONK_0387_AC4JCDACXX, click on the analysis link with the date of '2014-11-30 20:22:00 -0800'.
Then on the resulting page, find the Analysis Results table. In the Details columns, those 'View' links take you to a page that refers to a lane result.
The lane result ID number is present at the end of the URL in the browser.
"""
return self._laneresults.get(id)
def showmapperresult(self, id=None):
return self._mapperresults.get(id)
def indexsolexaruns(self, run=None):
run_id =self.getrunid(run)
solexa_run = self._solexaruns.get(run_id)
if solexa_run is None:
return {}
else:
return solexa_run
def indexpipelineruns(self, run=None):
"""
Finds all pipeline runs for a given run name from the test file, and puts them into a dict keyed by the pipeline run id
and valued by a dict being with the metadata on the pipeline run.
"""
run_id =self.getrunid(run)
found_pipelineruns = {}
for id, pipelinerun in self._pipelineruns.iteritems():
if str(pipelinerun.get('solexa_run_id')) == str(run_id):
found_pipelineruns[str(pipelinerun.get('id'))] = pipelinerun
return found_pipelineruns
def indexlaneresults(self, run, lane=None, barcode=None, readnumber=None):
"""
Function : Finds all lane results for a given run name. Doesn't yet support filtering for a particular
lane, barcode, and readnumber. Puts retrieved lane results into a dict keyed by the lane result ID
and valued by a dict being the lane results for the particular barcode and readnumber retrieved.
"""
laneids = self._getlaneids(run)
found_laneresults = {}
for id, laneresult in self._laneresults.iteritems():
if str(laneresult.get('solexa_lane_id')) in laneids:
found_laneresults[str(laneresult.get('id'))] = laneresult
#TODO add other filters for lane, barcode, readnumber
return found_laneresults
def indexmapperresults(self, run=None):
laneresultids = self._getlaneresultids(run)
found_mapperresults = {}
for id, mapperresult in self._mapperresults.iteritems():
if str(mapperresult.get('dataset_id')) in laneresultids:
found_mapperresults[id] = mapperresult
return found_mapperresults
def createpipelinerun(self, run_id, lane, paramdict=None):
id =self._getrandomid()
pipelinerun = {
'id': id,
'solexa_run_id': run_id,
'started': True,
'active': True,
'finished': None,
'start_time':str(datetime.now()),
'created_at':str(datetime.now()),
'pass_read_count': None,
}
if paramdict:
pipelinerun.update(paramdict)
self.addpipelinerun(id, pipelinerun)
return pipelinerun
def createlaneresult(self, lane_id, paramdict):
id =self._getrandomid()
laneresult = {'id': id,
'solexa_lane_id': lane_id,
'solexa_pipeline_run_id': None,
'created_at': str(datetime.now()),
'active': True,
'codepoint': None,
}
laneresult.update(paramdict)
self.addlaneresult(id, laneresult)
return laneresult
def createmapperresult(self, paramdict):
id =self._getrandomid()
mapperresult = { 'id': id,
'created_at': str(datetime.now()),
'active': True
}
mapperresult.update(paramdict)
self.addmapperresult(id, mapperresult)
return mapperresult
def updatesolexarun(self, id, paramdict):
id=str(id)
try:
self._solexaruns.get(id).update(paramdict)
except:
return None
return self.showsolexarun(id)
def updatesolexaflowcell(self, id, paramdict):
id=str(id)
try:
self._solexaflowcells.get(id).update(paramdict)
except:
return None
return self.showsolexaflowcell(id)
def updatepipelinerun(self, id, paramdict):
id =str(id)
try:
self._pipelineruns.get(id).update(paramdict)
except:
return None
return self.showpipelinerun(id)
def updatelaneresult(self, id, paramdict):
id =str(id)
try:
self._laneresults.get(id).update(paramdict)
except:
return None
return self.showlaneresult(id)
def updatemapperresult(self, id, paramdict):
id =str(id)
try:
self._mapperresults.get(id).update(paramdict)
except:
return None
return self.showmapperresult(id)
def _getrandomid(self):
# High enough min to exclude valid ids in LIMS
# Large enough range to make repetition vanishingly improbable
return random.randint(1e12,2e12)
def deletelaneresults(self, run, lane):
# TODO
raise Exception("Todo. This method hasn't been implemented for local connection yet.")
def addruninfo(self, run, runinfo):
self._runinfo[run] = runinfo
def addrun(self, id, run):
self._runs[str(id)] = run
def addsamplesheet(self, run, samplesheet, lane=None):
# lane = None means samplesheet for all lanes.
run = self._samplesheets.setdefault(run, {})
run[lane] = samplesheet
def addsolexarun(self, id, solexarun):
self._solexaruns[str(id)] = solexarun
def addsolexaflowcell(self, id, solexaflowcell):
self._solexaflowcells[str(id)] = solexaflowcell
def addpipelinerun(self, id, pipelinerun):
self._pipelineruns[str(id)] = pipelinerun
def addlaneresult(self, id, laneresult):
self._laneresults[str(id)] = laneresult
def addmapperresult(self, id, mapperresult):
self._mapperresults[str(id)] = mapperresult
def addsolexaruns(self, solexaruns):
for id, solexarun in solexaruns.iteritems():
self.addsolexarun(id, solexarun)
def addsolexaflowcells(self, solexaflowcells):
for id, solexaflowcell in solexaflowcells.iteritems():
self.addsolexaflowcell(id, solexaflowcell)
def addpipelineruns(self, pipelineruns):
for id, pipelinerun in pipelineruns.iteritems():
self.addpipelinerun(id, pipelinerun)
def addlaneresults(self, laneresults):
for id, laneresult in laneresults.iteritems():
self.addlaneresult(id, laneresult)
def addmapperresults(self, mapperresults):
for id, mapperresult in mapperresults.iteritems():
self.addmapperresult(id, mapperresult)
def getrunid(self, run):
try:
return str(self.getruninfo(run).get('id'))
except:
return None
def getlaneid(self, run, lane):
runinfo = self.getruninfo(run)
try:
id =runinfo.get('run_info').get('lanes').get(str(lane)).get('id')
except:
return None
return id
def _getlaneresultids(self, run):
laneresultids = []
for laneresult in self.indexlaneresults(run).values():
laneresultids.append(str(laneresult.get('id')))
return laneresultids
def _getlaneids(self, run_name):
runinfo = self.getruninfo(run_name)
try:
lanes = runinfo.get('run_info').get('lanes')
except:
return None
laneids = []
for lane in lanes.values(): #each value is a dict from the lane
laneids.append(str(lane.get('id')))
return laneids
def writeruninfotodisk(self):
self._writetodisk(self._runinfo, self._runinfofile)
def writesamplesheetstodisk(self):
self._writetodisk(self._samplesheets, self._samplesheetsfile)
def writesolexarunstodisk(self):
self._writetodisk(self._solexaruns, self._solexarunsfile)
def writesolexaflowcellstodisk(self):
self._writetodisk(self._solexaflowcells, self._solexaflowcellsfile)
def writepipelinerunstodisk(self):
self._writetodisk(self._pipelineruns, self._pipelinerunsfile)
def writelaneresultstodisk(self):
self._writetodisk(self._laneresults, self._laneresultsfile)
def writemapperresultstodisk(self):
self._writetodisk(self._mapperresults, self._mapperresultsfile)
def _writetodisk(self, info, datafile):
fullfilename = self._fullpath(datafile)
if os.path.exists(fullfilename):
os.remove(fullfilename)
with open(fullfilename,'w') as fp:
fp.write(json.dumps(info, sort_keys=True, indent=4, separators=(',', ': ')))
def _loadall(self):
self._loadruninfo()
self._loadsamplesheets()
self._loadsolexaruns()
self._loadsolexaflowcells()
self._loadpipelineruns()
self._loadlaneresults()
self._loadmapperresults()
def _loadruninfo(self):
self._runinfo = self._load(self._runinfofile)
def _loadsamplesheets(self):
self._samplesheets = self._load(self._samplesheetsfile)
def _loadsolexaruns(self):
self._solexaruns = self._load(self._solexarunsfile)
def _loadsolexaflowcells(self):
self._solexaflowcells = self._load(self._solexaflowcellsfile)
def _loadpipelineruns(self):
self._pipelineruns = self._load(self._pipelinerunsfile)
def _loadlaneresults(self):
self._laneresults = self._load(self._laneresultsfile)
def _loadmapperresults(self):
self._mapperresults = self._load(self._mapperresultsfile)
def _fullpath(self, infile):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), self._testdatadir, infile)
def _load(self, datafile):
try:
with open(self._fullpath(datafile)) as fp:
data = json.load(fp)
except (ValueError, IOError):
warn("Could not load testdata from %s" % datafile)
data = {}
return data
def testconnection(self):
# No-op. This mirrors the same method in remote to test a valid http connection.
pass
|
/scgpm_seqresults_dnanexus-0.5.0-py3-none-any.whl/scgpm_lims/components/local.py
| 0.507812 | 0.174868 |
local.py
|
pypi
|
import re
# These are convenience classes. You can work directly with the Connection class
# and the data objects that it returns, but any methods for working with those
# data objects live here.
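# A minimal sketch of the intended usage (the import path and run name below are
# placeholders, not a tested example):
#
#   from scgpm_lims import Connection
#   conn = Connection()
#   run_info = RunInfo(conn, '141117_MONK_0387_AC4JCDACXX')
#   platform = run_info.get_sequencing_platform()   # e.g. 'HiSeq2000'
#   status = run_info.get_solexa_run_status()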
class SolexaRun:
STATUS_SEQUENCING = 'sequencing'
STATUS_SEQUENCING_DONE = 'sequencing_done'
STATUS_SEQUENCING_FAILED = 'sequencing_failed'
STATUS_SEQUENCING_EXCEPTION = 'sequencing_exception'
STATUS_PREPROCESSING = 'preprocessing'
class SolexaFlowCell:
STATUS_INCOMPLETE = 'incomplete'
STATUS_ASSIGNED = 'assigned'
STATUS_CLUSTERING = 'clustering'
STATUS_SEQUENCING = 'sequencing'
STATUS_ANALYZING = 'analyzing'
STATUS_REVIEWING = 'reviewing'
STATUS_DONE = 'done'
STATUS_CANCELLED = 'cancelled'
class RunInfo:
def __init__(self, conn, run):
self.conn = conn
self.run = run
self._refresh()
def _refresh(self):
obj = self.conn.getruninfo(run=self.run)
self.data = obj['run_info']
self.solexarunid = obj['id']
def get_sequencing_platform(self):
"""
Figures out the platform of the sequencing run.
Currently recognizes the MiSeq, HiSeq2000, and HiSeq4000 platforms.
Raises : Exception if the platform is not recognized.
"""
platform = self.data["platform"]
if platform == "miseq":
platform = "MiSeq"
elif platform == "hiseq4000":
platform == "HiSeq4000"
elif platform == "hiseq2000":
platform == "HiSeq2000"
else:
raise Exception("Unknown platform {platform} for sequencing run {run}".format(platform=platform,run=self.run))
return platform
def get_solexa_run_status(self):
return self.data['sequencing_run_status']
def get_flow_cell_status(self):
return self.data['flow_cell_status']
def get_solexa_run_name(self):
return self.data['run_name']
def get_solexa_run_id(self):
return self.solexarunid
def get_sequencing_instrument(self):
return self.data['sequencing_instrument']
def get_data_volume(self):
return self.data['data_volume']
def get_sequencer_software(self):
return self.data['seq_software']
def is_paired_end(self):
return self.data['paired_end']
def has_index_read(self):
return self.data['index_read']
def get_read1_cycles(self):
return self.data.get('read1_cycles')
def get_read2_cycles(self):
return self.data.get('read2_cycles')
def get_solexa_flow_cell_id(self):
return self.data['flow_cell_id']
def get_lane(self,lane):
lane = str(lane)
return self.data['lanes'][lane]
def get_pipeline_run(self, lane=None, status='done'):
VALID_STATA = ['done', 'inprogress', 'new']
if status not in VALID_STATA:
raise Exception('Invalid pipeline run status "%s" was requested. Valid values are %s.'
% (status, VALID_STATA))
done = {}
new = {}
inprogress = {}
for run_id, run in self.data['pipeline_runs'].iteritems():
if run['finished'] == True:
done[run_id] = run
elif not run['started'] and not run['finished']:
new[run_id] = run
elif run['started'] and not run['finished']:
inprogress[run_id] = run
def _getlatest(pipeline_runs, status):
if len(pipeline_runs.keys()) == 0:
raise Exception("No pipeline runs found with status %s" % status)
run_id = max(pipeline_runs.keys())
run = pipeline_runs[run_id]
return (run_id, run)
if status == 'done':
pipeline_runs = done
elif status == 'new':
pipeline_runs = new
elif status == 'inprogress':
pipeline_runs = inprogress
return _getlatest(pipeline_runs, status)
def has_status_sequencing_failed(self):
return self.get_solexa_run_status() == SolexaRun.STATUS_SEQUENCING_FAILED
def is_analysis_done(self):
return self.data['analysis_done']
def set_flags_for_sequencing_failed(self):
solexarunupdate = {
'sequencer_done': True,
'analysis_done': True,
'dnanexus_done': False,
'notification_done': True,
'archiving_done': True
}
self.conn.updatesolexarun(self.get_solexa_run_id(), solexarunupdate)
solexaflowcellupdate = {
'flow_cell_status': SolexaFlowCell.STATUS_DONE
}
self.conn.updatesolexaflowcell(self.get_solexa_flow_cell_id(), solexaflowcellupdate)
self._refresh()
def set_flags_for_sequencing_finished_analysis_started(self):
solexarunupdate = {
'sequencer_done': True
}
self.conn.updatesolexarun(self.get_solexa_run_id(), solexarunupdate)
solexaflowcellupdate = {
'flow_cell_status': SolexaFlowCell.STATUS_ANALYZING
}
self.conn.updatesolexaflowcell(self.get_solexa_flow_cell_id(), solexaflowcellupdate)
self._refresh()
|
/scgpm_seqresults_dnanexus-0.5.0-py3-none-any.whl/scgpm_lims/components/models.py
| 0.612426 | 0.184859 |
models.py
|
pypi
|
# scGPT
This is the official codebase for **scGPT: Towards Building a Foundation Model for Single-Cell Multi-omics Using Generative AI**.
[Preprint](https://www.biorxiv.org/content/10.1101/2023.04.30.538439)
[Documentation](https://scgpt.readthedocs.io/en/latest/)
**UPDATE**: We have released several new pretrained scGPT checkpoints. Please see the [Pretrained scGPT checkpoints](#pretrained-scGPT-checkpoints) section for more details.
## Installation
scGPT works with Python >= 3.7 and R >= 3.6.1. Please make sure you have the correct versions of Python and R installed before installation.
scGPT is available on PyPI. To install scGPT, run the following command:
```bash
$ pip install scgpt
```
[Optional] We recommend using [wandb](https://wandb.ai/) for logging and visualization.
```bash
$ pip install wandb
```
For development, we use the [Poetry](https://python-poetry.org/) package manager. To install Poetry, follow the instructions [here](https://python-poetry.org/docs/#installation).
```bash
$ git clone this-repo-url
$ cd scGPT
$ poetry install
```
**Note**: The `flash-attn` dependency usually requires a specific GPU and CUDA version. If you encounter any issues, please refer to the [flash-attn](https://github.com/HazyResearch/flash-attention/tree/main) repository for installation instructions. As of May 2023, we recommend using CUDA 11.7 and flash-attn<1.0.5 due to various issues reported with installing newer versions of flash-attn.
## Pretrained scGPT Model Zoo
Here is the list of pretrained models. Please find the links for downloading the checkpoint folders. We recommend using the `whole-human` model for most applications by default. If your fine-tuning dataset shares similar cell type context with the training data of the organ-specific models, these models can usually demonstrate competitive performance as well.
| Model name | Description | Download |
| :------------------------ | :------------------------------------------------------ | :------------------------------------------------------------------------------------------- |
| whole-human (recommended) | Pretrained on 33 million normal human cells. | [link](https://drive.google.com/drive/folders/1oWh_-ZRdhtoGQ2Fw24HP41FgLoomVo-y?usp=sharing) |
| brain | Pretrained on 13.2 million brain cells. | [link](https://drive.google.com/drive/folders/1vf1ijfQSk7rGdDGpBntR5bi5g6gNt-Gx?usp=sharing) |
| blood | Pretrained on 10.3 million blood and bone marrow cells. | [link](https://drive.google.com/drive/folders/1kkug5C7NjvXIwQGGaGoqXTk_Lb_pDrBU?usp=sharing) |
| heart | Pretrained on 1.8 million heart cells | [link](https://drive.google.com/drive/folders/1GcgXrd7apn6y4Ze_iSCncskX3UsWPY2r?usp=sharing) |
| lung | Pretrained on 2.1 million lung cells | [link](https://drive.google.com/drive/folders/16A1DJ30PT6bodt4bWLa4hpS7gbWZQFBG?usp=sharing) |
| kidney | Pretrained on 814 thousand kidney cells | [link](https://drive.google.com/drive/folders/1S-1AR65DF120kNFpEbWCvRHPhpkGK3kK?usp=sharing) |
| pan-cancer | Pretrained on 5.7 million cells of various cancer types | [link](https://drive.google.com/drive/folders/13QzLHilYUd0v3HTwa_9n4G4yEF-hdkqa?usp=sharing) |
## Fine-tune scGPT for scRNA-seq integration
Please see our example code in [examples/finetune_integration.py](examples/finetune_integration.py). By default, the script assumes the scGPT checkpoint folder stored in the `examples/save` directory.
## To-do-list
- [x] Upload the pretrained model checkpoint
- [x] Publish to pypi
- [ ] Provide the pretraining code with generative attention masking
- [ ] Finetuning examples for multi-omics integration, cell type annotation, perturbation prediction, cell generation
- [x] Example code for Gene Regulatory Network analysis
- [x] Documentation website with readthedocs
- [ ] Bump up to pytorch 2.0
- [x] New pretraining on larger datasets
- [ ] Reference mapping example
- [ ] Publish to huggingface model hub
## Contributing
We greatly welcome contributions to scGPT. Please submit a pull request if you have any ideas or bug fixes. We also welcome any issues you encounter while using scGPT.
## Acknowledgements
We sincerely thank the authors of following open-source projects:
- [flash-attention](https://github.com/HazyResearch/flash-attention)
- [scanpy](https://github.com/scverse/scanpy)
- [scvi-tools](https://github.com/scverse/scvi-tools)
- [scib](https://github.com/theislab/scib)
- [datasets](https://github.com/huggingface/datasets)
- [transformers](https://github.com/huggingface/transformers)
## Citing scGPT
```bibtex
@article{cui2023scGPT,
title={scGPT: Towards Building a Foundation Model for Single-Cell Multi-omics Using Generative AI},
author={Cui, Haotian and Wang, Chloe and Maan, Hassaan and Pang, Kuan and Luo, Fengning and Wang, Bo},
journal={bioRxiv},
year={2023},
publisher={Cold Spring Harbor Laboratory}
}
```
|
/scGPT-0.1.3.tar.gz/scGPT-0.1.3/README.md
| 0.517815 | 0.97631 |
README.md
|
pypi
|
# scgraph
[](https://badge.fury.io/py/scgraph)
[](https://opensource.org/licenses/MIT)
Supply chain graph package for Python

## Documentation
Getting Started: https://github.com/connor-makowski/scgraph
Low Level: https://connor-makowski.github.io/scgraph/core.html
## Key Features
- Calculate the shortest path between two points on earth using a latitude / longitude pair
- Inputs:
- A latitude / longitude pair for the origin
- A latitude / longitude pair for the destination
- Calculation:
- Algorithms:
- Dijkstra's algorithm (Modified for sparse networks)
- Modified to support sparse network data structures
- Makowski's Modified Sparse Dijkstra algorithm
- Modified for O(n) performance on particularly sparse networks
- Possible future support for other algorithms
- Distances:
- Uses the [haversine formula](https://en.wikipedia.org/wiki/Haversine_formula) to calculate the distance between two points on earth (see the illustrative sketch after this list)
- Returns:
- `path`:
- A list of dictionaries (`latitude` and `longitude`) that make up the shortest path
- `length`:
- The distance in kilometers between the two points
- Antimeridian support
- Arbitrary start and end points
- Arbitrary network data sets
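The haversine distance itself reduces to a few lines of Python. The sketch below is only an illustration of the formula referenced above, not scgraph's internal implementation; the function name and the mean earth radius constant are assumptions.
```py
from math import radians, sin, cos, asin, sqrt

def haversine_km(lat1, lon1, lat2, lon2):
    # Great-circle distance between two latitude / longitude points, in kilometers
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371 * asin(sqrt(a))  # 6371 km is an approximate mean earth radius

# Straight-line (not maritime) distance between Shanghai and Savannah
print(round(haversine_km(31.23, 121.47, 32.08, -81.09), 1))
```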
## Setup
Make sure you have Python 3.6.x (or higher) installed on your system. You can download it [here](https://www.python.org/downloads/).
## Installation
```
pip install scgraph
```
## Use with Google Colab
See the example [here](https://colab.research.google.com/github/connor-makowski/scgraph/blob/main/example.ipynb)
# Getting Started
## Basic Usage
Get the shortest path between two points on earth using a latitude / longitude pair
In this case, calculate the shortest maritime path between Shanghai, China and Savannah, Georgia, USA.
```py
# Use a maritime network geograph
from scgraph.geographs.marnet import marnet_geograph
# Get the shortest path between the two points
output = marnet_geograph.get_shortest_path(
origin_node={"latitude": 31.23,"longitude": 121.47},
destination_node={"latitude": 32.08,"longitude": -81.09}
)
print('Length: ',output['length']) #=> Length: 19596.4653
```
In the above example, the `output` variable is a dictionary with three keys: `length`, `path` and `coordinate_path`.
- `length`: The distance in kilometers between the two points
- `path`: A list of keys (from the network data set) that make up the shortest path
- `coordinate_path`: A list of dictionaries (`latitude` and `longitude`) that make up the shortest path
To get the latitude / longitude pairs that make up the shortest path, as a list of lists, you could do something like the following:
```py
# Use a maritime network geograph
from scgraph.geographs.marnet import marnet_geograph
# Get the shortest path between the two points
output = marnet_geograph.get_shortest_path(
origin_node={"latitude": 31.23,"longitude": 121.47},
destination_node={"latitude": 32.08,"longitude": -81.09}
)
print(str([[i['latitude'],i['longitude']] for i in output['coordinate_path']]))
```
## Advanced Usage
You can specify your own custom graphs for direct access to the solving algorithms. This requires the use of the low level `Graph` class
```py
from scgraph import Graph
# Define a graph
# See the graph definitions here:
# https://connor-makowski.github.io/scgraph/core.html
graph = {
0:{1: 5, 2: 1},
1:{0: 5, 2: 2, 3: 1},
2:{0: 1, 1: 2, 3: 4, 4: 8},
3:{1: 1, 2: 4, 4: 3, 5: 6},
4:{2: 8, 3: 3},
5:{3: 6}
}
# Optional: Validate your graph
Graph.validate_graph(graph=graph)
# Get the shortest path between 0 and 5
output = Graph.dijkstra_makowski(graph=graph, origin_id=0, destination_id=5)
#=> {'path': [0, 2, 1, 3, 5], 'length': 10}
```
You can also use a slightly higher level `GeoGraph` class to work with latitude / longitude pairs
```py
from scgraph import GeoGraph
# Define nodes
# See the nodes definitions here:
# https://connor-makowski.github.io/scgraph/core.html
nodes = {
0: {"latitude": 0, "longitude": 0},
1: {"latitude": 0, "longitude": 1},
2: {"latitude": 1, "longitude": 0},
3: {"latitude": 1, "longitude": 1},
4: {"latitude": 1, "longitude": 2},
5: {"latitude": 2, "longitude": 1}
}
# Define a graph
# See the graph definitions here:
# https://connor-makowski.github.io/scgraph/core.html
graph = {
0:{1: 5, 2: 1},
1:{0: 5, 2: 2, 3: 1},
2:{0: 1, 1: 2, 3: 4, 4: 8},
3:{1: 1, 2: 4, 4: 3, 5: 6},
4:{2: 8, 3: 3},
5:{3: 6}
}
# Create a GeoGraph object
my_geograph = GeoGraph(nodes=nodes, graph=graph)
# Optional: Validate your graph
my_geograph.validate_graph()
# Optional: Validate your nodes
my_geograph.validate_nodes()
# Get the shortest path between two points
output = my_geograph.get_shortest_path(
origin_node = {'latitude': 0, 'longitude': 0},
destination_node = {'latitude': 2, 'longitude': 1}
)
#=>
# {
# "path": [6, 0, 2, 1, 3, 5, 7],
# "coordinate_path": [
# {'latitude': 0, 'longitude': 0},
# {'latitude': 0, 'longitude': 0},
# {'latitude': 1, 'longitude': 0},
# {'latitude': 0, 'longitude': 1},
# {'latitude': 1, 'longitude': 1},
# {'latitude': 2, 'longitude': 1},
# {'latitude': 2, 'longitude': 1}
# ],
# "length": 10
# }
```
## Included GeoGraphs
- marnet_geograph:
- What: A maritime network data set from searoute
- Use: `from scgraph.geographs.marnet import marnet_geograph`
- Attribution: [searoute](https://github.com/genthalili/searoute-py)
- oak_ridge_maritime_geograph:
- What: A maritime data set from the Oak Ridge National Laboratory campus
- Use: `from scgraph.geographs.oak_ridge_maritime import oak_ridge_maritime_geograph`
- Attribution: [Oak Ridge National Laboratory](https://www.ornl.gov/) with data from [Geocommons](http://geocommons.com/datasets?id=25)
- More to follow
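Each included geograph is expected to expose the same `get_shortest_path` interface shown in the examples above. A minimal sketch, assuming `oak_ridge_maritime_geograph` behaves like `marnet_geograph` (the returned length will differ because the underlying network differs):
```py
from scgraph.geographs.oak_ridge_maritime import oak_ridge_maritime_geograph

output = oak_ridge_maritime_geograph.get_shortest_path(
    origin_node={"latitude": 31.23, "longitude": 121.47},
    destination_node={"latitude": 32.08, "longitude": -81.09}
)
print('Length: ', output['length'])
```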
## Attributions and Thanks
Originally inspired by [searoute](https://github.com/genthalili/searoute-py) including the use of one of their [datasets](https://github.com/genthalili/searoute-py/blob/main/searoute/data/marnet_densified_v2_old.geojson) that has been modified to work properly with this package.
|
/scgraph-1.0.1.tar.gz/scgraph-1.0.1/README.md
| 0.728941 | 0.985129 |
README.md
|
pypi
|
import scanpy as sc
import numpy as np
import pandas as pd
import dgl
import torch
from .graph import construct_gene_graph, add_degree
def preprocess(adata, filter_min_counts=True, size_factors=True, normalize_input=False, logtrans_input=True):
if size_factors or normalize_input or logtrans_input:
adata.raw = adata.copy()
else:
adata.raw = adata
if filter_min_counts:
sc.pp.filter_genes(adata, min_cells=3)
sc.pp.filter_cells(adata, min_genes=200)
if size_factors:
sc.pp.normalize_per_cell(adata)
adata.obs['cs_factor'] = adata.obs.n_counts / np.median(adata.obs.n_counts)
else:
adata.obs['cs_factor'] = 1.0
if logtrans_input:
sc.pp.log1p(adata)
gs_factor = np.max(adata.X, axis=0, keepdims=True)
adata.var['gs_factor'] = gs_factor.reshape(-1)
if normalize_input:
sc.pp.scale(adata)
return adata
def make_graph(adata, raw_exp=False, gene_similarity=False):
X = adata.X
num_cells, num_genes = X.shape
# Make expression/train graph
num_nodes_dict = {'cell': num_cells, 'gene': num_genes}
exp_train_cell, exp_train_gene = np.where(X > 0)
unexp_edges = np.where(X == 0)
# expression edges
exp_edge_dict = {
('cell', 'exp', 'gene'): (exp_train_cell, exp_train_gene),
('gene', 'reverse-exp', 'cell'): (exp_train_gene, exp_train_cell)
}
coexp_edges, uncoexp_edges = None, None
if gene_similarity:
coexp_edges, uncoexp_edges = construct_gene_graph(X)
exp_edge_dict[('gene', 'co-exp', 'gene')] = coexp_edges
# expression encoder/decoder graph
enc_graph = dgl.heterograph(exp_edge_dict, num_nodes_dict=num_nodes_dict)
exp_edge_dict.pop(('gene', 'reverse-exp', 'cell'))
dec_graph = dgl.heterograph(exp_edge_dict, num_nodes_dict=num_nodes_dict)
# add degree to cell/gene nodes
add_degree(enc_graph, ['exp'] + (['co-exp'] if gene_similarity else []))
# If using the ZINB decoder, add size factors to cell/gene nodes
if raw_exp:
Raw = pd.DataFrame(adata.raw.X, index=list(adata.raw.obs_names), columns=list(adata.raw.var_names))
X = Raw[list(adata.var_names)].values
exp_value = X[exp_train_cell, exp_train_gene].reshape(-1,1)
dec_graph.nodes['cell'].data['cs_factor'] = torch.Tensor(adata.obs['cs_factor']).reshape(-1, 1)
dec_graph.nodes['gene'].data['gs_factor'] = torch.Tensor(adata.var['gs_factor']).reshape(-1, 1)
else:
## Scale the edge values of the bipartite graph to between 0 and 1
X = X / adata.var['gs_factor'].values
exp_value = X[exp_train_cell, exp_train_gene].reshape(-1, 1)
return adata, exp_value, enc_graph, dec_graph, unexp_edges, coexp_edges, uncoexp_edges
|
/data/data_utils.py
| 0.489503 | 0.459015 |
data_utils.py
|
pypi
|
import math
from django import template
from django.template import RequestContext
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from askbot.utils import functions
from askbot.utils.slug import slugify
from askbot.conf import settings as askbot_settings
register = template.Library()
GRAVATAR_TEMPLATE = (
'<a style="text-decoration:none" '
'href="%(user_profile_url)s"><img class="gravatar" '
'width="%(size)s" height="%(size)s" '
'src="%(gravatar_url)s/%(gravatar_hash)s'
'?s=%(size)s&d=%(gravatar_type)s&r=PG" '
'title="%(username)s" '
'alt="%(alt_text)s" /></a>')
@register.simple_tag
def gravatar(user, size):
"""
Creates an ``<img>`` for a user's Gravatar with a given size.
This tag can accept a User object, or a dict containing the
appropriate values.
"""
#todo: rewrite using get_from_dict_or_object
user_id = functions.get_from_dict_or_object(user, 'id')
slug = slugify(user.username)
user_profile_url = reverse(
'user_profile',
kwargs={'id':user_id, 'slug':slug}
)
#safe_username = template.defaultfilters.urlencode(username)
return mark_safe(GRAVATAR_TEMPLATE % {
'gravatar_url': askbot_settings.GRAVATAR_BASE_URL,
'user_profile_url': user_profile_url,
'size': size,
'gravatar_hash': functions.get_from_dict_or_object(user, 'gravatar'),
'gravatar_type': askbot_settings.GRAVATAR_TYPE,
'alt_text': _('%(username)s gravatar image') % {'username': user.username},
'username': functions.get_from_dict_or_object(user, 'username'),
})
@register.simple_tag
def get_tag_font_size(tags):
max_tag = 0
for tag in tags:
if tag.used_count > max_tag:
max_tag = tag.used_count
min_tag = max_tag
for tag in tags:
if tag.used_count < min_tag:
min_tag = tag.used_count
font_size = {}
for tag in tags:
font_size[tag.name] = tag_font_size(max_tag,min_tag,tag.used_count)
return font_size
@register.simple_tag
def tag_font_size(max_size, min_size, current_size):
"""
do a logarithmic mapping calculation for a proper size for the tag cloud
Algorithm from http://blogs.dekoh.com/dev/2007/10/29/choosing-a-good-font-size-variation-algorithm-for-your-tag-cloud/
"""
MAX_FONTSIZE = 10
MIN_FONTSIZE = 1
#avoid invalid calculation
if current_size == 0:
current_size = 1
try:
weight = (math.log10(current_size) - math.log10(min_size)) / (math.log10(max_size) - math.log10(min_size))
except Exception:
weight = 0
return int(MIN_FONTSIZE + round((MAX_FONTSIZE - MIN_FONTSIZE) * weight))
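# Illustrative worked example (values chosen here, not from the original source):
# tag_font_size(max_size=1000, min_size=1, current_size=10) gives
# weight = (log10(10) - log10(1)) / (log10(1000) - log10(1)) = 1/3,
# so the returned size is int(1 + round(9 * (1/3))) = 4.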
class IncludeJinja(template.Node):
"""http://www.mellowmorning.com/2010/08/24/"""
def __init__(self, filename, request_var):
self.filename = filename
self.request_var = template.Variable(request_var)
def render(self, context):
request = self.request_var.resolve(context)
jinja_template = get_template(self.filename)
return jinja_template.render(RequestContext(request, context))
@register.tag
def include_jinja(parser, token):
bits = token.contents.split()
#Check if a filename was given
if len(bits) != 3:
error_message = '%r tag requires the name of the ' + \
'template and the request variable'
raise template.TemplateSyntaxError(error_message % bits[0])
filename = bits[1]
request_var = bits[2]
#Remove quotes or raise error
if filename[0] in ('"', "'") and filename[-1] == filename[0]:
filename = filename[1:-1]
else:
raise template.TemplateSyntaxError('file name must be quoted')
return IncludeJinja(filename, request_var)
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/templatetags/extra_tags.py
| 0.40439 | 0.189352 |
extra_tags.py
|
pypi
|
from __future__ import print_function
import sys
from django.core.management.base import NoArgsCommand
from django.db import transaction
from askbot import signals
from askbot.utils import console
class NoArgsJob(NoArgsCommand):
"""Base class for a job command -
the one that runs the same operation on
sets of items - each item operation in its own
transaction and prints progress in % of items
completed
The subclass must implement __init__() method
where self.batches data structure must be defined as follows
(#the whole thing is a tuple
{#batch is described by a dictionary
'title': <string>,
'query_set': <query set for the items>,
'function': <function or callable that performs
an operation on a single item
and returns True if item was changed
False otherwise
item is given as argument
>,
'changed_count_message': <string with one %d placeholder>,
'nothing_changed_message': <string>
},
#more batch descriptions
)
"""
batches = ()
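# An illustrative sketch of what a subclass's __init__() might define; the model,
# callable, and messages below are hypothetical and not part of askbot itself:
#
#   def __init__(self):
#       self.batches = (
#           {
#               'title': 'Recalculating scores',
#               'query_set': SomeModel.objects.all(),
#               'function': recalculate_score,  # returns True if the item changed
#               'changed_count_message': 'Changed %d items',
#               'nothing_changed_message': 'Nothing changed'
#           },
#       )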
def handle_noargs(self, **options): # pylint: disable=unused-argument
"""handler function that removes all signal listeners
then runs the job and finally restores the listeners
"""
signal_data = signals.pop_all_db_signal_receivers()
self.run_command(**options)
signals.set_all_db_signal_receivers(signal_data)
def run_command(self, **options): # pylint: disable=unused-argument
"""runs the batches"""
for batch in self.batches:
self.run_batch(batch)
@classmethod
def run_batch(cls, batch):
"""runs the single batch
prints batch title
then loops through the query set
and prints progress in %
afterwards there will be a short summary
"""
sys.stdout.write(batch['title'].encode('utf-8'))
changed_count = 0
checked_count = 0
total_count = batch['query_set'].count()
if total_count == 0:
return
for item in batch['query_set'].all():
with transaction.atomic(): # pylint: disable=no-member
item_changed = batch['function'](item)
if item_changed:
changed_count += 1
checked_count += 1
console.print_progress(checked_count, total_count)
console.print_progress(checked_count, total_count)
if changed_count:
print(batch['changed_count_message'] % changed_count)
else:
print(batch['nothing_changed_message'])
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/management/base.py
| 0.430387 | 0.177704 |
base.py
|
pypi
|
from django.core.management.base import NoArgsCommand
from django.conf import settings as django_settings
from django.utils import translation
from askbot import const
from askbot.mail.messages import ModerationQueueNotification
from askbot.models import Activity
from askbot.models import User
from askbot.models.user import get_invited_moderators
def get_moderators():
"""Returns query set of admins and moderators"""
return User.objects.filter(askbot_profile__status__in=('d', 'm'))
def get_last_mod_alert_activity():
"""return latest moderation alert activity"""
atype = const.TYPE_ACTIVITY_MODERATION_ALERT_SENT
acts = Activity.objects.filter(activity_type=atype).order_by('-id')
count = len(acts)
if count == 0:
return None
last_act = acts[0]
if count > 1:
#get last moderation activity and delete all others
acts = acts.exclude(id=last_act.id)
acts.delete()
return last_act
def get_last_notified_user():
"""Get user that was modified last about the queue"""
last_act = get_last_mod_alert_activity()
if last_act:
return last_act.content_object
return None
def select_moderators_to_notify(candidates, num_needed):
"""Selects some nomber of moderators to send the message,
in order to avoid spamming many people"""
candidates_count = candidates.count()
#special case - if we need to notify at least as many
#moderators as are available, then we don't rotate them
#and notify all, b/c otherwise we would stop notifications
#because there are not enough moderators
if candidates_count <= num_needed:
return list(candidates)
last_notified = get_last_notified_user()
if last_notified is None:
return candidates[:num_needed]
mods = list(candidates.filter(id__gt=last_notified.id))
num_mods = len(mods)
if num_mods >= num_needed:
return mods[:num_needed]
#wrap around the end to the beginning
num_missing = num_needed - num_mods
more_mods = get_moderators().order_by('id')
more_mods = more_mods[:num_missing]
mods.extend(list(more_mods))
return mods
def select_last_moderator(mods):
"""Returns object with the largest .id value."""
return max(mods, key=lambda item: item.id)
def remember_last_moderator(user):
"""Save the the `user` as the one that was notified last"""
act = get_last_mod_alert_activity()
if act:
act.content_object = user
act.save()
else:
act = Activity(
user=user,
content_object=user,
activity_type=const.TYPE_ACTIVITY_MODERATION_ALERT_SENT
)
act.save()
class Command(NoArgsCommand):
"""The management command class"""
def handle_noargs(self, **options): #pylint: disable=unused-argument
"""Function that does the job of the management command"""
#get size of moderation queue
translation.activate(django_settings.LANGUAGE_CODE)
act_types = const.MODERATED_ACTIVITY_TYPES
queue = Activity.objects.filter(activity_type__in=act_types)
if queue.count() == 0:
return
#get moderators
mods = get_moderators().order_by('id')
mods = select_moderators_to_notify(mods, 3)
mods = set(mods)
all_mods = mods | get_invited_moderators()
if not all_mods:
return
for mod in all_mods:
email = ModerationQueueNotification({'user': mod})
email.send([mod,])
if not mods:
return
last_mod = select_last_moderator(mods)
remember_last_moderator(last_mod)
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/management/commands/askbot_send_moderation_alerts.py
| 0.584627 | 0.16896 |
askbot_send_moderation_alerts.py
|
pypi
|
from django.db import models
class Badge(models.Model):
id = models.IntegerField(primary_key=True)
class_type = models.IntegerField(null=True)
name = models.CharField(max_length=50, null=True)
description = models.TextField(null=True)
single = models.NullBooleanField(null=True)
secret = models.NullBooleanField(null=True)
tag_based = models.NullBooleanField(null=True)
command = models.TextField(null=True)
award_frequency = models.IntegerField(null=True)
class CloseReason(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=200, null=True)
description = models.CharField(max_length=256, null=True)
display_order = models.IntegerField(null=True)
class Comment2Vote(models.Model):
id = models.IntegerField(primary_key=True)
post_comment = models.ForeignKey('PostComment', related_name='Comment2Vote_by_post_comment_set', null=True)
vote_type = models.ForeignKey('VoteType', related_name='Comment2Vote_by_vote_type_set', null=True)
creation_date = models.DateTimeField(null=True)
user = models.ForeignKey('User', related_name='Comment2Vote_by_user_set', null=True)
ip_address = models.CharField(max_length=40, null=True)
user_display_name = models.CharField(max_length=40, null=True)
deletion_date = models.DateTimeField(null=True)
class FlatPage(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
url = models.CharField(max_length=128, null=True)
value = models.TextField(null=True)
content_type = models.CharField(max_length=50, null=True)
active = models.NullBooleanField(null=True)
use_master = models.NullBooleanField(null=True)
class Message(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='Message_by_user_set', null=True)
message_type = models.ForeignKey('MessageType', related_name='Message_by_message_type_set', null=True)
is_read = models.NullBooleanField(null=True)
creation_date = models.DateTimeField(null=True)
text = models.TextField(null=True)
post = models.ForeignKey('Post', related_name='Message_by_post_set', null=True)
class MessageType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class ModeratorMessage(models.Model):
id = models.IntegerField(primary_key=True)
message_type = models.ForeignKey('MessageType', related_name='ModeratorMessage_by_message_type_set', null=True)
creation_date = models.DateTimeField(null=True)
creation_ip_address = models.CharField(max_length=40, null=True)
text = models.TextField(null=True)
user = models.ForeignKey('User', related_name='ModeratorMessage_by_user_set', null=True)
post = models.ForeignKey('Post', related_name='ModeratorMessage_by_post_set', null=True)
deletion_date = models.DateTimeField(null=True)
deletion_user = models.ForeignKey('User', related_name='ModeratorMessage_by_deletion_user_set', null=True)
deletion_ip_address = models.CharField(max_length=40, null=True)
user_display_name = models.CharField(max_length=40, null=True)
class PostComment(models.Model):
id = models.IntegerField(primary_key=True)
post = models.ForeignKey('Post', related_name='PostComment_by_post_set', null=True)
text = models.TextField(null=True)
creation_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=15, null=True)
user = models.ForeignKey('User', related_name='PostComment_by_user_set', null=True)
user_display_name = models.CharField(max_length=30, null=True)
deletion_date = models.DateTimeField(null=True)
deletion_user = models.ForeignKey('User', related_name='PostComment_by_deletion_user_set', null=True)
score = models.IntegerField(null=True)
class PostHistoryType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class PostHistory(models.Model):
id = models.IntegerField(primary_key=True)
post_history_type = models.ForeignKey('PostHistoryType', related_name='PostHistory_by_post_history_type_set', null=True)
post = models.ForeignKey('Post', related_name='PostHistory_by_post_set', null=True)
revision_guid = models.CharField(max_length=64, null=True)
creation_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=40, null=True)
user = models.ForeignKey('User', related_name='PostHistory_by_user_set', null=True)
comment = models.CharField(max_length=400, null=True)
text = models.TextField(null=True)
user_display_name = models.CharField(max_length=40, null=True)
user_email = models.CharField(max_length=100, null=True)
user_website_url = models.CharField(max_length=200, null=True)
class Post2Vote(models.Model):
id = models.IntegerField(primary_key=True)
post = models.ForeignKey('Post', related_name='Post2Vote_by_post_set', null=True)
user = models.ForeignKey('User', related_name='Post2Vote_by_user_set', null=True)
vote_type = models.ForeignKey('VoteType', related_name='Post2Vote_by_vote_type_set', null=True)
creation_date = models.DateTimeField(null=True)
deletion_date = models.DateTimeField(null=True)
target_user = models.ForeignKey('User', related_name='Post2Vote_by_target_user_set', null=True)
target_rep_change = models.IntegerField(null=True)
voter_rep_change = models.IntegerField(null=True)
comment = models.CharField(max_length=150, null=True)
ip_address = models.CharField(max_length=40, null=True)
linked_post = models.ForeignKey('Post', related_name='Post2Vote_by_linked_post_set', null=True)
class Post(models.Model):
id = models.IntegerField(primary_key=True)
post_type = models.ForeignKey('PostType', related_name='Post_by_post_type_set', null=True)
creation_date = models.DateTimeField(null=True)
score = models.IntegerField(null=True)
view_count = models.IntegerField(null=True)
body = models.TextField(null=True)
owner_user = models.ForeignKey('User', related_name='Post_by_owner_user_set', null=True)
last_editor_user = models.ForeignKey('User', related_name='Post_by_last_editor_user_set', null=True)
last_edit_date = models.DateTimeField(null=True)
last_activity_date = models.DateTimeField(null=True)
last_activity_user = models.ForeignKey('User', related_name='Post_by_last_activity_user_set', null=True)
parent = models.ForeignKey('self', related_name='Post_by_parent_set', null=True)
accepted_answer = models.ForeignKey('self', related_name='Post_by_accepted_answer_set', null=True)
title = models.CharField(max_length=250, null=True)
tags = models.CharField(max_length=150, null=True)
community_owned_date = models.DateTimeField(null=True)
history_summary = models.CharField(max_length=150, null=True)
answer_score = models.IntegerField(null=True)
answer_count = models.IntegerField(null=True)
comment_count = models.IntegerField(null=True)
favorite_count = models.IntegerField(null=True)
deletion_date = models.DateTimeField(null=True)
closed_date = models.DateTimeField(null=True)
locked_date = models.DateTimeField(null=True)
locked_duration = models.IntegerField(null=True)
owner_display_name = models.CharField(max_length=40, null=True)
last_editor_display_name = models.CharField(max_length=40, null=True)
bounty_amount = models.IntegerField(null=True)
bounty_closes = models.DateTimeField(null=True)
bounty_closed = models.DateTimeField(null=True)
last_owner_email_date = models.DateTimeField(null=True)
class PostType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class SchemaVersion(models.Model):
version = models.IntegerField(null=True)
class Setting(models.Model):
id = models.IntegerField(primary_key=True)
key = models.CharField(max_length=256, null=True)
value = models.TextField(null=True)
class SystemMessage(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='SystemMessage_by_user_set', null=True)
creation_date = models.DateTimeField(null=True)
text = models.TextField(null=True)
deletion_date = models.DateTimeField(null=True)
deletion_user = models.ForeignKey('User', related_name='SystemMessage_by_deletion_user_set', null=True)
class Tag(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
count = models.IntegerField(null=True)
user = models.ForeignKey('User', related_name='Tag_by_user_set', null=True)
creation_date = models.DateTimeField(null=True)
is_moderator_only = models.NullBooleanField(null=True)
is_required = models.NullBooleanField(null=True)
aliases = models.CharField(max_length=200, null=True)
class ThemeResource(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
value = models.TextField(null=True)
content_type = models.CharField(max_length=50, null=True)
version = models.CharField(max_length=6, null=True)
class ThemeTextResource(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
value = models.TextField(null=True)
content_type = models.CharField(max_length=50, null=True)
class ThrottleBucket(models.Model):
id = models.IntegerField(primary_key=True)
type = models.CharField(max_length=256, null=True)
ip_address = models.CharField(max_length=64, null=True)
tokens = models.IntegerField(null=True)
last_update = models.DateTimeField(null=True)
class UserHistoryType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class UserHistory(models.Model):
id = models.IntegerField(primary_key=True)
user_history_type = models.ForeignKey('UserHistoryType', related_name='UserHistory_by_user_history_type_set', null=True)
creation_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=40, null=True)
user = models.ForeignKey('User', related_name='UserHistory_by_user_set', null=True)
comment = models.CharField(max_length=400, null=True)
user_display_name = models.CharField(max_length=40, null=True)
moderator_user = models.ForeignKey('User', related_name='UserHistory_by_moderator_user_set', null=True)
reputation = models.IntegerField(null=True)
class User2Badge(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='User2Badge_by_user_set', null=True)
badge = models.ForeignKey('Badge', related_name='User2Badge_by_badge_set', null=True)
date = models.DateTimeField(null=True)
comment = models.CharField(max_length=50, null=True)
class User2Vote(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='User2Vote_by_user_set', null=True)
vote_type = models.ForeignKey('VoteType', related_name='User2Vote_by_vote_type_set', null=True)
target_user = models.ForeignKey('User', related_name='User2Vote_by_target_user_set', null=True)
creation_date = models.DateTimeField(null=True)
deletion_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=40, null=True)
class User(models.Model):
id = models.IntegerField(primary_key=True)
user_type = models.ForeignKey('UserType', related_name='User_by_user_type_set', null=True)
open_id = models.CharField(max_length=200, null=True)
reputation = models.IntegerField(null=True)
views = models.IntegerField(null=True)
creation_date = models.DateTimeField(null=True)
last_access_date = models.DateTimeField(null=True)
has_replies = models.NullBooleanField(null=True)
has_message = models.NullBooleanField(null=True)
opt_in_email = models.NullBooleanField(null=True)
opt_in_recruit = models.NullBooleanField(null=True)
last_login_date = models.DateTimeField(null=True)
last_email_date = models.DateTimeField(null=True)
last_login_ip = models.CharField(max_length=15, null=True)
open_id_alt = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=100, null=True)
display_name = models.CharField(max_length=40, null=True)
display_name_cleaned = models.CharField(max_length=40, null=True)
website_url = models.CharField(max_length=200, null=True)
real_name = models.CharField(max_length=100, null=True)
location = models.CharField(max_length=100, null=True)
birthday = models.DateTimeField(null=True)
badge_summary = models.CharField(max_length=50, null=True)
about_me = models.TextField(null=True)
preferences_raw = models.TextField(null=True)
timed_penalty_date = models.DateTimeField(null=True)
guid = models.CharField(max_length=64, null=True)
phone = models.CharField(max_length=20, null=True)
password_id = models.IntegerField(null=True)
class UserType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class VoteType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class Password(models.Model):
id = models.IntegerField(primary_key = True)
password = models.CharField(max_length = 128)
salt = models.CharField(max_length = 32)
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/importers/stackexchange/models.py
| 0.527073 | 0.190479 |
models.py
|
pypi
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='LastVisitTime',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('at', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('message_type', models.SmallIntegerField(default=0, choices=[(0, b'email-like message, stored in the inbox'), (2, b'will be shown just once'), (1, b'will be shown until certain time')])),
('senders_info', models.TextField(default=b'')),
('headline', models.CharField(max_length=80)),
('text', models.TextField(help_text=b'source text for the message, e.g. in markdown format', null=True, blank=True)),
('html', models.TextField(help_text=b'rendered html of the message', null=True, blank=True)),
('sent_at', models.DateTimeField(auto_now_add=True)),
('last_active_at', models.DateTimeField(auto_now_add=True)),
('active_until', models.DateTimeField(null=True, blank=True)),
('parent', models.ForeignKey(related_name='children', blank=True, to='group_messaging.Message', null=True)),
('recipients', models.ManyToManyField(to='auth.Group')),
('root', models.ForeignKey(related_name='descendants', blank=True, to='group_messaging.Message', null=True)),
('sender', models.ForeignKey(related_name='group_messaging_sent_messages', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MessageMemo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.SmallIntegerField(default=0, choices=[(0, b'seen'), (1, b'archived'), (2, b'deleted')])),
('message', models.ForeignKey(related_name='memos', to='group_messaging.Message')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SenderList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('recipient', models.ForeignKey(to='auth.Group', unique=True)),
('senders', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UnreadInboxCounter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('count', models.PositiveIntegerField(default=0)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='messagememo',
unique_together=set([('user', 'message')]),
),
migrations.AddField(
model_name='lastvisittime',
name='message',
field=models.ForeignKey(to='group_messaging.Message'),
preserve_default=True,
),
migrations.AddField(
model_name='lastvisittime',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='lastvisittime',
unique_together=set([('user', 'message')]),
),
]
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/deps/group_messaging/migrations/0001_initial.py
| 0.622 | 0.165762 |
0001_initial.py
|
pypi
|
import logging
from django.conf import settings as django_settings
from django.contrib.auth.models import User
from django.forms import EmailField, ValidationError
from askbot.conf import settings as askbot_settings
from askbot.deps.django_authopenid.models import UserAssociation
from askbot.signals import user_registered
from askbot.utils.loading import load_module
LOG = logging.getLogger(__name__)
def split_name(full_name, name_format):
"""splits full name into first and last,
according to the order given in the name_format parameter"""
bits = full_name.strip().split()
if len(bits) == 1:
bits.append('')
elif len(bits) == 0:
bits = ['', '']
if name_format == 'first,last':
return bits[0], bits[1]
elif name_format == 'last,first':
return bits[1], bits[0]
else:
raise ValueError('Unexpected value of name_format')
def ldap_authenticate_default(username, password):
"""
Authenticate using ldap.
LDAP parameter setup is described in
askbot/doc/source/optional-modules.rst
See section about LDAP.
returns a dict with keys:
* first_name
* last_name
* ldap_username
* email (optional only if there is valid email)
* success - boolean, True if authentication succeeded
python-ldap must be installed
http://pypi.python.org/pypi/python-ldap/2.4.6
NOTE: if you are planning to implement a custom
LDAP authenticate function (python path to which can
be provided via the `ASKBOT_LDAP_AUTHENTICATE`
setting in the settings.py file) - implement
the function just like this - accepting user name
and password and returning dict with the same values.
The returned dictionary can contain additional values
that you might find useful.
"""
import ldap
user_information = None
user_info = {}#the return value
try:
ldap_session = ldap.initialize(askbot_settings.LDAP_URL)
#set protocol version
if askbot_settings.LDAP_PROTOCOL_VERSION == '2':
ldap_session.protocol_version = ldap.VERSION2
elif askbot_settings.LDAP_PROTOCOL_VERSION == '3':
ldap_session.protocol_version = ldap.VERSION3
else:
raise NotImplementedError('unsupported version of ldap protocol')
ldap.set_option(ldap.OPT_REFERRALS, 0)
#set extra ldap options, if given
if hasattr(django_settings, 'LDAP_EXTRA_OPTIONS'):
options = django_settings.LDAP_EXTRA_OPTIONS
for key, value in options:
if key.startswith('OPT_'):
ldap_key = getattr(ldap, key)
ldap.set_option(ldap_key, value)
else:
raise ValueError('Invalid LDAP option %s' % key)
#add optional "master" LDAP authentication, if required
master_username = getattr(django_settings, 'LDAP_LOGIN_DN', None)
master_password = getattr(django_settings, 'LDAP_PASSWORD', None)
login_name_field = askbot_settings.LDAP_LOGIN_NAME_FIELD
base_dn = askbot_settings.LDAP_BASE_DN
login_template = login_name_field + '=%s,' + base_dn
encoding = askbot_settings.LDAP_ENCODING
if master_username and master_password:
ldap_session.simple_bind_s(
master_username.encode(encoding),
master_password.encode(encoding)
)
user_filter = askbot_settings.LDAP_USER_FILTER_TEMPLATE % (
askbot_settings.LDAP_LOGIN_NAME_FIELD,
username
)
email_field = askbot_settings.LDAP_EMAIL_FIELD
get_attrs = [
email_field.encode(encoding),
login_name_field.encode(encoding)
#str(askbot_settings.LDAP_USERID_FIELD)
#todo: here we have a chance to get more data from LDAP
#maybe a point for some plugin
]
common_name_field = askbot_settings.LDAP_COMMON_NAME_FIELD.strip()
given_name_field = askbot_settings.LDAP_GIVEN_NAME_FIELD.strip()
surname_field = askbot_settings.LDAP_SURNAME_FIELD.strip()
if given_name_field and surname_field:
get_attrs.append(given_name_field.encode(encoding))
get_attrs.append(surname_field.encode(encoding))
elif common_name_field:
get_attrs.append(common_name_field.encode(encoding))
# search ldap directory for user
user_search_result = ldap_session.search_s(
askbot_settings.LDAP_BASE_DN.encode(encoding),
ldap.SCOPE_SUBTREE,
user_filter.encode(encoding),
get_attrs
)
if user_search_result: # User found in LDAP Directory
user_dn = user_search_result[0][0]
user_information = user_search_result[0][1]
ldap_session.simple_bind_s(user_dn, password.encode(encoding)) #raises INVALID_CREDENTIALS
ldap_session.unbind_s()
if given_name_field and surname_field:
last_name = user_information.get(surname_field, [''])[0]
first_name = user_information.get(given_name_field, [''])[0]
elif common_name_field:
common_name_format = askbot_settings.LDAP_COMMON_NAME_FIELD_FORMAT
common_name = user_information.get(common_name_field, [''])[0]
first_name, last_name = split_name(common_name, common_name_format)
user_info = {
'first_name': first_name,
'last_name': last_name,
'ldap_username': user_information[login_name_field][0],
'success': True
}
try:
email = user_information.get(email_field, [''])[0]
user_info['email'] = EmailField().clean(email)
except ValidationError:
user_info['email'] = ''
else:
user_info['success'] = False
except ldap.INVALID_CREDENTIALS as e:
user_info['success'] = False
except ldap.LDAPError as e:
LOG.error("LDAPError Exception")
LOG.exception(e)
user_info['success'] = False
except Exception as e:
LOG.error("Unexpected Exception Occurred")
LOG.exception(e)
user_info['success'] = False
return user_info
def ldap_create_user_default(user_info, request):
"""takes the result returned by the :func:`ldap_authenticate`
and returns a :class:`UserAssociation` object
"""
# create new user in local db
user = User()
user.username = user_info.get('django_username', user_info['ldap_username'])
user.set_unusable_password()
user.first_name = user_info['first_name']
user.last_name = user_info['last_name']
user.email = user_info['email']
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
user_registered.send(None, user=user, request=request)
LOG.info('Created New User : [{0}]'.format(user_info['ldap_username']))
assoc = UserAssociation()
assoc.user = user
assoc.openid_url = user_info['ldap_username'] + '@ldap'
assoc.provider_name = 'ldap'
assoc.save()
return assoc
LDAP_AUTH_FUNC_PATH = getattr(django_settings, 'LDAP_AUTHENTICATE_FUNCTION', None)
if LDAP_AUTH_FUNC_PATH:
ldap_authenticate = load_module(LDAP_AUTH_FUNC_PATH)
else:
ldap_authenticate = ldap_authenticate_default
LDAP_CREATE_FUNC_PATH = getattr(django_settings, 'LDAP_CREATE_USER_FUNCTION', None)
if LDAP_CREATE_FUNC_PATH:
ldap_create_user = load_module(LDAP_CREATE_FUNC_PATH)
else:
ldap_create_user = ldap_create_user_default
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/deps/django_authopenid/ldap_auth.py
| 0.409457 | 0.166845 |
ldap_auth.py
|
pypi
|
__version__ = "0.1.1"
__author__ = 'Joe Gregorio'
__email__ = "[email protected]"
__credits__ = ""
def parse_mime_type(mime_type):
"""Carves up a mime_type and returns a tuple of the
(type, subtype, params) where 'params' is a dictionary
of all the parameters for the media range.
For example, the media range 'application/xhtml;q=0.5' would
get parsed into:
('application', 'xhtml', {'q': '0.5'})
"""
parts = mime_type.split(";")
params = dict([tuple([s.strip() for s in param.split("=")])\
for param in parts[1:] ])
(type, subtype) = parts[0].split("/")
return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
"""Carves up a media range and returns a tuple of the
(type, subtype, params) where 'params' is a dictionary
of all the parameters for the media range.
For example, the media range 'application/*;q=0.5' would
get parsed into:
('application', '*', {'q': '0.5'})
In addition this function also guarantees that there
is a value for 'q' in the params dictionary, filling it
in with a proper default if necessary.
"""
(type, subtype, params) = parse_mime_type(range)
if not params.has_key('q') or not params['q'] or \
not float(params['q']) or float(params['q']) > 1\
or float(params['q']) < 0:
params['q'] = '1'
return (type, subtype, params)
def quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a given mime_type against
a list of media_ranges that have already been
parsed by parse_media_range(). Returns the
'q' quality parameter of the best match, 0 if no
match was found. This function behaves the same as quality()
except that 'parsed_ranges' must be a list of
parsed media ranges. """
best_fitness = -1
best_match = ""
best_fit_q = 0
(target_type, target_subtype, target_params) =\
parse_media_range(mime_type)
for (type, subtype, params) in parsed_ranges:
param_matches = reduce(lambda x, y: x+y, [1 for (key, value) in \
target_params.iteritems() if key != 'q' and \
params.has_key(key) and value == params[key]], 0)
if (type == target_type or type == '*' or target_type == '*') and \
(subtype == target_subtype or subtype == '*' or target_subtype == '*'):
fitness = (type == target_type) and 100 or 0
fitness += (subtype == target_subtype) and 10 or 0
fitness += param_matches
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params['q']
return float(best_fit_q)
def quality(mime_type, ranges):
"""Returns the quality 'q' of a mime_type when compared
against the media-ranges in ranges. For example:
>>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
0.7
"""
parsed_ranges = [parse_media_range(r) for r in ranges.split(",")]
return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
"""Takes a list of supported mime-types and finds the best
match for all the media-ranges listed in header. The value of
header must be a string that conforms to the format of the
HTTP Accept: header. The value of 'supported' is a list of
mime-types.
>>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
parsed_header = [parse_media_range(r) for r in header.split(",")]
weighted_matches = [(quality_parsed(mime_type, parsed_header), mime_type)\
for mime_type in supported]
weighted_matches.sort()
return weighted_matches[-1][0] and weighted_matches[-1][1] or ''
if __name__ == "__main__":
import unittest
class TestMimeParsing(unittest.TestCase):
def test_parse_media_range(self):
self.assert_(('application', 'xml', {'q': '1'}) == parse_media_range('application/xml;q=1'))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml'))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml;q='))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml ; q='))
self.assertEqual(('application', 'xml', {'q': '1', 'b': 'other'}), parse_media_range('application/xml ; q=1;b=other'))
self.assertEqual(('application', 'xml', {'q': '1', 'b': 'other'}), parse_media_range('application/xml ; q=2;b=other'))
def test_rfc_2616_example(self):
accept = "text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5"
self.assertEqual(1, quality("text/html;level=1", accept))
self.assertEqual(0.7, quality("text/html", accept))
self.assertEqual(0.3, quality("text/plain", accept))
self.assertEqual(0.5, quality("image/jpeg", accept))
self.assertEqual(0.4, quality("text/html;level=2", accept))
self.assertEqual(0.7, quality("text/html;level=3", accept))
def test_best_match(self):
mime_types_supported = ['application/xbel+xml', 'application/xml']
# direct match
self.assertEqual(best_match(mime_types_supported, 'application/xbel+xml'), 'application/xbel+xml')
# direct match with a q parameter
self.assertEqual(best_match(mime_types_supported, 'application/xbel+xml; q=1'), 'application/xbel+xml')
# direct match of our second choice with a q parameter
self.assertEqual(best_match(mime_types_supported, 'application/xml; q=1'), 'application/xml')
# match using a subtype wildcard
self.assertEqual(best_match(mime_types_supported, 'application/*; q=1'), 'application/xml')
# match using a type wildcard
self.assertEqual(best_match(mime_types_supported, '*/*'), 'application/xml')
mime_types_supported = ['application/xbel+xml', 'text/xml']
# match using a type versus a lower weighted subtype
self.assertEqual(best_match(mime_types_supported, 'text/*;q=0.5,*/*; q=0.1'), 'text/xml')
# fail to match anything
self.assertEqual(best_match(mime_types_supported, 'text/html,application/atom+xml; q=0.9'), '')
def test_support_wildcards(self):
mime_types_supported = ['image/*', 'application/xml']
# match using a type wildcard
self.assertEqual(best_match(mime_types_supported, 'image/png'), 'image/*')
# match using a wildcard for both requested and supported
self.assertEqual(best_match(mime_types_supported, 'image/*'), 'image/*')
unittest.main()
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/deps/django_authopenid/mimeparse.py
| 0.688468 | 0.325976 |
mimeparse.py
|
pypi
|
from django import template
from django.contrib.sites.models import Site
from django.core import urlresolvers
from askbot.deps.livesettings import config_value
from askbot.deps.livesettings.utils import url_join
import logging
log = logging.getLogger('configuration.config_tags')
register = template.Library()
def force_space(value, chars=40):
"""Forces spaces every `chars` in value"""
chars = int(chars)
if len(value) < chars:
return value
else:
out = []
start = 0
end = 0
looping = True
while looping:
start = end
end += chars
out.append(value[start:end])
looping = end < len(value)
return ' '.join(out)
def break_at(value, chars=40):
"""Force spaces into long lines which don't have spaces"""
    #todo: EF - lazy patch: the early return below disables the
    #formatting logic that follows (kept for reference)
    return value
chars = int(chars)
value = unicode(value)
if len(value) < chars:
return value
else:
out = []
line = value.split(' ')
for word in line:
if len(word) > chars:
out.append(force_space(word, chars))
else:
out.append(word)
return " ".join(out)
register.filter('break_at', break_at)
def config_boolean(option):
"""Looks up the configuration option, returning true or false."""
args = option.split('.')
try:
val = config_value(*args)
except:
log.warn('config_boolean tag: Tried to look up config setting "%s", got SettingNotSet, returning False', option)
val = False
if val:
return "true"
else:
return ""
register.filter('config_boolean', config_boolean)
def admin_site_views(view):
"""Returns a formatted list of sites, rendering for view, if any"""
if view:
path = urlresolvers.reverse(view)
else:
path = None
links = []
for site in Site.objects.all():
paths = ["http://", site.domain]
if path:
paths.append(path)
links.append((site.name, url_join(paths)))
ret = {
'links' : links,
}
return ret
register.inclusion_tag('askbot.deps.livesettings/_admin_site_views.html')(admin_site_views)
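# Illustrative sketch (hypothetical template snippet): how the filters and the
# inclusion tag above might be used from a template that loads this library.
# The setting name "GENERAL.SOME_FLAG" and the view name are made up for the
# example.
#
#   {% load config_tags %}
#   {{ some_long_token|break_at:40 }}
#   {% if "GENERAL.SOME_FLAG"|config_boolean %} ... {% endif %}
#   {% admin_site_views "some_view_name" %}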
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/deps/livesettings/templatetags/config_tags.py
| 0.425128 | 0.166066 |
config_tags.py
|
pypi
|
from askbot.conf import settings as askbot_settings
import simplejson
def get_leaf_index(tree, leaf_name):
children = tree[1]
for index, child in enumerate(children):
if child[0] == leaf_name:
return index
return None
def _get_subtree(tree, path):
clevel = tree
for pace in path:
clevel = clevel[1][pace]
return clevel
def get_subtree(tree, path):
"""path always starts with 0,
and is a list of integers"""
assert(path[0] == 0)
if len(path) == 1:#special case
return tree[0]
else:
return _get_subtree(tree[0], path[1:])
def sort_tree(tree):
"""sorts contents of the nodes alphabetically"""
tree = sorted(tree, lambda x,y: cmp(x[0], y[0]))
for item in tree:
item[1] = sort_tree(item[1])
return tree
def get_data():
"""returns category tree data structure encoded as json
or None, if category_tree is disabled
"""
if askbot_settings.TAG_SOURCE == 'category-tree':
return simplejson.loads(askbot_settings.CATEGORY_TREE)
else:
return None
def _get_leaf_names(subtree):
leaf_names = set()
for leaf in subtree:
leaf_names.add(leaf[0])
leaf_names |= _get_leaf_names(leaf[1])
return leaf_names
def get_leaf_names(tree = None):
"""returns set of leaf names"""
data = tree or get_data()
if data is None:
return set()
return _get_leaf_names(data[0][1])
def path_is_valid(tree, path):
try:
get_subtree(tree, path)
return True
except IndexError:
return False
except AssertionError:
return False
def add_category(tree, category_name, path):
subtree = get_subtree(tree, path)
children = subtree[1]
children.append([category_name, []])
children = sorted(children, lambda x,y: cmp(x[0], y[0]))
subtree[1] = children
new_path = path[:]
#todo: reformulate all paths in terms of names?
new_item_index = get_leaf_index(subtree, category_name)
assert new_item_index != None
new_path.append(new_item_index)
return new_path
def _has_category(tree, category_name):
for item in tree:
if item[0] == category_name:
return True
if _has_category(item[1], category_name):
return True
return False
def has_category(tree, category_name):
"""true if category is in tree"""
#skip the dummy
return _has_category(tree[0][1], category_name)
def rename_category(
tree, from_name = None, to_name = None, path = None
):
if to_name == from_name:
return
subtree = get_subtree(tree, path[:-1])
from_index = get_leaf_index(subtree, from_name)
#todo possibly merge if to_name exists on the same level
#to_index = get_leaf_index(subtree, to_name)
child = subtree[1][from_index]
child[0] = to_name
return sort_tree(tree)
def _delete_category(tree, name):
for item in tree:
if item[0] == name:
tree.remove(item)
return True
if _delete_category(item[1], name):
return True
return False
def delete_category(tree, name, path):
subtree = get_subtree(tree, path[:-1])
del_index = get_leaf_index(subtree, name)
subtree[1].pop(del_index)
return sort_tree(tree)
def save_data(tree):
assert(askbot_settings.TAG_SOURCE == 'category-tree')
tree_json = simplejson.dumps(tree)
askbot_settings.update('CATEGORY_TREE', tree_json)
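# Illustrative sketch (not part of the original module): the category tree is a
# nested list of [name, children] pairs wrapped in a single dummy root node, and
# paths are lists of child indexes that always start with 0 (the root). The
# hypothetical, never-called helper below shows how the functions above combine.
def _example_category_tree_usage():
    """Builds a tiny in-memory tree and adds two categories under the root."""
    tree = [['dummy', []]]                       # [[root_name, children]]
    add_category(tree, 'science', [0])           # returns path [0, 0]
    math_path = add_category(tree, 'math', [0])  # 'math' sorts first -> [0, 0]
    assert has_category(tree, 'science')
    assert get_subtree(tree, math_path)[0] == 'math'
    return tree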
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/utils/category_tree.py
| 0.520984 | 0.422386 |
category_tree.py
|
pypi
|
import datetime
import os
import re
import random
import simplejson
import time
import warnings
import zlib
import zipfile
from django.core.validators import validate_email
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils.html import escape
from django.utils import six
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils import timezone
from django import forms
mark_safe_lazy = lazy(mark_safe, six.text_type) #pylint: disable=invalid-name
def decode_and_loads(input_str):
"""utf-8 decodes the input, then runs json loads"""
return simplejson.loads(input_str.decode('utf-8'))
def is_email_valid(email):
"""Returns `True` if email is valid"""
try:
validate_email(email)
except forms.ValidationError:
return False
return True
def timedelta_total_seconds(time_delta):
"""returns total seconds for the timedelta object
supports python < 2.7
"""
if hasattr(time_delta, 'total_seconds'):
return time_delta.total_seconds()
    # pylint: disable=line-too-long
    # use explicit float division so the result is not truncated under python 2
    return (time_delta.microseconds + (time_delta.seconds + time_delta.days * 24 * 3600) * 10**6) / float(10**6)
def get_epoch_str(date_time):
"""returns epoch as string to datetime"""
return str(int(time.mktime(date_time.timetuple())))
def get_from_dict_or_object(source, key):
"""Returns either object attribute or dictionary value
by key"""
try:
return source[key]
except:
return getattr(source, key)
def enumerate_string_list(strings):
"""for a list or a tuple ('one', 'two',) return
a list formatted as ['1) one', '2) two',]
"""
numbered_strings = enumerate(strings, start=1)
return ['%d) %s' % item for item in numbered_strings]
def format_setting_name(token):
"""Returns string in style in upper case
with underscores to separate words"""
token = token.replace(' ', '_')
token = token.replace('-', '_')
bits = token.split('_')
return '_'.join(bits).upper()
def pad_string(text):
"""Inserts one space between words,
including one space before the first word
and after the last word.
String without words is collapsed to ''
"""
words = text.strip().split()
if words:
return ' ' + ' '.join(words) + ' '
return ''
def split_list(text):
"""Takes text, representing a loosely formatted
list (comma, semicolon, empty space separated
words) and returns a list() of words.
"""
text = text.replace(',', ' ').replace(';', ' ')
return text.strip().split()
def split_phrases(text):
"""splits text by semicolon (;), comma(,) and
end of line
"""
text = text.replace(';', ',').replace('\n', ',')
return [word.strip() for word in text.split(',')]
def is_iterable(thing):
#pylint: disable=missing-docstring
if hasattr(thing, '__iter__'):
return True
return isinstance(thing, basestring)
BOT_REGEX = re.compile(
    r'bot|http|\.com|crawl|spider|python|curl|yandex',
    re.IGNORECASE
)
BROWSER_REGEX = re.compile(
r'^(Mozilla.*(Gecko|KHTML|MSIE|Presto|Trident)|Opera).*$'
)
MOBILE_REGEX = re.compile(
r'(BlackBerry|HTC|LG|MOT|Nokia|NOKIAN|PLAYSTATION|PSP|SAMSUNG|SonyEricsson)'
)
def strip_plus(text):
"""returns text with redundant spaces replaced with just one,
and stripped leading and the trailing spaces"""
return re.sub(r'\s+', ' ', text).strip()
def not_a_robot_request(request):
"""`True` if the best guess is that request is not a robot"""
if 'HTTP_ACCEPT_LANGUAGE' not in request.META:
return False
user_agent = request.META.get('HTTP_USER_AGENT', None)
if user_agent is None:
return False
    # case-insensitivity is handled by the compiled BOT_REGEX above
    if BOT_REGEX.search(user_agent):
return False
if MOBILE_REGEX.match(user_agent):
return True
if BROWSER_REGEX.search(user_agent):
return True
return False
def diff_date(date, use_on_prefix=False):
"""Gives human friendly label for difference in dates"""
    now = datetime.datetime.now()
diff = now - date
days = diff.days
hours = int(diff.seconds/3600)
minutes = int(diff.seconds/60)
if days > 2:
if date.year == now.year:
date_token = date.strftime("%b %d")
else:
date_token = date.strftime("%b %d '%y")
if use_on_prefix:
return _('on %(date)s') % {'date': date_token}
return date_token
elif days == 2:
return _('2 days ago')
elif days == 1:
return _('yesterday')
elif minutes >= 60:
return ungettext(
'%(hr)d hour ago',
'%(hr)d hours ago',
hours
) % {'hr':hours}
else:
return ungettext(
'%(min)d min ago',
'%(min)d mins ago',
minutes
) % {'min':minutes}
#todo: this function may need to be removed to simplify the paginator functionality
LEADING_PAGE_RANGE_DISPLAYED = TRAILING_PAGE_RANGE_DISPLAYED = 5
LEADING_PAGE_RANGE = TRAILING_PAGE_RANGE = 4
NUM_PAGES_OUTSIDE_RANGE = 1
ADJACENT_PAGES = 2
def setup_paginator(context):
"""custom paginator tag
Inspired from http://blog.localkinegrinds.com/2007/09/06/digg-style-pagination-in-django/
"""
#pylint: disable=line-too-long
if context["is_paginated"]:
# initialize variables
in_leading_range = in_trailing_range = False
pages_outside_leading_range = pages_outside_trailing_range = range(0)
if context["pages"] <= LEADING_PAGE_RANGE_DISPLAYED:
in_leading_range = in_trailing_range = True
page_numbers = [n for n in range(1, context["pages"] + 1) if n > 0 and n <= context["pages"]]
elif context["current_page_number"] <= LEADING_PAGE_RANGE:
in_leading_range = True
page_numbers = [n for n in range(1, LEADING_PAGE_RANGE_DISPLAYED + 1) if n > 0 and n <= context["pages"]]
pages_outside_leading_range = [n + context["pages"] for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
elif context["current_page_number"] > context["pages"] - TRAILING_PAGE_RANGE:
in_trailing_range = True
page_numbers = [n for n in range(context["pages"] - TRAILING_PAGE_RANGE_DISPLAYED + 1, context["pages"] + 1) if n > 0 and n <= context["pages"]]
pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
else:
page_numbers = [n for n in range(context["current_page_number"] - ADJACENT_PAGES, context["current_page_number"] + ADJACENT_PAGES + 1) if n > 0 and n <= context["pages"]]
pages_outside_leading_range = [n + context["pages"] for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
page_object = context['page_object']
#patch for change in django 1.5
if page_object.has_previous():
previous_page_number = page_object.previous_page_number()
else:
previous_page_number = None
if page_object.has_next():
next_page_number = page_object.next_page_number()
else:
next_page_number = None
return {"base_url": escape(context["base_url"]),
"is_paginated": context["is_paginated"],
"previous": previous_page_number,
"has_previous": page_object.has_previous(),
"next": next_page_number,
"has_next": page_object.has_next(),
"page": context["current_page_number"],
"pages": context["pages"],
"page_numbers": page_numbers,
"in_leading_range" : in_leading_range,
"in_trailing_range" : in_trailing_range,
"pages_outside_leading_range": pages_outside_leading_range,
"pages_outside_trailing_range": pages_outside_trailing_range}
def get_admin():
"""Returns an admin users, usefull for raising flags"""
try:
from django.contrib.auth.models import User
return User.objects.filter(is_superuser=True)[0]
except:
        raise Exception('there are no admin users')
def generate_random_key(length=16):
"""return random string, length is number of characters"""
random.seed()
assert isinstance(length, int)
format_string = '%0' + str(2*length) + 'x'
return format_string % random.getrandbits(length*8)
def list_directory_files(dir_path):
"""Lists all files in the directory,
including those located inside nested directories,
returned file paths include the directory paths"""
file_paths = list()
def handler(_, directory, file_names):
for file_name in file_names:
file_path = os.path.join(directory, file_name)
file_paths.append(file_path)
os.path.walk(dir_path, handler, None)
return file_paths
def zipzip(zip_path, *args, **kwargs): #pylint: disable=too-many-locals
"""creates or updates the zip file at `zip_path`
with contents given by the `*args`, which can be
paths to files and/or directories, glob definitons
are not supported.
If the zip file exists, new items will be added to it,
otherwise the zip file will be newly created.
If an item added already exists in the zipfile,
the old item is replaced with the new one.
If existing file is not zip, raises `ValueError` exception.
"""
zlib.Z_DEFAULT_COMPRESSION = 9
exclude_dirs = kwargs.get('exclude_dirs', list())
exclude_files = kwargs.get('exclude_files', list())
exclude_dir_types = kwargs.get('exclude_dir_types', list())
exclude_file_types = kwargs.get('exclude_file_types', list())
ignore_subpath = kwargs.get('ignore_subpath', '')
if os.path.exists(zip_path):
if not zipfile.is_zipfile(zip_path):
raise ValueError('`zip_path` must be a zip file, if exists')
with zipfile.ZipFile(zip_path, 'a', zipfile.ZIP_DEFLATED) as zip_file:
for item in args:
if os.path.isfile(item):
if item in exclude_files:
continue
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if ignore_subpath and item.startswith(ignore_subpath):
arcname = item[len(ignore_subpath):]
if arcname:
zip_file.write(item, arcname)
else:
zip_file.write(item)
else:
zip_file.write(item)
elif os.path.isdir(item):
for dir_path, _, file_names in os.walk(item):
def is_excluded_dir(dname): #pylint: disable=missing-docstring
for ex_dir in exclude_dirs:
if dname.startswith(ex_dir):
my_dl = len(dname)
ex_dl = len(ex_dir)
return my_dl == ex_dl or dname[ex_dl] == '/'
return False
if is_excluded_dir(dir_path):
continue
if any([dir_path.endswith(dirtype) for dirtype in exclude_dir_types]):
continue
for file_name in file_names:
if any([file_name.endswith(filetype) for filetype in exclude_file_types]):
continue
with warnings.catch_warnings():
warnings.simplefilter('ignore')
zip_file.write(os.path.join(dir_path, file_name))
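# Illustrative sketch (hypothetical paths): zipzip() accepts a mix of files and
# directories and may be called repeatedly against the same archive, e.g.
#
#   zipzip('/tmp/site-backup.zip', 'settings.py', 'askbot/media',
#          exclude_file_types=['.pyc'], exclude_dirs=['askbot/media/tmp'])
#
# A later call with further arguments appends to (or replaces entries in) the
# existing /tmp/site-backup.zip instead of recreating it.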
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/utils/functions.py
| 0.428712 | 0.154823 |
functions.py
|
pypi
|
class LazyList(list):
def __init__(self, get_data):
self.data = get_data
def flatten(x):
"""
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
    >>> flatten([1, 2, [3, 4], (5, 6)])
    [1, 2, 3, 4, 5, 6]
From http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
"""
result = []
for el in x:
if hasattr(el, '__iter__') and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def batch_size(items, size):
"""
Retrieves items in batches of the given size.
>>> l = range(1, 11)
>>> batch_size(l, 3)
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
>>> batch_size(l, 5)
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
"""
return [items[i:i+size] for i in xrange(0, len(items), size)]
def batches(items, number):
"""
Retrieves items in the given number of batches.
>>> l = range(1, 11)
>>> batches(l, 1)
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
>>> batches(l, 2)
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
>>> batches(l, 3)
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
>>> batches(l, 4)
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
>>> batches(l, 5)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
Initial batches will contain as many items as possible in cases where
there are not enough items to be distributed evenly.
>>> batches(l, 6)
[[1, 2], [3, 4], [5, 6], [7, 8], [9], [10]]
>>> batches(l, 7)
[[1, 2], [3, 4], [5, 6], [7], [8], [9], [10]]
>>> batches(l, 8)
[[1, 2], [3, 4], [5], [6], [7], [8], [9], [10]]
>>> batches(l, 9)
[[1, 2], [3], [4], [5], [6], [7], [8], [9], [10]]
>>> batches(l, 10)
[[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]
If there are more batches than items, empty batches will be appended
to the batch list.
>>> batches(l, 11)
[[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], []]
>>> batches(l, 12)
[[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [], []]
"""
    div, mod = divmod(len(items), number)
if div > 1:
if mod:
div += 1
return batch_size(items, div)
else:
if not div:
return [[item] for item in items] + [[]] * (number - mod)
elif div == 1 and not mod:
return [[item] for item in items]
else:
# mod now tells you how many lists of 2 you can fit in
return ([items[i*2:(i*2)+2] for i in xrange(0, mod)] +
[[item] for item in items[mod*2:]])
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/utils/lists.py
| 0.809163 | 0.677647 |
lists.py
|
pypi
|
import askbot
from askbot.utils.translation import get_language
from django.db import connection, models
#mapping of "django" language names to postgres
LANGUAGE_NAMES = {
'da': 'danish',
'de': 'german',
'en': 'english',
'es': 'spanish',
'fi': 'finnish',
'fr': 'french',
'hu': 'hungarian',
'it': 'italian',
'ja': 'japanese',
'nb': 'norwegian',
'nl': 'dutch',
'pt': 'portugese',
'ro': 'romanian',
'ru': 'russian',
'sv': 'swedish',
'tr': 'turkish',
'zh-cn': 'chinese',
}
def setup_full_text_search(script_path):
"""using postgresql database connection,
installs the plsql language, if necessary
and runs the stript, whose path is given as an argument
"""
fts_init_query = open(script_path).read()
cursor = connection.cursor()
try:
#test if language exists
cursor.execute("SELECT * FROM pg_language WHERE lanname='plpgsql'")
lang_exists = cursor.fetchone()
if not lang_exists:
cursor.execute("CREATE LANGUAGE plpgsql")
#run the main query
cursor.execute(fts_init_query)
finally:
cursor.close()
def run_full_text_search(query_set, query_text, text_search_vector_name):
"""runs full text search against the query set and
    the search text. All words in the query text are
    joined with the "|" operator - i.e. the more terms
    are in the search, the broader it is.
    It is also assumed that we are searching in the same
    table as the query set was built against, and that
    the table stores the text search vector in the
    column named by `text_search_vector_name`.
"""
original_qs = query_set
table_name = query_set.model._meta.db_table
rank_clause = 'ts_rank(' + table_name + \
'.' + text_search_vector_name + \
', plainto_tsquery(%s, %s))'
where_clause = table_name + '.' + \
text_search_vector_name + \
' @@ plainto_tsquery(%s, %s)'
language_code = get_language()
#a hack with japanese search for the short queries
if language_code in ['ja', 'zh-cn'] and len(query_text) in (1, 2):
mul = 4/len(query_text) #4 for 1 and 2 for 2
query_text = (query_text + ' ')*mul
#the table name is a hack, because user does not have the language code
if askbot.is_multilingual() and table_name == 'askbot_thread':
where_clause += " AND " + table_name + \
'.' + "language_code='" + language_code + "'"
search_query = '|'.join(query_text.split())#apply "OR" operator
language_name = LANGUAGE_NAMES.get(language_code, 'english')
extra_params = (language_name, search_query,)
extra_kwargs = {
'select': {'relevance': rank_clause},
'where': [where_clause,],
'params': extra_params,
'select_params': extra_params,
}
result_qs = query_set.extra(**extra_kwargs)
#added to allow search that can be ignored by postgres FTS.
if not result_qs and len(query_text) < 5:
return original_qs.filter(
models.Q(title__icontains=search_query) |
models.Q(tagnames__icontains=search_query) |
models.Q(posts__text__icontains = search_query)
).extra(select={'relevance': rank_clause}, select_params=extra_params)
return result_qs
def run_thread_search(query_set, query):
"""runs search for full thread content"""
    return run_full_text_search(query_set, query, 'text_search_vector')
run_user_search = run_thread_search #an alias
def run_title_search(query_set, query):
"""runs search for title and tags"""
return run_full_text_search(query_set, query, 'title_search_vector')
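# Illustrative sketch (assumes the askbot Thread model and populated search
# vector columns): the helpers above return a queryset annotated with a
# 'relevance' column via .extra(), so results can be ordered by it, e.g.
#
#   from askbot.models import Thread
#   threads = run_title_search(Thread.objects.all(), 'postgresql search')
#   threads = threads.order_by('-relevance')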
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/search/postgresql/__init__.py
| 0.406626 | 0.217795 |
__init__.py
|
pypi
|
import askbot
from django.conf import settings
from django.utils.translation import override
from haystack import indexes
from .utils import language_from_alias
class BaseIndex(indexes.SearchIndex):
i18n_enabled = askbot.is_multilingual()
text = indexes.CharField(document=True, use_template=True)
def _get_backend(self, using):
"""
        We set the backend alias to be able to determine the language in a multilanguage setup.
"""
self._backend_alias = using
return super(BaseIndex, self)._get_backend(using)
def get_language(self, obj):
return None
def get_default_language(self, using):
"""
When using multiple languages, this allows us to specify a fallback based on the
backend being used.
"""
return language_from_alias(using) or settings.LANGUAGE_CODE
def get_current_language(self, using=None, obj=None):
"""
Helper method bound to ALWAYS return a language.
When obj is not None, this calls self.get_language to try and get a language from obj,
        this is useful when the object itself defines its language in a "language" field.
If no language was found or obj is None, then we call self.get_default_language to try and get a fallback language.
"""
language = self.get_language(obj) if obj else None
return language or self.get_default_language(using)
def get_index_kwargs(self, language):
"""
This is called to filter the indexed queryset.
"""
return {}
def index_queryset(self, using=None):
self._get_backend(using)
language = self.get_current_language(using)
filter_kwargs = self.get_index_kwargs(language)
return self.get_model().objects.filter(**filter_kwargs)
def prepare(self, obj):
current_language = self.get_current_language(using=self._backend_alias, obj=obj)
with override(current_language):
self.prepared_data = super(BaseIndex, self).prepare(obj)
self.prepared_data['text'] = ' '.join(self.prepared_data['text'].split())
return self.prepared_data
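# Illustrative sketch (hypothetical ThreadIndex, not part of this module): a
# concrete index built on BaseIndex only needs to name its model and, on a
# multilingual site, report each object's language so prepare() can render the
# index templates under the right translation.
#
#   class ThreadIndex(BaseIndex, indexes.Indexable):
#       def get_model(self):
#           from askbot.models import Thread
#           return Thread
#
#       def get_language(self, obj):
#           return getattr(obj, 'language_code', None)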
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/search/haystack/base.py
| 0.500244 | 0.204978 |
base.py
|
pypi
|
from django.db.models import signals as django_signals
from haystack.signals import RealtimeSignalProcessor
from askbot import signals as askbot_signals
class AskbotRealtimeSignalProcessor(RealtimeSignalProcessor):
'''
Based on haystack RealTimeSignalProcessor with some
modifications to work with askbot soft-delete models
'''
def handle_delete(self, sender, instance, **kwargs):
# avoid circular imports
from askbot.models import Post, Thread
if isinstance(instance, Post) and instance.thread_id:
# instance becomes the thread instance
# sender becomes the Thread class
# this is because we don't index Post instances, only Thread
# but still need to update/remove thread when post is removed.
instance, sender = (instance.thread, Thread)
super(AskbotRealtimeSignalProcessor, self).handle_delete(sender, instance, **kwargs)
def setup(self):
super(AskbotRealtimeSignalProcessor, self).setup()
try:
askbot_signals.after_post_removed.connect(self.handle_delete)
except ImportError:
pass
def teardown(self):
        super(AskbotRealtimeSignalProcessor, self).teardown()
#askbot signals
try:
askbot_signals.after_post_removed.disconnect(self.handle_delete)
except ImportError:
pass
try:
from haystack.exceptions import NotHandled
from celery_haystack.signals import CelerySignalProcessor
from celery_haystack.utils import enqueue_task
class AskbotCelerySignalProcessor(CelerySignalProcessor):
def setup(self):
django_signals.post_save.connect(self.enqueue_save)
django_signals.post_delete.connect(self.enqueue_delete)
try:
askbot_signals.after_post_removed.connect(self.enqueue_delete)
except ImportError:
pass
def teardown(self):
django_signals.post_save.disconnect(self.enqueue_save)
django_signals.post_delete.disconnect(self.enqueue_delete)
try:
askbot_signals.after_post_removed.disconnect(self.enqueue_delete)
except ImportError:
pass
def enqueue(self, action, instance, sender, **kwargs):
using_backends = self.connection_router.for_write(instance=instance)
for using in using_backends:
try:
connection = self.connections[using]
index = connection.get_unified_index().get_index(sender)
except NotHandled:
continue # Check next backend
if action == 'update' and not index.should_update(instance):
continue
enqueue_task(action, instance)
return # Only enqueue instance once
except ImportError:
pass
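# Illustrative sketch: either processor is activated through the standard
# haystack setting, e.g. in the project's settings.py:
#
#   HAYSTACK_SIGNAL_PROCESSOR = \
#       'askbot.search.haystack.signals.AskbotRealtimeSignalProcessor'
#
#   # or, when celery_haystack is installed:
#   # HAYSTACK_SIGNAL_PROCESSOR = \
#   #     'askbot.search.haystack.signals.AskbotCelerySignalProcessor'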
|
/sch-askbot-1.0.1.tar.gz/sch-askbot-1.0.1/askbot/search/haystack/signals.py
| 0.435301 | 0.3039 |
signals.py
|
pypi
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk
import aws_cdk.aws_ec2
import aws_cdk.aws_iam
import aws_cdk.aws_lambda
import aws_cdk.aws_logs
import aws_cdk.aws_rds
import aws_cdk.aws_sqs
import aws_cdk.aws_stepfunctions
import constructs
class CSVToAuroraTask(
aws_cdk.aws_stepfunctions.TaskStateBase,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-csv-to-aurora.CSVToAuroraTask",
):
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
s3_input_bucket: builtins.str,
s3_input_prefix: builtins.str,
vpc: aws_cdk.aws_ec2.IVpc,
associate_with_parent: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
) -> None:
'''
:param scope: -
:param id: -
:param s3_input_bucket:
:param s3_input_prefix:
:param vpc:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param textract_state_machine_timeout_minutes:
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
'''
props = CSVToAuroraTaskProps(
s3_input_bucket=s3_input_bucket,
s3_input_prefix=s3_input_prefix,
vpc=vpc,
associate_with_parent=associate_with_parent,
input=input,
lambda_log_level=lambda_log_level,
name=name,
textract_state_machine_timeout_minutes=textract_state_machine_timeout_minutes,
comment=comment,
heartbeat=heartbeat,
input_path=input_path,
integration_pattern=integration_pattern,
output_path=output_path,
result_path=result_path,
result_selector=result_selector,
timeout=timeout,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskMetrics")
def _task_metrics(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig]:
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig], jsii.get(self, "taskMetrics"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskPolicies")
def _task_policies(
self,
) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
return typing.cast(typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]], jsii.get(self, "taskPolicies"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="csvToAuroraFunction")
def csv_to_aurora_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "csvToAuroraFunction"))
@csv_to_aurora_function.setter
def csv_to_aurora_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "csvToAuroraFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="csvToAuroraLambdaLogGroup")
def csv_to_aurora_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "csvToAuroraLambdaLogGroup"))
@csv_to_aurora_lambda_log_group.setter
def csv_to_aurora_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "csvToAuroraLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="cSVToAuroraSQS")
def c_sv_to_aurora_sqs(self) -> aws_cdk.aws_sqs.IQueue:
return typing.cast(aws_cdk.aws_sqs.IQueue, jsii.get(self, "cSVToAuroraSQS"))
@c_sv_to_aurora_sqs.setter
def c_sv_to_aurora_sqs(self, value: aws_cdk.aws_sqs.IQueue) -> None:
jsii.set(self, "cSVToAuroraSQS", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="dbCluster")
def db_cluster(self) -> aws_cdk.aws_rds.IServerlessCluster:
return typing.cast(aws_cdk.aws_rds.IServerlessCluster, jsii.get(self, "dbCluster"))
@db_cluster.setter
def db_cluster(self, value: aws_cdk.aws_rds.IServerlessCluster) -> None:
jsii.set(self, "dbCluster", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="putOnSQSLambdaLogGroup")
def put_on_sqs_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "putOnSQSLambdaLogGroup"))
@put_on_sqs_lambda_log_group.setter
def put_on_sqs_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "putOnSQSLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="stateMachine")
def state_machine(self) -> aws_cdk.aws_stepfunctions.IStateMachine:
return typing.cast(aws_cdk.aws_stepfunctions.IStateMachine, jsii.get(self, "stateMachine"))
@state_machine.setter
def state_machine(self, value: aws_cdk.aws_stepfunctions.IStateMachine) -> None:
jsii.set(self, "stateMachine", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractPutOnSQSFunction")
def textract_put_on_sqs_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractPutOnSQSFunction"))
@textract_put_on_sqs_function.setter
def textract_put_on_sqs_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractPutOnSQSFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="version")
def version(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "version"))
@version.setter
def version(self, value: builtins.str) -> None:
jsii.set(self, "version", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-csv-to-aurora.CSVToAuroraTaskProps",
jsii_struct_bases=[aws_cdk.aws_stepfunctions.TaskStateBaseProps],
name_mapping={
"comment": "comment",
"heartbeat": "heartbeat",
"input_path": "inputPath",
"integration_pattern": "integrationPattern",
"output_path": "outputPath",
"result_path": "resultPath",
"result_selector": "resultSelector",
"timeout": "timeout",
"s3_input_bucket": "s3InputBucket",
"s3_input_prefix": "s3InputPrefix",
"vpc": "vpc",
"associate_with_parent": "associateWithParent",
"input": "input",
"lambda_log_level": "lambdaLogLevel",
"name": "name",
"textract_state_machine_timeout_minutes": "textractStateMachineTimeoutMinutes",
},
)
class CSVToAuroraTaskProps(aws_cdk.aws_stepfunctions.TaskStateBaseProps):
def __init__(
self,
*,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
s3_input_bucket: builtins.str,
s3_input_prefix: builtins.str,
vpc: aws_cdk.aws_ec2.IVpc,
associate_with_parent: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
:param s3_input_bucket:
:param s3_input_prefix:
:param vpc:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param textract_state_machine_timeout_minutes:
'''
self._values: typing.Dict[str, typing.Any] = {
"s3_input_bucket": s3_input_bucket,
"s3_input_prefix": s3_input_prefix,
"vpc": vpc,
}
if comment is not None:
self._values["comment"] = comment
if heartbeat is not None:
self._values["heartbeat"] = heartbeat
if input_path is not None:
self._values["input_path"] = input_path
if integration_pattern is not None:
self._values["integration_pattern"] = integration_pattern
if output_path is not None:
self._values["output_path"] = output_path
if result_path is not None:
self._values["result_path"] = result_path
if result_selector is not None:
self._values["result_selector"] = result_selector
if timeout is not None:
self._values["timeout"] = timeout
if associate_with_parent is not None:
self._values["associate_with_parent"] = associate_with_parent
if input is not None:
self._values["input"] = input
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if name is not None:
self._values["name"] = name
if textract_state_machine_timeout_minutes is not None:
self._values["textract_state_machine_timeout_minutes"] = textract_state_machine_timeout_minutes
@builtins.property
def comment(self) -> typing.Optional[builtins.str]:
'''An optional description for this state.
:default: - No comment
'''
result = self._values.get("comment")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def heartbeat(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the heartbeat.
:default: - None
'''
result = self._values.get("heartbeat")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def input_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select part of the state to be the input to this state.
May also be the special value JsonPath.DISCARD, which will cause the effective
input to be the empty object {}.
:default: - The entire task input (JSON path '$')
'''
result = self._values.get("input_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def integration_pattern(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern]:
'''AWS Step Functions integrates with services directly in the Amazon States Language.
You can control these AWS services using service integration patterns
:default:
- ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks.
``IntegrationPattern.RUN_JOB`` for the following exceptions:
``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
'''
result = self._values.get("integration_pattern")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern], result)
@builtins.property
def output_path(self) -> typing.Optional[builtins.str]:
        '''JSONPath expression to select a portion of the state output to pass to the next state.
May also be the special value JsonPath.DISCARD, which will cause the effective
output to be the empty object {}.
:default:
- The entire JSON node determined by the state input, the task result,
and resultPath is passed to the next state (JSON path '$')
'''
result = self._values.get("output_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to indicate where to inject the state's output.
May also be the special value JsonPath.DISCARD, which will cause the state's
input to become its output.
:default: - Replaces the entire input with the result (JSON path '$')
'''
result = self._values.get("result_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_selector(
self,
) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:
'''The JSON that will replace the state's raw result and become the effective result before ResultPath is applied.
You can use ResultSelector to create a payload with values that are static
or selected from the state's raw result.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector
'''
result = self._values.get("result_selector")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the state machine.
:default: - None
'''
result = self._values.get("timeout")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def s3_input_bucket(self) -> builtins.str:
result = self._values.get("s3_input_bucket")
assert result is not None, "Required property 's3_input_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def s3_input_prefix(self) -> builtins.str:
result = self._values.get("s3_input_prefix")
assert result is not None, "Required property 's3_input_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def vpc(self) -> aws_cdk.aws_ec2.IVpc:
result = self._values.get("vpc")
assert result is not None, "Required property 'vpc' is missing"
return typing.cast(aws_cdk.aws_ec2.IVpc, result)
@builtins.property
def associate_with_parent(self) -> typing.Optional[builtins.bool]:
'''Pass the execution ID from the context object to the execution input.
This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines.
If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely.
:default: - false
:see: https://docs.aws.amazon.com/step-functions/latest/dg/concepts-nested-workflows.html#nested-execution-startid
'''
result = self._values.get("associate_with_parent")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def input(self) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskInput]:
'''The JSON input for the execution, same as that of StartExecution.
:default: - The state input (JSON path '$')
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("input")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskInput], result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''The name of the execution, same as that of StartExecution.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def textract_state_machine_timeout_minutes(self) -> typing.Optional[jsii.Number]:
result = self._values.get("textract_state_machine_timeout_minutes")
return typing.cast(typing.Optional[jsii.Number], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CSVToAuroraTaskProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"CSVToAuroraTask",
"CSVToAuroraTaskProps",
]
publication.publish()
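# Illustrative sketch (hypothetical stack, bucket and prefix): wiring the task
# into a Step Functions state machine from a CDK app. The keyword arguments
# come from the constructor above; every other name is made up for the example.
#
#   import aws_cdk as cdk
#   import aws_cdk.aws_ec2 as ec2
#   import aws_cdk.aws_stepfunctions as sfn
#
#   app = cdk.App()
#   stack = cdk.Stack(app, "CsvToAuroraDemo")
#   vpc = ec2.Vpc(stack, "Vpc", max_azs=2)
#   task = CSVToAuroraTask(
#       stack, "CSVToAurora",
#       vpc=vpc,
#       s3_input_bucket="my-input-bucket",
#       s3_input_prefix="uploads/",
#   )
#   sfn.StateMachine(stack, "Workflow", definition=task)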
|
/schadem_cdk_construct_csv_to_aurora-0.0.0-py3-none-any.whl/schadem_cdk_construct_csv_to_aurora/__init__.py
| 0.650245 | 0.212722 |
__init__.py
|
pypi
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk
import aws_cdk.aws_iam
import aws_cdk.aws_lambda
import aws_cdk.aws_logs
import aws_cdk.aws_sqs
import aws_cdk.aws_stepfunctions
import aws_cdk.aws_stepfunctions_tasks
import constructs
class ComprehendGenericSyncSfnTask(
aws_cdk.aws_stepfunctions.TaskStateBase,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-sfn-comprehend-classifier.ComprehendGenericSyncSfnTask",
):
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
comprehend_classifier_arn: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
) -> None:
'''
:param scope: -
:param id: -
:param comprehend_classifier_arn:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param textract_state_machine_timeout_minutes: how long can we wait for the process (default is 48 hours (60*48=2880)).
:param workflow_tracing_enabled:
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
'''
props = ComprehendGenericSyncSfnTaskProps(
comprehend_classifier_arn=comprehend_classifier_arn,
associate_with_parent=associate_with_parent,
custom_function=custom_function,
enable_dashboard=enable_dashboard,
enable_monitoring=enable_monitoring,
input=input,
lambda_log_level=lambda_log_level,
name=name,
textract_state_machine_timeout_minutes=textract_state_machine_timeout_minutes,
workflow_tracing_enabled=workflow_tracing_enabled,
comment=comment,
heartbeat=heartbeat,
input_path=input_path,
integration_pattern=integration_pattern,
output_path=output_path,
result_path=result_path,
result_selector=result_selector,
timeout=timeout,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskMetrics")
def _task_metrics(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig]:
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig], jsii.get(self, "taskMetrics"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskPolicies")
def _task_policies(
self,
) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
return typing.cast(typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]], jsii.get(self, "taskPolicies"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="comprehendSyncCallFunction")
def comprehend_sync_call_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "comprehendSyncCallFunction"))
@comprehend_sync_call_function.setter
def comprehend_sync_call_function(
self,
value: aws_cdk.aws_lambda.IFunction,
) -> None:
jsii.set(self, "comprehendSyncCallFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="comprehendSyncLambdaLogGroup")
def comprehend_sync_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "comprehendSyncLambdaLogGroup"))
@comprehend_sync_lambda_log_group.setter
def comprehend_sync_lambda_log_group(
self,
value: aws_cdk.aws_logs.ILogGroup,
) -> None:
jsii.set(self, "comprehendSyncLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="comprehendSyncSQS")
def comprehend_sync_sqs(self) -> aws_cdk.aws_sqs.IQueue:
return typing.cast(aws_cdk.aws_sqs.IQueue, jsii.get(self, "comprehendSyncSQS"))
@comprehend_sync_sqs.setter
def comprehend_sync_sqs(self, value: aws_cdk.aws_sqs.IQueue) -> None:
jsii.set(self, "comprehendSyncSQS", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="putOnSQSLambdaLogGroup")
def put_on_sqs_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "putOnSQSLambdaLogGroup"))
@put_on_sqs_lambda_log_group.setter
def put_on_sqs_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "putOnSQSLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="stateMachine")
def state_machine(self) -> aws_cdk.aws_stepfunctions.IStateMachine:
return typing.cast(aws_cdk.aws_stepfunctions.IStateMachine, jsii.get(self, "stateMachine"))
@state_machine.setter
def state_machine(self, value: aws_cdk.aws_stepfunctions.IStateMachine) -> None:
jsii.set(self, "stateMachine", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractPutOnSQSFunction")
def textract_put_on_sqs_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractPutOnSQSFunction"))
@textract_put_on_sqs_function.setter
def textract_put_on_sqs_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractPutOnSQSFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="version")
def version(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "version"))
@version.setter
def version(self, value: builtins.str) -> None:
jsii.set(self, "version", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-sfn-comprehend-classifier.ComprehendGenericSyncSfnTaskProps",
jsii_struct_bases=[aws_cdk.aws_stepfunctions.TaskStateBaseProps],
name_mapping={
"comment": "comment",
"heartbeat": "heartbeat",
"input_path": "inputPath",
"integration_pattern": "integrationPattern",
"output_path": "outputPath",
"result_path": "resultPath",
"result_selector": "resultSelector",
"timeout": "timeout",
"comprehend_classifier_arn": "comprehendClassifierArn",
"associate_with_parent": "associateWithParent",
"custom_function": "customFunction",
"enable_dashboard": "enableDashboard",
"enable_monitoring": "enableMonitoring",
"input": "input",
"lambda_log_level": "lambdaLogLevel",
"name": "name",
"textract_state_machine_timeout_minutes": "textractStateMachineTimeoutMinutes",
"workflow_tracing_enabled": "workflowTracingEnabled",
},
)
class ComprehendGenericSyncSfnTaskProps(aws_cdk.aws_stepfunctions.TaskStateBaseProps):
def __init__(
self,
*,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
comprehend_classifier_arn: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
) -> None:
'''
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns. Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
:param comprehend_classifier_arn:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param textract_state_machine_timeout_minutes: How long to wait for the process to finish, in minutes. Default is 48 hours (60*48 = 2880).
:param workflow_tracing_enabled:
'''
self._values: typing.Dict[str, typing.Any] = {
"comprehend_classifier_arn": comprehend_classifier_arn,
}
if comment is not None:
self._values["comment"] = comment
if heartbeat is not None:
self._values["heartbeat"] = heartbeat
if input_path is not None:
self._values["input_path"] = input_path
if integration_pattern is not None:
self._values["integration_pattern"] = integration_pattern
if output_path is not None:
self._values["output_path"] = output_path
if result_path is not None:
self._values["result_path"] = result_path
if result_selector is not None:
self._values["result_selector"] = result_selector
if timeout is not None:
self._values["timeout"] = timeout
if associate_with_parent is not None:
self._values["associate_with_parent"] = associate_with_parent
if custom_function is not None:
self._values["custom_function"] = custom_function
if enable_dashboard is not None:
self._values["enable_dashboard"] = enable_dashboard
if enable_monitoring is not None:
self._values["enable_monitoring"] = enable_monitoring
if input is not None:
self._values["input"] = input
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if name is not None:
self._values["name"] = name
if textract_state_machine_timeout_minutes is not None:
self._values["textract_state_machine_timeout_minutes"] = textract_state_machine_timeout_minutes
if workflow_tracing_enabled is not None:
self._values["workflow_tracing_enabled"] = workflow_tracing_enabled
@builtins.property
def comment(self) -> typing.Optional[builtins.str]:
'''An optional description for this state.
:default: - No comment
'''
result = self._values.get("comment")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def heartbeat(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the heartbeat.
:default: - None
'''
result = self._values.get("heartbeat")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def input_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select part of the state to be the input to this state.
May also be the special value JsonPath.DISCARD, which will cause the effective
input to be the empty object {}.
:default: - The entire task input (JSON path '$')
'''
result = self._values.get("input_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def integration_pattern(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern]:
'''AWS Step Functions integrates with services directly in the Amazon States Language.
You can control these AWS services using service integration patterns.
:default:
- ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks.
``IntegrationPattern.RUN_JOB`` for the following exceptions:
``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
'''
result = self._values.get("integration_pattern")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern], result)
@builtins.property
def output_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select a portion of the state output to pass to the next state.
May also be the special value JsonPath.DISCARD, which will cause the effective
output to be the empty object {}.
:default:
- The entire JSON node determined by the state input, the task result,
and resultPath is passed to the next state (JSON path '$')
'''
result = self._values.get("output_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to indicate where to inject the state's output.
May also be the special value JsonPath.DISCARD, which will cause the state's
input to become its output.
:default: - Replaces the entire input with the result (JSON path '$')
'''
result = self._values.get("result_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_selector(
self,
) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:
'''The JSON that will replace the state's raw result and become the effective result before ResultPath is applied.
You can use ResultSelector to create a payload with values that are static
or selected from the state's raw result.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector
'''
result = self._values.get("result_selector")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the state machine.
:default: - None
'''
result = self._values.get("timeout")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def comprehend_classifier_arn(self) -> builtins.str:
result = self._values.get("comprehend_classifier_arn")
assert result is not None, "Required property 'comprehend_classifier_arn' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def associate_with_parent(self) -> typing.Optional[builtins.bool]:
'''Pass the execution ID from the context object to the execution input.
This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines.
If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely.
:default: - false
:see: https://docs.aws.amazon.com/step-functions/latest/dg/concepts-nested-workflows.html#nested-execution-startid
'''
result = self._values.get("associate_with_parent")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def custom_function(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke]:
'''not implemented yet.'''
result = self._values.get("custom_function")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke], result)
@builtins.property
def enable_dashboard(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_dashboard")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def enable_monitoring(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_monitoring")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def input(self) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskInput]:
'''The JSON input for the execution, same as that of StartExecution.
:default: - The state input (JSON path '$')
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("input")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskInput], result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''The name of the execution, same as that of StartExecution.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def textract_state_machine_timeout_minutes(self) -> typing.Optional[jsii.Number]:
'''How long to wait for the process to finish, in minutes. Default is 48 hours (60*48 = 2880).'''
result = self._values.get("textract_state_machine_timeout_minutes")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def workflow_tracing_enabled(self) -> typing.Optional[builtins.bool]:
result = self._values.get("workflow_tracing_enabled")
return typing.cast(typing.Optional[builtins.bool], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ComprehendGenericSyncSfnTaskProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"ComprehendGenericSyncSfnTask",
"ComprehendGenericSyncSfnTaskProps",
]
publication.publish()
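# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated bindings).
# A minimal example of instantiating the task inside a CDK app. The `scope`
# argument, the construct id "ClassifyDocument", the classifier ARN and the
# other values are hypothetical placeholders; the keyword arguments map to the
# props documented above.
def _example_classify_task(scope, construct_id="ClassifyDocument"):
    return ComprehendGenericSyncSfnTask(
        scope,
        construct_id,
        comprehend_classifier_arn="arn:aws:comprehend:us-east-1:111122223333:document-classifier-endpoint/example",
        lambda_log_level="INFO",
        textract_state_machine_timeout_minutes=2880,
        timeout=aws_cdk.Duration.hours(48),
    )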
| /schadem-cdk-construct-sfn-comprehend-classifier-0.0.1.tar.gz/schadem-cdk-construct-sfn-comprehend-classifier-0.0.1/src/schadem_cdk_construct_sfn_comprehend_classifier/__init__.py | 0.639961 | 0.184217 | __init__.py | pypi |
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk
import aws_cdk.aws_iam
import aws_cdk.aws_lambda
import aws_cdk.aws_logs
import aws_cdk.aws_stepfunctions
import constructs
class TextractGenerateCSV(
aws_cdk.aws_stepfunctions.TaskStateBase,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-sfn-generate-csv.TextractGenerateCSV",
):
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
csv_s3_output_bucket: builtins.str,
csv_s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
name: typing.Optional[builtins.str] = None,
output_type: typing.Optional[builtins.str] = None,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
) -> None:
'''
:param scope: -
:param id: -
:param csv_s3_output_bucket:
:param csv_s3_output_prefix:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param output_type:
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns. Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
'''
props = TextractGenerateCSVProps(
csv_s3_output_bucket=csv_s3_output_bucket,
csv_s3_output_prefix=csv_s3_output_prefix,
associate_with_parent=associate_with_parent,
input=input,
lambda_log_level=lambda_log_level,
lambda_memory_mb=lambda_memory_mb,
lambda_timeout=lambda_timeout,
name=name,
output_type=output_type,
comment=comment,
heartbeat=heartbeat,
input_path=input_path,
integration_pattern=integration_pattern,
output_path=output_path,
result_path=result_path,
result_selector=result_selector,
timeout=timeout,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="generateCSVLambda")
def generate_csv_lambda(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "generateCSVLambda"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="generateCSVLogGroup")
def generate_csv_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "generateCSVLogGroup"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskMetrics")
def _task_metrics(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig]:
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig], jsii.get(self, "taskMetrics"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskPolicies")
def _task_policies(
self,
) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
return typing.cast(typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]], jsii.get(self, "taskPolicies"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="stateMachine")
def state_machine(self) -> aws_cdk.aws_stepfunctions.StateMachine:
return typing.cast(aws_cdk.aws_stepfunctions.StateMachine, jsii.get(self, "stateMachine"))
@state_machine.setter
def state_machine(self, value: aws_cdk.aws_stepfunctions.StateMachine) -> None:
jsii.set(self, "stateMachine", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-sfn-generate-csv.TextractGenerateCSVProps",
jsii_struct_bases=[aws_cdk.aws_stepfunctions.TaskStateBaseProps],
name_mapping={
"comment": "comment",
"heartbeat": "heartbeat",
"input_path": "inputPath",
"integration_pattern": "integrationPattern",
"output_path": "outputPath",
"result_path": "resultPath",
"result_selector": "resultSelector",
"timeout": "timeout",
"csv_s3_output_bucket": "csvS3OutputBucket",
"csv_s3_output_prefix": "csvS3OutputPrefix",
"associate_with_parent": "associateWithParent",
"input": "input",
"lambda_log_level": "lambdaLogLevel",
"lambda_memory_mb": "lambdaMemoryMB",
"lambda_timeout": "lambdaTimeout",
"name": "name",
"output_type": "outputType",
},
)
class TextractGenerateCSVProps(aws_cdk.aws_stepfunctions.TaskStateBaseProps):
def __init__(
self,
*,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
csv_s3_output_bucket: builtins.str,
csv_s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
name: typing.Optional[builtins.str] = None,
output_type: typing.Optional[builtins.str] = None,
) -> None:
'''
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns. Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
:param csv_s3_output_bucket:
:param csv_s3_output_prefix:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param output_type:
'''
self._values: typing.Dict[str, typing.Any] = {
"csv_s3_output_bucket": csv_s3_output_bucket,
"csv_s3_output_prefix": csv_s3_output_prefix,
}
if comment is not None:
self._values["comment"] = comment
if heartbeat is not None:
self._values["heartbeat"] = heartbeat
if input_path is not None:
self._values["input_path"] = input_path
if integration_pattern is not None:
self._values["integration_pattern"] = integration_pattern
if output_path is not None:
self._values["output_path"] = output_path
if result_path is not None:
self._values["result_path"] = result_path
if result_selector is not None:
self._values["result_selector"] = result_selector
if timeout is not None:
self._values["timeout"] = timeout
if associate_with_parent is not None:
self._values["associate_with_parent"] = associate_with_parent
if input is not None:
self._values["input"] = input
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if lambda_memory_mb is not None:
self._values["lambda_memory_mb"] = lambda_memory_mb
if lambda_timeout is not None:
self._values["lambda_timeout"] = lambda_timeout
if name is not None:
self._values["name"] = name
if output_type is not None:
self._values["output_type"] = output_type
@builtins.property
def comment(self) -> typing.Optional[builtins.str]:
'''An optional description for this state.
:default: - No comment
'''
result = self._values.get("comment")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def heartbeat(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the heartbeat.
:default: - None
'''
result = self._values.get("heartbeat")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def input_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select part of the state to be the input to this state.
May also be the special value JsonPath.DISCARD, which will cause the effective
input to be the empty object {}.
:default: - The entire task input (JSON path '$')
'''
result = self._values.get("input_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def integration_pattern(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern]:
'''AWS Step Functions integrates with services directly in the Amazon States Language.
You can control these AWS services using service integration patterns.
:default:
- ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks.
``IntegrationPattern.RUN_JOB`` for the following exceptions:
``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
'''
result = self._values.get("integration_pattern")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern], result)
@builtins.property
def output_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select a portion of the state output to pass to the next state.
May also be the special value JsonPath.DISCARD, which will cause the effective
output to be the empty object {}.
:default:
- The entire JSON node determined by the state input, the task result,
and resultPath is passed to the next state (JSON path '$')
'''
result = self._values.get("output_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to indicate where to inject the state's output.
May also be the special value JsonPath.DISCARD, which will cause the state's
input to become its output.
:default: - Replaces the entire input with the result (JSON path '$')
'''
result = self._values.get("result_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_selector(
self,
) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:
'''The JSON that will replace the state's raw result and become the effective result before ResultPath is applied.
You can use ResultSelector to create a payload with values that are static
or selected from the state's raw result.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector
'''
result = self._values.get("result_selector")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the state machine.
:default: - None
'''
result = self._values.get("timeout")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def csv_s3_output_bucket(self) -> builtins.str:
result = self._values.get("csv_s3_output_bucket")
assert result is not None, "Required property 'csv_s3_output_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def csv_s3_output_prefix(self) -> builtins.str:
result = self._values.get("csv_s3_output_prefix")
assert result is not None, "Required property 'csv_s3_output_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def associate_with_parent(self) -> typing.Optional[builtins.bool]:
'''Pass the execution ID from the context object to the execution input.
This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines.
If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely.
:default: - false
:see: https://docs.aws.amazon.com/step-functions/latest/dg/concepts-nested-workflows.html#nested-execution-startid
'''
result = self._values.get("associate_with_parent")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def input(self) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskInput]:
'''The JSON input for the execution, same as that of StartExecution.
:default: - The state input (JSON path '$')
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("input")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskInput], result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def lambda_memory_mb(self) -> typing.Optional[jsii.Number]:
'''Memory of the Lambda function in MB (may need to be increased for larger documents).'''
result = self._values.get("lambda_memory_mb")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def lambda_timeout(self) -> typing.Optional[jsii.Number]:
result = self._values.get("lambda_timeout")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''The name of the execution, same as that of StartExecution.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def output_type(self) -> typing.Optional[builtins.str]:
result = self._values.get("output_type")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractGenerateCSVProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"TextractGenerateCSV",
"TextractGenerateCSVProps",
]
publication.publish()
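# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated bindings).
# Shows the documented constraint on ``associate_with_parent``: when it is
# true, ``input`` must come from ``TaskInput.from_object`` (or be omitted).
# The construct id, bucket, prefix, payload and memory size are placeholders.
def _example_generate_csv_task(scope: constructs.Construct) -> TextractGenerateCSV:
    return TextractGenerateCSV(
        scope,
        "GenerateCSV",
        csv_s3_output_bucket="example-output-bucket",
        csv_s3_output_prefix="csv/",
        associate_with_parent=True,
        input=aws_cdk.aws_stepfunctions.TaskInput.from_object({"Payload.$": "$"}),
        lambda_memory_mb=1024,
    )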
| /schadem-cdk-construct-sfn-generate-csv-0.0.3.tar.gz/schadem-cdk-construct-sfn-generate-csv-0.0.3/src/schadem_cdk_construct_sfn_generate_csv/__init__.py | 0.657538 | 0.206014 | __init__.py | pypi |
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk.aws_stepfunctions
import constructs
@jsii.data_type(
jsii_type="schadem-cdk-construct-sfn-idp-decider.TextractDPPOCDeciderProps",
jsii_struct_bases=[],
name_mapping={
"lambda_memory_mb": "lambdaMemoryMB",
"lambda_timeout": "lambdaTimeout",
},
)
class TextractDPPOCDeciderProps:
def __init__(
self,
*,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
self._values: typing.Dict[str, typing.Any] = {}
if lambda_memory_mb is not None:
self._values["lambda_memory_mb"] = lambda_memory_mb
if lambda_timeout is not None:
self._values["lambda_timeout"] = lambda_timeout
@builtins.property
def lambda_memory_mb(self) -> typing.Optional[jsii.Number]:
'''Memory of the Lambda function in MB (may need to be increased for larger documents).'''
result = self._values.get("lambda_memory_mb")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def lambda_timeout(self) -> typing.Optional[jsii.Number]:
result = self._values.get("lambda_timeout")
return typing.cast(typing.Optional[jsii.Number], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractDPPOCDeciderProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class TextractPOCDecider(
aws_cdk.aws_stepfunctions.StateMachineFragment,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-sfn-idp-decider.TextractPOCDecider",
):
def __init__(
self,
parent: constructs.Construct,
id: builtins.str,
*,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param parent: -
:param id: -
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
props = TextractDPPOCDeciderProps(
lambda_memory_mb=lambda_memory_mb, lambda_timeout=lambda_timeout
)
jsii.create(self.__class__, self, [parent, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="endStates")
def end_states(self) -> typing.List[aws_cdk.aws_stepfunctions.INextable]:
'''The states to chain onto if this fragment is used.'''
return typing.cast(typing.List[aws_cdk.aws_stepfunctions.INextable], jsii.get(self, "endStates"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="startState")
def start_state(self) -> aws_cdk.aws_stepfunctions.State:
'''The start state of this state machine fragment.'''
return typing.cast(aws_cdk.aws_stepfunctions.State, jsii.get(self, "startState"))
__all__ = [
"TextractDPPOCDeciderProps",
"TextractPOCDecider",
]
publication.publish()
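# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated bindings).
# The decider is a StateMachineFragment, so it exposes ``start_state`` and
# ``end_states`` for wiring into a larger state machine. The construct id and
# sizing values below are placeholders.
def _example_decider(parent: constructs.Construct) -> TextractPOCDecider:
    return TextractPOCDecider(
        parent,
        "Decider",
        lambda_memory_mb=1024,
        lambda_timeout=300,
    )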
| /schadem-cdk-construct-sfn-idp-decider-0.0.3.tar.gz/schadem-cdk-construct-sfn-idp-decider-0.0.3/src/schadem_cdk_construct_sfn_idp_decider/__init__.py | 0.683208 | 0.190347 | __init__.py | pypi |
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk.aws_stepfunctions
import constructs
class TextractAsyncToJSON(
aws_cdk.aws_stepfunctions.StateMachineFragment,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-sfn-textract-output-config-to-json.TextractAsyncToJSON",
):
def __init__(
self,
parent: constructs.Construct,
id: builtins.str,
*,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param parent: -
:param id: -
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
props = TextractAsyncToJSONProps(
s3_output_bucket=s3_output_bucket,
s3_output_prefix=s3_output_prefix,
lambda_log_level=lambda_log_level,
lambda_memory_mb=lambda_memory_mb,
lambda_timeout=lambda_timeout,
)
jsii.create(self.__class__, self, [parent, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="endStates")
def end_states(self) -> typing.List[aws_cdk.aws_stepfunctions.INextable]:
'''The states to chain onto if this fragment is used.'''
return typing.cast(typing.List[aws_cdk.aws_stepfunctions.INextable], jsii.get(self, "endStates"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="startState")
def start_state(self) -> aws_cdk.aws_stepfunctions.State:
'''The start state of this state machine fragment.'''
return typing.cast(aws_cdk.aws_stepfunctions.State, jsii.get(self, "startState"))
@jsii.data_type(
jsii_type="schadem-cdk-construct-sfn-textract-output-config-to-json.TextractAsyncToJSONProps",
jsii_struct_bases=[],
name_mapping={
"s3_output_bucket": "s3OutputBucket",
"s3_output_prefix": "s3OutputPrefix",
"lambda_log_level": "lambdaLogLevel",
"lambda_memory_mb": "lambdaMemoryMB",
"lambda_timeout": "lambdaTimeout",
},
)
class TextractAsyncToJSONProps:
def __init__(
self,
*,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
self._values: typing.Dict[str, typing.Any] = {
"s3_output_bucket": s3_output_bucket,
"s3_output_prefix": s3_output_prefix,
}
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if lambda_memory_mb is not None:
self._values["lambda_memory_mb"] = lambda_memory_mb
if lambda_timeout is not None:
self._values["lambda_timeout"] = lambda_timeout
@builtins.property
def s3_output_bucket(self) -> builtins.str:
result = self._values.get("s3_output_bucket")
assert result is not None, "Required property 's3_output_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def s3_output_prefix(self) -> builtins.str:
'''The prefix to use for the output files.'''
result = self._values.get("s3_output_prefix")
assert result is not None, "Required property 's3_output_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def lambda_memory_mb(self) -> typing.Optional[jsii.Number]:
'''Memory of the Lambda function in MB (may need to be increased for larger documents).'''
result = self._values.get("lambda_memory_mb")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def lambda_timeout(self) -> typing.Optional[jsii.Number]:
result = self._values.get("lambda_timeout")
return typing.cast(typing.Optional[jsii.Number], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractAsyncToJSONProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"TextractAsyncToJSON",
"TextractAsyncToJSONProps",
]
publication.publish()
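# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated bindings).
# Presumably converts asynchronous Textract output into JSON under the given
# bucket/prefix (inferred from the construct name); the construct id, bucket,
# prefix and memory size below are placeholders.
def _example_async_to_json(parent: constructs.Construct) -> TextractAsyncToJSON:
    return TextractAsyncToJSON(
        parent,
        "AsyncToJSON",
        s3_output_bucket="example-textract-output",
        s3_output_prefix="textract-json/",
        lambda_memory_mb=2048,
    )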
| /schadem-cdk-construct-sfn-textract-output-config-to-json-0.0.0.tar.gz/schadem-cdk-construct-sfn-textract-output-config-to-json-0.0.0/src/schadem_cdk_construct_sfn_textract_output_config_to_json/__init__.py | 0.729905 | 0.193662 | __init__.py | pypi |
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk
import aws_cdk.aws_dynamodb
import aws_cdk.aws_iam
import aws_cdk.aws_lambda
import aws_cdk.aws_logs
import aws_cdk.aws_sns
import aws_cdk.aws_sqs
import aws_cdk.aws_stepfunctions
import aws_cdk.aws_stepfunctions_tasks
import constructs
class TextractAsyncToJSON(
aws_cdk.aws_stepfunctions.StateMachineFragment,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-textract-generic-async.TextractAsyncToJSON",
):
def __init__(
self,
parent: constructs.Construct,
id: builtins.str,
*,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param parent: -
:param id: -
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
props = TextractAsyncToJSONProps(
s3_output_bucket=s3_output_bucket,
s3_output_prefix=s3_output_prefix,
lambda_log_level=lambda_log_level,
lambda_memory_mb=lambda_memory_mb,
lambda_timeout=lambda_timeout,
)
jsii.create(self.__class__, self, [parent, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="endStates")
def end_states(self) -> typing.List[aws_cdk.aws_stepfunctions.INextable]:
'''The states to chain onto if this fragment is used.'''
return typing.cast(typing.List[aws_cdk.aws_stepfunctions.INextable], jsii.get(self, "endStates"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="startState")
def start_state(self) -> aws_cdk.aws_stepfunctions.State:
'''The start state of this state machine fragment.'''
return typing.cast(aws_cdk.aws_stepfunctions.State, jsii.get(self, "startState"))
@jsii.data_type(
jsii_type="schadem-cdk-construct-textract-generic-async.TextractAsyncToJSONProps",
jsii_struct_bases=[],
name_mapping={
"s3_output_bucket": "s3OutputBucket",
"s3_output_prefix": "s3OutputPrefix",
"lambda_log_level": "lambdaLogLevel",
"lambda_memory_mb": "lambdaMemoryMB",
"lambda_timeout": "lambdaTimeout",
},
)
class TextractAsyncToJSONProps:
def __init__(
self,
*,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
self._values: typing.Dict[str, typing.Any] = {
"s3_output_bucket": s3_output_bucket,
"s3_output_prefix": s3_output_prefix,
}
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if lambda_memory_mb is not None:
self._values["lambda_memory_mb"] = lambda_memory_mb
if lambda_timeout is not None:
self._values["lambda_timeout"] = lambda_timeout
@builtins.property
def s3_output_bucket(self) -> builtins.str:
result = self._values.get("s3_output_bucket")
assert result is not None, "Required property 's3_output_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def s3_output_prefix(self) -> builtins.str:
'''The prefix to use for the output files.'''
result = self._values.get("s3_output_prefix")
assert result is not None, "Required property 's3_output_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def lambda_memory_mb(self) -> typing.Optional[jsii.Number]:
'''Memory of the Lambda function in MB (may need to be increased for larger documents).'''
result = self._values.get("lambda_memory_mb")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def lambda_timeout(self) -> typing.Optional[jsii.Number]:
result = self._values.get("lambda_timeout")
return typing.cast(typing.Optional[jsii.Number], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractAsyncToJSONProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class TextractClassificationConfigurator(
aws_cdk.aws_stepfunctions.StateMachineFragment,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-textract-generic-async.TextractClassificationConfigurator",
):
def __init__(
self,
parent: constructs.Construct,
id: builtins.str,
*,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param parent: -
:param id: -
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
props = TextractClassificationConfiguratorProps(
lambda_log_level=lambda_log_level,
lambda_memory_mb=lambda_memory_mb,
lambda_timeout=lambda_timeout,
)
jsii.create(self.__class__, self, [parent, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="endStates")
def end_states(self) -> typing.List[aws_cdk.aws_stepfunctions.INextable]:
'''The states to chain onto if this fragment is used.'''
return typing.cast(typing.List[aws_cdk.aws_stepfunctions.INextable], jsii.get(self, "endStates"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="startState")
def start_state(self) -> aws_cdk.aws_stepfunctions.State:
'''The start state of this state machine fragment.'''
return typing.cast(aws_cdk.aws_stepfunctions.State, jsii.get(self, "startState"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="configurationTable")
def configuration_table(self) -> aws_cdk.aws_dynamodb.ITable:
return typing.cast(aws_cdk.aws_dynamodb.ITable, jsii.get(self, "configurationTable"))
@configuration_table.setter
def configuration_table(self, value: aws_cdk.aws_dynamodb.ITable) -> None:
jsii.set(self, "configurationTable", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="configuratorFunction")
def configurator_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "configuratorFunction"))
@configurator_function.setter
def configurator_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "configuratorFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="configuratorFunctionLogGroupName")
def configurator_function_log_group_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "configuratorFunctionLogGroupName"))
@configurator_function_log_group_name.setter
def configurator_function_log_group_name(self, value: builtins.str) -> None:
jsii.set(self, "configuratorFunctionLogGroupName", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-textract-generic-async.TextractClassificationConfiguratorProps",
jsii_struct_bases=[],
name_mapping={
"lambda_log_level": "lambdaLogLevel",
"lambda_memory_mb": "lambdaMemoryMB",
"lambda_timeout": "lambdaTimeout",
},
)
class TextractClassificationConfiguratorProps:
def __init__(
self,
*,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
self._values: typing.Dict[str, typing.Any] = {}
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if lambda_memory_mb is not None:
self._values["lambda_memory_mb"] = lambda_memory_mb
if lambda_timeout is not None:
self._values["lambda_timeout"] = lambda_timeout
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def lambda_memory_mb(self) -> typing.Optional[jsii.Number]:
'''Memory of the Lambda function in MB (may need to be increased for larger documents).'''
result = self._values.get("lambda_memory_mb")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def lambda_timeout(self) -> typing.Optional[jsii.Number]:
result = self._values.get("lambda_timeout")
return typing.cast(typing.Optional[jsii.Number], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractClassificationConfiguratorProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
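# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated bindings).
# Both constructs above extend StateMachineFragment, so they can be chained
# into a workflow; the construct ids, bucket and prefix below are placeholders.
def _example_chain_fragments(scope: constructs.Construct) -> aws_cdk.aws_stepfunctions.Chain:
    to_json = TextractAsyncToJSON(
        scope,
        "AsyncToJSON",
        s3_output_bucket="example-textract-output",
        s3_output_prefix="textract-json/",
    )
    configurator = TextractClassificationConfigurator(scope, "Configurator")
    return aws_cdk.aws_stepfunctions.Chain.start(to_json).next(configurator)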
@jsii.data_type(
jsii_type="schadem-cdk-construct-textract-generic-async.TextractDPPOCDeciderProps",
jsii_struct_bases=[],
name_mapping={
"lambda_memory_mb": "lambdaMemoryMB",
"lambda_timeout": "lambdaTimeout",
},
)
class TextractDPPOCDeciderProps:
def __init__(
self,
*,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
self._values: typing.Dict[str, typing.Any] = {}
if lambda_memory_mb is not None:
self._values["lambda_memory_mb"] = lambda_memory_mb
if lambda_timeout is not None:
self._values["lambda_timeout"] = lambda_timeout
@builtins.property
def lambda_memory_mb(self) -> typing.Optional[jsii.Number]:
'''Memory of the Lambda function in MB (may need to be increased for larger documents).'''
result = self._values.get("lambda_memory_mb")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def lambda_timeout(self) -> typing.Optional[jsii.Number]:
result = self._values.get("lambda_timeout")
return typing.cast(typing.Optional[jsii.Number], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractDPPOCDeciderProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class TextractGenerateCSV(
aws_cdk.aws_stepfunctions.TaskStateBase,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-textract-generic-async.TextractGenerateCSV",
):
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
csv_s3_output_bucket: builtins.str,
csv_s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
name: typing.Optional[builtins.str] = None,
output_type: typing.Optional[builtins.str] = None,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
) -> None:
'''
:param scope: -
:param id: -
:param csv_s3_output_bucket:
:param csv_s3_output_prefix:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param output_type:
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns. Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
'''
props = TextractGenerateCSVProps(
csv_s3_output_bucket=csv_s3_output_bucket,
csv_s3_output_prefix=csv_s3_output_prefix,
associate_with_parent=associate_with_parent,
input=input,
lambda_log_level=lambda_log_level,
lambda_memory_mb=lambda_memory_mb,
lambda_timeout=lambda_timeout,
name=name,
output_type=output_type,
comment=comment,
heartbeat=heartbeat,
input_path=input_path,
integration_pattern=integration_pattern,
output_path=output_path,
result_path=result_path,
result_selector=result_selector,
timeout=timeout,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="generateCSVLambda")
def generate_csv_lambda(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "generateCSVLambda"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="generateCSVLogGroup")
def generate_csv_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "generateCSVLogGroup"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskMetrics")
def _task_metrics(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig]:
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig], jsii.get(self, "taskMetrics"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskPolicies")
def _task_policies(
self,
) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
return typing.cast(typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]], jsii.get(self, "taskPolicies"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="stateMachine")
def state_machine(self) -> aws_cdk.aws_stepfunctions.StateMachine:
return typing.cast(aws_cdk.aws_stepfunctions.StateMachine, jsii.get(self, "stateMachine"))
@state_machine.setter
def state_machine(self, value: aws_cdk.aws_stepfunctions.StateMachine) -> None:
jsii.set(self, "stateMachine", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-textract-generic-async.TextractGenerateCSVProps",
jsii_struct_bases=[aws_cdk.aws_stepfunctions.TaskStateBaseProps],
name_mapping={
"comment": "comment",
"heartbeat": "heartbeat",
"input_path": "inputPath",
"integration_pattern": "integrationPattern",
"output_path": "outputPath",
"result_path": "resultPath",
"result_selector": "resultSelector",
"timeout": "timeout",
"csv_s3_output_bucket": "csvS3OutputBucket",
"csv_s3_output_prefix": "csvS3OutputPrefix",
"associate_with_parent": "associateWithParent",
"input": "input",
"lambda_log_level": "lambdaLogLevel",
"lambda_memory_mb": "lambdaMemoryMB",
"lambda_timeout": "lambdaTimeout",
"name": "name",
"output_type": "outputType",
},
)
class TextractGenerateCSVProps(aws_cdk.aws_stepfunctions.TaskStateBaseProps):
def __init__(
self,
*,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
csv_s3_output_bucket: builtins.str,
csv_s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
name: typing.Optional[builtins.str] = None,
output_type: typing.Optional[builtins.str] = None,
) -> None:
'''
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
:param csv_s3_output_bucket:
:param csv_s3_output_prefix:
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
        :param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param output_type:
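
        Example (a minimal sketch of building the props struct directly; the values
        below are assumed placeholders)::

            props = TextractGenerateCSVProps(
                csv_s3_output_bucket="my-output-bucket",
                csv_s3_output_prefix="csv-output",
                lambda_memory_mb=1024,
            )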
'''
self._values: typing.Dict[str, typing.Any] = {
"csv_s3_output_bucket": csv_s3_output_bucket,
"csv_s3_output_prefix": csv_s3_output_prefix,
}
if comment is not None:
self._values["comment"] = comment
if heartbeat is not None:
self._values["heartbeat"] = heartbeat
if input_path is not None:
self._values["input_path"] = input_path
if integration_pattern is not None:
self._values["integration_pattern"] = integration_pattern
if output_path is not None:
self._values["output_path"] = output_path
if result_path is not None:
self._values["result_path"] = result_path
if result_selector is not None:
self._values["result_selector"] = result_selector
if timeout is not None:
self._values["timeout"] = timeout
if associate_with_parent is not None:
self._values["associate_with_parent"] = associate_with_parent
if input is not None:
self._values["input"] = input
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if lambda_memory_mb is not None:
self._values["lambda_memory_mb"] = lambda_memory_mb
if lambda_timeout is not None:
self._values["lambda_timeout"] = lambda_timeout
if name is not None:
self._values["name"] = name
if output_type is not None:
self._values["output_type"] = output_type
@builtins.property
def comment(self) -> typing.Optional[builtins.str]:
'''An optional description for this state.
:default: - No comment
'''
result = self._values.get("comment")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def heartbeat(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the heartbeat.
:default: - None
'''
result = self._values.get("heartbeat")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def input_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select part of the state to be the input to this state.
May also be the special value JsonPath.DISCARD, which will cause the effective
input to be the empty object {}.
:default: - The entire task input (JSON path '$')
'''
result = self._values.get("input_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def integration_pattern(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern]:
'''AWS Step Functions integrates with services directly in the Amazon States Language.
You can control these AWS services using service integration patterns
:default:
- ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks.
``IntegrationPattern.RUN_JOB`` for the following exceptions:
``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
'''
result = self._values.get("integration_pattern")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern], result)
@builtins.property
def output_path(self) -> typing.Optional[builtins.str]:
        '''JSONPath expression to select a portion of the state output to pass to the next state.
May also be the special value JsonPath.DISCARD, which will cause the effective
output to be the empty object {}.
:default:
- The entire JSON node determined by the state input, the task result,
and resultPath is passed to the next state (JSON path '$')
'''
result = self._values.get("output_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to indicate where to inject the state's output.
May also be the special value JsonPath.DISCARD, which will cause the state's
input to become its output.
:default: - Replaces the entire input with the result (JSON path '$')
'''
result = self._values.get("result_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_selector(
self,
) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:
'''The JSON that will replace the state's raw result and become the effective result before ResultPath is applied.
You can use ResultSelector to create a payload with values that are static
or selected from the state's raw result.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector
'''
result = self._values.get("result_selector")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the state machine.
:default: - None
'''
result = self._values.get("timeout")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def csv_s3_output_bucket(self) -> builtins.str:
result = self._values.get("csv_s3_output_bucket")
assert result is not None, "Required property 'csv_s3_output_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def csv_s3_output_prefix(self) -> builtins.str:
result = self._values.get("csv_s3_output_prefix")
assert result is not None, "Required property 'csv_s3_output_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def associate_with_parent(self) -> typing.Optional[builtins.bool]:
'''Pass the execution ID from the context object to the execution input.
This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines.
If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely.
:default: - false
:see: https://docs.aws.amazon.com/step-functions/latest/dg/concepts-nested-workflows.html#nested-execution-startid
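
        Example (a minimal sketch of the constraint above; the payload keys are
        assumed placeholders)::

            props = TextractGenerateCSVProps(
                csv_s3_output_bucket="my-output-bucket",
                csv_s3_output_prefix="csv-output",
                associate_with_parent=True,
                # with associate_with_parent=True, input must come from
                # TaskInput.from_object (or be omitted entirely)
                input=aws_cdk.aws_stepfunctions.TaskInput.from_object({"doc": "placeholder"}),
            )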
'''
result = self._values.get("associate_with_parent")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def input(self) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskInput]:
'''The JSON input for the execution, same as that of StartExecution.
:default: - The state input (JSON path '$')
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("input")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskInput], result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def lambda_memory_mb(self) -> typing.Optional[jsii.Number]:
        '''Memory of the Lambda function in MB (may need to be increased for larger documents).'''
result = self._values.get("lambda_memory_mb")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def lambda_timeout(self) -> typing.Optional[jsii.Number]:
result = self._values.get("lambda_timeout")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''The name of the execution, same as that of StartExecution.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def output_type(self) -> typing.Optional[builtins.str]:
result = self._values.get("output_type")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractGenerateCSVProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class TextractGenericAsyncSfnTask(
aws_cdk.aws_stepfunctions.TaskStateBase,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-textract-generic-async.TextractGenericAsyncSfnTask",
):
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
s3_output_bucket: builtins.str,
s3_temp_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
s3_input_bucket: typing.Optional[builtins.str] = None,
s3_input_prefix: typing.Optional[builtins.str] = None,
s3_output_prefix: typing.Optional[builtins.str] = None,
task_token_table_name: typing.Optional[builtins.str] = None,
textract_api: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
) -> None:
'''
:param scope: -
:param id: -
:param s3_output_bucket:
        :param s3_temp_output_prefix: The prefix to use for the temporary output files (e.g. output from the async process before stitching the results together).
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param name: The name of the execution, same as that of StartExecution. Default: - None
        :param s3_input_bucket: Location of the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
        :param s3_input_prefix: Prefix for the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
:param s3_output_prefix: The prefix to use for the output files.
:param task_token_table_name:
        :param textract_api: Textract API to call.
        :param textract_state_machine_timeout_minutes: How long, in minutes, to wait for the Textract process before timing out (default is 48 hours, i.e. 60*48 = 2880 minutes).
:param workflow_tracing_enabled:
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
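
        Example (a minimal usage sketch; ``stack`` and the S3 names are assumed
        placeholders, and the task-token integration pattern is an assumption for
        the asynchronous flow)::

            textract_async_task = TextractGenericAsyncSfnTask(
                stack, "TextractAsync",
                s3_output_bucket="my-output-bucket",
                s3_temp_output_prefix="textract-temp",
                integration_pattern=aws_cdk.aws_stepfunctions.IntegrationPattern.WAIT_FOR_TASK_TOKEN,
                textract_state_machine_timeout_minutes=2880,
            )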
'''
props = TextractGenericAsyncSfnTaskProps(
s3_output_bucket=s3_output_bucket,
s3_temp_output_prefix=s3_temp_output_prefix,
associate_with_parent=associate_with_parent,
custom_function=custom_function,
enable_dashboard=enable_dashboard,
enable_monitoring=enable_monitoring,
input=input,
lambda_log_level=lambda_log_level,
name=name,
s3_input_bucket=s3_input_bucket,
s3_input_prefix=s3_input_prefix,
s3_output_prefix=s3_output_prefix,
task_token_table_name=task_token_table_name,
textract_api=textract_api,
textract_state_machine_timeout_minutes=textract_state_machine_timeout_minutes,
workflow_tracing_enabled=workflow_tracing_enabled,
comment=comment,
heartbeat=heartbeat,
input_path=input_path,
integration_pattern=integration_pattern,
output_path=output_path,
result_path=result_path,
result_selector=result_selector,
timeout=timeout,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskMetrics")
def _task_metrics(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig]:
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig], jsii.get(self, "taskMetrics"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskPolicies")
def _task_policies(
self,
) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
return typing.cast(typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]], jsii.get(self, "taskPolicies"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="dashboardName")
def dashboard_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "dashboardName"))
@dashboard_name.setter
def dashboard_name(self, value: builtins.str) -> None:
jsii.set(self, "dashboardName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="putOnSQSLambdaLogGroup")
def put_on_sqs_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "putOnSQSLambdaLogGroup"))
@put_on_sqs_lambda_log_group.setter
def put_on_sqs_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "putOnSQSLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="receiveStartSNSLambdaLogGroup")
def receive_start_sns_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "receiveStartSNSLambdaLogGroup"))
@receive_start_sns_lambda_log_group.setter
def receive_start_sns_lambda_log_group(
self,
value: aws_cdk.aws_logs.ILogGroup,
) -> None:
jsii.set(self, "receiveStartSNSLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="startTextractLambdaLogGroup")
def start_textract_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "startTextractLambdaLogGroup"))
@start_textract_lambda_log_group.setter
def start_textract_lambda_log_group(
self,
value: aws_cdk.aws_logs.ILogGroup,
) -> None:
jsii.set(self, "startTextractLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="stateMachine")
def state_machine(self) -> aws_cdk.aws_stepfunctions.IStateMachine:
return typing.cast(aws_cdk.aws_stepfunctions.IStateMachine, jsii.get(self, "stateMachine"))
@state_machine.setter
def state_machine(self, value: aws_cdk.aws_stepfunctions.IStateMachine) -> None:
jsii.set(self, "stateMachine", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskTokenTableName")
def task_token_table_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "taskTokenTableName"))
@task_token_table_name.setter
def task_token_table_name(self, value: builtins.str) -> None:
jsii.set(self, "taskTokenTableName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractAsyncCallFunction")
def textract_async_call_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractAsyncCallFunction"))
@textract_async_call_function.setter
def textract_async_call_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractAsyncCallFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractAsyncReceiveSNSFunction")
def textract_async_receive_sns_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractAsyncReceiveSNSFunction"))
@textract_async_receive_sns_function.setter
def textract_async_receive_sns_function(
self,
value: aws_cdk.aws_lambda.IFunction,
) -> None:
jsii.set(self, "textractAsyncReceiveSNSFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractAsyncSNS")
def textract_async_sns(self) -> aws_cdk.aws_sns.ITopic:
return typing.cast(aws_cdk.aws_sns.ITopic, jsii.get(self, "textractAsyncSNS"))
@textract_async_sns.setter
def textract_async_sns(self, value: aws_cdk.aws_sns.ITopic) -> None:
jsii.set(self, "textractAsyncSNS", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractAsyncSNSRole")
def textract_async_sns_role(self) -> aws_cdk.aws_iam.IRole:
return typing.cast(aws_cdk.aws_iam.IRole, jsii.get(self, "textractAsyncSNSRole"))
@textract_async_sns_role.setter
def textract_async_sns_role(self, value: aws_cdk.aws_iam.IRole) -> None:
jsii.set(self, "textractAsyncSNSRole", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractAsyncSQS")
def textract_async_sqs(self) -> aws_cdk.aws_sqs.IQueue:
return typing.cast(aws_cdk.aws_sqs.IQueue, jsii.get(self, "textractAsyncSQS"))
@textract_async_sqs.setter
def textract_async_sqs(self, value: aws_cdk.aws_sqs.IQueue) -> None:
jsii.set(self, "textractAsyncSQS", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractPutOnSQSFunction")
def textract_put_on_sqs_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractPutOnSQSFunction"))
@textract_put_on_sqs_function.setter
def textract_put_on_sqs_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractPutOnSQSFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="version")
def version(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "version"))
@version.setter
def version(self, value: builtins.str) -> None:
jsii.set(self, "version", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-textract-generic-async.TextractGenericAsyncSfnTaskProps",
jsii_struct_bases=[aws_cdk.aws_stepfunctions.TaskStateBaseProps],
name_mapping={
"comment": "comment",
"heartbeat": "heartbeat",
"input_path": "inputPath",
"integration_pattern": "integrationPattern",
"output_path": "outputPath",
"result_path": "resultPath",
"result_selector": "resultSelector",
"timeout": "timeout",
"s3_output_bucket": "s3OutputBucket",
"s3_temp_output_prefix": "s3TempOutputPrefix",
"associate_with_parent": "associateWithParent",
"custom_function": "customFunction",
"enable_dashboard": "enableDashboard",
"enable_monitoring": "enableMonitoring",
"input": "input",
"lambda_log_level": "lambdaLogLevel",
"name": "name",
"s3_input_bucket": "s3InputBucket",
"s3_input_prefix": "s3InputPrefix",
"s3_output_prefix": "s3OutputPrefix",
"task_token_table_name": "taskTokenTableName",
"textract_api": "textractAPI",
"textract_state_machine_timeout_minutes": "textractStateMachineTimeoutMinutes",
"workflow_tracing_enabled": "workflowTracingEnabled",
},
)
class TextractGenericAsyncSfnTaskProps(aws_cdk.aws_stepfunctions.TaskStateBaseProps):
def __init__(
self,
*,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
s3_output_bucket: builtins.str,
s3_temp_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
s3_input_bucket: typing.Optional[builtins.str] = None,
s3_input_prefix: typing.Optional[builtins.str] = None,
s3_output_prefix: typing.Optional[builtins.str] = None,
task_token_table_name: typing.Optional[builtins.str] = None,
textract_api: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
) -> None:
'''
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
:param s3_output_bucket:
        :param s3_temp_output_prefix: The prefix to use for the temporary output files (e.g. output from the async process before stitching the results together).
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level:
:param name: The name of the execution, same as that of StartExecution. Default: - None
        :param s3_input_bucket: Location of the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
        :param s3_input_prefix: Prefix for the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
:param s3_output_prefix: The prefix to use for the output files.
:param task_token_table_name:
        :param textract_api: Textract API to call.
        :param textract_state_machine_timeout_minutes: How long, in minutes, to wait for the Textract process before timing out (default is 48 hours, i.e. 60*48 = 2880 minutes).
:param workflow_tracing_enabled:
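
        Example (a minimal sketch of the props struct; placeholder values, with the
        timeout simply restating the documented 2880-minute default)::

            async_props = TextractGenericAsyncSfnTaskProps(
                s3_output_bucket="my-output-bucket",
                s3_temp_output_prefix="textract-temp",
                textract_state_machine_timeout_minutes=2880,
            )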
'''
self._values: typing.Dict[str, typing.Any] = {
"s3_output_bucket": s3_output_bucket,
"s3_temp_output_prefix": s3_temp_output_prefix,
}
if comment is not None:
self._values["comment"] = comment
if heartbeat is not None:
self._values["heartbeat"] = heartbeat
if input_path is not None:
self._values["input_path"] = input_path
if integration_pattern is not None:
self._values["integration_pattern"] = integration_pattern
if output_path is not None:
self._values["output_path"] = output_path
if result_path is not None:
self._values["result_path"] = result_path
if result_selector is not None:
self._values["result_selector"] = result_selector
if timeout is not None:
self._values["timeout"] = timeout
if associate_with_parent is not None:
self._values["associate_with_parent"] = associate_with_parent
if custom_function is not None:
self._values["custom_function"] = custom_function
if enable_dashboard is not None:
self._values["enable_dashboard"] = enable_dashboard
if enable_monitoring is not None:
self._values["enable_monitoring"] = enable_monitoring
if input is not None:
self._values["input"] = input
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if name is not None:
self._values["name"] = name
if s3_input_bucket is not None:
self._values["s3_input_bucket"] = s3_input_bucket
if s3_input_prefix is not None:
self._values["s3_input_prefix"] = s3_input_prefix
if s3_output_prefix is not None:
self._values["s3_output_prefix"] = s3_output_prefix
if task_token_table_name is not None:
self._values["task_token_table_name"] = task_token_table_name
if textract_api is not None:
self._values["textract_api"] = textract_api
if textract_state_machine_timeout_minutes is not None:
self._values["textract_state_machine_timeout_minutes"] = textract_state_machine_timeout_minutes
if workflow_tracing_enabled is not None:
self._values["workflow_tracing_enabled"] = workflow_tracing_enabled
@builtins.property
def comment(self) -> typing.Optional[builtins.str]:
'''An optional description for this state.
:default: - No comment
'''
result = self._values.get("comment")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def heartbeat(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the heartbeat.
:default: - None
'''
result = self._values.get("heartbeat")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def input_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select part of the state to be the input to this state.
May also be the special value JsonPath.DISCARD, which will cause the effective
input to be the empty object {}.
:default: - The entire task input (JSON path '$')
'''
result = self._values.get("input_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def integration_pattern(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern]:
'''AWS Step Functions integrates with services directly in the Amazon States Language.
You can control these AWS services using service integration patterns
:default:
- ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks.
``IntegrationPattern.RUN_JOB`` for the following exceptions:
``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
'''
result = self._values.get("integration_pattern")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern], result)
@builtins.property
def output_path(self) -> typing.Optional[builtins.str]:
        '''JSONPath expression to select a portion of the state output to pass to the next state.
May also be the special value JsonPath.DISCARD, which will cause the effective
output to be the empty object {}.
:default:
- The entire JSON node determined by the state input, the task result,
and resultPath is passed to the next state (JSON path '$')
'''
result = self._values.get("output_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to indicate where to inject the state's output.
May also be the special value JsonPath.DISCARD, which will cause the state's
input to become its output.
:default: - Replaces the entire input with the result (JSON path '$')
'''
result = self._values.get("result_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_selector(
self,
) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:
'''The JSON that will replace the state's raw result and become the effective result before ResultPath is applied.
You can use ResultSelector to create a payload with values that are static
or selected from the state's raw result.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector
'''
result = self._values.get("result_selector")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the state machine.
:default: - None
'''
result = self._values.get("timeout")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def s3_output_bucket(self) -> builtins.str:
result = self._values.get("s3_output_bucket")
assert result is not None, "Required property 's3_output_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def s3_temp_output_prefix(self) -> builtins.str:
        '''The prefix to use for the temporary output files (e.g. output from the async process before stitching the results together).
'''
result = self._values.get("s3_temp_output_prefix")
assert result is not None, "Required property 's3_temp_output_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def associate_with_parent(self) -> typing.Optional[builtins.bool]:
'''Pass the execution ID from the context object to the execution input.
This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines.
If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely.
:default: - false
:see: https://docs.aws.amazon.com/step-functions/latest/dg/concepts-nested-workflows.html#nested-execution-startid
'''
result = self._values.get("associate_with_parent")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def custom_function(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke]:
'''not implemented yet.'''
result = self._values.get("custom_function")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke], result)
@builtins.property
def enable_dashboard(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_dashboard")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def enable_monitoring(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_monitoring")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def input(self) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskInput]:
'''The JSON input for the execution, same as that of StartExecution.
:default: - The state input (JSON path '$')
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("input")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskInput], result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''The name of the execution, same as that of StartExecution.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def s3_input_bucket(self) -> typing.Optional[builtins.str]:
        '''Location of the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.'''
result = self._values.get("s3_input_bucket")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def s3_input_prefix(self) -> typing.Optional[builtins.str]:
        '''Prefix for the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.'''
result = self._values.get("s3_input_prefix")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def s3_output_prefix(self) -> typing.Optional[builtins.str]:
'''The prefix to use for the output files.'''
result = self._values.get("s3_output_prefix")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def task_token_table_name(self) -> typing.Optional[builtins.str]:
result = self._values.get("task_token_table_name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def textract_api(self) -> typing.Optional[builtins.str]:
        '''Textract API to call.'''
result = self._values.get("textract_api")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def textract_state_machine_timeout_minutes(self) -> typing.Optional[jsii.Number]:
        '''How long, in minutes, to wait for the Textract process before timing out (default is 48 hours, i.e. 60*48 = 2880 minutes).'''
result = self._values.get("textract_state_machine_timeout_minutes")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def workflow_tracing_enabled(self) -> typing.Optional[builtins.bool]:
result = self._values.get("workflow_tracing_enabled")
return typing.cast(typing.Optional[builtins.bool], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractGenericAsyncSfnTaskProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class TextractGenericSyncSfnTask(
aws_cdk.aws_stepfunctions.TaskStateBase,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-textract-generic-async.TextractGenericSyncSfnTask",
):
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
s3_input_bucket: typing.Optional[builtins.str] = None,
s3_input_prefix: typing.Optional[builtins.str] = None,
textract_api: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
) -> None:
'''
:param scope: -
:param id: -
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
        :param lambda_log_level: Log level for the Lambda function.
:param name: The name of the execution, same as that of StartExecution. Default: - None
        :param s3_input_bucket: Location of the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
        :param s3_input_prefix: Prefix for the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
:param textract_api:
        :param textract_state_machine_timeout_minutes: How long, in minutes, to wait for the Textract process before timing out (default is 48 hours, i.e. 60*48 = 2880 minutes).
:param workflow_tracing_enabled:
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
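
        Example (a minimal usage sketch; ``stack``, the bucket/prefix values and the
        log-level string are assumed placeholders)::

            textract_sync_task = TextractGenericSyncSfnTask(
                stack, "TextractSync",
                s3_output_bucket="my-output-bucket",
                s3_output_prefix="textract-output",
                lambda_log_level="DEBUG",
            )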
'''
props = TextractGenericSyncSfnTaskProps(
s3_output_bucket=s3_output_bucket,
s3_output_prefix=s3_output_prefix,
associate_with_parent=associate_with_parent,
custom_function=custom_function,
enable_dashboard=enable_dashboard,
enable_monitoring=enable_monitoring,
input=input,
lambda_log_level=lambda_log_level,
name=name,
s3_input_bucket=s3_input_bucket,
s3_input_prefix=s3_input_prefix,
textract_api=textract_api,
textract_state_machine_timeout_minutes=textract_state_machine_timeout_minutes,
workflow_tracing_enabled=workflow_tracing_enabled,
comment=comment,
heartbeat=heartbeat,
input_path=input_path,
integration_pattern=integration_pattern,
output_path=output_path,
result_path=result_path,
result_selector=result_selector,
timeout=timeout,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskMetrics")
def _task_metrics(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig]:
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig], jsii.get(self, "taskMetrics"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskPolicies")
def _task_policies(
self,
) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
return typing.cast(typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]], jsii.get(self, "taskPolicies"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="dashboardName")
def dashboard_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "dashboardName"))
@dashboard_name.setter
def dashboard_name(self, value: builtins.str) -> None:
jsii.set(self, "dashboardName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="putOnSQSLambdaLogGroup")
def put_on_sqs_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "putOnSQSLambdaLogGroup"))
@put_on_sqs_lambda_log_group.setter
def put_on_sqs_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "putOnSQSLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="stateMachine")
def state_machine(self) -> aws_cdk.aws_stepfunctions.IStateMachine:
return typing.cast(aws_cdk.aws_stepfunctions.IStateMachine, jsii.get(self, "stateMachine"))
@state_machine.setter
def state_machine(self, value: aws_cdk.aws_stepfunctions.IStateMachine) -> None:
jsii.set(self, "stateMachine", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractPutOnSQSFunction")
def textract_put_on_sqs_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractPutOnSQSFunction"))
@textract_put_on_sqs_function.setter
def textract_put_on_sqs_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractPutOnSQSFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractSyncCallFunction")
def textract_sync_call_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractSyncCallFunction"))
@textract_sync_call_function.setter
def textract_sync_call_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractSyncCallFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractSyncLambdaLogGroup")
def textract_sync_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "textractSyncLambdaLogGroup"))
@textract_sync_lambda_log_group.setter
def textract_sync_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "textractSyncLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractSyncSQS")
def textract_sync_sqs(self) -> aws_cdk.aws_sqs.IQueue:
return typing.cast(aws_cdk.aws_sqs.IQueue, jsii.get(self, "textractSyncSQS"))
@textract_sync_sqs.setter
def textract_sync_sqs(self, value: aws_cdk.aws_sqs.IQueue) -> None:
jsii.set(self, "textractSyncSQS", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="version")
def version(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "version"))
@version.setter
def version(self, value: builtins.str) -> None:
jsii.set(self, "version", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-textract-generic-async.TextractGenericSyncSfnTaskProps",
jsii_struct_bases=[aws_cdk.aws_stepfunctions.TaskStateBaseProps],
name_mapping={
"comment": "comment",
"heartbeat": "heartbeat",
"input_path": "inputPath",
"integration_pattern": "integrationPattern",
"output_path": "outputPath",
"result_path": "resultPath",
"result_selector": "resultSelector",
"timeout": "timeout",
"s3_output_bucket": "s3OutputBucket",
"s3_output_prefix": "s3OutputPrefix",
"associate_with_parent": "associateWithParent",
"custom_function": "customFunction",
"enable_dashboard": "enableDashboard",
"enable_monitoring": "enableMonitoring",
"input": "input",
"lambda_log_level": "lambdaLogLevel",
"name": "name",
"s3_input_bucket": "s3InputBucket",
"s3_input_prefix": "s3InputPrefix",
"textract_api": "textractAPI",
"textract_state_machine_timeout_minutes": "textractStateMachineTimeoutMinutes",
"workflow_tracing_enabled": "workflowTracingEnabled",
},
)
class TextractGenericSyncSfnTaskProps(aws_cdk.aws_stepfunctions.TaskStateBaseProps):
def __init__(
self,
*,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
s3_input_bucket: typing.Optional[builtins.str] = None,
s3_input_prefix: typing.Optional[builtins.str] = None,
textract_api: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
) -> None:
'''
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
        :param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
        :param lambda_log_level: Log level for the Lambda function.
:param name: The name of the execution, same as that of StartExecution. Default: - None
        :param s3_input_bucket: Location of the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
        :param s3_input_prefix: Prefix for the input S3 objects; if left empty, a rule granting S3 access to all resources (``*``) is generated.
:param textract_api:
        :param textract_state_machine_timeout_minutes: How long, in minutes, to wait for the Textract process before timing out (default is 48 hours, i.e. 60*48 = 2880 minutes).
:param workflow_tracing_enabled:
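
        Example (a minimal sketch; placeholder bucket and prefix values only)::

            sync_props = TextractGenericSyncSfnTaskProps(
                s3_output_bucket="my-output-bucket",
                s3_output_prefix="textract-output",
                s3_input_bucket="my-input-bucket",
                s3_input_prefix="uploads",
            )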
'''
self._values: typing.Dict[str, typing.Any] = {
"s3_output_bucket": s3_output_bucket,
"s3_output_prefix": s3_output_prefix,
}
if comment is not None:
self._values["comment"] = comment
if heartbeat is not None:
self._values["heartbeat"] = heartbeat
if input_path is not None:
self._values["input_path"] = input_path
if integration_pattern is not None:
self._values["integration_pattern"] = integration_pattern
if output_path is not None:
self._values["output_path"] = output_path
if result_path is not None:
self._values["result_path"] = result_path
if result_selector is not None:
self._values["result_selector"] = result_selector
if timeout is not None:
self._values["timeout"] = timeout
if associate_with_parent is not None:
self._values["associate_with_parent"] = associate_with_parent
if custom_function is not None:
self._values["custom_function"] = custom_function
if enable_dashboard is not None:
self._values["enable_dashboard"] = enable_dashboard
if enable_monitoring is not None:
self._values["enable_monitoring"] = enable_monitoring
if input is not None:
self._values["input"] = input
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if name is not None:
self._values["name"] = name
if s3_input_bucket is not None:
self._values["s3_input_bucket"] = s3_input_bucket
if s3_input_prefix is not None:
self._values["s3_input_prefix"] = s3_input_prefix
if textract_api is not None:
self._values["textract_api"] = textract_api
if textract_state_machine_timeout_minutes is not None:
self._values["textract_state_machine_timeout_minutes"] = textract_state_machine_timeout_minutes
if workflow_tracing_enabled is not None:
self._values["workflow_tracing_enabled"] = workflow_tracing_enabled
@builtins.property
def comment(self) -> typing.Optional[builtins.str]:
'''An optional description for this state.
:default: - No comment
'''
result = self._values.get("comment")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def heartbeat(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the heartbeat.
:default: - None
'''
result = self._values.get("heartbeat")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def input_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select part of the state to be the input to this state.
May also be the special value JsonPath.DISCARD, which will cause the effective
input to be the empty object {}.
:default: - The entire task input (JSON path '$')
'''
result = self._values.get("input_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def integration_pattern(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern]:
'''AWS Step Functions integrates with services directly in the Amazon States Language.
You can control these AWS services using service integration patterns
:default:
- ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks.
``IntegrationPattern.RUN_JOB`` for the following exceptions:
``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
'''
result = self._values.get("integration_pattern")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern], result)
@builtins.property
def output_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select a portion of the state output to pass to the next state.
May also be the special value JsonPath.DISCARD, which will cause the effective
output to be the empty object {}.
:default:
- The entire JSON node determined by the state input, the task result,
and resultPath is passed to the next state (JSON path '$')
'''
result = self._values.get("output_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to indicate where to inject the state's output.
May also be the special value JsonPath.DISCARD, which will cause the state's
input to become its output.
:default: - Replaces the entire input with the result (JSON path '$')
'''
result = self._values.get("result_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_selector(
self,
) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:
'''The JSON that will replace the state's raw result and become the effective result before ResultPath is applied.
You can use ResultSelector to create a payload with values that are static
or selected from the state's raw result.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector
'''
result = self._values.get("result_selector")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the state machine.
:default: - None
'''
result = self._values.get("timeout")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def s3_output_bucket(self) -> builtins.str:
result = self._values.get("s3_output_bucket")
assert result is not None, "Required property 's3_output_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def s3_output_prefix(self) -> builtins.str:
'''The prefix to use for the output files.'''
result = self._values.get("s3_output_prefix")
assert result is not None, "Required property 's3_output_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def associate_with_parent(self) -> typing.Optional[builtins.bool]:
'''Pass the execution ID from the context object to the execution input.
This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines.
If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely.
:default: - false
:see: https://docs.aws.amazon.com/step-functions/latest/dg/concepts-nested-workflows.html#nested-execution-startid
'''
result = self._values.get("associate_with_parent")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def custom_function(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke]:
'''not implemented yet.'''
result = self._values.get("custom_function")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke], result)
@builtins.property
def enable_dashboard(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_dashboard")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def enable_monitoring(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_monitoring")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def input(self) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskInput]:
'''The JSON input for the execution, same as that of StartExecution.
:default: - The state input (JSON path '$')
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("input")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskInput], result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
'''Log level for the Lambda functions.'''
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''The name of the execution, same as that of StartExecution.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def s3_input_bucket(self) -> typing.Optional[builtins.str]:
'''Bucket containing the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.'''
result = self._values.get("s3_input_bucket")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def s3_input_prefix(self) -> typing.Optional[builtins.str]:
'''Prefix of the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.'''
result = self._values.get("s3_input_prefix")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def textract_api(self) -> typing.Optional[builtins.str]:
result = self._values.get("textract_api")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def textract_state_machine_timeout_minutes(self) -> typing.Optional[jsii.Number]:
'''How long to wait for the process to complete, in minutes (default is 48 hours, i.e. 60*48 = 2880 minutes).'''
result = self._values.get("textract_state_machine_timeout_minutes")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def workflow_tracing_enabled(self) -> typing.Optional[builtins.bool]:
result = self._values.get("workflow_tracing_enabled")
return typing.cast(typing.Optional[builtins.bool], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractGenericSyncSfnTaskProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class TextractPOCDecider(
aws_cdk.aws_stepfunctions.StateMachineFragment,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-textract-generic-async.TextractPOCDecider",
):
def __init__(
self,
parent: constructs.Construct,
id: builtins.str,
*,
lambda_memory_mb: typing.Optional[jsii.Number] = None,
lambda_timeout: typing.Optional[jsii.Number] = None,
) -> None:
'''
:param parent: -
:param id: -
:param lambda_memory_mb: Memory of the Lambda function in MB (may need to be increased for larger documents).
:param lambda_timeout:
'''
props = TextractDPPOCDeciderProps(
lambda_memory_mb=lambda_memory_mb, lambda_timeout=lambda_timeout
)
jsii.create(self.__class__, self, [parent, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="endStates")
def end_states(self) -> typing.List[aws_cdk.aws_stepfunctions.INextable]:
'''The states to chain onto if this fragment is used.'''
return typing.cast(typing.List[aws_cdk.aws_stepfunctions.INextable], jsii.get(self, "endStates"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="startState")
def start_state(self) -> aws_cdk.aws_stepfunctions.State:
'''The start state of this state machine fragment.'''
return typing.cast(aws_cdk.aws_stepfunctions.State, jsii.get(self, "startState"))
__all__ = [
"TextractAsyncToJSON",
"TextractAsyncToJSONProps",
"TextractClassificationConfigurator",
"TextractClassificationConfiguratorProps",
"TextractDPPOCDeciderProps",
"TextractGenerateCSV",
"TextractGenerateCSVProps",
"TextractGenericAsyncSfnTask",
"TextractGenericAsyncSfnTaskProps",
"TextractGenericSyncSfnTask",
"TextractGenericSyncSfnTaskProps",
"TextractPOCDecider",
]
publication.publish()
|
/schadem_cdk_construct_textract_generic_async-0.0.4-py3-none-any.whl/schadem_cdk_construct_textract_generic_async/__init__.py
| 0.661158 | 0.155046 |
__init__.py
|
pypi
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk
import aws_cdk.aws_iam
import aws_cdk.aws_lambda
import aws_cdk.aws_logs
import aws_cdk.aws_sqs
import aws_cdk.aws_stepfunctions
import aws_cdk.aws_stepfunctions_tasks
import constructs
class TextractGenericSyncSfnTask(
aws_cdk.aws_stepfunctions.TaskStateBase,
metaclass=jsii.JSIIMeta,
jsii_type="schadem-cdk-construct-textract-generic-sync.TextractGenericSyncSfnTask",
):
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
s3_input_bucket: typing.Optional[builtins.str] = None,
s3_input_prefix: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
) -> None:
'''
:param scope: -
:param id: -
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level: Log level for the Lambda functions.
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param s3_input_bucket: Bucket containing the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.
:param s3_input_prefix: Prefix of the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.
:param textract_state_machine_timeout_minutes: How long to wait for the process to complete, in minutes (default is 48 hours, i.e. 60*48 = 2880 minutes).
:param workflow_tracing_enabled:
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
'''
props = TextractGenericSyncSfnTaskProps(
s3_output_bucket=s3_output_bucket,
s3_output_prefix=s3_output_prefix,
associate_with_parent=associate_with_parent,
custom_function=custom_function,
enable_dashboard=enable_dashboard,
enable_monitoring=enable_monitoring,
input=input,
lambda_log_level=lambda_log_level,
name=name,
s3_input_bucket=s3_input_bucket,
s3_input_prefix=s3_input_prefix,
textract_state_machine_timeout_minutes=textract_state_machine_timeout_minutes,
workflow_tracing_enabled=workflow_tracing_enabled,
comment=comment,
heartbeat=heartbeat,
input_path=input_path,
integration_pattern=integration_pattern,
output_path=output_path,
result_path=result_path,
result_selector=result_selector,
timeout=timeout,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskMetrics")
def _task_metrics(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig]:
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskMetricsConfig], jsii.get(self, "taskMetrics"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="taskPolicies")
def _task_policies(
self,
) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
return typing.cast(typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]], jsii.get(self, "taskPolicies"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="dashboardName")
def dashboard_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "dashboardName"))
@dashboard_name.setter
def dashboard_name(self, value: builtins.str) -> None:
jsii.set(self, "dashboardName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="putOnSQSLambdaLogGroup")
def put_on_sqs_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "putOnSQSLambdaLogGroup"))
@put_on_sqs_lambda_log_group.setter
def put_on_sqs_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "putOnSQSLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="stateMachine")
def state_machine(self) -> aws_cdk.aws_stepfunctions.IStateMachine:
return typing.cast(aws_cdk.aws_stepfunctions.IStateMachine, jsii.get(self, "stateMachine"))
@state_machine.setter
def state_machine(self, value: aws_cdk.aws_stepfunctions.IStateMachine) -> None:
jsii.set(self, "stateMachine", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractPutOnSQSFunction")
def textract_put_on_sqs_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractPutOnSQSFunction"))
@textract_put_on_sqs_function.setter
def textract_put_on_sqs_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractPutOnSQSFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractSyncCallFunction")
def textract_sync_call_function(self) -> aws_cdk.aws_lambda.IFunction:
return typing.cast(aws_cdk.aws_lambda.IFunction, jsii.get(self, "textractSyncCallFunction"))
@textract_sync_call_function.setter
def textract_sync_call_function(self, value: aws_cdk.aws_lambda.IFunction) -> None:
jsii.set(self, "textractSyncCallFunction", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractSyncLambdaLogGroup")
def textract_sync_lambda_log_group(self) -> aws_cdk.aws_logs.ILogGroup:
return typing.cast(aws_cdk.aws_logs.ILogGroup, jsii.get(self, "textractSyncLambdaLogGroup"))
@textract_sync_lambda_log_group.setter
def textract_sync_lambda_log_group(self, value: aws_cdk.aws_logs.ILogGroup) -> None:
jsii.set(self, "textractSyncLambdaLogGroup", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="textractSyncSQS")
def textract_sync_sqs(self) -> aws_cdk.aws_sqs.IQueue:
return typing.cast(aws_cdk.aws_sqs.IQueue, jsii.get(self, "textractSyncSQS"))
@textract_sync_sqs.setter
def textract_sync_sqs(self, value: aws_cdk.aws_sqs.IQueue) -> None:
jsii.set(self, "textractSyncSQS", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="version")
def version(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "version"))
@version.setter
def version(self, value: builtins.str) -> None:
jsii.set(self, "version", value)
@jsii.data_type(
jsii_type="schadem-cdk-construct-textract-generic-sync.TextractGenericSyncSfnTaskProps",
jsii_struct_bases=[aws_cdk.aws_stepfunctions.TaskStateBaseProps],
name_mapping={
"comment": "comment",
"heartbeat": "heartbeat",
"input_path": "inputPath",
"integration_pattern": "integrationPattern",
"output_path": "outputPath",
"result_path": "resultPath",
"result_selector": "resultSelector",
"timeout": "timeout",
"s3_output_bucket": "s3OutputBucket",
"s3_output_prefix": "s3OutputPrefix",
"associate_with_parent": "associateWithParent",
"custom_function": "customFunction",
"enable_dashboard": "enableDashboard",
"enable_monitoring": "enableMonitoring",
"input": "input",
"lambda_log_level": "lambdaLogLevel",
"name": "name",
"s3_input_bucket": "s3InputBucket",
"s3_input_prefix": "s3InputPrefix",
"textract_state_machine_timeout_minutes": "textractStateMachineTimeoutMinutes",
"workflow_tracing_enabled": "workflowTracingEnabled",
},
)
class TextractGenericSyncSfnTaskProps(aws_cdk.aws_stepfunctions.TaskStateBaseProps):
def __init__(
self,
*,
comment: typing.Optional[builtins.str] = None,
heartbeat: typing.Optional[aws_cdk.Duration] = None,
input_path: typing.Optional[builtins.str] = None,
integration_pattern: typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern] = None,
output_path: typing.Optional[builtins.str] = None,
result_path: typing.Optional[builtins.str] = None,
result_selector: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
timeout: typing.Optional[aws_cdk.Duration] = None,
s3_output_bucket: builtins.str,
s3_output_prefix: builtins.str,
associate_with_parent: typing.Optional[builtins.bool] = None,
custom_function: typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke] = None,
enable_dashboard: typing.Optional[builtins.bool] = None,
enable_monitoring: typing.Optional[builtins.bool] = None,
input: typing.Optional[aws_cdk.aws_stepfunctions.TaskInput] = None,
lambda_log_level: typing.Optional[builtins.str] = None,
name: typing.Optional[builtins.str] = None,
s3_input_bucket: typing.Optional[builtins.str] = None,
s3_input_prefix: typing.Optional[builtins.str] = None,
textract_state_machine_timeout_minutes: typing.Optional[jsii.Number] = None,
workflow_tracing_enabled: typing.Optional[builtins.bool] = None,
) -> None:
'''
:param comment: An optional description for this state. Default: - No comment
:param heartbeat: Timeout for the heartbeat. Default: - None
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value JsonPath.DISCARD, which will cause the effective input to be the empty object {}. Default: - The entire task input (JSON path '$')
:param integration_pattern: AWS Step Functions integrates with services directly in the Amazon States Language. You can control these AWS services using service integration patterns Default: - ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks. ``IntegrationPattern.RUN_JOB`` for the following exceptions: ``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:param output_path: JSONPath expression to select a portion of the state output to pass to the next state. May also be the special value JsonPath.DISCARD, which will cause the effective output to be the empty object {}. Default: - The entire JSON node determined by the state input, the task result, and resultPath is passed to the next state (JSON path '$')
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value JsonPath.DISCARD, which will cause the state's input to become its output. Default: - Replaces the entire input with the result (JSON path '$')
:param result_selector: The JSON that will replace the state's raw result and become the effective result before ResultPath is applied. You can use ResultSelector to create a payload with values that are static or selected from the state's raw result. Default: - None
:param timeout: Timeout for the state machine. Default: - None
:param s3_output_bucket:
:param s3_output_prefix: The prefix to use for the output files.
:param associate_with_parent: Pass the execution ID from the context object to the execution input. This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines. If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely. Default: - false
:param custom_function: not implemented yet.
:param enable_dashboard: not implemented yet.
:param enable_monitoring: not implemented yet.
:param input: The JSON input for the execution, same as that of StartExecution. Default: - The state input (JSON path '$')
:param lambda_log_level: Log level for the Lambda functions.
:param name: The name of the execution, same as that of StartExecution. Default: - None
:param s3_input_bucket: Bucket containing the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.
:param s3_input_prefix: Prefix of the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.
:param textract_state_machine_timeout_minutes: How long to wait for the process to complete, in minutes (default is 48 hours, i.e. 60*48 = 2880 minutes).
:param workflow_tracing_enabled:
'''
self._values: typing.Dict[str, typing.Any] = {
"s3_output_bucket": s3_output_bucket,
"s3_output_prefix": s3_output_prefix,
}
if comment is not None:
self._values["comment"] = comment
if heartbeat is not None:
self._values["heartbeat"] = heartbeat
if input_path is not None:
self._values["input_path"] = input_path
if integration_pattern is not None:
self._values["integration_pattern"] = integration_pattern
if output_path is not None:
self._values["output_path"] = output_path
if result_path is not None:
self._values["result_path"] = result_path
if result_selector is not None:
self._values["result_selector"] = result_selector
if timeout is not None:
self._values["timeout"] = timeout
if associate_with_parent is not None:
self._values["associate_with_parent"] = associate_with_parent
if custom_function is not None:
self._values["custom_function"] = custom_function
if enable_dashboard is not None:
self._values["enable_dashboard"] = enable_dashboard
if enable_monitoring is not None:
self._values["enable_monitoring"] = enable_monitoring
if input is not None:
self._values["input"] = input
if lambda_log_level is not None:
self._values["lambda_log_level"] = lambda_log_level
if name is not None:
self._values["name"] = name
if s3_input_bucket is not None:
self._values["s3_input_bucket"] = s3_input_bucket
if s3_input_prefix is not None:
self._values["s3_input_prefix"] = s3_input_prefix
if textract_state_machine_timeout_minutes is not None:
self._values["textract_state_machine_timeout_minutes"] = textract_state_machine_timeout_minutes
if workflow_tracing_enabled is not None:
self._values["workflow_tracing_enabled"] = workflow_tracing_enabled
@builtins.property
def comment(self) -> typing.Optional[builtins.str]:
'''An optional description for this state.
:default: - No comment
'''
result = self._values.get("comment")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def heartbeat(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the heartbeat.
:default: - None
'''
result = self._values.get("heartbeat")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def input_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select part of the state to be the input to this state.
May also be the special value JsonPath.DISCARD, which will cause the effective
input to be the empty object {}.
:default: - The entire task input (JSON path '$')
'''
result = self._values.get("input_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def integration_pattern(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern]:
'''AWS Step Functions integrates with services directly in the Amazon States Language.
You can control these AWS services using service integration patterns
:default:
- ``IntegrationPattern.REQUEST_RESPONSE`` for most tasks.
``IntegrationPattern.RUN_JOB`` for the following exceptions:
``BatchSubmitJob``, ``EmrAddStep``, ``EmrCreateCluster``, ``EmrTerminationCluster``, and ``EmrContainersStartJobRun``.
:see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
'''
result = self._values.get("integration_pattern")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.IntegrationPattern], result)
@builtins.property
def output_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to select a portion of the state output to pass to the next state.
May also be the special value JsonPath.DISCARD, which will cause the effective
output to be the empty object {}.
:default:
- The entire JSON node determined by the state input, the task result,
and resultPath is passed to the next state (JSON path '$')
'''
result = self._values.get("output_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_path(self) -> typing.Optional[builtins.str]:
'''JSONPath expression to indicate where to inject the state's output.
May also be the special value JsonPath.DISCARD, which will cause the state's
input to become its output.
:default: - Replaces the entire input with the result (JSON path '$')
'''
result = self._values.get("result_path")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def result_selector(
self,
) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:
'''The JSON that will replace the state's raw result and become the effective result before ResultPath is applied.
You can use ResultSelector to create a payload with values that are static
or selected from the state's raw result.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector
'''
result = self._values.get("result_selector")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.Duration]:
'''Timeout for the state machine.
:default: - None
'''
result = self._values.get("timeout")
return typing.cast(typing.Optional[aws_cdk.Duration], result)
@builtins.property
def s3_output_bucket(self) -> builtins.str:
result = self._values.get("s3_output_bucket")
assert result is not None, "Required property 's3_output_bucket' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def s3_output_prefix(self) -> builtins.str:
'''The prefix to use for the output files.'''
result = self._values.get("s3_output_prefix")
assert result is not None, "Required property 's3_output_prefix' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def associate_with_parent(self) -> typing.Optional[builtins.bool]:
'''Pass the execution ID from the context object to the execution input.
This allows the Step Functions UI to link child executions from parent executions, making it easier to trace execution flow across state machines.
If you set this property to ``true``, the ``input`` property must be an object (provided by ``sfn.TaskInput.fromObject``) or omitted entirely.
:default: - false
:see: https://docs.aws.amazon.com/step-functions/latest/dg/concepts-nested-workflows.html#nested-execution-startid
'''
result = self._values.get("associate_with_parent")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def custom_function(
self,
) -> typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke]:
'''not implemented yet.'''
result = self._values.get("custom_function")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions_tasks.LambdaInvoke], result)
@builtins.property
def enable_dashboard(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_dashboard")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def enable_monitoring(self) -> typing.Optional[builtins.bool]:
'''not implemented yet.'''
result = self._values.get("enable_monitoring")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def input(self) -> typing.Optional[aws_cdk.aws_stepfunctions.TaskInput]:
'''The JSON input for the execution, same as that of StartExecution.
:default: - The state input (JSON path '$')
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("input")
return typing.cast(typing.Optional[aws_cdk.aws_stepfunctions.TaskInput], result)
@builtins.property
def lambda_log_level(self) -> typing.Optional[builtins.str]:
'''Log level for the Lambda functions.'''
result = self._values.get("lambda_log_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''The name of the execution, same as that of StartExecution.
:default: - None
:see: https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def s3_input_bucket(self) -> typing.Optional[builtins.str]:
'''Bucket containing the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.'''
result = self._values.get("s3_input_bucket")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def s3_input_prefix(self) -> typing.Optional[builtins.str]:
'''Prefix of the input S3 objects; if left empty, an access rule for all S3 resources [*] is generated.'''
result = self._values.get("s3_input_prefix")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def textract_state_machine_timeout_minutes(self) -> typing.Optional[jsii.Number]:
'''How long to wait for the process to complete, in minutes (default is 48 hours, i.e. 60*48 = 2880 minutes).'''
result = self._values.get("textract_state_machine_timeout_minutes")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def workflow_tracing_enabled(self) -> typing.Optional[builtins.bool]:
result = self._values.get("workflow_tracing_enabled")
return typing.cast(typing.Optional[builtins.bool], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "TextractGenericSyncSfnTaskProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"TextractGenericSyncSfnTask",
"TextractGenericSyncSfnTaskProps",
]
publication.publish()
|
/schadem_cdk_construct_textract_generic_sync-0.0.0-py3-none-any.whl/schadem_cdk_construct_textract_generic_sync/__init__.py
| 0.659295 | 0.211071 |
__init__.py
|
pypi
|
# python-scheckcli
Python CLI application to compare the schemas of two YAML objects.
## Getting started
To make it easy for you to get started with GitLab, here's a list of recommended next steps.
Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
## Add your files
- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
```
cd existing_repo
git remote add origin https://gitlab.com/RonMallory/python-scheckcli.git
git branch -M main
git push -uf origin main
```
## Integrate with your tools
- [ ] [Set up project integrations](https://gitlab.com/RonMallory/python-scheckcli/-/settings/integrations)
## Collaborate with your team
- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
- [ ] [Automatically merge when pipeline succeeds](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
## Test and Deploy
Use the built-in continuous integration in GitLab.
- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
***
# Editing this README
When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
## Suggestions for a good README
Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
## Name
Choose a self-explaining name for your project.
## Description
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
## Badges
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
## Visuals
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
## Installation
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
## Usage
Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
## Support
Tell people where they can go for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
## Roadmap
If you have ideas for releases in the future, it is a good idea to list them in the README.
## Contributing
State if you are open to contributions and what your requirements are for accepting them.
For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
## Authors and acknowledgment
Show your appreciation to those who have contributed to the project.
## License
For open source projects, say how it is licensed.
## Project status
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
|
/scheckcli-1.1.0.tar.gz/scheckcli-1.1.0/README.md
| 0.460774 | 0.838151 |
README.md
|
pypi
|
from datetime import timedelta
from functools import partial
import sched
__all__ = ["scheduler"]
_sentinel = object()
class scheduler(sched.scheduler):
"""A subclass of the `sched.scheduler` class from the standard library.
This subclass adds additional functionality to the `scheduler` class,
including the ability to schedule events using relative time intervals
and a decorator for scheduling events to run at regular intervals.
"""
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
"""Schedule an event to be run at a specific time.
This variant of the `sched.enter` method allows the delay argument to be
specified as a `datetime.timedelta` object. If the `kwargs` argument
is not provided, it defaults to an empty dictionary.
"""
if isinstance(delay, timedelta):
delay = delay.total_seconds()
if kwargs is _sentinel:
kwargs = {}
return super().enter(delay, priority, action, argument, kwargs)
def repeat(self, delay, priority, action, argument=(), kwargs=_sentinel):
"""Schedule an event to be run at regular intervals.
This method is a variant of the `scheduler.enter` method that re-schedules itself
after each run. If the `kwargs` argument is not provided, it defaults
to an empty dictionary.
"""
if kwargs is _sentinel:
kwargs = {}
partial_action = partial(action, *argument, **kwargs)
def repeater(action):
action()
self.enter(delay, priority, repeater, (partial_action,))
self.enter(delay, priority, repeater, (partial_action,))
def every(self, delay, priority=0):
"""Schedule an event to be run at regular intervals using a decorator.
This method is a variant of the `scheduler.repeat` method that can be used as a
decorator. It allows a function to be scheduled to run at regular
intervals by specifying the `delay` and `priority` as arguments. The
default `priority` is `0`.
"""
return partial(self.repeat, delay, priority)
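# Hedged usage sketch (not shipped with the module): demonstrates the
# timedelta-aware `enter` override and the `every` decorator defined above.
# The two-second interval and the printed message are illustrative only.
if __name__ == "__main__":
    sc = scheduler()

    @sc.every(timedelta(seconds=2), priority=1)
    def tick():
        # Re-scheduled by `repeat` after every run.
        print("tick")

    # Blocks and keeps firing `tick` every ~2 seconds; stop with Ctrl-C.
    sc.run()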
|
/sched2-0.5.0.tar.gz/sched2-0.5.0/sched2.py
| 0.905708 | 0.42322 |
sched2.py
|
pypi
|
import copy
from .utils.cst import EMPTY, START, NONE, SINK, SELF, PLOT, END
from .utils.dsp import (
bypass, combine_dicts, selector, parent_func, kk_dict
)
from .utils.gen import counter
from .utils.base import Base
from .utils.utl import get_unused_node_id
__all__ = ['Dispatcher']
__author__ = 'Vincenzo Arcidiacono <[email protected]>'
# noinspection PyShadowingBuiltins
class Dispatcher(Base):
"""
It provides a data structure to process a complex system of functions.
The scope of this data structure is to compute the shortest workflow between
input and output data nodes.
A workflow is a sequence of function calls.
**------------------------------------------------------------------------**
**Example**:
As an example, here is a system of equations:
:math:`b - a = c`
:math:`log(c) = d_{from-log}`
:math:`d = (d_{from-log} + d_{initial-guess}) / 2`
that will be solved assuming that :math:`a = 0`, :math:`b = 1`, and
:math:`d_{initial-guess} = 4`.
**Steps**
Create an empty dispatcher::
>>> dsp = Dispatcher(name='Dispatcher')
Add data nodes to the dispatcher map::
>>> dsp.add_data(data_id='a')
'a'
>>> dsp.add_data(data_id='c')
'c'
Add a data node with a default value to the dispatcher map::
>>> dsp.add_data(data_id='b', default_value=1)
'b'
Add a function node::
>>> def diff_function(a, b):
... return b - a
...
>>> dsp.add_function('diff_function', function=diff_function,
... inputs=['a', 'b'], outputs=['c'])
'diff_function'
Add a function node with domain::
>>> from math import log
...
>>> def log_domain(x):
... return x > 0
...
>>> dsp.add_function('log', function=log, inputs=['c'], outputs=['d'],
... input_domain=log_domain)
'log'
Add a data node with function estimation and callback function.
- function estimation: estimate one unique output from multiple
estimations.
- callback function: is invoked after computing the output.
>>> def average_fun(kwargs):
... '''
... Returns the average of node estimations.
...
... :param kwargs:
... Node estimations.
... :type kwargs: dict
...
... :return:
... The average of node estimations.
... :rtype: float
... '''
...
... x = kwargs.values()
... return sum(x) / len(x)
...
>>> def callback_fun(x):
... print('(log(1) + 4) / 2 = %.1f' % x)
...
>>> dsp.add_data(data_id='d', default_value=4, wait_inputs=True,
... function=average_fun, callback=callback_fun)
'd'
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp
<...>
Dispatch the function calls to achieve the desired output data node `d`:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 0}, outputs=['d'])
(log(1) + 4) / 2 = 2.0
>>> outputs
Solution([('a', 0), ('b', 1), ('c', 1), ('d', 2.0)])
"""
def __getstate__(self):
state = self.__dict__.copy()
state['solution'] = state['solution'].__class__(self)
return state
def __init__(self, dmap=None, name='', default_values=None, raises=False,
description='', executor=False):
"""
Initializes the dispatcher.
:param dmap:
A directed graph that stores data & functions parameters.
:type dmap: schedula.utils.graph.DiGraph, optional
:param name:
The dispatcher's name.
:type name: str, optional
:param default_values:
Data node default values. These will be used as inputs if they are not
otherwise specified as inputs in the ArciDispatch algorithm.
:type default_values: dict[str, dict], optional
:param raises:
If True the dispatcher interrupts the dispatch when an error occurs;
otherwise, if raises != '', it logs a warning. If a callable is given,
it is executed with the exception to decide whether or not to raise
the exception.
:type raises: bool|callable|str, optional
:param description:
The dispatcher's description.
:type description: str, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
There are four default Pool executors to dispatch asynchronously or
in parallel:
- `async`: execute all functions asynchronously in the same process,
- `parallel`: execute all functions in parallel excluding
:class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-pool`: execute all functions in parallel using a process
pool excluding :class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-dispatch`: execute all functions in parallel including
:class:`~schedula.utils.dsp.SubDispatch`.
:type executor: str, optional
"""
from .utils.graph import DiGraph
#: The directed graph that stores data & functions parameters.
self.dmap = dmap or DiGraph()
#: The dispatcher's name.
self.name = name
#: The dispatcher's description.
self.__doc__ = description
#: The function and data nodes of the dispatcher.
self.nodes = self.dmap.nodes
#: Data node default values. These will be used as inputs if they are not
#: otherwise specified as inputs in the ArciDispatch algorithm.
self.default_values = default_values or {}
#: If True the dispatcher interrupts the dispatch when an error occurs.
self.raises = raises
#: Pool executor to dispatch asynchronously.
self.executor = executor
from .utils.sol import Solution
#: Last dispatch solution.
self.solution = Solution(self)
#: Counter to set the node index.
self.counter = counter()
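    # Hedged usage sketch (comment only, not part of the class): constructing
    # a dispatcher with one of the pool executors described in the docstring
    # above. The node ids and the lambda are illustrative assumptions.
    #
    #   dsp = Dispatcher(name='async demo', executor='async')
    #   dsp.add_function(function=lambda a, b: a + b,
    #                    inputs=['a', 'b'], outputs=['c'])
    #   sol = dsp.dispatch(inputs={'a': 1, 'b': 2}, outputs=['c'])
    #   # With an executor set, `await_result`/`await_domain` on the
    #   # node-adding methods control whether futures are resolved before
    #   # being stored in the solution.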
def copy_structure(self, **kwargs):
"""
Returns a copy of the Dispatcher structure.
:param kwargs:
Additional parameters to initialize the new class.
:type kwargs: dict
:return:
A copy of the Dispatcher structure.
:rtype: Dispatcher
"""
kw = {
'description': self.__doc__, 'name': self.name,
'raises': self.raises, 'executor': self.executor
}
kw.update(kwargs)
return self.__class__(**kw)
def add_data(self, data_id=None, default_value=EMPTY, initial_dist=0.0,
wait_inputs=False, wildcard=None, function=None, callback=None,
description=None, filters=None, await_result=None, **kwargs):
"""
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, an id not already in dmap is assigned
automatically ('unknown<%d>').
:type data_id: str, optional
:param default_value:
Data node default value. This will be used as input if the data node is
not specified among the inputs in the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True, the ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
return one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits for data results before assigning them to
the solution. If a number is defined, this is used as the `timeout` for
the `Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Data node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a data to be estimated or a possible input data node::
>>> dsp.add_data(data_id='a')
'a'
Add a data with a default value (i.e., input data node)::
>>> dsp.add_data(data_id='b', default_value=1)
'b'
Create a data node with function estimation and a default value.
- function estimation: estimate one unique output from multiple
estimations.
- default value: is a default estimation.
>>> def min_fun(kwargs):
... '''
... Returns the minimum value of node estimations.
...
... :param kwargs:
... Node estimations.
... :type kwargs: dict
...
... :return:
... The minimum value of node estimations.
... :rtype: float
... '''
...
... return min(kwargs.values())
...
>>> dsp.add_data(data_id='c', default_value=2, wait_inputs=True,
... function=min_fun)
'c'
Create a data with an unknown id and return the generated id::
>>> dsp.add_data()
'unknown'
"""
# Set special data nodes.
if data_id is START:
default_value, description = NONE, START.__doc__
elif data_id is SINK:
wait_inputs, function, description = True, bypass, SINK.__doc__
elif data_id is SELF:
default_value, description = self, SELF.__doc__
elif data_id is PLOT:
from .utils.drw import autoplot_callback, autoplot_function
callback, description = callback or autoplot_callback, PLOT.__doc__
function = function or autoplot_function
# Base data node attributes.
attr_dict = {
'type': 'data',
'wait_inputs': wait_inputs,
'index': (self.counter(),)
}
if function is not None: # Add function as node attribute.
attr_dict['function'] = function
if await_result is not None: # Add await_result as node attribute.
attr_dict['await_result'] = await_result
if callback is not None: # Add callback as node attribute.
attr_dict['callback'] = callback
if wildcard is not None: # Add wildcard as node attribute.
attr_dict['wildcard'] = wildcard
if description is not None: # Add description as node attribute.
attr_dict['description'] = description
if filters: # Add filters as node attribute.
attr_dict['filters'] = filters
attr_dict.update(kwargs) # Additional attributes.
nodes = self.dmap.nodes # Namespace shortcut for speed.
if data_id is None: # Search for an unused node id.
data_id = get_unused_node_id(self.dmap) # Get an unused node id.
# Check if the node id exists as function.
elif data_id in nodes and nodes[data_id]['type'] != 'data':
raise ValueError('Invalid data id: '
'override function {}'.format(data_id))
# Add node to the dispatcher map.
self.dmap.add_node(data_id, **attr_dict)
# Set default value.
self.set_default_value(data_id, default_value, initial_dist)
return data_id # Return data node id.
def add_function(self, function_id=None, function=None, inputs=None,
outputs=None, input_domain=None, weight=None,
inp_weight=None, out_weight=None, description=None,
filters=None, await_domain=None, await_result=None,
**kwargs):
"""
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs as the function and
returns True if the input values satisfy the domain, otherwise False,
in which case the dispatch algorithm does not pass through the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits for all input results before executing the
`input_domain` function. If a number is defined, this is used as the
`timeout` for the `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits for output results before assigning them to
the workflow. If a number is defined, this is used as the `timeout` for
the `Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_dispatcher`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a function node::
>>> def my_function(a, b):
... c = a + b
... d = a - b
... return c, d
...
>>> dsp.add_function(function=my_function, inputs=['a', 'b'],
... outputs=['c', 'd'])
'my_function'
Add a function node with domain::
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
...
>>> def my_domain(a, b):
... return a < b
...
>>> dsp.add_function(function=my_log, inputs=['a', 'b'],
... outputs=['e'], input_domain=my_domain)
'my_log'
"""
from .utils.blue import _init
function = _init(function)
if inputs is None: # Set a dummy input.
if START not in self.nodes:
self.add_data(START)
inputs = [START] # Update inputs.
if outputs is None: # Set a dummy output.
if SINK not in self.nodes:
self.add_data(SINK)
outputs = [SINK] # Update outputs.
# Get parent function.
func = parent_func(function)
# Base function node attributes.
attr_dict = {
'type': 'function',
'inputs': inputs,
'outputs': outputs,
'function': function,
'wait_inputs': True,
'index': (self.counter(),)
}
if input_domain: # Add domain as node attribute.
attr_dict['input_domain'] = input_domain
if await_domain is not None: # Add await_domain as node attribute.
attr_dict['await_domain'] = await_domain
if await_result is not None: # Add await_result as node attribute.
attr_dict['await_result'] = await_result
if description is not None: # Add description as node attribute.
attr_dict['description'] = description
if filters: # Add filters as node attribute.
attr_dict['filters'] = filters
# Set function name.
if function_id is None:
try: # Set function name.
function_name = func.__name__
except AttributeError as ex:
if not func:
raise ValueError(
'Invalid function id due to:\n{}'.format(ex)
)
function_name = 'unknown'
else:
function_name = function_id
# Get an unused node id.
fun_id = get_unused_node_id(self.dmap, initial_guess=function_name)
if weight is not None: # Add weight as node attribute.
attr_dict['weight'] = weight
attr_dict.update(kwargs) # Set additional attributes.
# Add node to the dispatcher map.
self.dmap.add_node(fun_id, **attr_dict)
from .utils.alg import add_func_edges # Add input edges.
n_data = add_func_edges(self, fun_id, inputs, inp_weight, True)
# Add output edges.
add_func_edges(self, fun_id, outputs, out_weight, False, n_data)
return fun_id # Return function node id.
def add_func(self, function, outputs=None, weight=None,
inputs_defaults=False, inputs_kwargs=False, filters=None,
input_domain=None, await_domain=None, await_result=None,
inp_weight=None, out_weight=None, description=None,
inputs=None, function_id=None, **kwargs):
"""
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None, parameter names are taken from the function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if the input values satisfy the function domain.
It can be any function that takes the same inputs as the function and
returns True if the input values satisfy the domain, otherwise False,
in which case the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits for all input results before executing the
`input_domain` function. If a number is given, it is used as the `timeout`
of the `Future.result` method [default: True]. Note this is only used when
asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits for the output results before assigning them
to the workflow. If a number is given, it is used as the `timeout` of the
`Future.result` method [default: False]. Note this is only used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> def f(a, b, c, d=3, m=5):
... return (a + b) - c + d - m
>>> dsp.add_func(f, outputs=['d'])
'f'
>>> dsp.add_func(f, ['m'], inputs_defaults=True, inputs='beal')
'f<0>'
>>> dsp.add_func(f, ['i'], inputs_kwargs=True)
'f<1>'
>>> def g(a, b, c, *args, d=0):
... return (a + b) * c + d
>>> dsp.add_func(g, ['e'], inputs_defaults=True)
'g'
>>> sol = dsp({'a': 1, 'b': 3, 'c': 0}); sol
Solution([('a', 1), ('b', 3), ('c', 0), ('l', 3), ('d', 2),
('e', 0), ('m', 0), ('i', 6)])
"""
from .utils.blue import _init
from .utils.dsp import _get_par_args
function = _init(function)
if inputs is None:
inputs = tuple(_get_par_args(function, not inputs_kwargs)) or None
function_id = self.add_function(
weight=weight, filters=filters, outputs=outputs, function=function,
input_domain=input_domain, await_domain=await_domain, inputs=inputs,
description=description, out_weight=out_weight,
inp_weight=inp_weight, await_result=await_result,
function_id=function_id, **kwargs
)
if inputs_defaults:
for k, v in zip(inputs, _get_par_args(function, False).values()):
if v.default is not v.empty:
self.set_default_value(k, v.default)  # Use the parameter's public default.
return function_id
def add_dispatcher(self, dsp, inputs=None, outputs=None, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, inputs_prefix='', outputs_prefix='',
**kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None, it will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if the input values satisfy the function domain.
It can be any function that takes a dictionary with the inputs of the
sub-dispatcher node and returns True if the input values satisfy the
domain, otherwise False.
.. note:: This function is invoked every time a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits for all input results before executing the
`input_domain` function. If a number is given, it is used as the `timeout`
of the `Future.result` method [default: True]. Note this is only used when
asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Prefix added to the parent dispatcher input nodes.
:type inputs_prefix: str
:param outputs_prefix:
Prefix added to the parent dispatcher output nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
"""
from .utils.blue import _init
dsp = _init(dsp)
if not isinstance(dsp, self.__class__):
kw = dsp
dsp = self.__class__(
name=dsp_id or 'unknown',
executor=self.executor
)
dsp.add_from_lists(**kw)
if not dsp_id: # Get the dsp id.
dsp_id = dsp.name or 'unknown'
if description is None: # Get description.
description = dsp.__doc__ or None
if inputs is None:
inputs = kk_dict(*(k for k in dsp.data_nodes if k not in {
START, SINK, SELF, PLOT, END
}))
if outputs is None:
outputs = kk_dict(*(k for k in dsp.data_nodes if k not in {
START, SINK, SELF, PLOT, END
}))
if not isinstance(inputs, dict): # Create the inputs dict.
inputs = kk_dict(*inputs)
if not isinstance(outputs, dict): # Create the outputs dict.
outputs = kk_dict(*outputs)
if inputs_prefix:
inputs = {f'{inputs_prefix}{k}': v for k, v in inputs.items()}
if outputs_prefix:
outputs = {k: f'{outputs_prefix}{v}' for k, v in outputs.items()}
# Set zero as default input distances.
# noinspection PyTypeChecker
_weight_from = dict.fromkeys(inputs.keys(), 0.0)
_weight_from.update(inp_weight or {})
from .utils.alg import _nodes
# Return dispatcher node id.
dsp_id = self.add_function(
dsp_id, dsp, sorted(_nodes(inputs), key=str),
sorted(_nodes(outputs.values()), key=str), input_domain, weight,
_weight_from, type='dispatcher', description=description,
wait_inputs=False, await_domain=await_domain, **kwargs
)
# Set proper inputs.
self.nodes[dsp_id]['inputs'] = inputs
# Set proper outputs.
self.nodes[dsp_id]['outputs'] = outputs
if SINK not in dsp.nodes and \
SINK in _nodes(inputs.values()).union(_nodes(outputs)):
dsp.add_data(SINK) # Add sink node.
# Import default values from sub-dispatcher.
if include_defaults:
dsp_dfl = dsp.default_values # Namespace shortcut.
remove = set() # Set of nodes to remove after the import.
# Set default values.
for k, v in inputs.items():
if isinstance(v, str):
if v in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v))
else:
if v[0] in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v[0]))
remove.update(v[1:])
# Remove default values.
for k in remove:
dsp_dfl.pop(k, None)
return dsp_id # Return sub-dispatcher node id.
def add_from_lists(self, data_list=None, fun_list=None, dsp_list=None):
"""
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:returns:
- Data node ids.
- Function node ids.
- Sub-dispatcher node ids.
:rtype: (list[str], list[str], list[str])
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Define a data list::
>>> data_list = [
... {'data_id': 'a'},
... {'data_id': 'b'},
... {'data_id': 'c'},
... ]
Define a functions list::
>>> def func(a, b):
... return a + b
...
>>> fun_list = [
... {'function': func, 'inputs': ['a', 'b'], 'outputs': ['c']}
... ]
Define a sub-dispatchers list::
>>> sub_dsp = Dispatcher(name='Sub-dispatcher')
>>> sub_dsp.add_function(function=func, inputs=['e', 'f'],
... outputs=['g'])
'func'
>>>
>>> dsp_list = [
... {'dsp_id': 'Sub', 'dsp': sub_dsp,
... 'inputs': {'a': 'e', 'b': 'f'}, 'outputs': {'g': 'c'}},
... ]
Add function and data nodes to dispatcher::
>>> dsp.add_from_lists(data_list, fun_list, dsp_list)
(['a', 'b', 'c'], ['func'], ['Sub'])
"""
if data_list: # Add data nodes.
data_ids = [self.add_data(**v) for v in data_list] # Data ids.
else:
data_ids = []
if fun_list: # Add function nodes.
fun_ids = [self.add_function(**v) for v in fun_list] # Func ids.
else:
fun_ids = []
if dsp_list: # Add dispatcher nodes.
dsp_ids = [self.add_dispatcher(**v) for v in dsp_list] # Dsp ids.
else:
dsp_ids = []
# Return data, function, and sub-dispatcher node ids.
return data_ids, fun_ids, dsp_ids
def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0):
"""
Set the default value of a data node in the dispatcher.
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node default value.
.. note:: If `EMPTY` the previous default value is removed.
:type value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a data node named `a`::
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
...
>>> dsp.add_data(data_id='a')
'a'
Add a default value to `a` node::
>>> dsp.set_default_value('a', value='value of the data')
>>> list(sorted(dsp.default_values['a'].items()))
[('initial_dist', 0.0), ('value', 'value of the data')]
Remove the default value of `a` node::
>>> dsp.set_default_value('a', value=sh.EMPTY)
>>> dsp.default_values
{}
"""
try:
if self.dmap.nodes[data_id]['type'] == 'data': # Is data node?
if value is EMPTY:
self.default_values.pop(data_id, None) # Remove default.
else: # Add default.
self.default_values[data_id] = {
'value': value,
'initial_dist': initial_dist
}
return
except KeyError:
pass
raise ValueError('Input error: %s is not a data node' % data_id)
def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
"""
Returns the sub-dispatcher induced by the given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and the edges between those nodes, excluding the edges in edges_bunch.
The available nodes are non-isolated nodes and function nodes that have
all their inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
"""
# Get real paths.
nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]
# Define an empty dispatcher.
sub_dsp = self.copy_structure(
dmap=self.dmap.subgraph(nodes_bunch)
)
# Namespace shortcuts for speed.
nodes, succ = sub_dsp.nodes, sub_dsp.dmap.succ
dmap_dv, dmap_rm_edge = self.default_values, sub_dsp.dmap.remove_edge
dmap_rm_node = sub_dsp.dmap.remove_node
# Remove function nodes that do not have all their inputs available.
for u in nodes_bunch:
n = nodes[u].get('inputs', None) # Function inputs.
# Not all inputs are available.
if n is not None and any(k not in nodes_bunch for k in n):
dmap_rm_node(u) # Remove function node.
# Remove edges that are not in edges_bunch.
if edges_bunch is not None:
for e in edges_bunch: # Iterate sub-graph edges.
dmap_rm_edge(*e) # Remove edge.
# Remove function node with no outputs.
sub_dsp.dmap.remove_nodes_from([
u for u, n in sub_dsp.dmap.nodes.items()
if n['type'] == 'function' and not succ[u] # No outputs.
])
# Remove isolate nodes from sub-graph.
sub_dsp.dmap.remove_nodes_from([
u for u, v in sub_dsp.dmap.pred.items() if not (v or succ[u])
])
# Set default values.
sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}
return sub_dsp # Return the sub-dispatcher.
def get_sub_dsp_from_workflow(
self, sources, graph=None, reverse=False, add_missing=False,
check_inputs=True, blockers=None, wildcard=False,
_update_links=True):
"""
Returns the sub-dispatcher induced by the workflow from sources.
The induced sub-dispatcher of the dsp contains the reachable nodes and
edges evaluated with breadth-first-search on the workflow graph from
source nodes.
:param sources:
Source nodes for the breadth-first-search.
A container of nodes which will be iterated through once.
:type sources: list[str], iterable
:param graph:
A directed graph on which to evaluate the breadth-first search.
:type graph: schedula.utils.graph.DiGraph, optional
:param reverse:
If True the workflow graph is assumed as reversed.
:type reverse: bool, optional
:param add_missing:
If True, missing function inputs are added to the sub-dispatcher.
:type add_missing: bool, optional
:param check_inputs:
If True, function nodes are included only when all their inputs are
reachable (unless `add_missing` is set).
:type check_inputs: bool, optional
:param blockers:
Nodes to not be added to the queue.
:type blockers: set[str], iterable, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param _update_links:
If True, it updates remote links of the extracted dispatcher.
:type _update_links: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function `fun` and a node `a` with a default value:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=1)
'a'
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['e'],
... outputs=['c'])
'fun2'
Dispatch with no calls in order to have a workflow::
>>> o = dsp.dispatch(inputs=['a', 'b'], no_call=True)
Get sub-dispatcher from workflow inputs `a` and `b`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['a', 'b'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
Get sub-dispatcher from a workflow output `c`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['c'], reverse=True)
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher (reverse workflow)'
"""
# Define an empty dispatcher map.
sub_dsp = self.copy_structure()
if not graph: # Set default graph.
graph = self.solution.workflow
# Visited nodes used as queue.
family = {}
# Namespace shortcuts for speed.
nodes, dmap_nodes = sub_dsp.dmap.nodes, self.dmap.nodes
dlt_val, dsp_dlt_val = sub_dsp.default_values, self.default_values
if not reverse:
# Namespace shortcuts for speed.
neighbors, dmap_succ = graph.succ, self.dmap.succ
succ, pred = sub_dsp.dmap.succ, sub_dsp.dmap.pred
# noinspection PyUnusedLocal
def _check_node_inputs(c, p):
if c == START:
return True
node_attr = dmap_nodes[c]
if node_attr['type'] == 'function':
if all(k in family for k in node_attr['inputs']):
_set_node_attr(c)
# namespace shortcuts for speed
s_pred = pred[c]
for p in node_attr['inputs']:
# add attributes to both representations of edge
succ[p][c] = s_pred[p] = dmap_succ[p][c]
elif not check_inputs or add_missing:
_set_node_attr(c)
# namespace shortcuts for speed
s_pred = pred[c]
if add_missing:
for p in node_attr['inputs']:
if p not in family:
_set_node_attr(p, add2family=False)
succ[p][c] = s_pred[p] = dmap_succ[p][c]
for p in node_attr['inputs']:
if p in family:
# add attributes to both representations of edge
succ[p][c] = s_pred[p] = dmap_succ[p][c]
return False
return True
return False
else:
# Namespace shortcuts for speed.
neighbors, dmap_succ = graph.pred, self.dmap.pred
pred, succ = sub_dsp.dmap.succ, sub_dsp.dmap.pred
def _check_node_inputs(c, p):
if c == START:
try:
node_attr = dmap_nodes[p]
return node_attr['type'] == 'data'
except KeyError:
return True
return False
from collections import deque
queue = deque([])
blockers = set(blockers or ())
# Function to set node attributes.
def _set_node_attr(n, add2family=True, block=False):
# Set node attributes.
nodes[n] = dmap_nodes[n]
# Add node in the adjacency matrix.
succ[n], pred[n] = ({}, {})
if n in dsp_dlt_val:
dlt_val[n] = dsp_dlt_val[n] # Set the default value.
if add2family:
# Append a new parent to the family.
family[n] = () if block and n in blockers else neighbors[n]
queue.append(n)
# Set initial node attributes.
for s in sources:
if s in dmap_nodes and s in graph.nodes:
_set_node_attr(s, block=not (wildcard and s in blockers))
# Start breadth-first-search.
while queue:
parent = queue.popleft()
# Namespace shortcuts for speed.
nbrs, dmap_nbrs = succ[parent], dmap_succ[parent]
# Iterate parent's children.
for child in sorted(family[parent], key=str):
if _check_node_inputs(child, parent):
continue
if child not in family:
_set_node_attr(child, block=True) # Set node attributes.
# Add attributes to both representations of edge: u-v and v-u.
nbrs[child] = pred[child][parent] = dmap_nbrs[child]
if _update_links:
from .utils.alg import _update_io, _get_sub_out, _get_sub_inp
succ, pred = sub_dsp.dmap.succ, sub_dsp.dmap.pred
for k, a in sub_dsp.sub_dsp_nodes.items():
nodes[k] = a = a.copy()
inp, out = _get_sub_inp(a, pred[k]), _get_sub_out(a, succ[k])
a['function'] = a['function'].get_sub_dsp_from_workflow(
sources=out.union(inp), graph=a['function'].dmap,
reverse=True, blockers=inp, wildcard=True
)
i, o = _update_io(a, pred[k], succ[k]) # Unreachable nodes.
msg = 'Sub-dsp {} missing: inp {}, out {}'
assert not i and not o, msg.format(k, i, o)
return sub_dsp # Return the sub-dispatcher map.
@property
def data_nodes(self):
"""
Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'data'}
@property
def function_nodes(self):
"""
Returns all function nodes of the dispatcher.
:return:
All function nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'function'}
@property
def sub_dsp_nodes(self):
"""
Returns all sub-dispatcher nodes of the dispatcher.
:return:
All sub-dispatcher nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {
k: v for k, v in self.nodes.items() if v['type'] == 'dispatcher'
}
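# A minimal sketch of the three node views above (the dispatcher `d` and the
# node names are purely illustrative, not part of the library):
#
#     >>> d = Dispatcher(name='demo')
#     >>> d.add_function('f', max, inputs=['a', 'b'], outputs=['c'])
#     'f'
#     >>> sorted(d.data_nodes)
#     ['a', 'b', 'c']
#     >>> list(d.function_nodes)
#     ['f']
#     >>> d.sub_dsp_nodes
#     {}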
def copy(self):
"""
Returns a deepcopy of the Dispatcher.
:return:
A copy of the Dispatcher.
:rtype: Dispatcher
Example::
>>> dsp = Dispatcher()
>>> dsp is dsp.copy()
False
"""
return copy.deepcopy(self) # Return the copy of the Dispatcher.
def blue(self, memo=None, depth=-1):
"""
Constructs a BlueDispatcher out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:param depth:
Depth of sub-dispatch blue. If negative, all levels are blueprinted.
:type depth: int, optional
:return:
A BlueDispatcher of the current object.
:rtype: schedula.utils.blue.BlueDispatcher
"""
if depth == 0:
return self
depth -= 1
memo = {} if memo is None else memo
if self in memo:
return memo[self]
from .utils.dsp import map_list
from .utils.blue import BlueDispatcher, _parent_blue
memo[self] = blue = BlueDispatcher(
executor=self.executor, name=self.name, raises=self.raises,
description=self.__doc__
)
dfl = self.default_values
key_map_data = ['data_id', {'value': 'default_value'}]
pred, succ = self.dmap.pred, self.dmap.succ
def _set_weight(n, r, d):
d = {i: j['weight'] for i, j in d.items() if 'weight' in j}
if d:
r[n] = d
for k, v in sorted(self.nodes.items(), key=lambda x: x[1]['index']):
v = v.copy()
t = v.pop('type')
del v['index']
if t == 'data':
method = 'add_data'
v.update(map_list(key_map_data, k, dfl.get(k, {})))
elif t in ('function', 'dispatcher'):
method = 'add_%s' % t
if t == 'dispatcher':
t = 'dsp'
v['%s_id' % t] = k
del v['wait_inputs']
_set_weight('inp_weight', v, pred[k])
_set_weight('out_weight', v, succ[k])
if 'function' in v:
v[t] = _parent_blue(v.pop('function'), memo, depth)
blue.deferred.append((method, v))
return blue
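# A minimal sketch of the intended round trip for some already-built
# Dispatcher `dsp` (assuming, as `extend` below and `run_model` in
# schedula.utils.dsp do, that a Blueprint's `register` call rebuilds the
# deferred object):
#
#     >>> blueprint = dsp.blue()            # every add_* call becomes deferred
#     >>> rebuilt = blueprint.register()    # construct an equivalent Dispatcher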
def extend(self, *blues, memo=None):
"""
Extends Dispatcher calling each deferred operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints and Dispatchers.
:type memo: dict[T,schedula.utils.blue.Blueprint|Dispatcher]
:return:
Self.
:rtype: Dispatcher
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_func(callable, ['is_callable'])
'callable'
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> dsp = sh.Dispatcher().extend(dsp, blue)
"""
from .utils.blue import BlueDispatcher as Blue
return Blue().extend(*blues, memo=memo).register(self, memo=memo)
def dispatch(self, inputs=None, outputs=None, inputs_dist=None,
wildcard=False, no_call=False, shrink=False,
rm_unused_nds=False, select_output_kw=None, _wait_in=None,
stopper=None, executor=False, sol_name=(), verbose=False):
"""
Evaluates the minimum workflow and data outputs of the dispatcher
model from given inputs.
:param inputs:
Input data values.
:type inputs: dict[str, T], list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True data node estimation function is not used and the input
values are not used.
:type no_call: bool, optional
:param shrink:
If True the dispatcher is shrunk before dispatching.
.. seealso:: :func:`shrink_dsp`
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param select_output_kw:
Kwargs of selector function to select specific outputs.
:type select_output_kw: dict, optional
:param _wait_in:
Override wait inputs.
:type _wait_in: dict, optional
:param stopper:
A semaphore to abort the dispatching.
:type stopper: multiprocess.Event, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
:type executor: str, optional
:param sol_name:
Solution name.
:type sol_name: tuple[str], optional
:param verbose:
If True the dispatcher logs the start and end of each function call.
:type verbose: bool, optional
:return:
Dictionary of estimated data node outputs.
:rtype: schedula.utils.sol.Solution
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function :math:`log(b - a)` and two data `a` and `b`
with default values:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=0)
'a'
>>> dsp.add_data(data_id='b', default_value=5)
'b'
>>> dsp.add_data(data_id='d', default_value=1)
'd'
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
>>> def my_domain(a, b):
... return a < b
>>> dsp.add_function('log(b - a)', function=my_log,
... inputs=['c', 'd'],
... outputs=['e'], input_domain=my_domain)
'log(b - a)'
>>> dsp.add_function('min', function=min, inputs=['a', 'b'],
... outputs=['c'])
'min'
Dispatch without inputs. The default values are used as inputs:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch()
>>> outputs
Solution([('a', 0), ('b', 5), ('d', 1), ('c', 0), ('e', 0.0)])
Dispatch until data node `c` is estimated:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(outputs=['c'])
>>> outputs
Solution([('a', 0), ('b', 5), ('c', 0)])
Dispatch with one input. The default value of `a` is not used as an
input:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 3})
>>> outputs
Solution([('a', 3), ('b', 5), ('d', 1), ('c', 3)])
"""
dsp = self
if not no_call:
if shrink: # Pre shrink.
dsp = self.shrink_dsp(inputs, outputs, inputs_dist, wildcard)
elif outputs:
dsp = self.get_sub_dsp_from_workflow(
outputs, self.dmap, reverse=True, blockers=inputs,
wildcard=wildcard
)
# Initialize.
self.solution = sol = self.solution.__class__(
dsp, inputs, outputs, wildcard, inputs_dist, no_call, rm_unused_nds,
_wait_in, full_name=sol_name, verbose=verbose
)
# Dispatch.
sol._run(stopper=stopper, executor=executor)
if select_output_kw:
return selector(dictionary=sol, **select_output_kw)
# Return the evaluated data outputs.
return sol
def __call__(self, *args, **kwargs):
return self.dispatch(*args, **kwargs)
def shrink_dsp(self, inputs=None, outputs=None, inputs_dist=None,
wildcard=True):
"""
Returns a reduced dispatcher.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`dispatch`
**--------------------------------------------------------------------**
**Example**:
A dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> functions = [
... {
... 'function_id': 'fun1',
... 'inputs': ['a', 'b'],
... 'outputs': ['c']
... },
... {
... 'function_id': 'fun2',
... 'inputs': ['b', 'd'],
... 'outputs': ['e']
... },
... {
... 'function_id': 'fun3',
... 'function': min,
... 'inputs': ['d', 'f'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun4',
... 'function': max,
... 'inputs': ['a', 'b'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun5',
... 'function': max,
... 'inputs': ['d', 'e'],
... 'outputs': ['c', 'f']
... },
... ]
>>> dsp.add_from_lists(fun_list=functions)
([], [...])
Get the sub-dispatcher induced by dispatching with no calls from inputs
`a`, `b`, and `d` to outputs `c` and `f`::
>>> shrink_dsp = dsp.shrink_dsp(inputs=['a', 'b', 'd'],
... outputs=['c', 'f'])
.. dispatcher:: shrink_dsp
:opt: graph_attr={'ratio': '1'}
>>> shrink_dsp.name = 'Sub-Dispatcher'
"""
bfs = None
if inputs:
# Get data-node flags set to not wait for inputs.
wait_in = self._get_wait_in(flag=False)
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, inputs_dist, wildcard, True, False,
True, _wait_in=wait_in
)
data_nodes = self.data_nodes # Get data nodes.
from .utils.alg import _union_workflow, _convert_bfs
bfs = _union_workflow(o) # bfs edges.
# Set minimum initial distances.
if inputs_dist:
inputs_dist = combine_dicts(o.dist, inputs_dist)
else:
inputs_dist = o.dist
# Set data nodes to wait inputs.
wait_in = self._get_wait_in(flag=True)
while True: # Start shrinking loop.
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, inputs_dist, wildcard, True, False,
False, _wait_in=wait_in
)
_union_workflow(o, bfs=bfs) # Update bfs.
n_d, status = o._remove_wait_in() # Remove wait input flags.
if not status:
break # Stop iteration.
# Update inputs.
inputs = n_d.intersection(data_nodes).union(inputs)
# Update outputs and convert bfs in DiGraphs.
outputs, bfs = outputs or o, _convert_bfs(bfs)
elif not outputs:
return self.copy_structure() # Empty Dispatcher.
# Get sub dispatcher breadth-first-search graph.
dsp = self._get_dsp_from_bfs(outputs, bfs_graphs=bfs)
return dsp # Return the shrink sub dispatcher.
def _get_dsp_from_bfs(self, outputs, bfs_graphs=None):
"""
Returns the sub-dispatcher induced by the workflow from outputs.
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param bfs_graphs:
A dictionary of directed graphs on which to evaluate the
breadth-first search.
:type bfs_graphs: dict[str | Token, schedula.utils.graph.DiGraph | dict]
:return:
A sub-dispatcher
:rtype: Dispatcher
"""
bfs = bfs_graphs[NONE] if bfs_graphs is not None else self.dmap
# Get sub dispatcher breadth-first-search graph.
dsp = self.get_sub_dsp_from_workflow(
sources=outputs, graph=bfs, reverse=True, _update_links=False
)
# Namespace shortcuts.
succ, nodes, pred = dsp.dmap.succ, dsp.nodes, dsp.dmap.pred
rm_edges, nds = dsp.dmap.remove_edges_from, dsp.data_nodes
from .utils.alg import _nodes, _get_sub_out, _update_io
for n in dsp.sub_dsp_nodes:
a = nodes[n] = nodes[n].copy()
bfs = bfs_graphs[n] if bfs_graphs is not None else None
out = _get_sub_out(a, succ[n])
if 'input_domain' in a:
out.update(_nodes(a['inputs'].values()))
a['function'] = a['function']._get_dsp_from_bfs(out, bfs)
i, o = _update_io(a, pred[n], succ[n]) # Unreachable nodes.
rm_edges({(u, n) for u in i}.union(((n, u) for u in o)))
return dsp
@staticmethod
def _edge_length(edge, node_out):
"""
Returns the edge length.
The edge length is edge weight + destination node weight.
:param edge:
Edge attributes.
:type edge: dict[str, int | float]
:param node_out:
Node attributes.
:type node_out: dict[str, int | float]
:return:
Edge length.
:rtype: float, int
"""
return edge.get('weight', 1) + node_out.get('weight', 0) # Length.
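# For example, _edge_length({'weight': 2}, {'weight': 0.5}) evaluates to 2.5,
# while _edge_length({}, {}) evaluates to 1: a missing edge weight defaults to
# 1 and a missing destination-node weight to 0.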
def _get_wait_in(self, flag=True, all_domain=True):
"""
Set `wait_inputs` flags for data nodes that:
- are estimated from functions with a domain function, and
- are waiting for inputs.
:param flag:
Value to be set. If None `wait_inputs` are just cleaned.
:type flag: bool, None, optional
:param all_domain:
Set `wait_inputs` flags for data nodes that are estimated from
functions with a domain function.
:type all_domain: bool, optional
"""
wait_in = {}
for n, a in self.data_nodes.items():
if n is not SINK and a['wait_inputs']:
wait_in[n] = flag
if all_domain:
for a in self.function_nodes.values():
if 'input_domain' in a:
wait_in.update(dict.fromkeys(a['outputs'], flag))
for n, a in self.sub_dsp_nodes.items():
if 'function' in a:
dsp = a['function']
wait_in[dsp] = w = dsp._get_wait_in(flag=flag)
if 'input_domain' not in a:
o = a['outputs']
w = [v for k, v in o.items() if k in w]
wait_in.update(dict.fromkeys(w, flag))
if 'input_domain' in a:
wait_in[n] = flag
wait_in.update(dict.fromkeys(a['outputs'].values(), flag))
return wait_in
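# A minimal sketch of the returned mapping (the dispatcher and node names are
# purely illustrative): outputs of functions with an `input_domain`, data
# nodes already flagged with `wait_inputs`, and sub-dispatcher nodes appear.
#
#     >>> d = Dispatcher()
#     >>> d.add_function('f', max, ['a', 'b'], ['c'],
#     ...                input_domain=lambda a, b: a < b)
#     'f'
#     >>> d._get_wait_in(flag=True)
#     {'c': True}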
# --- End of schedula/dispatcher.py (schedula-core-1.4.9, PyPI) ---
# --- Start of the next module: schedula's dsp utilities (schedula/utils/dsp.py) ---
import math
import inspect
import functools
import itertools
import collections
import copy as _copy
from .cst import START
from .gen import Token
from .base import Base
from .exc import DispatcherError
from dataclasses import dataclass
__author__ = 'Vincenzo Arcidiacono <[email protected]>'
def stlp(s):
"""
Converts a string into a 1-tuple; any other value is returned unchanged.
"""
if isinstance(s, str):
return s,
return s
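# For example, stlp('a') returns ('a',) while stlp(('a', 'b')) returns the
# tuple unchanged.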
def combine_dicts(*dicts, copy=False, base=None):
"""
Combines multiple dicts in one.
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict where combine multiple dicts in one.
:type base: dict, optional
:return:
A unique dict.
:rtype: dict
Example::
>>> sorted(combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2}).items())
[('a', 1), ('b', 2), ('c', 3)]
"""
if len(dicts) == 1 and base is None: # Only one input dict.
cd = dicts[0].copy()
else:
cd = {} if base is None else base # Initialize empty dict.
for d in dicts: # Combine dicts.
if d:
# noinspection PyTypeChecker
cd.update(d)
# Return combined dict.
return {k: _copy.deepcopy(v) for k, v in cd.items()} if copy else cd
def kk_dict(*kk, **adict):
"""
Merges and defines dictionaries with values identical to keys.
:param kk:
A sequence of keys and/or dictionaries.
:type kk: object | dict, optional
:param adict:
A dictionary.
:type adict: dict, optional
:return:
Merged dictionary.
:rtype: dict
Example::
>>> sorted(kk_dict('a', 'b', 'c').items())
[('a', 'a'), ('b', 'b'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'a-c': 'c'}).items())
[('a', 'a'), ('a-c', 'c'), ('b', 'b')]
>>> sorted(kk_dict('a', {'b': 'c'}, 'c').items())
[('a', 'a'), ('b', 'c'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated (b)
>>> sorted(kk_dict({'a': 0, 'b': 1}, **{'b': 2, 'a': 3}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated (a, b)
"""
for k in kk:
if isinstance(k, dict):
if any(i in adict for i in k):
k = ', '.join(sorted(set(k).intersection(adict)))
raise ValueError('keyword argument repeated ({})'.format(k))
adict.update(k)
elif k in adict:
raise ValueError('keyword argument repeated ({})'.format(k))
else:
adict[k] = k
return adict
def bypass(*inputs, copy=False):
"""
Returns the same arguments.
:param inputs:
Inputs values.
:type inputs: T
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:return:
Same input values.
:rtype: (T, ...), T
Example::
>>> bypass('a', 'b', 'c')
('a', 'b', 'c')
>>> bypass('a')
'a'
"""
if len(inputs) == 1:
inputs = inputs[0] # Same inputs.
return _copy.deepcopy(inputs) if copy else inputs # Return inputs.
def summation(*inputs):
"""
Sums inputs values.
:param inputs:
Inputs values.
:type inputs: int, float
:return:
Sum of the input values.
:rtype: int, float
Example::
>>> summation(1, 3.0, 4, 2)
10.0
"""
# Return the sum of the input values.
return functools.reduce(lambda x, y: x + y, inputs)
def map_dict(key_map, *dicts, copy=False, base=None):
"""
Returns a dict with new key values.
:param key_map:
A dictionary that maps the dict keys ({old key: new key}).
:type key_map: dict
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict where combine multiple dicts in one.
:type base: dict, optional
:return:
A unique dict with new key values.
:rtype: dict
Example::
>>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})
>>> sorted(d.items())
[('c', 1), ('d', 2)]
"""
it = combine_dicts(*dicts).items() # Combine dicts.
get = key_map.get # Namespace shortcut.
# Return mapped dict.
return combine_dicts({get(k, k): v for k, v in it}, copy=copy, base=base)
def map_list(key_map, *inputs, copy=False, base=None):
"""
Returns a new dict.
:param key_map:
A list that maps the dict keys ({old key: new key}).
:type key_map: list[str | dict | list]
:param inputs:
A sequence of data.
:type inputs: iterable | dict | int | float | list | tuple
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict where combine multiple dicts in one.
:type base: dict, optional
:return:
A unique dict with new values.
:rtype: dict
Example::
>>> key_map = [
... 'a',
... {'a': 'c'},
... [
... 'a',
... {'a': 'd'}
... ]
... ]
>>> inputs = (
... 2,
... {'a': 3, 'b': 2},
... [
... 1,
... {'a': 4}
... ]
... )
>>> d = map_list(key_map, *inputs)
>>> sorted(d.items())
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
"""
d = {} if base is None else base # Initialize empty dict.
for m, v in zip(key_map, inputs):
if isinstance(m, dict):
map_dict(m, v, base=d) # Apply a map dict.
elif isinstance(m, list):
map_list(m, *v, base=d) # Apply a map list.
else:
d[m] = v # Apply map.
return combine_dicts(copy=copy, base=d) # Return dict.
def selector(keys, dictionary, copy=False, output_type='dict',
allow_miss=False):
"""
Selects the chosen dictionary keys from the given dictionary.
:param keys:
Keys to select.
:type keys: list, tuple, set
:param dictionary:
A dictionary.
:type dictionary: dict
:param copy:
If True the output contains deep-copies of the values.
:type copy: bool
:param output_type:
Type of function output:
+ 'list': a list with all values listed in `keys`.
+ 'dict': a dictionary with any outputs listed in `keys`.
+ 'values': if output length == 1 return a single value otherwise a
tuple with all values listed in `keys`.
:type output_type: str, optional
:param allow_miss:
If True it does not raise when some key is missing in the dictionary.
:type allow_miss: bool
:return:
A dictionary with chosen dictionary keys if present in the sequence of
dictionaries. These are combined with :func:`combine_dicts`.
:rtype: dict
Example::
>>> import schedula as sh
>>> fun = sh.partial(selector, ['a', 'b'])
>>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items())
[('a', 1), ('b', 2)]
"""
if not allow_miss:
# noinspection PyUnusedLocal
def check(key):
return True
else:
def check(key):
return key in dictionary
if output_type == 'list': # Select as list.
res = [dictionary[k] for k in keys if check(k)]
return _copy.deepcopy(res) if copy else res
elif output_type == 'values':
return bypass(*[dictionary[k] for k in keys if check(k)], copy=copy)
# Select as dict.
return bypass({k: dictionary[k] for k in keys if check(k)}, copy=copy)
def replicate_value(value, n=2, copy=True):
"""
Replicates `n` times the input value.
:param n:
Number of replications.
:type n: int
:param value:
Value to be replicated.
:type value: T
:param copy:
If True the list contains deep-copies of the value.
:type copy: bool
:return:
The value replicated `n` times (a tuple when `n` > 1).
:rtype: tuple
Example::
>>> import schedula as sh
>>> fun = sh.partial(replicate_value, n=5)
>>> fun({'a': 3})
({'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}, {'a': 3})
"""
return bypass(*[value] * n, copy=copy) # Return replicated values.
def parent_func(func, input_id=None):
"""
Return the parent function of a wrapped function (wrapped with
:class:`functools.partial` and :class:`add_args`).
:param func:
Wrapped function.
:type func: callable
:param input_id:
Index of the first input of the wrapped function.
:type input_id: int
:return:
Parent function.
:rtype: callable
"""
if isinstance(func, add_args):
if input_id is not None:
input_id -= func.n
return parent_func(func.func, input_id=input_id)
elif isinstance(func, partial):
if input_id is not None:
# noinspection PyTypeChecker
input_id += len(func.args)
return parent_func(func.func, input_id=input_id)
if input_id is None:
return func
else:
return func, input_id
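# A small sketch of the unwrapping (using `summation`, defined above, purely
# for illustration): each bound positional argument shifts the input index.
#
#     >>> f = partial(summation, 1)
#     >>> parent_func(f) is summation
#     True
#     >>> parent_func(f, input_id=0) == (summation, 1)
#     True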
if inspect.isclass(functools.partial):
partial = functools.partial
else: # MicroPython.
class partial:
def __init__(self, func, *args, **keywords):
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
keywords = combine_dicts(self.keywords, keywords)
return self.func(*(self.args + args), **keywords)
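# The fallback mirrors functools.partial for the simple use made in this
# package: bound positional arguments are prepended to the call, e.g.
#
#     >>> add_two = partial(summation, 2)
#     >>> add_two(3)
#     5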
class add_args:
"""
Adds arguments to a function (left side).
:param func:
Function to wrap.
:type func: callable
:param n:
Number of unused arguments to add to the left side.
:type n: int
:return:
Wrapped function.
:rtype: callable
Example::
>>> import inspect
>>> def original_func(a, b, *args, c=0):
... '''Doc'''
... return a + b + c
>>> func = add_args(original_func, n=2)
>>> func.__name__, func.__doc__
('original_func', 'Doc')
>>> func(1, 2, 3, 4, c=5)
12
>>> str(inspect.signature(func))
'(none, none, a, b, *args, c=0)'
"""
__name__ = __doc__ = None
_args = ('func', 'n', 'callback')
def __init__(self, func, n=1, callback=None):
self.n = n
self.callback = callback
self.func = func
for i in range(2):
# noinspection PyBroadException
try:
self.__name__ = func.__name__
self.__doc__ = func.__doc__
break
except AttributeError:
func = parent_func(func)
@property
def __signature__(self):
return _get_signature(self.func, self.n)
def __call__(self, *args, **kwargs):
res = self.func(*args[self.n:], **kwargs)
if self.callback:
self.callback(res, *args, **kwargs)
return res
def _get_signature(func, n=1):
import inspect
sig = inspect.signature(func) # Get function signature.
def ept_par(): # Return none signature parameter.
name = Token('none')
return name, inspect.Parameter(name, inspect._POSITIONAL_OR_KEYWORD)
# Update signature parameters.
par = itertools.chain(*([p() for p in itertools.repeat(ept_par, n)],
sig.parameters.items()))
sig._parameters = sig._parameters.__class__(collections.OrderedDict(par))
return sig
def stack_nested_keys(nested_dict, key=(), depth=-1):
"""
Stacks the keys of nested-dictionaries into tuples and yields a list of
k-v pairs.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param key:
Initial keys.
:type key: tuple, optional
:param depth:
Maximum keys depth.
:type depth: int, optional
:return:
List of k-v pairs.
:rtype: generator
"""
if depth != 0 and hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
yield from stack_nested_keys(v, key=key + (k,), depth=depth - 1)
else:
yield key, nested_dict
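# For example (illustrative input):
#
#     >>> sorted(stack_nested_keys({'a': {'b': 1}, 'c': 2}))
#     [(('a', 'b'), 1), (('c',), 2)]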
def get_nested_dicts(nested_dict, *keys, default=None, init_nesting=dict):
"""
Get/Initialize the value of nested-dictionaries.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param keys:
Nested keys.
:type keys: object
:param default:
Function used to initialize a new value.
:type default: callable, optional
:param init_nesting:
Function used to initialize a new intermediate nesting dict.
:type init_nesting: callable, optional
:return:
Value of nested-dictionary.
:rtype: generator
"""
if keys:
default = default or init_nesting
if keys[0] in nested_dict:
nd = nested_dict[keys[0]]
else:
d = default() if len(keys) == 1 else init_nesting()
nd = nested_dict[keys[0]] = d
return get_nested_dicts(nd, *keys[1:], default=default,
init_nesting=init_nesting)
return nested_dict
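# A short sketch of the get/initialize behaviour (the dict `d` is illustrative):
#
#     >>> d = {}
#     >>> get_nested_dicts(d, 'a', 'b', default=list).append(1)
#     >>> d
#     {'a': {'b': [1]}}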
def are_in_nested_dicts(nested_dict, *keys):
"""
Returns True if the nested keys are inside the nested-dictionaries.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param keys:
Nested keys.
:type keys: object
:return:
True if nested keys are inside of nested-dictionaries, otherwise False.
:rtype: bool
"""
if keys:
# noinspection PyBroadException
try:
return are_in_nested_dicts(nested_dict[keys[0]], *keys[1:])
except Exception: # Key error or not a dict.
return False
return True
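# For example:
#
#     >>> are_in_nested_dicts({'a': {'b': 1}}, 'a', 'b')
#     True
#     >>> are_in_nested_dicts({'a': {'b': 1}}, 'a', 'c')
#     False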
def combine_nested_dicts(*nested_dicts, depth=-1, base=None):
"""
Merge nested-dictionaries.
:param nested_dicts:
Nested dictionaries.
:type nested_dicts: dict
:param depth:
Maximum keys depth.
:type depth: int, optional
:param base:
Base dict where combine multiple dicts in one.
:type base: dict, optional
:return:
Combined nested-dictionary.
:rtype: dict
"""
if base is None:
base = {}
for nested_dict in nested_dicts:
for k, v in stack_nested_keys(nested_dict, depth=depth):
while k:
# noinspection PyBroadException
try:
get_nested_dicts(base, *k[:-1])[k[-1]] = v
break
except Exception:
# A branch of the nested_dict is longer than the base.
k = k[:-1]
v = get_nested_dicts(nested_dict, *k)
return base
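# For example, deeper branches are merged key by key:
#
#     >>> combine_nested_dicts({'a': {'b': 1}}, {'a': {'c': 2}, 'd': 3})
#     {'a': {'b': 1, 'c': 2}, 'd': 3}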
class SubDispatch(Base):
"""
It dispatches a given :class:`~schedula.dispatcher.Dispatcher` like a
function.
This function takes a sequence of dictionaries as input that will be
combined before the dispatching.
:return:
A function that executes the dispatch of the given
:class:`~schedula.dispatcher.Dispatcher`.
:rtype: callable
.. seealso:: :func:`~schedula.dispatcher.Dispatcher.dispatch`,
:func:`combine_dicts`
Example:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> from schedula import Dispatcher
>>> sub_dsp = Dispatcher(name='Sub-dispatcher')
...
>>> def fun(a):
... return a + 1, a - 1
...
>>> sub_dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
>>> dispatch = SubDispatch(sub_dsp, ['a', 'b', 'c'], output_type='dict')
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatch', dispatch, ['d'], ['e'])
'Sub-dispatch'
The Dispatcher output is:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> o = dsp.dispatch(inputs={'d': {'a': 3}})
while, the Sub-dispatch is:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> sol = o.workflow.nodes['Sub-dispatch']['solution']
>>> sol
Solution([('a', 3), ('b', 4), ('c', 2)])
>>> sol == o['e']
True
"""
def __new__(cls, dsp=None, *args, **kwargs):
from .blue import Blueprint
if isinstance(dsp, Blueprint):
return Blueprint(dsp, *args, **kwargs)._set_cls(cls)
return super(SubDispatch, cls).__new__(cls)
def __getstate__(self):
state = self.__dict__.copy()
state['solution'] = state['solution'].__class__(state['dsp'])
del state['__name__']
return state
def __setstate__(self, d):
self.__dict__ = d
self.__name__ = self.name
def __init__(self, dsp, outputs=None, inputs_dist=None, wildcard=False,
no_call=False, shrink=False, rm_unused_nds=False,
output_type='all', function_id=None, output_type_kw=None):
"""
Initializes the Sub-dispatch.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher | schedula.utils.blue.BlueDispatcher
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True data node estimation function is not used.
:type no_call: bool, optional
:param shrink:
If True the dispatcher is shrunk before dispatching.
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param output_type:
Type of function output:
+ 'all': a dictionary with all dispatch outputs.
+ 'list': a list with all outputs listed in `outputs`.
+ 'dict': a dictionary with any outputs listed in `outputs`.
:type output_type: str, optional
:param output_type_kw:
Extra kwargs to pass to the `selector` function.
:type output_type_kw: dict, optional
:param function_id:
Function name.
:type function_id: str, optional
"""
self.dsp = dsp
self.outputs = outputs
self.wildcard = wildcard
self.no_call = no_call
self.shrink = shrink
self.output_type = output_type
self.output_type_kw = output_type_kw or {}
self.inputs_dist = inputs_dist
self.rm_unused_nds = rm_unused_nds
self.name = self.__name__ = function_id or dsp.name
self.__doc__ = dsp.__doc__
self.solution = dsp.solution.__class__(dsp)
def blue(self, memo=None, depth=-1):
"""
Constructs a Blueprint out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:param depth:
Depth of sub-dispatch blue. If negative, all levels are blueprinted.
:type depth: int, optional
:return:
A Blueprint of the current object.
:rtype: schedula.utils.blue.Blueprint
"""
if depth == 0:
return self
depth -= 1
memo = {} if memo is None else memo
if self not in memo:
import inspect
from .blue import Blueprint, _parent_blue
keys = tuple(inspect.signature(self.__init__).parameters)
memo[self] = Blueprint(**{
k: _parent_blue(v, memo, depth)
for k, v in self.__dict__.items() if k in keys
})._set_cls(self.__class__)
return memo[self]
def __call__(self, *input_dicts, copy_input_dicts=False, _stopper=None,
_executor=False, _sol_name=(), _verbose=False):
# Combine input dictionaries.
i = combine_dicts(*input_dicts, copy=copy_input_dicts)
# Dispatch the function calls.
self.solution = self.dsp.dispatch(
i, self.outputs, self.inputs_dist, self.wildcard, self.no_call,
self.shrink, self.rm_unused_nds, stopper=_stopper,
executor=_executor, sol_name=_sol_name, verbose=_verbose
)
return self._return(self.solution)
def _return(self, solution):
outs = self.outputs
solution.result()
solution.parent = self
# Set output.
if self.output_type != 'all':
try:
# Save outputs.
return selector(
outs, solution, output_type=self.output_type,
**self.output_type_kw
)
except KeyError:
# Outputs not reached.
missed = {k for k in outs if k not in solution}
# Raise error
msg = '\n Unreachable output-targets: {}\n Available ' \
'outputs: {}'.format(missed, list(solution.keys()))
raise DispatcherError(msg, sol=solution)
return solution # Return outputs.
def copy(self):
return _copy.deepcopy(self)
class run_model:
"""
It is a utility function to execute dynamically generated functions/models
and - if Dispatcher based - add their workflows to the parent solution.
:return:
A function that executes the dispatch of the given `dsp`.
:rtype: callable
**Example**:
Follows a simple example on how to use the
:func:`~schedula.utils.dsp.run_model`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(
... function_id='execute_dsp', function=run_model,
... inputs=['dsp_model', 'inputs'], outputs=['outputs']
... )
'execute_dsp'
>>> dsp_model = Dispatcher(name='Model')
>>> dsp_model.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> sol = dsp({'dsp_model': dsp_model, 'inputs': {'b': 1, 'a': 2}})
>>> sol['outputs']
Solution([('a', 2), ('b', 1), ('c', 2)])
>>> sol.workflow.nodes['execute_dsp']['solution']
Solution([('a', 2), ('b', 1), ('c', 2)])
Moreover, it can be used also with all
:func:`~schedula.utils.dsp.SubDispatcher` like objects::
>>> sub_dsp = SubDispatch(dsp_model, outputs=['c'], output_type='list')
>>> sol = dsp({'dsp_model': sub_dsp, 'inputs': {'b': 1, 'a': 2}})
>>> sol['outputs']
[2]
>>> sol.workflow.nodes['execute_dsp']['solution']
Solution([('a', 2), ('b', 1), ('c', 2)])
"""
def __init__(self, func, *args, _init=None, **kwargs):
from .blue import Blueprint
if isinstance(func, Blueprint):
func = func.register(memo={})
self.func = func
if _init:
args, kwargs = _init(*args, **kwargs)
self.args = args
self.kwargs = kwargs
def __call__(self, **kwargs):
return self.func(*self.args, **self.kwargs, **kwargs)
class MapDispatch(SubDispatch):
"""
It dynamically builds a :class:`~schedula.dispatcher.Dispatcher` that is
used to invoke recursively a *dispatching function* that is defined
by a constructor function that takes a `dsp` base model as input.
The created function takes a list of dictionaries as input that are used to
invoke the mapping function and returns a list of outputs.
:return:
A function that executes the dispatch of the given
:class:`~schedula.dispatcher.Dispatcher`.
:rtype: callable
.. seealso:: :func:`~schedula.utils.dsp.SubDispatch`
Example:
A simple example on how to use the :func:`~schedula.utils.dsp.MapDispatch`:
.. dispatcher:: map_func
:opt: graph_attr={'ratio': '1'}, depth=-1, workflow=True
:code:
>>> from schedula import Dispatcher, MapDispatch
>>> dsp = Dispatcher(name='model')
...
>>> def fun(a, b):
... return a + b, a - b
...
>>> dsp.add_func(fun, ['c', 'd'], inputs_kwargs=True)
'fun'
>>> map_func = MapDispatch(dsp, constructor_kwargs={
... 'outputs': ['c', 'd'], 'output_type': 'list'
... })
>>> map_func([{'a': 1, 'b': 2}, {'a': 2, 'b': 2}, {'a': 3, 'b': 2}])
[[3, -1], [4, 0], [5, 1]]
The execution model is created dynamically according to the length of the
provided inputs. Moreover, the :func:`~schedula.utils.dsp.MapDispatch` has
the possibility to define default values, that are recursively merged with
the input provided to the *dispatching function* as follows:
.. dispatcher:: map_func
:opt: graph_attr={'ratio': '1'}, depth=-1, workflow=True
:code:
>>> map_func([{'a': 1}, {'a': 3, 'b': 3}], defaults={'b': 2})
[[3, -1], [6, 0]]
The :func:`~schedula.utils.dsp.MapDispatch` can also be used as a partial
reducing function, i.e., part of the outputs of the previous step are used as
inputs for the next execution of the *dispatching function*. For
example:
.. dispatcher:: map_func
:opt: graph_attr={'ratio': '1'}, depth=-1, workflow=True
:code:
>>> map_func = MapDispatch(dsp, recursive_inputs={'c': 'b'})
>>> map_func([{'a': 1, 'b': 1}, {'a': 2}, {'a': 3}])
[Solution([('a', 1), ('b', 1), ('c', 2), ('d', 0)]),
Solution([('a', 2), ('b', 2), ('c', 4), ('d', 0)]),
Solution([('a', 3), ('b', 4), ('c', 7), ('d', -1)])]
"""
def __init__(self, dsp, defaults=None, recursive_inputs=None,
constructor=SubDispatch, constructor_kwargs=None,
function_id=None, func_kw=lambda *args, **data: {},
input_label='inputs<{}>', output_label='outputs<{}>',
data_label='data<{}>', cluster_label='task<{}>', **kwargs):
"""
Initializes the MapDispatch function.
:param dsp:
A dispatcher that identifies the base model.
:type dsp: schedula.Dispatcher | schedula.utils.blue.BlueDispatcher
:param defaults:
Defaults values that are recursively merged with the input provided
to the *dispatching function*.
:type defaults: dict
:param recursive_inputs:
List of data node ids that are extracted from the outputs of the
*dispatching function* and then merged with the inputs of its
successive evaluation. If a dictionary is given, this is used to
rename the data node ids extracted.
:type recursive_inputs: list | dict
:param constructor:
It initializes the *dispatching function*.
:type constructor: function | class
:param constructor_kwargs:
Extra keywords passed to the constructor function.
:type constructor_kwargs: dict, optional
:param function_id:
Function name.
:type function_id: str, optional
:param func_kw:
Extra keywords used when adding the *dispatching function* to the execution model.
:type func_kw: function, optional
:param input_label:
Custom label formatter for recursive inputs.
:type input_label: str, optional
:param output_label:
Custom label formatter for recursive outputs.
:type output_label: str, optional
:param data_label:
Custom label formatter for recursive internal data.
:type data_label: str, optional
:param kwargs:
Keywords to initialize the execution model.
:type kwargs: object
"""
super(MapDispatch, self).__init__(
dsp, function_id=function_id, output_type='list'
)
self.func = constructor(dsp, **(constructor_kwargs or {}))
self.kwargs = kwargs or {}
self.defaults = defaults
self.recursive_inputs = recursive_inputs
self.input_label = input_label
self.output_label = output_label
self.data_label = data_label
self.cluster_label = cluster_label
self.func_kw = func_kw
@staticmethod
def prepare_inputs(inputs, defaults):
inputs = [combine_dicts(defaults, d) for d in inputs]
return inputs if len(inputs) > 1 else inputs[0]
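# Illustrative sketch of the merge behaviour (assuming `combine_dicts` gives
# each input dict precedence over `defaults`, as in the class example above);
# note that a single-item list collapses to the bare dictionary::
#
#     >>> MapDispatch.prepare_inputs([{'a': 1, 'b': 3}], {'b': 2})['b']
#     3
#     >>> MapDispatch.prepare_inputs([{'a': 1}], {'b': 2})['b']
#     2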
@staticmethod
def recursive_data(recursive_inputs, input_data, outputs):
data = selector(recursive_inputs, outputs or {}, allow_miss=True)
if isinstance(recursive_inputs, dict):
data = map_dict(recursive_inputs, data)
data.update(input_data)
return data
@staticmethod
def format_labels(it, label):
f = label.format
return [f(k, **v) for k, v in it]
@staticmethod
def format_clusters(it, label):
f = label.format
return [{'body': {
'label': f'"{f(k, **v)}"', 'labelloc': 'b'
}} for k, v in it]
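# Illustrative sketch: the label helpers turn the zero-padded execution index
# into the node ids (and graphviz cluster bodies) of the generated model::
#
#     >>> MapDispatch.format_labels([('01', {}), ('02', {})], 'inputs<{}>')
#     ['inputs<01>', 'inputs<02>']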
def _init_dsp(self, defaults, inputs, recursive_inputs=None):
from ..dispatcher import Dispatcher
defaults = combine_dicts(self.defaults or {}, defaults or {})
self.dsp = dsp = Dispatcher(**self.kwargs)
add_data, add_func = dsp.add_data, dsp.add_func
n = len(str(len(inputs) + 1))
it = [(str(k).zfill(n), v) for k, v in enumerate(inputs, 1)]
inp = self.format_labels(it, self.input_label)
clt = self.format_clusters(it, self.cluster_label)
rl = self.format_labels(it, 'run<{}>')
self.outputs = out = self.format_labels(it, self.output_label)
add_func(self.prepare_inputs, inp, inputs=['inputs', 'defaults'])
recursive = recursive_inputs or self.recursive_inputs
if recursive:
func = functools.partial(self.recursive_data, recursive)
dat = self.format_labels(it, self.data_label)
fl = self.format_labels(it, 'recursive_data<{}>')
it = iter(zip(inp, dat, clt, fl))
i, d, c, fid = next(it)
add_data(i, clusters=c)
add_func(bypass, [d], inputs=[i], clusters=c)
for (i, d, c, fid), o in zip(it, out[:-1]):
add_data(i, clusters=c)
add_func(func, [d], inputs=[i, o], clusters=c, function_id=fid)
inp = dat
for i, o, c, fid, (k, v) in zip(inp, out, clt, rl, enumerate(inputs)):
add_data(i, clusters=c)
kw = {'clusters': c, 'function_id': fid}
kw.update(self.func_kw(k, **v))
add_func(self.func, [o], inputs=[i], **kw)
add_data(o, clusters=c)
return {'inputs': inputs, 'defaults': defaults}
# noinspection PyMethodOverriding
def __call__(self, inputs, defaults=None, recursive_inputs=None,
_stopper=None, _executor=False, _sol_name=(), _verbose=False):
inputs = self._init_dsp(defaults, inputs, recursive_inputs)
return super(MapDispatch, self).__call__(
inputs, _stopper=_stopper, _executor=_executor, _verbose=_verbose,
_sol_name=_sol_name
)
class SubDispatchFunction(SubDispatch):
"""
It converts a :class:`~schedula.dispatcher.Dispatcher` into a function.
This function takes a sequence of arguments or a key values as input of the
dispatch.
:return:
A function that executes the dispatch of the given `dsp`.
:rtype: callable
.. seealso:: :func:`~schedula.dispatcher.Dispatcher.dispatch`,
:func:`~schedula.dispatcher.Dispatcher.shrink_dsp`
**Example**:
A dispatcher with two functions `max` and `min` and an unresolved cycle
(i.e., `a` --> `max` --> `c` --> `min` --> `a`):
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> from math import log
>>> def my_log(x):
... return log(x - 1)
>>> dsp.add_function('log(x - 1)', my_log, inputs=['c'],
... outputs=['a'], input_domain=lambda c: c > 1)
'log(x - 1)'
Extract a static function node, i.e. the inputs `a` and `b` and the
output `a` are fixed::
>>> fun = SubDispatchFunction(dsp, 'myF', ['a', 'b'], ['a'])
>>> fun.__name__
'myF'
>>> fun(b=1, a=2)
0.0
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
>>> fun.dsp.name = 'Created function internal'
The created function raises a DispatcherError if invalid inputs are
provided:
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> fun(1, 0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
DispatcherError:
Unreachable output-targets: ...
Available outputs: ...
"""
var_keyword = 'kw'
def __init__(self, dsp, function_id=None, inputs=None, outputs=None,
inputs_dist=None, shrink=True, wildcard=True, output_type=None,
output_type_kw=None, first_arg_as_kw=False):
"""
Initializes the Sub-dispatch Function.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher | schedula.utils.blue.BlueDispatcher
:param function_id:
Function name.
:type function_id: str, optional
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param shrink:
If True, the dispatcher is shrunk before the dispatch.
:type shrink: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param output_type:
Type of function output:
+ 'all': a dictionary with all dispatch outputs.
+ 'list': a list with all outputs listed in `outputs`.
+ 'dict': a dictionary with any outputs listed in `outputs`.
:type output_type: str, optional
:param output_type_kw:
Extra kwargs to pass to the `selector` function.
:type output_type_kw: dict, optional
:param first_arg_as_kw:
Uses the first argument of the __call__ method as `kwargs`.
:type first_arg_as_kw: bool, optional
"""
if shrink:
dsp = dsp.shrink_dsp(
inputs, outputs, inputs_dist=inputs_dist, wildcard=wildcard
)
if outputs:
# Outputs not reached.
missed = {k for k in outputs if k not in dsp.nodes}
if missed: # If outputs are missing raise error.
available = list(dsp.data_nodes.keys()) # Available data nodes.
# Raise error
msg = '\n Unreachable output-targets: {}\n Available ' \
'outputs: {}'.format(missed, available)
raise ValueError(msg)
# Set internal properties.
self.inputs = inputs
# Set dsp name equal to function id.
self.function_id = dsp.name = function_id or dsp.name or 'fun'
no_call = False
self._sol = dsp.solution.__class__(
dsp, dict.fromkeys(inputs or (), None), outputs, wildcard, None,
inputs_dist, no_call, False
)
# Initialize as sub dispatch.
super(SubDispatchFunction, self).__init__(
dsp, outputs, inputs_dist, wildcard, no_call, True, True, 'list',
output_type_kw=output_type_kw
)
# Define the function to return outputs sorted.
if output_type is not None:
self.output_type = output_type
elif outputs is None:
self.output_type = 'all'
elif len(outputs) == 1:
self.output_type = 'values'
self.first_arg_as_kw = first_arg_as_kw
@property
def __signature__(self):
import inspect
dfl, p = self.dsp.default_values, []
for name in self.inputs or ():
par = inspect.Parameter('par', inspect._POSITIONAL_OR_KEYWORD)
par._name = name
if name in dfl:
par._default = dfl[name]['value']
p.append(par)
if self.var_keyword:
p.append(inspect.Parameter(self.var_keyword, inspect._VAR_KEYWORD))
return inspect.Signature(p, __validate_parameters__=False)
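# Illustrative sketch: the generated signature mirrors the `inputs` list plus
# the `kw` var-keyword slot, so introspection tools see the dispatcher inputs
# as regular parameters (using the `fun` object of the class docstring)::
#
#     >>> import inspect
#     >>> str(inspect.signature(fun))
#     '(a, b, **kw)'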
def _parse_inputs(self, *args, **kw):
if self.first_arg_as_kw:
for k in sorted(args[0]):
if k in kw:
msg = 'multiple values for argument %r'
raise TypeError(msg % k) from None
kw.update(args[0])
args = args[1:]
defaults, inputs = self.dsp.default_values, {}
for i, k in enumerate(self.inputs or ()):
try:
inputs[k] = args[i]
if k in kw:
msg = 'multiple values for argument %r'
raise TypeError(msg % k) from None
except IndexError:
if k in kw:
inputs[k] = kw.pop(k)
elif k in defaults:
inputs[k] = defaults[k]['value']
else:
msg = 'missing a required argument: %r'
raise TypeError(msg % k) from None
if len(inputs) < len(args):
raise TypeError('too many positional arguments') from None
if self.var_keyword:
inputs.update(kw)
elif not all(k in inputs for k in kw):
k = next(k for k in sorted(kw) if k not in inputs)
msg = 'got an unexpected keyword argument %r'
raise TypeError(msg % k) from None
return inputs
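# Illustrative sketch: argument parsing mimics a plain Python function,
# continuing the class-docstring example::
#
#     >>> fun(2)                  # doctest: +IGNORE_EXCEPTION_DETAIL
#     Traceback (most recent call last):
#     ...
#     TypeError: missing a required argument: 'b'
#     >>> fun(2, 1, a=0)          # doctest: +IGNORE_EXCEPTION_DETAIL
#     Traceback (most recent call last):
#     ...
#     TypeError: multiple values for argument 'a'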
def __call__(self, *args, _stopper=None, _executor=False, _sol_name=(),
_verbose=False, **kw):
# Namespace shortcuts.
self.solution = sol = self._sol._copy_structure()
sol.verbose = _verbose
self.solution.full_name, dfl = _sol_name, self.dsp.default_values
# Parse inputs.
inp = self._parse_inputs(*args, **kw)
i = tuple(k for k in inp if k not in self.dsp.data_nodes)
if i:
msg = "%s() got an unexpected keyword argument '%s'"
raise TypeError(msg % (self.function_id, min(i)))
inputs_dist = combine_dicts(
sol.inputs_dist, dict.fromkeys(inp, 0), self.inputs_dist or {}
)
inp.update({k: v['value'] for k, v in dfl.items() if k not in inp})
# Initialize.
sol._init_workflow(inp, inputs_dist=inputs_dist, clean=False)
# Dispatch outputs.
sol._run(stopper=_stopper, executor=_executor)
# Return outputs sorted.
return self._return(sol)
class SubDispatchPipe(SubDispatchFunction):
"""
It converts a :class:`~schedula.dispatcher.Dispatcher` into a function.
This function takes a sequence of arguments as input of the dispatch.
:return:
A function that executes the pipe of the given `dsp`.
:rtype: callable
.. seealso:: :func:`~schedula.dispatcher.Dispatcher.dispatch`,
:func:`~schedula.dispatcher.Dispatcher.shrink_dsp`
**Example**:
A dispatcher with two functions `max` and `min` and an unresolved cycle
(i.e., `a` --> `max` --> `c` --> `min` --> `a`):
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> def func(x):
... return x - 1
>>> dsp.add_function('x - 1', func, inputs=['c'], outputs=['a'])
'x - 1'
Extract a static function node, i.e. the inputs `a` and `b` and the
output `a` are fixed::
>>> fun = SubDispatchPipe(dsp, 'myF', ['a', 'b'], ['a'])
>>> fun.__name__
'myF'
>>> fun(2, 1)
1
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
>>> fun.dsp.name = 'Created function internal'
Unlike :class:`SubDispatchFunction`, the created function follows a
pre-computed pipe, so invalid inputs do not necessarily raise an error:
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> fun(1, 0)
0
"""
var_keyword = None
def __init__(self, dsp, function_id=None, inputs=None, outputs=None,
inputs_dist=None, no_domain=True, wildcard=True, shrink=True,
output_type=None, output_type_kw=None, first_arg_as_kw=False):
"""
Initializes the Sub-dispatch Function.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher | schedula.utils.blue.BlueDispatcher
:param function_id:
Function name.
:type function_id: str
:param inputs:
Input data nodes.
:type inputs: list[str], iterable
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param no_domain:
Skip the domain check.
:type no_domain: bool, optional
:param shrink:
If True, the dispatcher is shrunk before the dispatch.
:type shrink: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param output_type:
Type of function output:
+ 'all': a dictionary with all dispatch outputs.
+ 'list': a list with all outputs listed in `outputs`.
+ 'dict': a dictionary with any outputs listed in `outputs`.
:type output_type: str, optional
:param output_type_kw:
Extra kwargs to pass to the `selector` function.
:type output_type_kw: dict, optional
:param first_arg_as_kw:
Uses the first argument of the __call__ method as `kwargs`.
:type first_arg_as_kw: bool, optional
"""
self.solution = sol = dsp.solution.__class__(
dsp, inputs, outputs, wildcard, inputs_dist, True, True,
no_domain=no_domain
)
sol._run()
if shrink:
from .alg import _union_workflow, _convert_bfs
bfs = _union_workflow(sol)
o, bfs = outputs or sol, _convert_bfs(bfs)
dsp = dsp._get_dsp_from_bfs(o, bfs_graphs=bfs)
super(SubDispatchPipe, self).__init__(
dsp, function_id, inputs, outputs=outputs, inputs_dist=inputs_dist,
shrink=False, wildcard=wildcard, output_type=output_type,
output_type_kw=output_type_kw, first_arg_as_kw=first_arg_as_kw
)
self._reset_sol()
self.pipe = self._set_pipe()
def _reset_sol(self):
self._sol.no_call = True
self._sol._init_workflow()
self._sol._run()
self._sol.no_call = False
def _set_pipe(self):
def _make_tks(task):
v, s = task[-1]
if v is START:
nxt_nds = s.dsp.dmap[v]
else:
nxt_nds = s.workflow[v]
nxt_dsp = [n for n in nxt_nds if s.nodes[n]['type'] == 'dispatcher']
nxt_dsp = [(n, s._edge_length(s.dmap[v][n], s.nodes[n]))
for n in nxt_dsp]
return (task[0], task[1], (v, s)), nxt_nds, nxt_dsp
return [_make_tks(v['task']) for v in self._sol.pipe.values()]
def _init_new_solution(self, full_name, verbose):
key_map, sub_sol = {}, {}
for k, s in self._sol.sub_sol.items():
ns = s._copy_structure(dist=1)
ns.verbose = verbose
ns.fringe = None
ns.sub_sol = sub_sol
ns.full_name = full_name + s.full_name
key_map[s] = ns
sub_sol[ns.index] = ns
return key_map[self._sol], lambda x: key_map[x]
def _init_workflows(self, inputs):
self.solution.inputs.update(inputs)
for s in self.solution.sub_sol.values():
s._init_workflow(clean=False)
def _callback_pipe_failure(self):
pass
def _pipe_append(self):
return self.solution._pipe.append
def __call__(self, *args, _stopper=None, _executor=False, _sol_name=(),
_verbose=False, **kw):
self.solution, key_map = self._init_new_solution(_sol_name, _verbose)
pipe_append = self._pipe_append()
self._init_workflows(self._parse_inputs(*args, **kw))
for x, nxt_nds, nxt_dsp in self.pipe:
v, s = x[-1]
s = key_map(s)
pipe_append(x[:2] + ((v, s),))
if not s._set_node_output(
v, False, next_nds=nxt_nds, stopper=_stopper,
executor=_executor):
self._callback_pipe_failure()
break
for n, vw_d in nxt_dsp:
s._set_sub_dsp_node_input(v, n, [], False, vw_d)
s._see_remote_link_node(v)
# Return outputs sorted.
return self._return(self.solution)
class NoSub:
"""Class for avoiding to add a sub solution to the workflow."""
class DispatchPipe(NoSub, SubDispatchPipe):
"""
It converts a :class:`~schedula.dispatcher.Dispatcher` into a function.
This function takes a sequence of arguments as input of the dispatch.
:return:
A function that executes the pipe of the given `dsp`, updating its
workflow.
:rtype: callable
.. note::
This wrapper is not thread safe, because it overwrites the solution.
.. seealso:: :func:`~schedula.dispatcher.Dispatcher.dispatch`,
:func:`~schedula.dispatcher.Dispatcher.shrink_dsp`
**Example**:
A dispatcher with two functions `max` and `min` and an unresolved cycle
(i.e., `a` --> `max` --> `c` --> `min` --> `a`):
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> def func(x):
... return x - 1
>>> dsp.add_function('x - 1', func, inputs=['c'], outputs=['a'])
'x - 1'
Extract a static function node, i.e. the inputs `a` and `b` and the
output `a` are fixed::
>>> fun = DispatchPipe(dsp, 'myF', ['a', 'b'], ['a'])
>>> fun.__name__
'myF'
>>> fun(2, 1)
1
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
>>> fun.dsp.name = 'Created function internal'
Unlike :class:`SubDispatchFunction`, the created function follows a
pre-computed pipe, so invalid inputs do not necessarily raise an error:
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> fun(1, 0)
0
"""
def __getstate__(self):
self._init_workflows(dict.fromkeys(self.inputs or ()))
self._reset_sol()
state = super(DispatchPipe, self).__getstate__()
del state['pipe']
return state
def __setstate__(self, d):
super(DispatchPipe, self).__setstate__(d)
self.pipe = self._set_pipe()
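# Illustrative sketch: dropping the pre-computed `pipe` in __getstate__ and
# rebuilding it in __setstate__ keeps a DispatchPipe picklable (assuming the
# wrapped model itself is picklable)::
#
#     >>> import pickle
#     >>> fun2 = pickle.loads(pickle.dumps(fun))
#     >>> fun2(2, 1)
#     1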
def _pipe_append(self):
return lambda *args: None
def _init_new_solution(self, _sol_name, verbose):
from .asy import EXECUTORS
EXECUTORS.set_active(id(self._sol))
return self._sol, lambda x: x
def _init_workflows(self, inputs):
for s in self.solution.sub_sol.values():
s._visited.clear()
return super(DispatchPipe, self)._init_workflows(inputs)
def _return(self, solution):
# noinspection PyBroadException
try:
solution.result()
except Exception:
self._callback_pipe_failure()
return super(DispatchPipe, self)._return(solution)
def _callback_pipe_failure(self):
raise DispatcherError("The pipe is not respected.", sol=self.solution)
def plot(self, workflow=None, *args, **kwargs):
if workflow:
return self.solution.plot(*args, **kwargs)
return super(DispatchPipe, self).plot(workflow, *args, **kwargs)
def _get_par_args(func, exl_kw=False):
par = collections.OrderedDict()
for k, v in _get_signature(func, 0)._parameters.items():
if v.kind >= v.VAR_POSITIONAL or (exl_kw and v.default is not v.empty):
break
par[k] = v
return par
def add_function(dsp, inputs_kwargs=False, inputs_defaults=False, **kw):
"""
Decorator to add a function to a dispatcher.
:param dsp:
A dispatcher.
:type dsp: schedula.Dispatcher | schedula.blue.BlueDispatcher
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param kw:
See :func:`~schedula.dispatcher.Dispatcher.add_function`.
:return:
Decorator.
:rtype: callable
**------------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> @sh.add_function(dsp, outputs=['e'])
... @sh.add_function(dsp, False, True, outputs=['i'], inputs='ecah')
... @sh.add_function(dsp, True, outputs=['l'])
... def f(a, b, c, d=1):
... return (a + b) - c + d
>>> @sh.add_function(dsp, True, outputs=['d'])
... def g(e, i, *args, d=0):
... return e + i + d
>>> sol = dsp({'a': 1, 'b': 2, 'c': 3}); sol
Solution([('a', 1), ('b', 2), ('c', 3), ('h', 1), ('e', 1), ('i', 4),
('d', 5), ('l', 5)])
"""
def decorator(f):
dsp.add_func(
f, inputs_kwargs=inputs_kwargs, inputs_defaults=inputs_defaults,
**kw
)
return f
return decorator
@dataclass(repr=False, frozen=True, eq=False)
class inf:
"""Class to model infinite numbers for workflow distance."""
inf: float = 0
num: float = 0
def __iter__(self):
yield self.inf
yield self.num
@staticmethod
def format(val):
if not isinstance(val, tuple):
val = 0, val
return inf(*val)
def __repr__(self):
if self.inf == 0:
return str(self.num)
return 'inf(inf={}, num={})'.format(*self)
def __add__(self, other):
if isinstance(other, self.__class__):
return inf(self.inf + other.inf, self.num + other.num)
return inf(self.inf, self.num + other)
def __sub__(self, other):
other = isinstance(other, self.__class__) and other or (0, other)
return inf(*(x - y for x, y in zip(self, other)))
def __rsub__(self, other):
other = isinstance(other, self.__class__) and other or (0, other)
return inf(*(x - y for x, y in zip(other, self)))
def __mul__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x * y for x, y in zip(self, other)))
def __truediv__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x / y for x, y in zip(self, other)))
def __rtruediv__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x / y for x, y in zip(other, self)))
def __pow__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x ** y for x, y in zip(self, other)))
def __rpow__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x ** y for x, y in zip(other, self)))
def __mod__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x % y for x, y in zip(self, other)))
def __rmod__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x % y for x, y in zip(other, self)))
def __floordiv__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x // y for x, y in zip(self, other)))
def __rfloordiv__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x // y for x, y in zip(other, self)))
def __neg__(self):
return inf(*(-x for x in self))
def __pos__(self):
return inf(*(+x for x in self))
def __abs__(self):
return inf(*(map(abs, self)))
def __trunc__(self):
return inf(*(map(math.trunc, self)))
def __floor__(self):
return inf(*(map(math.floor, self)))
def __ceil__(self):
return inf(*(map(math.ceil, self)))
def __round__(self, n=None):
return inf(*(round(x, n) for x in self))
__radd__ = __add__
__rmul__ = __mul__
def __ge__(self, other):
other = isinstance(other, self.__class__) and tuple(other) or (0, other)
return tuple(self) >= other
def __gt__(self, other):
other = isinstance(other, self.__class__) and tuple(other) or (0, other)
return tuple(self) > other
def __eq__(self, other):
other = isinstance(other, self.__class__) and tuple(other) or (0, other)
return tuple(self) == other
def __le__(self, other):
other = isinstance(other, self.__class__) and tuple(other) or (0, other)
return tuple(self) <= other
def __lt__(self, other):
other = isinstance(other, self.__class__) and tuple(other) or (0, other)
return tuple(self) < other
def __ne__(self, other):
other = isinstance(other, self.__class__) and tuple(other) or (0, other)
return tuple(self) != other
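# A short sketch of `inf` arithmetic and ordering (derived from the methods
# above): the `inf` component dominates comparisons, while plain numbers are
# treated as ``inf(0, number)``::
#
#     >>> inf(1, 0) > 100
#     True
#     >>> inf(0, 3) + 2
#     5
#     >>> inf(1, 2) + inf(0, 3)
#     inf(inf=1, num=5)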
# /schedula-core-1.4.9.tar.gz/schedula-core-1.4.9/schedula/utils/dsp.py | dsp.py | pypi
import copy
from .cst import NONE
class Base:
"""Base class for dispatcher objects."""
def __new__(cls, *args, **kwargs):
return super(Base, cls).__new__(cls)
def __deepcopy__(self, memo):
cls = self.__class__
memo[id(self)] = result = cls.__new__(cls)
for k, v in self.__dict__.items():
# noinspection PyArgumentList
setattr(result, k, copy.deepcopy(v, memo))
return result
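# Illustrative sketch: this __deepcopy__ is what makes copies of Base
# subclasses (e.g. ``dsp.copy()`` or ``copy.deepcopy(dsp)``) fully
# independent objects::
#
#     >>> import copy, schedula as sh
#     >>> dsp = sh.Dispatcher(name='Dispatcher')
#     >>> copy.deepcopy(dsp) is dsp
#     False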
def web(self, depth=-1, node_data=NONE, node_function=NONE, directory=None,
sites=None, run=True, subsite_idle_timeout=600):
"""
Creates a dispatcher Flask app.
:param depth:
Depth of sub-dispatch API. If negative all levels are configured.
:type depth: int, optional
:param node_data:
Data node attributes to produce API.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to produce API.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:param subsite_idle_timeout:
Idle timeout of a debug subsite in seconds.
:type subsite_idle_timeout: int, optional
:return:
A WebMap or, if `sites` is None and `run` is True, a Site.
:rtype: ~schedula.utils.web.WebMap | ~schedula.utils.drw.Site
Example:
From a dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
You can create a web server with the following steps::
>>> print("Starting...\\n"); site = dsp.web(); site
Starting...
Site(WebMap([(Dispatcher, WebMap())]), host='localhost', ...)
>>> import requests
>>> url = '%s/%s/%s' % (site.url, dsp.name, fun.__name__)
>>> requests.post(url, json={'args': (0,)}).json()['return']
[1, -1]
>>> site.shutdown() # Remember to shutdown the server.
True
.. note::
When :class:`~schedula.utils.drw.Site` is garbage collected, the
server is shutdown automatically.
"""
options = {'node_data': node_data, 'node_function': node_function}
options = {k: v for k, v in options.items() if v is not NONE}
from .web import WebMap
from .sol import Solution
obj = self.dsp if isinstance(self, Solution) else self
webmap = WebMap()
webmap.add_items(obj, workflow=False, depth=depth, **options)
webmap.directory = directory
webmap.idle_timeout = subsite_idle_timeout
if sites is not None:
sites.add(webmap.site(view=run))
elif run:
return webmap.site(view=run)
return webmap
def form(self, depth=1, node_data=NONE, node_function=NONE, directory=None,
sites=None, run=True, view=True, get_context=NONE, get_data=NONE,
edit_on_change=NONE, pre_submit=NONE, post_submit=NONE,
subsite_idle_timeout=600):
"""
Creates a dispatcher Form Flask app.
:param depth:
Depth of sub-dispatch API. If negative all levels are configured.
:type depth: int, optional
:param node_data:
Data node attributes to produce API.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to produce API.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:param view:
Open the url site with the sys default opener.
:type view: bool, optional
:param get_context:
Function to pass extra data as form context.
:type get_context: function | dict, optional
:param get_data:
Function to initialize the formdata.
:type get_data: function | dict, optional
:param edit_on_change:
Function invoked when the form data is edited/changed.
:type edit_on_change: function | dict, optional
:param pre_submit:
Function invoked before the form data is submitted.
:type pre_submit: function | dict, optional
:param post_submit:
Function invoked after the form data is submitted.
:type post_submit: function | dict, optional
:param subsite_idle_timeout:
Idle timeout of a debug subsite in seconds.
:type subsite_idle_timeout: int, optional
:return:
A FormMap or a Site if `sites is None` and `run or view is True`.
:rtype: ~schedula.utils.form.FormMap | ~schedula.utils.drw.Site
"""
options = {'node_data': node_data, 'node_function': node_function}
options = {k: v for k, v in options.items() if v is not NONE}
from .form import FormMap
from .sol import Solution
obj = self.dsp if isinstance(self, Solution) else self
formmap = FormMap()
formmap.add_items(obj, workflow=False, depth=depth, **options)
formmap.directory = directory
formmap.idle_timeout = subsite_idle_timeout
methods = {
'get_form_context': get_context,
'get_form_data': get_data,
'get_edit_on_change_func': edit_on_change,
'get_pre_submit_func': pre_submit,
'get_post_submit_func': post_submit
}
for k, v in methods.items():
if v is not NONE:
setattr(formmap, f'_{k}', v)
if sites is not None or run or view:
site = formmap.site(view=view)
site = run and not view and site.run() or site
if sites is None:
return site
sites.add(site)
return formmap
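# Illustrative sketch (not from the original docs): with ``run=False`` and
# ``view=False`` and no ``sites`` set, the bare FormMap is returned and can
# be served later::
#
#     >>> formmap = dsp.form(run=False, view=False)  # doctest: +SKIP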
def plot(self, workflow=None, view=True, depth=-1, name=NONE, comment=NONE,
format=NONE, engine=NONE, encoding=NONE, graph_attr=NONE,
node_attr=NONE, edge_attr=NONE, body=NONE, raw_body=NONE,
node_styles=NONE, node_data=NONE, node_function=NONE,
edge_data=NONE, max_lines=NONE, max_width=NONE, directory=None,
sites=None, index=True, viz=False, short_name=None,
executor='async', render=False):
"""
Plots the Dispatcher with a graph in the DOT language with Graphviz.
:param workflow:
If True the latest solution will be plotted, otherwise the dmap.
:type workflow: bool, optional
:param view:
Open the rendered directed graph in the DOT language with the sys
default opener.
:type view: bool, optional
:param edge_data:
Edge attributes to view.
:type edge_data: tuple[str], optional
:param node_data:
Data node attributes to view.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to view.
:type node_function: tuple[str], optional
:param node_styles:
Default node styles according to graphviz node attributes.
:type node_styles: dict[str|Token, dict[str, str]]
:param depth:
Depth of sub-dispatch plots. If negative all levels are plotted.
:type depth: int, optional
:param name:
Graph name used in the source code.
:type name: str
:param comment:
Comment added to the first line of the source.
:type comment: str
:param directory:
(Sub)directory for source saving and rendering.
:type directory: str, optional
:param format:
Rendering output format ('pdf', 'png', ...).
:type format: str, optional
:param engine:
Layout command used ('dot', 'neato', ...).
:type engine: str, optional
:param encoding:
Encoding for saving the source.
:type encoding: str, optional
:param graph_attr:
Dict of (attribute, value) pairs for the graph.
:type graph_attr: dict, optional
:param node_attr:
Dict of (attribute, value) pairs set for all nodes.
:type node_attr: dict, optional
:param edge_attr:
Dict of (attribute, value) pairs set for all edges.
:type edge_attr: dict, optional
:param body:
Dict of (attribute, value) pairs to add to the graph body.
:type body: dict, optional
:param raw_body:
List of command to add to the graph body.
:type raw_body: list, optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param index:
Add the site index as first page?
:type index: bool, optional
:param max_lines:
Maximum number of lines for rendering node attributes.
:type max_lines: int, optional
:param max_width:
Maximum number of characters in a line to render node attributes.
:type max_width: int, optional
:param view:
Open the main page of the site?
:type view: bool, optional
:param render:
Render all pages statically?
:type render: bool, optional
:param viz:
Use viz.js as back-end?
:type viz: bool, optional
:param short_name:
Maximum length of the filename, if set name is hashed and reduced.
:type short_name: int, optional
:param executor:
Pool executor to render object.
:type executor: str, optional
:return:
A SiteMap or, if `render` is False, `sites` is None and `view` is True, a Site.
:rtype: schedula.utils.drw.SiteMap | schedula.utils.drw.Site
Example:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
>>> dsp.plot(view=False, graph_attr={'ratio': '1'})
SiteMap([(Dispatcher, SiteMap())])
"""
d = {
'name': name, 'comment': comment, 'format': format, 'body': body,
'engine': engine, 'encoding': encoding, 'graph_attr': graph_attr,
'node_attr': node_attr, 'edge_attr': edge_attr, 'raw_body': raw_body
}
options = {
'digraph': {k: v for k, v in d.items() if v is not NONE} or NONE,
'node_styles': node_styles,
'node_data': node_data,
'node_function': node_function,
'edge_data': edge_data,
'max_lines': max_lines, # 5
'max_width': max_width, # 200
}
options = {k: v for k, v in options.items() if v is not NONE}
from .drw import SiteMap
from .sol import Solution
if workflow is None and isinstance(self, Solution):
workflow = True
else:
workflow = workflow or False
sitemap = SiteMap()
sitemap.short_name = short_name
sitemap.directory = directory
sitemap.add_items(self, workflow=workflow, depth=depth, **options)
if render:
sitemap.render(
directory=directory, view=view, index=index, viz_js=viz,
executor=executor
)
elif view or sites is not None:
site = sitemap.site(
directory, view=view, index=index, viz_js=viz, executor=executor
)
if sites is None:
return site
sites.add(site)
return sitemap
def get_node(self, *node_ids, node_attr=NONE):
"""
Returns a sub node of a dispatcher.
:param node_ids:
A sequence of node ids or a single node id. The id order identifies
a dispatcher sub-level.
:type node_ids: str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its
attributes are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output and, if it does not exist, all its
attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
When 'description', returns the "description" of the searched node,
searching also in function or sub-dispatcher input/output
description.
When 'output', returns the data node output.
When 'default_value', returns the data node default value.
When 'value_type', returns the data node value's type.
When `None`, returns the node attributes.
:type node_attr: str, None, optional
:return:
Node attributes and its real path.
:rtype: (T, (str, ...))
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
>>> import schedula as sh
>>> sub_dsp = sh.Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> sub_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = sh.SubDispatch(sub_dsp, ['c'], output_type='dict')
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node output::
>>> dsp.get_node('Sub-dispatcher', 'c')
(4, ('Sub-dispatcher', 'c'))
>>> dsp.get_node('Sub-dispatcher', 'c', node_attr='type')
('data', ('Sub-dispatcher', 'c'))
.. dispatcher:: sub_dsp
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> sub_dsp, sub_dsp_id = dsp.get_node('Sub-dispatcher')
"""
kw = {}
from .sol import Solution
if node_attr is NONE:
node_attr = 'output' if isinstance(self, Solution) else 'auto'
if isinstance(self, Solution):
kw['solution'] = self
from .alg import get_sub_node
dsp = getattr(self, 'dsp', self)
# Returns the node.
return get_sub_node(dsp, node_ids, node_attr=node_attr, **kw)
# /schedula-core-1.4.9.tar.gz/schedula-core-1.4.9/schedula/utils/base.py | base.py | pypi
import collections
from .gen import counter
from .cst import EMPTY, NONE
from .dsp import SubDispatch, bypass, selector, stlp, parent_func, NoSub, inf
__author__ = 'Vincenzo Arcidiacono <[email protected]>'
def add_func_edges(dsp, fun_id, nodes_bunch, edge_weights=None, input=True,
data_nodes=None):
"""
Adds function node edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param fun_id:
Function node id.
:type fun_id: str
:param nodes_bunch:
A container of nodes which will be iterated through once.
:type nodes_bunch: iterable
:param edge_weights:
Edge weights.
:type edge_weights: dict, optional
:param input:
If True the nodes_bunch are input nodes, otherwise are output nodes.
:type input: bool, optional
:param data_nodes:
Data nodes to be deleted if something fail.
:type data_nodes: list
:return:
List of new data nodes.
:rtype: list
"""
# Namespace shortcut for speed.
add_edge = _add_edge_dmap_fun(dsp.dmap, edge_weights)
node, add_data = dsp.dmap.nodes, dsp.add_data
remove_nodes = dsp.dmap.remove_nodes_from
# Define an error message.
msg = 'Invalid %sput id: {} is not a data node' % ['out', 'in'][input]
i, j = ('i', 'o') if input else ('o', 'i')
data_nodes = data_nodes or [] # Update data nodes.
for u in nodes_bunch: # Iterate nodes.
try:
if node[u]['type'] != 'data': # The node is not a data node.
data_nodes.append(fun_id) # Add function id to be removed.
remove_nodes(data_nodes) # Remove function and new data nodes.
raise ValueError(msg.format(u)) # Raise error.
except KeyError:
data_nodes.append(add_data(data_id=u)) # Add new data node.
add_edge(**{i: u, j: fun_id, 'w': u}) # Add edge.
return data_nodes # Return new data nodes.
def _add_edge_dmap_fun(graph, edges_weights=None):
"""
Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: schedula.utils.graph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
add = graph.add_edge # Namespace shortcut for speed.
if edges_weights is not None:
def add_edge(i, o, w):
if w in edges_weights:
add(i, o, weight=edges_weights[w]) # Weighted edge.
else:
add(i, o) # Normal edge.
else:
# noinspection PyUnusedLocal
def add_edge(i, o, w):
add(i, o) # Normal edge.
return add_edge # Returns the function.
def _get_node(nodes, node_id, fuzzy=True):
"""
Returns a dispatcher node that matches the given node id.
:param nodes:
Dispatcher nodes.
:type nodes: dict
:param node_id:
Node id.
:type node_id: str
:return:
The dispatcher node and its id.
:rtype: (str, dict)
"""
try:
return node_id, nodes[node_id] # Return dispatcher node and its id.
except KeyError as ex:
if fuzzy:
for k in sorted(nodes, key=str):
if node_id in k:
return k, nodes[k]
raise ex
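# Illustrative sketch of the fuzzy matching: an exact key wins, otherwise the
# first (sorted) key containing the given id is returned::
#
#     >>> _get_node({'a': 1, 'b<0>': 2}, 'b')
#     ('b<0>', 2)
#     >>> _get_node({'a': 1}, 'c', fuzzy=False)  # doctest: +IGNORE_EXCEPTION_DETAIL
#     Traceback (most recent call last):
#     ...
#     KeyError: 'c'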
def _nodes(alist):
return set(sum(map(stlp, alist), ()))
def _get_sub_inp(attr, pred):
inp = attr['inputs']
return set(sum(map(stlp, (v for k, v in inp.items() if k in pred)), ()))
def _get_sub_out(attr, succ):
out = attr['outputs']
return {k for k, v in out.items() if any(i in succ for i in stlp(v))}
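# Illustrative sketch (assuming `stlp` wraps bare strings into 1-tuples):
# only the sub-dispatcher outputs mapped to a node in `succ` survive::
#
#     >>> _get_sub_out({'outputs': {'x': 'a', 'y': ('b', 'c')}}, {'b'})
#     {'y'}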
def _update_io(a, pred, succ, parent=True):
inp_k, out_k = ['inputs', 'outputs'][::int(parent) * 2 - 1]
a[inp_k] = {k: v for k, v in a[inp_k].items() if k in pred}
o = {k: tuple(j for j in stlp(v) if j in succ)
for k, v in a[out_k].items()}
a[out_k] = {k: bypass(*v) for k, v in o.items() if v}
if parent:
nds = set(a['function'].data_nodes)
_update_io(a, nds, nds, parent=False)
return set(pred) - set(a[inp_k]), set(succ) - _nodes(a[out_k].values())
def _search_node_description(dsp, node_id, what='description'):
dsp = getattr(dsp, 'dsp', dsp)
from .des import search_node_description
return search_node_description(node_id, dsp.nodes[node_id], dsp, what)
def get_sub_node(dsp, path, node_attr='auto', solution=NONE, _level=0,
_dsp_name=NONE):
"""
Returns a sub node of a dispatcher.
:param dsp:
A dispatcher object or a sub dispatch function.
:type dsp: schedula.Dispatcher | SubDispatch
:param path:
A sequence of node ids or a single node id. Each id identifies a
sub-level node.
:type path: tuple, str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its attributes
are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output and, if it does not exist, all its attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
:type node_attr: str | None
:param solution:
Parent Solution.
:type solution: schedula.utils.Solution
:param _level:
Path level.
:type _level: int
:param _dsp_name:
dsp name to show when the function raise a value error.
:type _dsp_name: str
:return:
A sub node of a dispatcher and its path.
:rtype: dict | object, tuple[str]
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> from schedula import Dispatcher
>>> s_dsp = Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> s_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = SubDispatch(s_dsp, ['c'], output_type='dict')
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node 'c' output or type::
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'))
(4, ('Sub-dispatcher', 'c'))
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'), node_attr='type')
('data', ('Sub-dispatcher', 'c'))
Get the sub-dispatcher output:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> sol, p = get_sub_node(dsp, ('Sub-dispatcher',), node_attr='output')
>>> sol, p
(Solution([('a', 3), ('b', 1), ('c', 4)]), ('Sub-dispatcher',))
"""
path = list(path)
if isinstance(dsp, SubDispatch): # Take the dispatcher obj.
dsp = dsp.dsp
if _dsp_name is NONE: # Set origin dispatcher name for warning purpose.
_dsp_name = dsp.name
if solution is NONE: # Set origin dispatcher name for warning purpose.
solution = dsp.solution
node_id = path[_level] # Node id at given level.
try:
node_id, node = _get_node(dsp.nodes, node_id) # Get dispatcher node.
path[_level] = node_id
except KeyError:
if _level == len(path) - 1 and node_attr in ('auto', 'output') \
and solution is not EMPTY:
try:
# Get dispatcher node.
node_id, node = _get_node(solution, node_id, False)
path[_level] = node_id
return node, tuple(path)
except KeyError:
pass
msg = 'Path %s does not exist in %s dispatcher.' % (path, _dsp_name)
raise ValueError(msg)
_level += 1 # Next level.
if _level < len(path): # Is not path leaf?.
try:
if node['type'] in ('function', 'dispatcher'):
try:
solution = solution.workflow.nodes[node_id]['solution']
except (KeyError, AttributeError):
solution = EMPTY
dsp = parent_func(node['function']) # Get parent function.
else:
raise KeyError
except KeyError:
msg = 'Node of path %s at level %i is not a function or ' \
'sub-dispatcher node of %s ' \
'dispatcher.' % (path, _level, _dsp_name)
raise ValueError(msg)
# Continue the node search.
return get_sub_node(dsp, path, node_attr, solution, _level, _dsp_name)
else:
data, sol = EMPTY, solution
# Return the sub node.
if node_attr == 'auto' and node['type'] != 'data': # Auto: function.
node_attr = 'function'
elif node_attr == 'auto' and sol is not EMPTY and node_id in sol:
data = sol[node_id] # Auto: data output.
elif node_attr == 'output' and node['type'] != 'data':
data = sol.workflow.nodes[node_id]['solution']
elif node_attr == 'output' and node['type'] == 'data':
data = sol[node_id]
elif node_attr == 'description': # Search and return node description.
data = _search_node_description(dsp, node_id)[0]
elif node_attr == 'value_type' and node['type'] == 'data':
# Search and return data node value's type.
data = _search_node_description(dsp, node_id, node_attr)[0]
elif node_attr == 'default_value':
data = dsp.default_values[node_id]
elif node_attr == 'dsp':
data = dsp
elif node_attr == 'sol':
data = sol
if data is EMPTY:
data = node.get(node_attr, node)
return data, tuple(path) # Return the data
class DspPipe(collections.OrderedDict):
def __repr__(self):
return "<%s instance at %s>" % (self.__class__.__name__, id(self))
def get_full_pipe(sol, base=()):
"""
Returns the full pipe of a dispatch run.
:param sol:
A Solution object.
:type sol: schedula.utils.Solution
:param base:
Base node id.
:type base: tuple[str]
:return:
Full pipe of a dispatch run.
:rtype: DspPipe
"""
pipe, i = DspPipe(), len(base)
for p in sol._pipe:
n, s = p[-1]
d = s.dsp
p = {'task': p}
if n in s._errors:
p['error'] = s._errors[n]
node_id = s.full_name + (n,)
assert base == node_id[:i], '%s != %s' % (node_id[:i], base)
n_id = node_id[i:]
n, path = d.get_node(n, node_attr=None)
if n['type'] == 'function' and 'function' in n:
try:
sub_sol = s.workflow.nodes[path[-1]]['solution']
if isinstance(parent_func(n['function']), NoSub):
node_id = ()
sp = get_full_pipe(sub_sol, base=node_id)
if sp:
p['sub_pipe'] = sp
except KeyError:
pass
pipe[bypass(*n_id)] = p
return pipe
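# Illustrative sketch (hypothetical model): the keys of the returned DspPipe
# are the executed node ids, while nested sub-dispatch runs live under the
# 'sub_pipe' key of their parent task::
#
#     >>> import schedula as sh
#     >>> dsp = sh.Dispatcher(name='Dispatcher')
#     >>> dsp.add_function('max', max, ['a', 'b'], ['c'])
#     'max'
#     >>> 'max' in get_full_pipe(dsp.dispatch({'a': 1, 'b': 2}))
#     True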
def _sort_sk_wait_in(sol):
c = counter()
def _get_sk_wait_in(s):
w = s._visited.copy()
_l = []
for n, a in s.dsp.sub_dsp_nodes.items():
if 'function' in a and s.index + a['index'] in s.sub_sol:
sub_sol = s.sub_sol[s.index + a['index']]
n_d, ll = _get_sk_wait_in(sub_sol)
_l += ll
wi = {k for k, v in sub_sol._wait_in.items() if v is True}
n_d = n_d.union(wi)
o = a['outputs']
w = w.union({v for k, v in o.items() if k in n_d})
# Nodes to be visited.
wi = {k for k, v in s._wait_in.items() if v is True}
n_d = {k for k in s.workflow.nodes if k not in w}.union(
s._visited.intersection(wi)
)
wi = n_d.intersection(wi)
_inf = inf(float('inf'), 0)
_l += [(s._meet.get(k, _inf), str(k), c(), s._wait_in, k) for k in wi]
return n_d, _l
return sorted(_get_sk_wait_in(sol)[1])
def _union_workflow(sol, node_id=None, bfs=None):
if node_id is not None:
j = bfs[node_id] = bfs.get(node_id, {NONE: set()})
else:
j = bfs or {NONE: set()}
j[NONE].update(sol.workflow.edges)
for n, a in sol.dsp.sub_dsp_nodes.items():
if 'function' in a:
s = sol.sub_sol.get(sol.index + a['index'], None)
if s:
_union_workflow(s, node_id=n, bfs=j)
return j
def _convert_bfs(bfs):
from .graph import DiGraph
g = DiGraph()
g.add_edges_from(bfs[NONE])
bfs[NONE] = g
for k, v in bfs.items():
if k is not NONE:
_convert_bfs(v)
return bfs
# /schedula-core-1.4.9.tar.gz/schedula-core-1.4.9/schedula/utils/alg.py | alg.py | pypi
from .cst import EMPTY
from ..dispatcher import Dispatcher
def _init(obj, memo=None):
return obj.register(memo=memo) if isinstance(obj, Blueprint) else obj
def _safe_call(fn, *args, memo=None, **kwargs):
return fn(
*(_init(a, memo) for a in args),
**{k: _init(v, memo=memo) for k, v in kwargs.items()}
)
class Blueprint:
"""Base Blueprint class."""
cls = Dispatcher
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.deferred = []
def __getstate__(self):
d, keys = self.__dict__, ('args', 'kwargs', 'deferred', 'cls')
return {k: d[k] for k in keys if k in d}
def _set_cls(self, cls):
self.cls = cls
return self
def register(self, obj=None, memo=None):
"""
Creates a :class:`Blueprint.cls` and calls each deferred operation.
:param obj:
The initialized object with which to call all deferred operations.
:type obj: object
:param memo:
A dictionary to cache registered Blueprints.
:type memo: dict[Blueprint,T]
:return:
The initialized object.
:rtype: Blueprint.cls | Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> blue.register()
<schedula.dispatcher.Dispatcher object at ...>
"""
if memo and self in memo:
obj = memo[self]
if obj is not None:
return obj
if obj is None:
obj = _safe_call(self.cls, *self.args, memo=memo, **self.kwargs)
for method, kwargs in self.deferred:
_safe_call(getattr(obj, method), memo=memo, **kwargs)
if memo is not None:
memo[self] = obj
return obj
def extend(self, *blues, memo=None):
"""
Extends deferred operations calling each operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,Blueprint]
:return:
Self.
:rtype: Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher()
>>> blue.extend(
... BlueDispatcher().add_func(len, ['length']),
... BlueDispatcher().add_func(callable, ['is_callable'])
... )
<schedula.utils.blue.BlueDispatcher object at ...>
"""
memo = {} if memo is None else memo
for blue in blues:
if isinstance(blue, Dispatcher):
blue = blue.blue(memo=memo)
for method, kwargs in blue.deferred:
getattr(self, method)(**kwargs)
return self
def __call__(self, *args, **kwargs):
"""Calls the registered Blueprint."""
return self.register(memo={})(*args, **kwargs)
def _parent_blue(func, memo=None, depth=-1):
from .dsp import add_args, SubDispatch, partial
memo = {} if memo is None else memo
if isinstance(func, partial):
kw = func.keywords
return func.__class__(
*(_parent_blue(v, memo, depth) for v in (func.func,) + func.args),
**{k: _parent_blue(v, memo, depth) for k, v in kw.items()}
)
elif isinstance(func, add_args):
return func.__class__(*(
_parent_blue(getattr(func, k), memo, depth) for k in func._args
))
elif isinstance(func, (Dispatcher, SubDispatch)):
return func.blue(memo, depth)
return func
class BlueDispatcher(Blueprint):
"""
Blueprint object is a blueprint of how to construct or extend a Dispatcher.
**------------------------------------------------------------------------**
**Example**:
Create a BlueDispatcher::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher(name='Dispatcher')
Add data/function/dispatcher nodes to the dispatcher map as usual::
>>> blue.add_data(data_id='a', default_value=3)
<schedula.utils.blue.BlueDispatcher object at ...>
>>> @sh.add_function(blue, True, True, outputs=['c'])
... def diff_function(a, b=2):
... return b - a
...
>>> blue.add_function(function=max, inputs=['c', 'd'], outputs=['e'])
<schedula.utils.blue.BlueDispatcher object at ...>
>>> from math import log
>>> sub_blue = sh.BlueDispatcher(name='Sub-Dispatcher')
>>> sub_blue.add_data(data_id='a', default_value=2).add_function(
... function=log, inputs=['a'], outputs=['b']
... )
<schedula.utils.blue.BlueDispatcher object at ...>
>>> blue.add_dispatcher(sub_blue, ('a',), {'b': 'f'})
<schedula.utils.blue.BlueDispatcher object at ...>
You can set the default values as usual::
>>> blue.set_default_value(data_id='c', value=1, initial_dist=6)
<schedula.utils.blue.BlueDispatcher object at ...>
You can also create a `Blueprint` out of `SubDispatchFunction` and add it to
the `Dispatcher` as follow::
>>> func = sh.SubDispatchFunction(sub_blue, 'func', ['a'], ['b'])
>>> blue.add_from_lists(fun_list=[
... dict(function=func, inputs=['a'], outputs=['d']),
... dict(function=func, inputs=['c'], outputs=['g']),
... ])
<schedula.utils.blue.BlueDispatcher object at ...>
Finally you can create the dispatcher object using the method `new`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> dsp = blue.register(memo={}); dsp
<schedula.dispatcher.Dispatcher object at ...>
Or dispatch, calling the Blueprint object:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> sol = blue({'a': 1}); sol
Solution([('a', 1), ('b', 2), ('c', 1), ('d', 0.0),
('f', 0.0), ('e', 1), ('g', 0.0)])
"""
def __init__(self, dmap=None, name='', default_values=None, raises=False,
description='', executor=False):
kwargs = {
'dmap': dmap, 'name': name, 'default_values': default_values,
'raises': raises, 'description': description, 'executor': executor
}
super(BlueDispatcher, self).__init__(**kwargs)
def add_data(self, data_id=None, default_value=EMPTY, initial_dist=0.0,
wait_inputs=False, wildcard=None, function=None, callback=None,
description=None, filters=None, await_result=None, **kwargs):
"""
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, it will be assigned automatically as 'unknown<%d>'
(an id not already in the dmap).
:type data_id: str, optional
:param default_value:
Data node default value. This will be used as input if it is not
specified as inputs in the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
return one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'data_id': data_id, 'filters': filters, 'wait_inputs': wait_inputs,
'wildcard': wildcard, 'function': function, 'callback': callback,
'initial_dist': initial_dist, 'default_value': default_value,
'description': description, 'await_result': await_result
})
self.deferred.append(('add_data', kwargs))
return self
def add_function(self, function_id=None, function=None, inputs=None,
outputs=None, input_domain=None, weight=None,
inp_weight=None, out_weight=None, description=None,
filters=None, await_domain=None, await_result=None,
**kwargs):
"""
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. In this case the dispatch algorithm doesn't pass on the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits for output results before assigning them
to the workflow. If a number is defined this is used as `timeout` for
the `Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'function_id': function_id, 'inputs': inputs, 'function': function,
'weight': weight, 'input_domain': input_domain, 'filters': filters,
'await_result': await_result, 'await_domain': await_domain,
'out_weight': out_weight, 'description': description,
'outputs': outputs, 'inp_weight': inp_weight
})
self.deferred.append(('add_function', kwargs))
return self
def add_func(self, function, outputs=None, weight=None, inputs_kwargs=False,
inputs_defaults=False, filters=None, input_domain=None,
await_domain=None, await_result=None, inp_weight=None,
out_weight=None, description=None, inputs=None,
function_id=None, **kwargs):
"""
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None it will take parameters names from function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs as the function
and returns True if the input values satisfy the domain, otherwise
False; in the latter case the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits for all input results before executing
the `input_domain` function. If a number is defined this is used as
`timeout` for the `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits for output results before assigning them
to the workflow. If a number is defined this is used as `timeout` for
the `Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'function_id': function_id, 'inputs': inputs, 'function': function,
'weight': weight, 'input_domain': input_domain, 'filters': filters,
'inputs_kwargs': inputs_kwargs, 'inputs_defaults': inputs_defaults,
'await_result': await_result, 'await_domain': await_domain,
'out_weight': out_weight, 'description': description,
'outputs': outputs, 'inp_weight': inp_weight
})
self.deferred.append(('add_func', kwargs))
return self
def add_dispatcher(self, dsp, inputs=None, outputs=None, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, inputs_prefix='', outputs_prefix='',
**kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: BlueDispatcher | Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None, it will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs of
the sub-dispatcher node and returns True if the input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits for all input results before executing
the `input_domain` function. If a number is defined this is used as
`timeout` for the `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Add a prefix to parent dispatcher inputs nodes.
:type inputs_prefix: str
:param outputs_prefix:
Add a prefix to parent dispatcher outputs nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'include_defaults': include_defaults, 'await_domain': await_domain,
'weight': weight, 'input_domain': input_domain, 'dsp_id': dsp_id,
'description': description, 'outputs': outputs, 'inputs': inputs,
'inp_weight': inp_weight, 'dsp': dsp,
'inputs_prefix': inputs_prefix, 'outputs_prefix': outputs_prefix
})
self.deferred.append(('add_dispatcher', kwargs))
return self
def add_from_lists(self, data_list=None, fun_list=None, dsp_list=None):
"""
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs = {
'data_list': data_list, 'fun_list': fun_list, 'dsp_list': dsp_list
}
self.deferred.append(('add_from_lists', kwargs))
return self
def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0):
"""
Set the default value of a data node in the dispatcher.
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node default value.
.. note:: If `EMPTY` the previous default value is removed.
:type value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kw = {'data_id': data_id, 'value': value, 'initial_dist': initial_dist}
self.deferred.append(('set_default_value', kw))
return self
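# Illustrative sketch (not part of the original blue.py source): how the
# deferred methods above are typically chained. The node ids and the
# `register()` call that materializes a concrete Dispatcher are assumptions
# based on the Blueprint interface, not code shown in this file.
#
#     blue = BlueDispatcher()
#     blue.add_data(data_id='a', default_value=1)
#     blue.add_func(lambda a, b: a + b, outputs=['c'], function_id='sum_ab')
#     blue.set_default_value('b', value=2)
#     dsp = blue.register()       # assumed: replays the deferred calls
#     sol = dsp(inputs={'b': 5})  # assumed: Dispatcher is callable; sol['c'] == 6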
# ---- blue.py | /schedula-core-1.4.9.tar.gz/schedula-core-1.4.9/schedula/utils/blue.py | source: pypi ----
class DiGraph:
__slots__ = 'nodes', 'succ', 'pred'
def __reduce__(self):
return self.__class__, (self.nodes, self.succ)
def __init__(self, nodes=None, adj=None):
if nodes is None and adj is None:
self.nodes = {}
self.succ = {}
self.pred = {}
else:
self.succ = {} if adj is None else adj
self.pred = pred = {}
nds = set()
for u, e in self.succ.items():
nds.add(u)
for v, attr in e.items():
pred[v] = d = pred.get(v, {})
d[u] = attr
nds.add(v)
self.nodes = nodes = {} if nodes is None else nodes
self.nodes.update({k: {} for k in nds if k not in nodes})
self.succ.update({k: {} for k in nodes if k not in self.succ})
self.pred.update({k: {} for k in nodes if k not in self.pred})
def __getitem__(self, item):
return self.succ[item]
@property
def adj(self):
return self.succ
def _add_node(self, n, attr):
nodes, succ, pred = self.nodes, self.succ, self.pred
if n not in nodes: # Add nodes.
succ[n] = {}
pred[n] = {}
nodes[n] = attr
elif attr:
nodes[n].update(attr)
def _remove_node(self, n):
nodes, succ, pred = self.nodes, self.succ, self.pred
for u in succ[n]:
del pred[u][n]
for u in pred[n]:
del succ[u][n]
del nodes[n], succ[n], pred[n]
def add_node(self, n, **attr):
self._add_node(n, attr)
return self
def remove_node(self, n):
self._remove_node(n)
return self
def add_nodes_from(self, nodes_for_adding):
fn = self.add_node
for n in nodes_for_adding:
try:
fn(n)
except TypeError:
fn(n[0], **n[1])
return self
def remove_nodes_from(self, nodes):
fn = self.remove_node
for n in nodes:
fn(n)
return self
def _add_edge(self, u, v, attr):
succ = self.succ
self.add_node(u)
self.add_node(v)
succ[u][v] = self.pred[v][u] = dd = succ[u].get(v, {})
dd.update(attr)
def _add_edge_fw(self, u, v, attr):
if v not in self.succ: # Add nodes.
self._add_node(v, {})
self._add_edge(u, v, attr) # Add the edge.
def add_edge_fw(self, u, v, **attr):
self._add_edge_fw(u, v, attr)
def add_edge(self, u, v, **attr):
self._add_edge(u, v, attr)
return self
def add_edges_from(self, ebunch_to_add):
fn = self.add_edge
for e in ebunch_to_add:
try:
(u, v), attr = e, {}
except ValueError:
u, v, attr = e
fn(u, v, **attr)
def remove_edge(self, u, v):
del self.succ[u][v], self.pred[v][u]
def remove_edges_from(self, ebunch):
succ, pred = self.succ, self.pred
for e in ebunch:
u, v = e[:2] # ignore edge data
del succ[u][v], pred[v][u]
@property
def edges(self):
from .dsp import stack_nested_keys
return dict(stack_nested_keys(self.succ, depth=2))
def has_edge(self, u, v):
try:
return v in self.succ[u]
except KeyError:
return False
def subgraph(self, nodes):
nodes = {n: attr.copy() for n, attr in self.nodes.items() if n in nodes}
adj = {}
for u, d in self.succ.items():
if u in nodes:
adj[u] = {v: attr.copy() for v, attr in d.items() if v in nodes}
return self.__class__(nodes, adj)
def copy(self):
nodes = {n: attr.copy() for n, attr in self.nodes.items()}
adj = {}
for u, d in self.succ.items():
adj[u] = {v: attr.copy() for v, attr in d.items()}
return self.__class__(nodes, adj)
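# Illustrative sketch (not part of the original graph.py source): basic use of
# the minimal DiGraph defined above; node names are arbitrary examples.
#
#     g = DiGraph()
#     g.add_edge('a', 'b', weight=1).add_edge('b', 'c')
#     g.succ['a']                   # {'b': {'weight': 1}}
#     g.pred['c']                   # {'b': {}}
#     sub = g.subgraph(['a', 'b'])  # induced copy with only 'a', 'b' and a->b
#     g.remove_node('b')            # also drops the incident edges in succ/pred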
# ---- graph.py | /schedula-core-1.4.9.tar.gz/schedula-core-1.4.9/schedula/utils/graph.py | source: pypi ----
import time
import logging
import collections
from .base import Base
from .imp import finalize, Future
from .cst import START, NONE, PLOT
from heapq import heappop, heappush
from .dsp import stlp, get_nested_dicts, inf
from .alg import get_full_pipe, _sort_sk_wait_in
from .exc import DispatcherError, DispatcherAbort, SkipNode, ExecutorShutdown
from .asy import async_thread, await_result, async_process, AsyncList, EXECUTORS
from .utl import select_diff, dict_diff
log = logging.getLogger(__name__)
# noinspection PyTypeChecker
class Solution(Base, collections.OrderedDict):
"""Solution class for dispatch result."""
def __hash__(self):
return id(self)
def __init__(self, dsp=None, inputs=None, outputs=None, wildcard=False,
inputs_dist=None, no_call=False,
rm_unused_nds=False, wait_in=None, no_domain=False,
_empty=False, index=(-1,), full_name=(), verbose=False,
excluded_defaults=()):
super(Solution, self).__init__()
self.index = index
self.rm_unused_nds = rm_unused_nds
self.no_call = no_call
self.no_domain = no_domain
self._wait_in = wait_in or {}
self.outputs = set(outputs or ())
self.full_name = full_name
self._pipe = []
self.parent = dsp
self.verbose = verbose
finalize(self, EXECUTORS.pop_active, id(self))
from ..dispatcher import Dispatcher
self._set_dsp_features(dsp or Dispatcher())
if not _empty:
self._set_inputs(inputs, inputs_dist, excluded_defaults)
# Set wildcards.
self._set_wildcards(*((inputs, outputs) if wildcard else ()))
# Initialize workflow params.
self._init_workflow()
def _set_dsp_features(self, dsp):
self.dsp = dsp
self.name = dsp.name
self.nodes = dsp.nodes
self.dmap = dsp.dmap
self.raises = dsp.raises
self._pred = dsp.dmap.pred
self._succ = dsp.dmap.succ
self._edge_length = dsp._edge_length
def _set_inputs(self, inputs, initial_dist, excluded_defaults=()):
excluded = set(excluded_defaults)
if self.no_call:
# Set initial values.
initial_values = dict_diff(self.dsp.default_values, excluded)
if inputs is not None: # Update initial values with input values.
initial_values.update(dict.fromkeys(inputs, NONE))
else:
# Set initial values.
initial_values = select_diff(
self.dsp.default_values, excluded, 'value'
)
if inputs is not None: # Update initial values with input values.
initial_values.update(inputs)
excluded.update(inputs or set())
# Set initial values.
initial_distances = select_diff(
self.dsp.default_values, excluded, 'initial_dist'
)
if initial_dist is not None: # Update initial distances.
initial_distances.update(initial_dist)
self.inputs, self.inputs_dist = initial_values, initial_distances
def _set_wildcards(self, inputs=None, outputs=None):
"""
Update wildcards set with the input data nodes that are also outputs.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
"""
w = self._wildcards = set() # Clean wildcards.
if outputs and inputs:
node, wi = self.nodes, self._wait_in.get # Namespace shortcut.
# Input data nodes that are in output_targets.
w_crd = {u: node[u] for u in inputs if u in outputs or wi(u, False)}
# Data nodes without the wildcard.
w.update([k for k, v in w_crd.items() if v.get('wildcard', True)])
def _update_methods(self):
self._targets = self.outputs.copy() if self.outputs else None
def check_wait_in(self, wait_in, n_id):
"""
Stops the search of the investigated node of the ArciDispatch
algorithm, until all inputs are satisfied.
:param wait_in:
If True the node is waiting input estimations.
:type wait_in: bool
:param n_id:
Data or function node id.
:type n_id: str
:return:
True if the node must still wait for some inputs (i.e., not all of its inputs are satisfied yet), otherwise False.
:rtype: bool
"""
if self._wait_in:
wait_in = self._wait_in.get(n_id, wait_in)
if wait_in:
wf = self.workflow.pred[n_id]
return not all(k in wf for k in self._pred[n_id])
return False
def _clean_set(self):
self.clear()
from .graph import DiGraph
self.workflow = DiGraph()
self._visited = set()
self._errors = collections.OrderedDict()
self.sub_sol = {self.index: self}
self.fringe = [] # Use heapq with (distance, wait, label).
self.dist = {START: inf(0, -1)}
self.seen = {START: inf(0, -1)}
self._meet = {START: inf(0, -1)}
self._pipe = []
self._update_methods()
def _init_workflow(self, inputs=None, inputs_dist=None, initial_dist=0.0,
clean=True):
# Clean previous outputs.
if clean:
self._clean_set()
self._visited.add(START) # Nodes visited by the algorithm.
# Add the starting node to the workflow graph.
self.workflow.add_node(START, type='start')
if inputs_dist is None: # Update inp dist.
inputs_dist = self.inputs_dist or {}
if inputs is None:
inputs = self.inputs
initial_dist = inf.format(initial_dist)
# Namespace shortcuts for speed.
add_value = self._add_initial_value
# Add initial values to fringe and seen.
it = sorted(((
initial_dist + inputs_dist.get(k, 0.0), str(k), k
) for k in inputs))
if self.no_call:
for d, _, k in it:
add_value(k, {}, d)
else:
for d, _, k in it:
add_value(k, {'value': inputs[k]}, d)
self._add_out_dsp_inputs()
def _close(self, cached_ids):
p = self.index[:-1]
if p:
p = self.sub_sol[p]
if self.index in cached_ids:
k = cached_ids[self.index]
else:
i = self.index[-1:]
k = next(k for k, v in p.nodes.items() if v['index'] == i)
cached_ids[self.index] = k
return all(i in p.dist for i in p.dmap[k])
return False
@staticmethod
def _update_fut_results(futs, fut, data, key):
if isinstance(fut, Future):
get_nested_dicts(futs, fut, default=list).append((data, key))
elif isinstance(fut, AsyncList):
for i, j in enumerate(fut):
if isinstance(j, Future):
get_nested_dicts(futs, j, default=list).append((fut, i))
def result(self, timeout=None):
"""
Set all asynchronous results.
:param timeout:
The number of seconds to wait for the result if the futures aren't
done. If None, then there is no limit on the wait time.
:type timeout: float
:return:
Updated Solution.
:rtype: Solution
"""
futs, ex = collections.OrderedDict(), False
_update = self._update_fut_results
for p in self._pipe:
n, s = p[-1]
if n in s:
_update(futs, s[n], s, n)
for sol in self.sub_sol.values():
for k, f in sol.items():
_update(futs, f, sol, k)
for attr in sol.workflow.nodes.values():
if 'results' in attr:
_update(futs, attr['results'], attr, 'results')
for attr in sol.workflow.edges.values():
if 'value' in attr:
_update(futs, attr['value'], attr, 'value')
if futs:
from concurrent.futures import wait as wait_fut
wait_fut(futs, timeout)
EXECUTORS.set_active(id(self), False)
exceptions = Exception, ExecutorShutdown, DispatcherAbort, SkipNode
for f, it in futs.items():
try:
r = await_result(f, 0)
for d, k in it:
d[k] = r
except exceptions as e:
for d, k in it:
if k in d:
del d[k]
if not ex:
ex = isinstance(e, SkipNode) and e.ex or e
if ex:
raise ex
return self
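# Illustrative sketch (not part of the original sol.py source): `result()` is
# what resolves the Futures left in a solution by an asynchronous run. The
# `dsp.dispatch(..., executor='async')` call is an assumption about the
# Dispatcher API (not defined in this file); the 'async' executor name comes
# from the registry in schedula.utils.asy.
#
#     sol = dsp.dispatch(inputs={'a': 1}, executor='async')
#     sol.result()  # blocks until every pending Future in the solution is set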
@staticmethod
def _dsp_closed_add(dsp_closed, s):
dsp_closed.add(s.index)
for val in s.dsp.sub_dsp_nodes.values():
_s = s.sub_sol.get(s.index + val['index'], None)
if _s:
Solution._dsp_closed_add(dsp_closed, _s)
def _run(self, stopper=None, executor=False):
# Initialized and terminated dispatcher sets.
dsp_closed, dsp_init, cached_ids = set(), {self.index}, {}
# Reset function pipe.
pipe = self._pipe = []
# A function to check if a dispatcher has been initialized.
check_dsp = dsp_init.__contains__
# Namespaces shortcuts
dsp_init_add, pipe_append = dsp_init.add, pipe.append
fringe = self.fringe
ctx = {
'no_call': self.no_call, 'stopper': stopper, 'executor': executor
}
while fringe:
# Visit the closest available node.
n = (d, _, (v, sol)) = heappop(fringe)
# Skip terminated sub-dispatcher or visited nodes.
if sol.index in dsp_closed or (v is not START and v in sol.dist):
continue
# Close sub-dispatcher solution when all outputs are satisfied.
if sol._close(cached_ids):
self._dsp_closed_add(dsp_closed, sol)
cached_ids.pop(sol.index)
continue
dsp_init_add(sol.index) # Update initialized dispatcher sets.
pipe_append(n) # Add node to the pipe.
# Set and visit nodes.
if not sol._visit_nodes(v, d, fringe, **ctx):
if self is sol:
break # Reach all targets.
else: # Terminated sub-dispatcher.
self._dsp_closed_add(dsp_closed, sol)
# See remote link node.
sol._see_remote_link_node(v, fringe, d, check_dsp)
if self.rm_unused_nds: # Remove unused func and sub-dsp nodes.
self._remove_unused_nodes()
self.fringe = None
return self # Data outputs.
def get_sub_dsp_from_workflow(self, sources, reverse=False,
add_missing=False, check_inputs=True):
"""
Returns the sub-dispatcher induced by the workflow from sources.
The induced sub-dispatcher of the dsp contains the reachable nodes and
edges evaluated with breadth-first-search on the workflow graph from
source nodes.
:param sources:
Source nodes for the breadth-first-search.
A container of nodes which will be iterated through once.
:type sources: list[str], iterable
:param reverse:
If True the workflow graph is assumed as reversed.
:type reverse: bool, optional
:param add_missing:
If True, the function's missing inputs are added to the sub-dispatcher.
:type add_missing: bool, optional
:param check_inputs:
If True the function's missing inputs are not checked.
:type check_inputs: bool, optional
:return:
A sub-dispatcher.
:rtype: schedula.dispatcher.Dispatcher
"""
sub_dsp = self.dsp.get_sub_dsp_from_workflow(
sources, self.workflow, reverse=reverse, add_missing=add_missing,
check_inputs=check_inputs
)
return sub_dsp # Return the sub-dispatcher map.
@property
def pipe(self):
"""Returns the full pipe of a dispatch run."""
return get_full_pipe(self)
def _copy_structure(self, **kwargs):
sol = self.__class__(
self.dsp, self.inputs, self.outputs, False, self.inputs_dist,
self.no_call, self.rm_unused_nds, self._wait_in, self.no_domain,
True, self.index, self.full_name, self.verbose
)
sol._clean_set()
it = ['_wildcards', 'inputs', 'inputs_dist']
it += [k for k, v in kwargs.items() if v]
for k in it:
setattr(sol, k, getattr(self, k))
return sol
def __deepcopy__(self, memo):
y = super(Solution, self).__deepcopy__(memo)
y._update_methods()
return y
def _add_out_dsp_inputs(self):
# Nodes that are out of the dispatcher nodes.
o = sorted((k for k in self.inputs if k not in self.nodes), key=str)
# Add nodes that are out of the dispatcher nodes.
if self.no_call:
self.update(collections.OrderedDict.fromkeys(o, None))
else:
self.update(collections.OrderedDict((k, self.inputs[k]) for k in o))
def check_targets(self, node_id):
"""
Terminates ArciDispatch algorithm when all targets have been
visited.
:param node_id:
Data or function node id.
:type node_id: str
:return:
True if all targets have been visited, otherwise False.
:rtype: bool
"""
try:
self._targets.remove(node_id) # Remove visited node.
return not self._targets # If no targets terminate the algorithm.
except (AttributeError, KeyError):
return False # The node is not in the targets set.
def _get_node_estimations(self, node_attr, node_id):
"""
Returns the data nodes estimations and `wait_inputs` flag.
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data node's id.
:type node_id: str
:returns:
- node estimations with minimum distance from the starting node, and
- `wait_inputs` flag
:rtype: (dict[str, T], bool)
"""
# Get data node estimations.
estimations = self.workflow.pred[node_id]
wait_in = node_attr['wait_inputs'] # Namespace shortcut.
# Check if node has multiple estimations and it is not waiting inputs.
if len(estimations) > 1 and not self._wait_in.get(node_id, wait_in):
# Namespace shortcuts.
dist, edg_length = self.dist, self._edge_length
succ = self.dmap.succ
est = [] # Estimations' heap.
for k, v in estimations.items(): # Calculate length.
if k is not START:
d = dist[k] + edg_length(succ[k][node_id], node_attr)
heappush(est, (d, k, v))
# The estimation with minimum distance from the starting node.
estimations = {est[0][1]: est[0][2]}
# Remove unused workflow edges.
self.workflow.remove_edges_from([(v[1], node_id) for v in est[1:]])
return estimations, wait_in # Return estimations and wait_inputs flag.
def _remove_wait_in(self):
ll = _sort_sk_wait_in(self)
n_d = set()
for d, _, _, w, k in ll:
if d == ll[0][0]:
w[k] = False
if w is self._wait_in:
n_d.add(k)
return n_d, ll
def _set_node_output(self, node_id, no_call, next_nds=None, **kw):
"""
Set the node outputs from node inputs.
:param node_id:
Data or function node id.
:type node_id: str
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the outputs have been evaluated correctly.
:rtype: bool
"""
# Namespace shortcuts.
node_attr = self.nodes[node_id]
node_type = node_attr['type']
if node_type == 'data': # Set data node.
return self._set_data_node_output(
node_id, node_attr, no_call, next_nds, **kw
)
elif node_type == 'function': # Set function node.
return self._set_function_node_output(
node_id, node_attr, no_call, next_nds, **kw
)
def _evaluate_function(self, args, node_id, node_attr, attr, stopper=None,
executor=False):
self._started(attr, node_id)
def _callback(is_sol, sol):
if is_sol:
attr['solution'] = sol
res = async_process(
[node_attr['function']], *args, stopper=stopper, executor=executor,
sol=self, callback=_callback, sol_name=self.full_name + (node_id,),
verbose=self.verbose
)
return res
def _check_function_domain(self, args, node_attr, node_id):
# noinspection PyUnresolvedReferences
attr = self.workflow.nodes[node_id]
if not self.no_domain and 'input_domain' in node_attr:
if node_attr.get('await_domain', True):
args = map(await_result, args)
args = [v for v in args if v is not NONE]
# noinspection PyCallingNonCallable
attr['solution_domain'] = bool(node_attr['input_domain'](*args))
if not attr['solution_domain']:
raise SkipNode
def _evaluate_node(self, args, node_attr, node_id, skip_func=False, **kw):
# noinspection PyUnresolvedReferences
attr = self.workflow.nodes[node_id]
try:
if skip_func:
value = args[0]
else:
args = [v for v in args if v is not NONE]
value = self._evaluate_function(args, node_id, node_attr, attr,
**kw)
value = self._apply_filters(value, node_id, node_attr, attr, **kw)
self._ended(attr, node_id)
if 'callback' in node_attr: # Invoke callback func of data node.
try:
# noinspection PyCallingNonCallable
node_attr['callback'](value)
except Exception as ex:
msg = "Failed CALLBACK '%s' due to:\n %s"
self._warning(msg, node_id, ex)
return value
except Exception as ex:
self._ended(attr, node_id)
# Some error occurs.
msg = "Failed DISPATCHING '%s' due to:\n %r"
self._warning(msg, node_id, ex)
raise SkipNode(ex=ex)
def _set_data_node_output(self, node_id, node_attr, no_call, next_nds=None,
**kw):
"""
Set the data node output from node estimations.
:param node_id:
Data node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimations are not used.
:type no_call: bool
:return:
If the outputs have been evaluated correctly.
:rtype: bool
"""
# Get data node estimations.
est, wait_in = self._get_node_estimations(node_attr, node_id)
if no_call:
self[node_id] = NONE # Set data output.
value = {} # Output value.
else:
if node_id is PLOT:
est = est.copy()
est[PLOT] = {'value': {'obj': self}}
sf = not (wait_in or 'function' in node_attr)
if sf:
# Data node that has just one estimation value.
args = tuple(v['value'] for v in est.values())
else:
args = ({k: v['value'] for k, v in est.items()},)
try:
# Final estimation of the node and node status.
value = async_thread(self, args, node_attr, node_id, sf, **kw)
except SkipNode:
return False
if value is not NONE: # Set data output.
self[node_id] = value
value = {'value': value} # Output value.
if next_nds:
# namespace shortcuts for speed.
add_edge_fw = self.workflow.add_edge_fw
for u in next_nds: # Set workflow.
add_edge_fw(node_id, u, **value)
else:
# List of functions.
succ_fun = []
# namespace shortcuts for speed.
n, has, sub_sol = self.nodes, self.workflow.has_edge, self.sub_sol
index, add_succ_fun = self.index, succ_fun.append
for u in self._succ[node_id]: # no_visited_in_sub_dsp.
node = n[u]
if node['type'] == 'dispatcher' and has(u, node_id):
visited = sub_sol[index + node['index']]._visited
node['inputs'][node_id] not in visited and add_succ_fun(u)
else:
add_succ_fun(u)
# Check if it has functions as outputs and wildcard condition.
if succ_fun and succ_fun[0] not in self._visited:
# namespace shortcuts for speed.
add_edge_fw = self.workflow.add_edge_fw
for u in succ_fun: # Set workflow.
add_edge_fw(node_id, u, **value)
return True  # Return that the outputs have been evaluated correctly.
def _apply_filters(self, res, node_id, node_attr, attr, stopper=None,
executor=False):
if 'filters' in node_attr:
self._started(attr, node_id)
attr['solution_filters'] = filters = [res]
# noinspection PyUnusedLocal
def _callback(is_sol, sol):
filters.append(sol)
res = async_process(
node_attr['filters'], res, stopper=stopper, executor=executor,
sol=self, sol_name=self.full_name + (node_id,),
callback=_callback
)
return res
def _started(self, attr, node_id):
if 'started' not in attr:
attr['started'] = time.time()
self._verbose(node_id, attr)
def _ended(self, attr, node_id):
if 'started' in attr:
attr['duration'] = time.time() - attr['started']
self._verbose(node_id, attr, end=True)
def _verbose(self, node_id, attr, end=False):
if self.verbose:
if end:
msg = 'Done `%s` in {:.5f} sec.'.format(attr['duration'])
else:
msg = 'Start `%s`...'
log.info(msg % '/'.join(self.full_name + (node_id,)))
def _set_function_node_output(self, node_id, node_attr, no_call,
next_nds=None, **kw):
"""
Set the function node output from node inputs.
:param node_id:
Function node id.
:type node_id: str
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict[str, T]
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the outputs have been evaluated correctly.
:rtype: bool
"""
# Namespace shortcuts for speed.
o_nds, dist = node_attr['outputs'], self.dist
# List of nodes that can still be estimated by the function node.
output_nodes = next_nds or {
k for k in self._succ[node_id] if k not in dist
}
if not output_nodes: # This function is not needed.
self.workflow.remove_node(node_id) # Remove function node.
return False
# Namespace shortcuts for speed.
add_edge_fw = self.workflow.add_edge_fw
if no_call:
for u in output_nodes: # Set workflow out.
add_edge_fw(node_id, u)
return True
args = self.workflow.pred[node_id] # List of the function's arguments.
args = [args[k]['value'] for k in node_attr['inputs']]
try:
self._check_function_domain(args, node_attr, node_id)
res = async_thread(self, args, node_attr, node_id, **kw)
# noinspection PyUnresolvedReferences
self.workflow.nodes[node_id]['results'] = res
except SkipNode:
return False
# Set workflow.
for k, v in zip(o_nds, res if len(o_nds) > 1 else [res]):
if k in output_nodes and v is not NONE:
add_edge_fw(node_id, k, value=v)
return True  # Return that the outputs have been evaluated correctly.
def _add_initial_value(self, data_id, value, initial_dist=0.0, fringe=None,
no_call=None):
"""
Add initial values updating workflow, seen, and fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node value e.g., {'value': val}.
:type value: dict[str, T]
:param initial_dist:
Data node initial distance in the ArciDispatch algorithm.
:type initial_dist: float, int, optional
:return:
True if the data has been visited, otherwise False.
:rtype: bool
"""
# Namespace shortcuts for speed.
nodes, seen, edge_weight = self.nodes, self.seen, self._edge_length
check_wait_in = self.check_wait_in
add_edge_fw = self.workflow.add_edge_fw
dsp_in = self._set_sub_dsp_node_input
update_view = self._update_meeting
if fringe is None:
fringe = self.fringe
if no_call is None:
no_call = self.no_call
if data_id not in nodes: # Data node is not in the dmap.
return False
wait_in = nodes[data_id]['wait_inputs'] # Store wait inputs flag.
index = nodes[data_id]['index'] # Store node index.
add_edge_fw(START, data_id, **value) # Add edge.
if data_id in self._wildcards: # Check if the data node has wildcard.
self._visited.add(data_id) # Update visited nodes.
self.workflow.add_node(data_id) # Add node to workflow.
for w, edge_data in self.dmap[data_id].items(): # See func node.
add_edge_fw(data_id, w, **value) # Set workflow.
node = nodes[w] # Node attributes.
# Evaluate distance.
vw_dist = initial_dist + edge_weight(edge_data, node)
update_view(w, vw_dist) # Update view distance.
# Check if all inputs are satisfied.
if node['type'] == 'dispatcher':
dsp_in(data_id, w, fringe, no_call, vw_dist)
elif check_wait_in(True, w):
continue # Pass the node.
seen[w] = vw_dist # Update distance.
if fringe is None: # SubDispatchPipe.
continue
vd = (True, w, self.index + node['index']) # Virtual distance.
heappush(fringe, (vw_dist, vd, (w, self))) # Add 2 heapq.
return True
update_view(data_id, initial_dist) # Update view distance.
if not check_wait_in(wait_in, data_id): # Check inputs.
seen[data_id] = initial_dist # Update distance.
if fringe is not None: # SubDispatchPipe.
vd = wait_in, str(data_id), self.index + index # Virtual dist.
# Add node to heapq.
heappush(fringe, (initial_dist, vd, (data_id, self)))
return True
return False
def _update_meeting(self, node_id, dist):
view = self._meet
if node_id in self._meet:
view[node_id] = max(dist, view[node_id])
else:
view[node_id] = dist
def _visit_nodes(self, node_id, dist, fringe, no_call=False,
**kw):
"""
Visits a node, updating workflow, seen, and fringe.
:param node_id:
Node id to visit.
:type node_id: str
:param dist:
Distance from the starting node.
:type dist: float, int
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param no_call:
If True data node estimation function is not used.
:type no_call: bool, optional
:return:
False if all dispatcher targets have been reached, otherwise True.
:rtype: bool
"""
# Namespace shortcuts.
wf_has_edge = self.workflow.has_edge
edge_weight = self._edge_length
nodes = self.nodes
self.dist[node_id] = dist # Set minimum dist.
self._visited.add(node_id) # Update visited nodes.
if not self._set_node_output(node_id, no_call, **kw): # Set output.
# Some error occurs or inputs are not in the function domain.
return True
if self.check_targets(node_id): # Check if the targets are satisfied.
return False # Stop loop.
for w, e_data in self.dmap[node_id].items():
if not wf_has_edge(node_id, w): # Check wildcard option.
continue
node = nodes[w] # Get node attributes.
vw_d = dist + edge_weight(e_data, node) # Evaluate dist.
if node['type'] == 'dispatcher':
self._set_sub_dsp_node_input(node_id, w, fringe, no_call, vw_d)
else: # See the node.
self._see_node(w, fringe, vw_d)
return True
def _see_node(self, node_id, fringe, dist, w_wait_in=0):
"""
See a node, updating seen and fringe.
:param node_id:
Node id to see.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param w_wait_in:
Additional weight for sorting correctly the nodes in the fringe.
:type w_wait_in: int, float
:return:
True if the node is visible, otherwise False.
:rtype: bool
"""
# Namespace shortcuts.
seen, dists = self.seen, self.dist
wait_in = self.nodes[node_id]['wait_inputs'] # Wait inputs flag.
self._update_meeting(node_id, dist) # Update view distance.
# Check if inputs are satisfied.
if self.check_wait_in(wait_in, node_id):
pass # Pass the node
elif node_id in dists:  # The node has already been estimated.
if dist < dists[node_id]: # Error for negative paths.
raise DispatcherError('Contradictory paths found: '
'negative weights?', sol=self)
elif node_id not in seen or dist < seen[node_id]: # Check min dist.
seen[node_id] = dist # Update dist.
if fringe is not None: # SubDispatchPipe.
index = self.nodes[node_id]['index'] # Node index.
# Virtual distance.
vd = w_wait_in + int(wait_in), str(node_id), self.index + index
# Add to heapq.
heappush(fringe, (dist, vd, (node_id, self)))
return True # The node is visible.
return False # The node is not visible.
def _remove_unused_nodes(self):
"""
Removes unused function and sub-dispatcher nodes.
"""
# Namespace shortcuts.
nodes, wf_remove_node = self.nodes, self.workflow.remove_node
add_visited, succ = self._visited.add, self.workflow.succ
# Remove unused function and sub-dispatcher nodes.
for n in [k for k in self.workflow.pred if k not in self._visited]:
node_type = nodes[n]['type'] # Node type.
if node_type == 'data':
continue # Skip data node.
if node_type == 'dispatcher' and succ[n]:
add_visited(n) # Add to visited nodes.
i = self.index + nodes[n]['index']
self.sub_sol[i]._remove_unused_nodes()
continue # Skip sub-dispatcher node with outputs.
wf_remove_node(n) # Remove unused node.
def _init_sub_dsp(self, dsp, fringe, outputs, no_call, initial_dist, index,
full_name, excluded_defaults):
"""
Initialize the dispatcher as sub-dispatcher and update the fringe.
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
"""
# Initialize as sub-dispatcher.
sol = self.__class__(
dsp, {}, outputs, False, None, no_call, False,
wait_in=self._wait_in.get(dsp, None), index=self.index + index,
full_name=full_name, verbose=self.verbose,
excluded_defaults=excluded_defaults
)
sol.sub_sol = self.sub_sol
for f in sol.fringe or (): # Update the fringe.
item = (initial_dist + f[0], (2,) + f[1][1:], f[-1])
heappush(fringe, item)
return sol
def _see_remote_link_node(self, node_id, fringe=None, dist=None,
check_dsp=lambda x: True):
"""
See data remote links of the node (set output to remote links).
:param node_id:
Node id.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param check_dsp:
A function to check if the remote dispatcher is ok.
:type check_dsp: (Dispatcher) -> bool
"""
# Get `p_id` if `node_id` is data node.
p_id = self.nodes[node_id]['type'] == 'data' and self.index[:-1]
if p_id and check_dsp(p_id) and node_id in self:
# Get parent solution and child index.
sol, c_i = self.sub_sol[p_id], self.index[-1:]
for dsp_id, n in sol.dsp.nodes.items():
if n['index'] == c_i and node_id in n.get('outputs', {}):
value = self[node_id] # Get data output.
visited = sol._visited
has_edge = sol.workflow.has_edge
pass_result = sol.workflow.add_edge_fw
see_node = sol._see_node
for n_id in stlp(n['outputs'][node_id]):
# Node has been visited or inputs do not coincide with outputs.
if not (n_id in visited or has_edge(n_id, dsp_id)):
pass_result(dsp_id, n_id, value=value) # To child.
if fringe is not None:
see_node(n_id, fringe, dist, w_wait_in=2)
break
def _check_sub_dsp_domain(self, dsp_id, node, pred, kw):
if 'input_domain' in node and not (self.no_domain or self.no_call):
try:
adict = {k: v['value'] for k, v in pred.items()}
if node.get('await_domain', True):
adict = {k: await_result(v) for k, v in adict.items()}
kw['solution_domain'] = s = bool(node['input_domain'](adict))
return s
except Exception as ex: # Some error occurs.
msg = "Failed SUB-DSP DOMAIN '%s' due to:\n %r"
self._warning(msg, dsp_id, ex)
return False
def _set_sub_dsp_node_input(self, node_id, dsp_id, fringe, no_call,
initial_dist):
"""
Initializes the sub-dispatcher and sets its inputs.
:param node_id:
Input node to set.
:type node_id: str
:param dsp_id:
Sub-dispatcher node id.
:type dsp_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:param initial_dist:
Distance to reach the sub-dispatcher node.
:type initial_dist: int, float
:return:
If the inputs have been set.
:rtype: bool
"""
# Namespace shortcuts.
node = self.nodes[dsp_id]
dsp, pred = node['function'], self.workflow.pred[dsp_id]
distances, sub_sol = self.dist, self.sub_sol
iv_nodes = [node_id]  # Nodes to be added as initial values.
self._meet[dsp_id] = initial_dist # Set view distance.
# Check if inputs are satisfied.
if self.check_wait_in(node['wait_inputs'], dsp_id):
return False # Pass the node
if dsp_id not in distances:
kw = {}
dom = self._check_sub_dsp_domain(dsp_id, node, pred, kw)
if dom is True:
iv_nodes = pred # Args respect the domain.
elif dom is False:
return False
# Initialize the sub-dispatcher.
sub_sol[self.index + node['index']] = sol = self._init_sub_dsp(
dsp, fringe, node['outputs'], no_call, initial_dist,
node['index'], self.full_name + (dsp_id,),
set(node.get('inputs', {}).values())
)
self.workflow.add_node(dsp_id, solution=sol, **kw)
distances[dsp_id] = initial_dist # Update min distance.
else:
sol = sub_sol[self.index + node['index']]
for n_id in iv_nodes:
# Namespace shortcuts.
val = pred[n_id]
for n in stlp(node['inputs'][n_id]):
# Add initial value to the sub-dispatcher.
sol._add_initial_value(
n, val, initial_dist, fringe, no_call
)
return True
def _warning(self, msg, node_id, ex, *args, **kwargs):
"""
Handles the error messages.
.. note:: If `self.raises` is True the dispatcher interrupts the dispatch
when an error occurs, otherwise if `raises != ''` it logs a warning.
"""
raises = self.raises(ex) if callable(self.raises) else self.raises
if raises and isinstance(ex, DispatcherError):
ex.update(self)
raise ex
self._errors[node_id] = msg % ((node_id, ex) + args)
node_id = '/'.join(self.full_name + (node_id,))
if raises:
raise DispatcherError(
msg, node_id, ex, *args, sol=self, ex=ex, **kwargs
)
elif raises != '':
kwargs['exc_info'] = kwargs.get('exc_info', 1)
try:
log.error(msg, node_id, ex, *args, **kwargs)
except TypeError: # MicroPython.
kwargs.pop('exc_info')
log.error(msg, node_id, ex, *args, **kwargs)
# ---- sol.py | /schedula-core-1.4.9.tar.gz/schedula-core-1.4.9/schedula/utils/sol.py | source: pypi ----
from ..imp import Future
from ..cst import EMPTY
from .factory import ExecutorFactory
from ..exc import DispatcherError, DispatcherAbort
from ..dsp import parent_func, SubDispatch, NoSub, run_model
def _sync_executor():
from .executors import PoolExecutor, Executor
# noinspection PyTypeChecker
return PoolExecutor(Executor())
def _async_executor():
from .executors import PoolExecutor, ThreadExecutor
return PoolExecutor(ThreadExecutor())
def _parallel_executor(*args, **kwargs):
from .executors import PoolExecutor, ThreadExecutor, ProcessExecutor
return PoolExecutor(ThreadExecutor(), ProcessExecutor(*args, **kwargs))
def _parallel_pool_executor(*args, **kwargs):
from .executors import PoolExecutor, ThreadExecutor, ProcessPoolExecutor
return PoolExecutor(
ThreadExecutor(), ProcessPoolExecutor(*args, **kwargs), False
)
def _parallel_dispatch_executor():
from .executors import PoolExecutor, ThreadExecutor, ProcessExecutor
return PoolExecutor(ThreadExecutor(), ProcessExecutor(), True)
EXECUTORS = ExecutorFactory({
'sync': _sync_executor,
'async': _async_executor,
'parallel': _parallel_executor,
'parallel-pool': _parallel_pool_executor,
'parallel-dispatch': _parallel_dispatch_executor
})
def register_executor(name, init, executors=None):
"""
Register a new executor type.
:param name:
Executor name.
:type name: str
:param init:
Function to initialize the executor.
:type init: callable
:param executors:
Executor factory.
:type executors: ExecutorFactory
"""
if executors is None:
executors = EXECUTORS
executors[name] = init
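# Illustrative sketch (not part of the original source): registering an extra
# executor name that reuses one of the factory functions defined above.
#
#     register_executor('sync-alias', _sync_executor)
#     # 'sync-alias' can now be requested wherever an executor name is
#     # accepted, exactly like the built-in 'sync', 'async', 'parallel', ...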
def shutdown_executor(name=EMPTY, sol_id=EMPTY, wait=True, executors=None):
"""
Clean-up the resources associated with the Executor.
:param name:
Executor name.
:type name: str
:param sol_id:
Solution id.
:type sol_id: int
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executor have been
reclaimed.
:type wait: bool
:param executors:
Executor factory.
:type executors: ExecutorFactory
:return:
Shutdown pool executor.
:rtype: dict[concurrent.futures.Future,Thread|Process]
"""
if executors is None:
executors = EXECUTORS
return executors.shutdown_executor(name, sol_id, wait)
def shutdown_executors(wait=True, executors=None):
"""
Clean-up the resources of all initialized executors.
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executors have been
reclaimed.
:type wait: bool
:param executors:
Executor factory.
:type executors: ExecutorFactory
:return:
Shutdown pool executor.
:rtype: dict[str,dict]
"""
return shutdown_executor(wait=wait, executors=executors)
def _process_funcs(
exe_id, funcs, executor, *args, stopper=None, sol_name=None,
verbose=False, **kw):
from ...dispatcher import Dispatcher
res, sid = [], exe_id[-1]
for fn in funcs:
if stopper and stopper.is_set():
raise DispatcherAbort
pfunc, r = parent_func(fn), {}
if isinstance(pfunc, type) and issubclass(pfunc, run_model):
fn = fn(*args)
args, kw = (), {}
pfunc = fn.func
if isinstance(pfunc, (SubDispatch, Dispatcher)):
try:
if isinstance(pfunc, Dispatcher):
r['res'] = fn(*args, stopper=stopper, executor=executor,
sol_name=sol_name, verbose=verbose, **kw)
else:
r['res'] = fn(*args, _stopper=stopper, _executor=executor,
_sol_name=sol_name, _verbose=verbose, **kw)
except DispatcherError as ex:
if isinstance(pfunc, NoSub):
raise ex
r['err'] = ex
if not isinstance(pfunc, NoSub):
r['sol'] = pfunc.solution
else:
e = EXECUTORS.get_executor(exe_id)
r['res'] = e.process(sid, fn, *args, **kw) if e else fn(*args, **kw)
res.append(r)
if 'err' in r:
break
args, kw = (r['res'],), {}
return res
def async_process(funcs, *args, executor=False, sol=None, callback=None, **kw):
"""
Execute `func(*args)` in an asynchronous parallel process.
:param funcs:
Functions to be executed.
:type funcs: list[callable]
:param args:
Arguments to be passed to first function call.
:type args: tuple
:param executor:
Pool executor to run the function.
:type executor: str | bool
:param sol:
Parent solution.
:type sol: schedula.utils.sol.Solution
:param callback:
Callback function to be called after all function execution.
:type callback: callable
:param kw:
Keywords to be passed to first function call.
:type kw: dict
:return:
Functions result.
:rtype: object
"""
exe_id = EXECUTORS.executor_id(executor, sol)
exe = EXECUTORS.get_executor(exe_id)
res = (exe and exe.process_funcs or _process_funcs)(
exe_id, funcs, executor, *args, **kw
)
for r in res:
callback and callback('sol' in r, r.get('sol', r.get('res')))
if 'err' in r:
raise r['err']
return res[-1]['res']
def _async_eval(sol, args, node_attr, *a, **kw):
try:
if node_attr['type'] == 'data' and (
node_attr['wait_inputs'] or 'function' in node_attr):
args = {k: await_result(v) for k, v in args[0].items()},
else:
args = tuple(map(await_result, args))
except BaseException as ex:
raise ex
else:
return sol._evaluate_node(args, node_attr, *a, **kw)
def _await_result(result, timeout, sol, node_id):
from ..exc import SkipNode
try:
return await_result(result, None if timeout is True else timeout)
except Exception as ex:
sol._ended(sol.workflow.nodes[node_id], node_id)
# Some error occurs.
msg = "Failed DISPATCHING '%s' due to:\n %r"
sol._warning(msg, node_id, ex)
raise SkipNode(ex=ex)
def async_thread(sol, args, node_attr, node_id, *a, **kw):
"""
Execute `sol._evaluate_node` in an asynchronous thread.
:param sol:
Solution to be updated.
:type sol: schedula.utils.sol.Solution
:param args:
Arguments to be passed to node calls.
:type args: tuple
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data or function node id.
:type node_id: str
:param a:
Extra args to invoke `sol._evaluate_node`.
:type a: tuple
:param kw:
Extra kwargs to invoke `sol._evaluate_node`.
:type kw: dict
:return:
Function result.
:rtype: concurrent.futures.Future | AsyncList
"""
name = kw.get('executor', False)
exe_id = EXECUTORS.executor_id(name, sol)
sid = exe_id[-1]
executor = EXECUTORS.get_executor(exe_id)
if not executor:
return sol._evaluate_node(args, node_attr, node_id, *a, **kw)
futures = args
if node_attr['type'] == 'data' and (
node_attr['wait_inputs'] or 'function' in node_attr):
futures = args[0].values()
futures = {v for v in futures if isinstance(v, Future)}
def _submit():
return EXECUTORS.get_executor(exe_id).thread(
sid, _async_eval, sol, args, node_attr, node_id, *a, **kw
)
if futures: # Chain results.
result = executor.add_future(sid, Future())
from .executors import _safe_set_exception, _safe_set_result
def _set_res(fut):
try:
_safe_set_result(result, fut.result())
except BaseException as ex:
_safe_set_exception(result, ex)
def _submit_task(fut=None):
futures.discard(fut)
if not (futures or result.done()):
_submit().add_done_callback(_set_res)
for f in list(futures):
f.add_done_callback(_submit_task)
else:
result = _submit()
timeout = node_attr.get('await_result', False)
if timeout is not False:
return _await_result(result, timeout, sol, node_id)
n = len(node_attr.get('outputs', []))
if n > 1:
result_list = AsyncList(future=result, n=n)
for r in result_list:
executor.add_future(sid, r)
return result_list
return result
class AsyncList(list):
"""List of asynchronous results."""
def __init__(self, *, future=None, n=1):
super(AsyncList, self).__init__()
self.extend(Future() for _ in range(n))
future.add_done_callback(self)
def __call__(self, future):
from .executors import _safe_set_result, _safe_set_exception
try:
res = tuple(future.result())
assert len(self) <= len(res)
except BaseException as ex:
for fut in self:
_safe_set_exception(fut, ex)
else:
for fut, value in zip(self, res):
_safe_set_result(fut, value)
return future
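# Illustrative sketch (not part of the original source): AsyncList fans a
# single Future holding an iterable result out into one Future per output.
#
#     parent = Future()
#     outs = AsyncList(future=parent, n=2)  # two pending child futures
#     parent.set_result((10, 20))           # resolves both children
#     [f.result() for f in outs]            # -> [10, 20]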
def await_result(obj, timeout=None):
"""
Return the result of a `Future` object.
:param obj:
Value object.
:type obj: concurrent.futures.Future | object
:param timeout:
The number of seconds to wait for the result if the future isn't done.
If None, then there is no limit on the wait time.
:type timeout: int
:return:
Result.
:rtype: object
Example::
>>> from concurrent.futures import Future
>>> fut = Future()
>>> fut.set_result(3)
>>> await_result(fut), await_result(4)
(3, 4)
"""
return obj.result(timeout) if isinstance(obj, Future) else obj
def atexit_register(*args, **kwargs):
try:
from atexit import register as _register
except ImportError:
try:
from atexit import atexit_register as _register
except ImportError: # MicroPython.
_register = None
if _register is not None:
_register(*args, **kwargs)
return _register
atexit_register(shutdown_executors, wait=False)
# ---- __init__.py | /schedula-core-1.4.9.tar.gz/schedula-core-1.4.9/schedula/utils/asy/__init__.py | source: pypi ----
from typing import Any, Optional
import numbers
import re
from bisect import bisect, bisect_left
from collections.abc import Iterable
from datetime import datetime, timedelta, date
from calendar import monthrange
DAYNAMES = "sun", "mon", "tue", "wed", "thu", "fri", "sat"
WEEKDAYS = dict(zip(DAYNAMES, range(7)))
CRON_PATTERN_INVALID = """\
Invalid crontab pattern. Valid range is {min}-{max}. \
'{value}' was found.\
"""
CRON_INVALID_TYPE = """\
Argument cronspec needs to be of any of the following types: \
int, str, or an iterable type. {type!r} was given.\
"""
CRON_REPR = """\
<crontab: {0._orig_minute} {0._orig_hour} {0._orig_day_of_week} \
{0._orig_day_of_month} {0._orig_month_of_year} (m/h/d/dM/MY)>\
"""
class AttributeDict(dict):
"""Dict subclass with attribute access."""
def __getattr__(self, k):
# type: (str) -> Any
"""`d.key -> d[key]`."""
try:
return self[k]
except KeyError:
raise AttributeError(
f"{type(self).__name__!r} object has no attribute {k!r}"
)
def __setattr__(self, key, value):
# type: (str, Any) -> None
"""`d[key] = value -> d.key = value`."""
self[key] = value
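# Illustrative sketch (not part of the original source): attribute-style access
# provided by AttributeDict; the key names are arbitrary examples.
#
#     d = AttributeDict(host='localhost')
#     d.host         # 'localhost', same as d['host']
#     d.port = 8080  # stored as d['port']
#     d.missing      # raises AttributeError instead of KeyError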
def cronfield(s):
return "*" if s is None else s
def dictfilter(d=None, **kw):
"""Remove all keys from dict ``d`` whose value is :const:`None`."""
d = kw if d is None else (dict(d, **kw) if kw else d)
return {k: v for k, v in d.items() if v is not None}
class ParseException(Exception):
"""Raised by :class:`crontab_parser` when the input can't be parsed."""
def weekday(name):
"""Return the position of a weekday: 0 - 7, where 0 is Sunday.
Example:
>>> weekday('sunday'), weekday('sun'), weekday('mon')
(0, 0, 1)
"""
abbreviation = name[0:3].lower()
try:
return WEEKDAYS[abbreviation]
except KeyError:
# Show original day name in exception, instead of abbr.
raise KeyError(name)
class CrontabParser:
"""Parser for Crontab expressions.
Any expression of the form 'groups'
(see BNF grammar below) is accepted and expanded to a set of numbers.
These numbers represent the units of time that the Crontab needs to
run on:
.. code-block:: bnf
digit :: '0'..'9'
dow :: 'a'..'z'
number :: digit+ | dow+
steps :: number
range :: number ( '-' number ) ?
numspec :: '*' | range
expr :: numspec ( '/' steps ) ?
groups :: expr ( ',' expr ) *
The parser is a general purpose one, useful for parsing hours, minutes and
day of week expressions. Example usage:
.. code-block:: pycon
>>> minutes = CrontabParser(60).parse('*/15')
[0, 15, 30, 45]
>>> hours = CrontabParser(24).parse('*/4')
[0, 4, 8, 12, 16, 20]
>>> day_of_week = CrontabParser(7).parse('*')
[0, 1, 2, 3, 4, 5, 6]
It can also parse day of month and month of year expressions if initialized
with a minimum of 1. Example usage:
.. code-block:: pycon
>>> days_of_month = CrontabParser(31, 1).parse('*/3')
[1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
>>> months_of_year = CrontabParser(12, 1).parse('*/2')
[1, 3, 5, 7, 9, 11]
>>> months_of_year = CrontabParser(12, 1).parse('2-12/2')
[2, 4, 6, 8, 10, 12]
The maximum possible expanded value returned is found by the formula:
:math:`max_ + min_ - 1`
"""
ParseException = ParseException
_range = r"(\w+?)-(\w+)"
_steps = r"/(\w+)?"
_star = r"\*"
def __init__(self, max_=60, min_=0):
self.max_ = max_
self.min_ = min_
self.pats = (
(re.compile(self._range + self._steps), self._range_steps),
(re.compile(self._range), self._expand_range),
(re.compile(self._star + self._steps), self._star_steps),
(re.compile("^" + self._star + "$"), self._expand_star),
)
def parse(self, spec):
acc = set()
for part in spec.split(","):
if not part:
raise self.ParseException("empty part")
acc |= set(self._parse_part(part))
return acc
def _parse_part(self, part):
for regex, handler in self.pats:
m = regex.match(part)
if m:
return handler(m.groups())
return self._expand_range((part,))
def _expand_range(self, toks):
fr = self._expand_number(toks[0])
if len(toks) > 1:
to = self._expand_number(toks[1])
if to < fr: # Wrap around max_ if necessary
return list(range(fr, self.min_ + self.max_)) + list(
range(self.min_, to + 1)
)
return list(range(fr, to + 1))
return [fr]
def _range_steps(self, toks):
if len(toks) != 3 or not toks[2]:
raise self.ParseException("empty filter")
return self._expand_range(toks[:2])[:: int(toks[2])]
def _star_steps(self, toks):
if not toks or not toks[0]:
raise self.ParseException("empty filter")
return self._expand_star()[:: int(toks[0])]
def _expand_star(self, *args):
return list(range(self.min_, self.max_ + self.min_))
def _expand_number(self, s):
if isinstance(s, str) and s[0] == "-":
raise self.ParseException("negative numbers not supported")
try:
i = int(s)
except ValueError:
try:
i = weekday(s)
except KeyError:
raise ValueError(f"Invalid weekday literal {s!r}.")
max_val = self.min_ + self.max_ - 1
if i > max_val:
raise ValueError(f"Invalid end range: {i} > {max_val}.")
if i < self.min_:
raise ValueError(f"Invalid beginning range: {i} < {self.min_}.")
return i
class Crontab:
"""Crontab schedule.
Like a :manpage:`cron(5)`-job, you can specify units of time of when
you'd like the job to execute. It's a reasonably complete
implementation of :command:`cron`'s features, so it should cover a fair
range of scheduling needs.
You can specify a minute, an hour, a day of the week, a day of the
month, and/or a month in the year in any of the following formats:
.. attribute:: minute
- A (list of) integers from 0-59 that represent the minutes of
an hour of when execution should occur; or
- A string representing a Crontab pattern. This may get pretty
advanced, like ``minute='*/15'`` (for every quarter) or
``minute='1,13,30-45,50-59/2'``.
.. attribute:: hour
- A (list of) integers from 0-23 that represent the hours of
a day of when execution should occur; or
- A string representing a Crontab pattern. This may get pretty
advanced, like ``hour='*/3'`` (for every three hours) or
``hour='0,8-17/2'`` (at midnight, and every two hours during
office hours).
.. attribute:: day_of_week
- A (list of) integers from 0-6, where Sunday = 0 and Saturday =
6, that represent the days of a week that execution should
occur.
- A string representing a Crontab pattern. This may get pretty
advanced, like ``day_of_week='mon-fri'`` (for weekdays only).
(Beware that ``day_of_week='*/2'`` does not literally mean
'every two days', but 'every day that is divisible by two'!)
.. attribute:: day_of_month
- A (list of) integers from 1-31 that represents the days of the
month that execution should occur.
- A string representing a Crontab pattern. This may get pretty
advanced, such as ``day_of_month='2-30/2'`` (for every even
numbered day) or ``day_of_month='1-7,15-21'`` (for the first and
third weeks of the month).
.. attribute:: month_of_year
- A (list of) integers from 1-12 that represents the months of
the year during which execution can occur.
- A string representing a Crontab pattern. This may get pretty
advanced, such as ``month_of_year='*/3'`` (for the first month
of every quarter) or ``month_of_year='2-12/2'`` (for every even
numbered month).
It's important to realize that any day on which execution should
occur must be represented by entries in all three of the day and
month attributes. For example, if ``day_of_week`` is 0 and
``day_of_month`` is every seventh day, only months that begin
on Sunday and are also in the ``month_of_year`` attribute will have
    execution events. Likewise, ``day_of_week`` set to 1 combined with
    ``day_of_month`` set to '1-7,15-21' means every first and third Monday of
    every month present in ``month_of_year``.
"""
def __init__(
self,
minute="*",
hour="*",
day_of_week="*",
day_of_month="*",
month_of_year="*",
tz: Optional[str] = None,
**kwargs,
):
self._orig_minute = cronfield(minute)
self._orig_hour = cronfield(hour)
self._orig_day_of_week = cronfield(day_of_week)
self._orig_day_of_month = cronfield(day_of_month)
self._orig_month_of_year = cronfield(month_of_year)
self._orig_kwargs = kwargs
self.hour = self._expand_cronspec(hour, 24)
self.minute = self._expand_cronspec(minute, 60)
self.day_of_week = self._expand_cronspec(day_of_week, 7)
self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
self.tz = None
if tz is not None:
import pytz
if isinstance(tz, str):
self.tz = pytz.timezone(tz) # type: ignore
elif isinstance(tz, pytz.BaseTzInfo):
self.tz = tz
else:
raise ValueError(
"Timezone must be string or pytz.timezone object"
)
@classmethod
def from_expression(
cls, crontab_expression: str, tz: Optional[str] = None
) -> "Crontab":
items = crontab_expression.split(" ")
if len(items) != 5:
raise ValueError(
"Invalid number of components in crontab expression"
)
return cls(
minute=items[0],
hour=items[1],
day_of_week=items[2],
day_of_month=items[3],
month_of_year=items[4],
tz=tz,
)
@staticmethod
def _expand_cronspec(cronspec, max_, min_=0):
"""Expand cron specification.
Takes the given cronspec argument in one of the forms:
.. code-block:: text
int (like 7)
str (like '3-5,*/15', '*', or 'monday')
            set (like {0,15,30,45})
            list (like [8-17])
        And converts it to an (expanded) set representing all time unit
values on which the Crontab triggers. Only in case of the base
type being :class:`str`, parsing occurs. (It's fast and
happens only once for each Crontab instance, so there's no
significant performance overhead involved.)
For the other base types, merely Python type conversions happen.
The argument ``max_`` is needed to determine the expansion of
``*`` and ranges. The argument ``min_`` is needed to determine
the expansion of ``*`` and ranges for 1-based cronspecs, such as
day of month or month of year. The default is sufficient for minute,
hour, and day of week.
"""
if isinstance(cronspec, numbers.Integral):
result = {cronspec}
elif isinstance(cronspec, str):
result = CrontabParser(max_, min_).parse(cronspec)
elif isinstance(cronspec, set):
result = cronspec
elif isinstance(cronspec, Iterable):
result = set(cronspec)
else:
raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec)))
        # ensure the result does not precede the min or exceed the max
for number in result:
if number >= max_ + min_ or number < min_:
raise ValueError(
CRON_PATTERN_INVALID.format(
min=min_, max=max_ - 1 + min_, value=number
)
)
return result
def _delta_to_next(self, last_run_at, next_hour, next_minute):
"""Find next delta.
Takes a :class:`~datetime.datetime` of last run, next minute and hour,
and returns a :class:`~celery.utils.time.ffwd` for the next
scheduled day and time.
Only called when ``day_of_month`` and/or ``month_of_year``
cronspec is specified to further limit scheduled job execution.
"""
datedata = AttributeDict(year=last_run_at.year)
days_of_month = sorted(self.day_of_month)
months_of_year = sorted(self.month_of_year)
def day_out_of_range(year, month, day):
try:
datetime(year=year, month=month, day=day)
except ValueError:
return True
return False
def is_before_last_run(year, month, day):
return (
self._check_awareness(datetime(year, month, day)) < last_run_at
)
def roll_over():
for _ in range(2000):
flag = (
datedata.dom == len(days_of_month)
or day_out_of_range(
datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom],
)
or (
is_before_last_run(
datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom],
)
)
)
if flag:
datedata.dom = 0
datedata.moy += 1
if datedata.moy == len(months_of_year):
datedata.moy = 0
datedata.year += 1
else:
break
else:
# Tried 2000 times, we're most likely in an infinite loop
raise RuntimeError(
"unable to rollover, "
"time specification is probably invalid"
)
if last_run_at.month in self.month_of_year:
datedata.dom = bisect(days_of_month, last_run_at.day)
datedata.moy = bisect_left(months_of_year, last_run_at.month)
else:
datedata.dom = 0
datedata.moy = bisect(months_of_year, last_run_at.month)
if datedata.moy == len(months_of_year):
datedata.moy = 0
roll_over()
while 1:
th = datetime(
year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom],
)
if th.isoweekday() % 7 in self.day_of_week:
break
datedata.dom += 1
roll_over()
return Ffwd(
year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom],
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0,
)
def __repr__(self):
return CRON_REPR.format(self)
def __reduce__(self):
return (
self.__class__,
(
self._orig_minute,
self._orig_hour,
self._orig_day_of_week,
self._orig_day_of_month,
self._orig_month_of_year,
),
self._orig_kwargs,
)
def __setstate__(self, state):
# Calling super's init because the kwargs aren't necessarily passed in
# the same form as they are stored by the superclass
super().__init__(**state)
def now(self) -> datetime:
if self.tz is None:
return datetime.now()
import pytz
utcnow = datetime.now(pytz.UTC)
return utcnow.astimezone(self.tz)
def _check_awareness(self, dt: datetime) -> datetime:
is_naive = dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
if is_naive:
if self.tz is not None:
                raise ValueError(
                    "You cannot use naive datetime if the crontab is defined with a timezone"
                )
else:
if self.tz is None:
                raise ValueError(
                    "You cannot use localized datetime if the crontab is defined without a timezone"
                )
else:
dt = dt.astimezone(self.tz)
return dt
def next_run_time(self, last_run_at: Optional[datetime] = None):
last_run_at = self._check_awareness(last_run_at or self.now())
now = self._check_awareness(self.now())
dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7
execute_this_date = (
last_run_at.month in self.month_of_year
and last_run_at.day in self.day_of_month
and dow_num in self.day_of_week
)
execute_this_hour = (
execute_this_date
and last_run_at.day == now.day
and last_run_at.month == now.month
and last_run_at.year == now.year
and last_run_at.hour in self.hour
and last_run_at.minute < max(self.minute)
)
if execute_this_hour:
next_minute = min(
minute for minute in self.minute if minute > last_run_at.minute
)
delta = Ffwd(minute=next_minute, second=0, microsecond=0)
else:
next_minute = min(self.minute)
execute_today = execute_this_date and last_run_at.hour < max(
self.hour
)
if execute_today:
next_hour = min(
hour for hour in self.hour if hour > last_run_at.hour
)
delta = Ffwd(
hour=next_hour, minute=next_minute, second=0, microsecond=0
)
else:
next_hour = min(self.hour)
all_dom_moy = (
self._orig_day_of_month == "*"
and self._orig_month_of_year == "*"
)
if all_dom_moy:
next_day = min(
[day for day in self.day_of_week if day > dow_num]
or self.day_of_week
)
add_week = next_day == dow_num
delta = Ffwd(
weeks=add_week and 1 or 0,
weekday=(next_day - 1) % 7,
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0,
)
else:
delta = self._delta_to_next(
last_run_at, next_hour, next_minute
)
next_run_at = now + delta
if self.tz:
next_run_at = self.tz.normalize(next_run_at)
return next_run_at
def __eq__(self, other):
if isinstance(other, Crontab):
return (
other.month_of_year == self.month_of_year
and other.day_of_month == self.day_of_month
and other.day_of_week == self.day_of_week
and other.hour == self.hour
and other.minute == self.minute
and super().__eq__(other)
)
return NotImplemented
def __ne__(self, other):
res = self.__eq__(other)
if res is NotImplemented:
return True
return not res
class Ffwd:
"""Version of ``dateutil.relativedelta`` that only supports addition."""
def __init__(
self,
year=None,
month=None,
weeks=0,
weekday=None,
day=None,
hour=None,
minute=None,
second=None,
microsecond=None,
**kwargs,
):
# pylint: disable=redefined-outer-name
# weekday is also a function in outer scope.
self.year = year
self.month = month
self.weeks = weeks
self.weekday = weekday
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
self.days = weeks * 7
self._has_time = self.hour is not None or self.minute is not None
def __radd__(self, other):
if not isinstance(other, date):
return NotImplemented
year = self.year or other.year
month = self.month or other.month
day = min(monthrange(year, month)[1], self.day or other.day)
ret = other.replace(
**dict(dictfilter(self._fields()), year=year, month=month, day=day)
)
if self.weekday is not None:
ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7)
return ret + timedelta(days=self.days)
def _fields(self, **extra):
return dictfilter(
{
"year": self.year,
"month": self.month,
"day": self.day,
"hour": self.hour,
"minute": self.minute,
"second": self.second,
"microsecond": self.microsecond,
},
**extra,
)
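# Hedged usage sketch (not part of the original module): building a Crontab
# from a five-field expression and asking for its next run time. Note that
# from_expression() above expects the order minute, hour, day_of_week,
# day_of_month, month_of_year, which differs from standard cron. Numeric
# weekdays are used to avoid relying on the weekday() helper, and the other
# helpers (cronfield, dictfilter, bisect, ...) are assumed to be imported
# earlier in this module.
if __name__ == "__main__":
    business_hours = Crontab.from_expression("*/30 9-17 1-5 * *")
    print(business_hours.next_run_time())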
|
/schedule_cronjob-1.3.1-py3-none-any.whl/schedule/crontab.py
| 0.903324 | 0.356391 |
crontab.py
|
pypi
|
import datetime
import calendar
import functools
def nth_day_yearly(n, job_func, *args, **kwargs):
"""
    Addition to schedule.every().day.do() or
    schedule.every().day.at(time).do().
    With this filter it's possible to define the day of the year
    on which the wrapped function actually runs.
    example:
    schedule.every().day.do(nth_day_yearly, 1, job_func)   # run job_func() on the first day of the year
    schedule.every().day.do(nth_day_yearly, -1, job_func)  # run job_func() on the last day of the year
    :param n: number of the day, can be 1 to 365 (366 in a leap year) or
              -1 to -365 (-366 in a leap year)
    :param job_func: function
    :param args: list of positional arguments
    :param kwargs: dict of keyword arguments
:return: result of job_func(*args, **kwargs)
"""
year = datetime.datetime.today().year
days_of_year = 366 if calendar.isleap(year) else 365
assert n != 0, "The nth day cannot be 0 (Zero)"
    assert n <= days_of_year, "The nth day cannot be bigger than 365, or 366 in a leap year"
    assert n >= -days_of_year, "The nth day cannot be smaller than -365, or -366 in a leap year"
    day_of_month = datetime.datetime.today().day
    day_of_year = int(datetime.datetime.today().strftime("%j"))  # %j = day number of the year, 001-366
    if n > 0 and n == day_of_year or \
            n < 0 and days_of_year + n + 1 == day_of_year:
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_month_yearly(n, job_func, *args, **kwargs):
"""
    Addition to schedule.every().day.do() or
    schedule.every().day.at(time).do().
    With this filter it's possible to define the month of the year
    in which the wrapped function actually runs.
    example:
    schedule.every().monday.do(nth_month_yearly, 1, job_func)  # run job_func() on every Monday of month 1 (January)
    schedule.every().day.do(nth_month_yearly, -1, job_func)    # run job_func() on every day of month 12 (December)
    :param n: number of the month, can be 1 to 12 or
              -1 to -12
    :param job_func: function
    :param args: list of positional arguments
    :param kwargs: dict of keyword arguments
:return: result of job_func(*args, **kwargs)
"""
assert n != 0, "The nth month cannot be 0 (Zero)"
    assert n <= 12, "The nth month cannot be bigger than 12"
    assert n >= -12, "The nth month cannot be smaller than -12"
    month = datetime.datetime.today().month
    if n > 0 and n == month or \
            n < 0 and 13 + n == month:
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_day_monthly(n, job_func, *args, **kwargs):
"""
    Addition to schedule.every().day.do() or
    schedule.every().day.at(time).do().
    With this filter it's possible to define the day of the month
    on which the wrapped function actually runs.
    example:
    schedule.every().day.do(nth_day_monthly, 1, job_func)   # run job_func() on the first day of the month
    schedule.every().day.do(nth_day_monthly, -1, job_func)  # run job_func() on the last day of the month
    :param n: number of the day, can be 1 to 28 or
              -1 to -28
              (up to 28, because February, the shortest month, has 28 days)
    :param job_func: function
    :param args: list of positional arguments
    :param kwargs: dict of keyword arguments
:return: result of job_func(*args, **kwargs)
"""
_, num_days_of_month = calendar.monthrange(datetime.datetime.today().year,
datetime.datetime.today().month)
assert n != 0, "The nth day cannot be 0 (Zero)"
    assert n <= 28, "The nth day cannot be bigger than 28"
    assert n >= -28, "The nth day cannot be smaller than -28"
    day_of_month = datetime.datetime.today().day
    if n > 0 and day_of_month == n or \
            n < 0 and day_of_month == num_days_of_month + n + 1:
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_week_monthly(n, job_func, *args, **kwargs):
"""
    Addition to schedule.every().weekday.do() or
    schedule.every().day.at(time).do().
    With this filter it's possible to define the week of the month
    in which the wrapped function actually runs.
    example:
    schedule.every().monday.do(nth_week_monthly, 1, job_func)   # run job_func() on the first Monday of the month
    schedule.every().sunday.do(nth_week_monthly, -1, job_func)  # run job_func() on the last Sunday of the month
    :param n: number of the week, can be 1 to 4 or
              -1 to -4
    :param job_func: function
    :param args: list of positional arguments
    :param kwargs: dict of keyword arguments
:return: result of job_func(*args, **kwargs)
"""
assert n != 0, "The nth week cannot be 0 (Zero)"
    assert n <= 4, "The nth week cannot be bigger than 4"
    assert n >= -4, "The nth week cannot be smaller than -4"
day_of_month = datetime.datetime.today().day
if n > 0:
week_n = lambda n: 7 * n
if week_n(n - 1) < day_of_month <= week_n(n):
return _execute(job_func, args, kwargs)
else:
return # wrong week
elif n < 0:
_, num_days_of_month = calendar.monthrange(datetime.datetime.today().year,
datetime.datetime.today().month)
reverse_week_n = lambda n: num_days_of_month + (n * 7)
"""
reverse week subtracts n weeks from the numbers of days of a month
reverse_week_n(0) == num_days_of_month (31, for example)
reverse_week_n(-1) == num_days_of_month - 7
reverse_week_n(-2) == num_days_of_month - 14
"""
if reverse_week_n(n) < day_of_month <= reverse_week_n(n + 1):
return _execute(job_func, args, kwargs)
else:
return # wrong week
def nth_year_ever(n, job_func, *args, **kwargs):
if datetime.datetime.today().year == n:
return _execute(job_func, args, kwargs)
def _execute(job_func, args, kwargs):
s_job_func = functools.partial(job_func, *args, **kwargs)
try:
functools.update_wrapper(s_job_func, job_func)
except AttributeError:
# job_funcs already wrapped by functools.partial won't have
# __name__, __module__ or __doc__ and the update_wrapper()
# call will fail.
pass
return s_job_func()
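# Hedged usage sketch (not part of the original module): wiring one of these
# filters into the `schedule` library. schedule's .do() forwards extra
# arguments to the callable, so the filter is re-evaluated on every daily
# trigger and only then executes the wrapped job. `my_job` is a hypothetical
# example function.
if __name__ == "__main__":
    import time
    import schedule
    def my_job():
        print("payday!")
    # Checked daily at 09:00; the filter runs my_job only on the last day of the month.
    schedule.every().day.at("09:00").do(nth_day_monthly, -1, my_job)
    while True:
        schedule.run_pending()
        time.sleep(60)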
|
/schedule_filter-0.2.1.tar.gz/schedule_filter-0.2.1/schedule_filter/__init__.py
| 0.603581 | 0.632304 |
__init__.py
|
pypi
|
import threading
import uuid
import re
import time
from datetime import datetime, timedelta
import math
from .exceptions import TaskNameDuplicateError
from .exceptions import TaskNotFoundError
from .exceptions import TimeFormatError
from .exceptions import OperationFailError
class ScheduleManager:
"""Task schedule manager."""
def __init__(self):
self._tasks = dict()
def __del__(self):
"""Destructor"""
# Make sure all tasks are not running.
self.running_tasks.stop()
def __contains__(self, name):
"""Returns True if task name is registered."""
return name in self._tasks
def __iter__(self):
"""Iterate over tasks name."""
return iter(self._tasks)
def __repr__(self):
return ("ScheduleManager<("
"Tasks: {c}, Running: {r}, Pending: {p}"
")>").format(c=self.count,
r=self.running_tasks.count,
p=self.pending_tasks.count)
@property
def count(self):
"""int: Number of tasks registered in the schedule manager."""
return len(self._tasks)
@property
def all_tasks(self):
"""TaskGroup: Get all tasks."""
return TaskGroup(list(self._tasks.values()))
@property
def running_tasks(self):
"""TaskGroup: Get all running tasks."""
task_list = list()
for name in self._tasks:
if self._tasks[name].is_running:
task_list.append(self._tasks[name])
return TaskGroup(task_list)
@property
def pending_tasks(self):
"""TaskGroup: Get all pending tasks."""
task_list = list()
for name in self._tasks:
if not self._tasks[name].is_running:
task_list.append(self._tasks[name])
return TaskGroup(task_list)
def task(self, name):
"""Get task registerd in schedule manager by name.
Args:
name (str): Task name.
Returns:
Task: Task instance.
Raises:
TaskNotFoundError: Task is not registered in schedule manager.
"""
if name not in self._tasks:
raise TaskNotFoundError
return self._tasks[name]
def _task_list(self, tag):
task_list = list()
if isinstance(tag, list):
for tag_ in tag:
for name in self._tasks:
if tag_ in self._tasks[name].tag:
if self._tasks[name] not in task_list:
task_list.append(self._tasks[name])
else:
for name in self._tasks:
if tag in self._tasks[name].tag:
task_list.append(self._tasks[name])
return task_list
def tasks(self, tag):
"""Get tasks registerd in schedule manager by name.
Args:
tag (Union[obj, list]): Tag or tag list.
Returns:
TaskGroup: TaskGroup instance.
"""
task_list = self._task_list(tag)
return TaskGroup(task_list)
def register(self, task):
"""Register a task.
Args:
task (Task): Task.
Returns:
Task: Registered task instance.
Raises:
TaskNameDuplicateError: Duplicate task name.
"""
if task.name in self._tasks:
raise TaskNameDuplicateError
self._tasks[task.name] = task
task.manager = self
return task
def register_task(self, job, name=None, args=(), kwargs=None,
ignore_skipped=True, daemon=True):
"""Create and register a task.
Args:
job (callable): Job to be scheduled.
name (str): Task name.
By default, a unique name is constructed.
args (tuple): Argument tuple for the job invocation.
Defaults to ().
kwargs (dict): Dictionary of keyword arguments for the job
invocation.
Defaults to {}.
ignore_skipped (bool): Set True to ignore skipped job if time
spent on job is longer than the task cycle time.
Defaults to True.
daemon (bool): Set True to use as a daemon task.
Defaults to True.
Returns:
Task: Registered task instance.
Raises:
TaskNameDuplicateError: Duplicate task name.
"""
if name is None:
name = "Task-{}".format(uuid.uuid4().hex)
while name in self._tasks:
name = "Task-{}".format(uuid.uuid4().hex)
elif name in self._tasks:
raise TaskNameDuplicateError
task = Task(name=name, job=job, args=args, kwargs=kwargs)
self._tasks[name] = task
task.manager = self
return task
def unregister(self, name=None, tag=None):
"""Unregister the task.
Args:
name (str): Unregister task by name.
tag (Union[obj, list]): Unregister tasks by tag or by
a list of tags.
"""
if name:
if name in self._tasks:
task = self._tasks[name]
del self._tasks[name]
task.manager = None
if tag:
task_list = self._task_list(tag)
for task in task_list:
del self._tasks[task.name]
task.manager = None
class Task(threading.Thread):
"""Thread-based Task.
    A task is considered periodic by default.
    :class:`Task` can be registered in a :class:`ScheduleManager` or run
    directly.
Args:
job (callable): Job to be scheduled as a task.
name (str): Task name.
By default, a unique name is constructed.
args (tuple): Argument tuple for the job invocation.
Defaults to ().
kwargs (dict): Dictionary of keyword arguments for the job
invocation.
Defaults to {}.
ignore_skipped (bool): Set True to ignore skipped job if time
spent on job is longer than the task cycle time.
Defaults to True.
daemon (bool): Set True to use as a daemon task.
Defaults to True.
Attributes:
name (str): Task name.
daemon (bool): A boolean value indicating whether this task is based
on a daemon thread.
            See `threading.Thread.daemon <https://docs.python.org/3/library/threading.html#threading.Thread.daemon>`_ for more detail.
"""
def __init__(self, job, name=None, args=(), kwargs=None,
ignore_skipped=True, daemon=True):
self.CHECK_INTERVAL = 1
        # Flag (start task): Set to True if start() is called.
self._start = False
# Flag (stop task): Used to stop current task
self._stop_task = False
# Flag (pause task):
        # Used to re-register the current task, because threads can only
        # be started once.
self._pause_task = False
self._manager = None
self._tag = list() # Tag list
self._ignore_skipped = ignore_skipped # Ignore skipped job activity.
self._next_run = None # datetime when the job run at next time
self._delay = None # Task delay time
self._start_at = None # Task start time
self._is_periodic = True # A periodic task or a non-periodic task.
self._nonperiod_count = 0 # Count used for non-periodic task.
self._periodic_unit = None
self._periodic = None
self._at_time = None
self._at_week_day = None
self._at_day = None
if name is None:
name = "Task-{}".format(uuid.uuid4().hex)
super().__init__(target=job,
name=name,
args=args,
kwargs=kwargs,
daemon=daemon)
def __repr__(self):
status = "initial"
if self._start:
status = "started"
if self._stop_task:
status = "stopping"
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
d_format = "%y-%m-%d %H:%M:%S"
if self._next_run:
time_next_run = self._next_run.strftime(d_format)
else:
if self._start and self._start_at:
time_next_run = "Start At {}".format((self
._start_at
.strftime(d_format)))
else:
time_next_run = None
return "Task<({}, {}, {})>".format(self._name, status, time_next_run)
@property
def next_run(self):
"""datetime: Datetime when the job run at next time."""
returns = self._next_run
if self._start:
if not returns and self._start_at:
returns = self._start_at
return returns
@property
def is_running(self):
"""bool: Return True if the task is running."""
return self._start
@property
def manager(self):
"""ScheduleManager: Schedule manager which manages current task."""
return self._manager
@manager.setter
def manager(self, manager):
"""Register task into schedule manager.
Use ScheduleManager.register(Task) instead of using
Task.set_manager(manager).
Args:
manager (ScheduleManager): ScheduleManager instance.
"""
if not manager:
if self._manager is None:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
if self.name in self._manager:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
self._manager = None
return
if self.name not in manager:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
if self is not manager.task(self.name):
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
self._manager = manager
@property
def tag(self):
"""list: Tag list of the task."""
return self._tag
def add_tag(self, tag):
"""Add tag to task.
Args:
tag (obj): Tag.
Returns:
Task: Invoked task instance.
"""
if tag not in self._tag:
self._tag.append(tag)
return self
def add_tags(self, tags):
"""Add a list of tags to task.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
for tag in tags:
self.add_tag(tag)
return self
def remove_tag(self, tag):
"""Remove tag from task.
Args:
tag (obj): Tag.
Returns:
Task: Invoked task instance.
"""
if tag in self._tag:
self._tag.remove(tag)
return self
def remove_tags(self, tags):
"""Remove a list of tags from task.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
for tag in tags:
self.remove_tag(tag)
return self
def set_tags(self, tags):
"""Set tag list to task.
Replace old tag list.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
self._tag.clear()
for tag in tags:
if tag not in self._tag:
self._tag.append(tag)
return self
def delay(self, interval=None):
"""Delay task start time.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Or set None to cancel task delay time.
Defaults to None.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
if interval is None:
self._delay = None
else:
if isinstance(interval, timedelta):
self._start_at = None # Use delay instead of start time.
self._delay = interval
elif isinstance(interval, int):
self._start_at = None # Use delay instead of start time.
self._delay = timedelta(seconds=interval)
else:
time_pattern = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
if re.match(time_pattern, interval):
self._start_at = None # Use delay instead of start time.
tsp = interval.split(":")
self._delay = timedelta(hours=int(tsp[0]),
minutes=int(tsp[1]),
seconds=int(tsp[2]))
else:
raise TimeFormatError
return self
def start_at(self, at_time=None):
"""Set task start time.
Specify a particular time that the job should be start.
Args:
at_time (Union[str, datetime]): Start time.
A string or :obj:`datetime`.
A string can be in one of the following formats:
[`HH:MM:SS`, `mm-dd HH:MM:SS`].
Or set None to cancel task start time.
Defaults to None.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
if at_time is None:
self._start_at = None
else:
if isinstance(at_time, datetime):
self._delay = None # Use start time instead of delay.
self._start_at = at_time
else:
match1 = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
match2 = (r'^([0]?\d|[1][0-2])-([0-2]?\d|[3][0-1])'
r' ([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$')
if re.match(match1, at_time):
self._delay = None # Use start time instead of delay.
tsp = at_time.split(":")
self._start_at = datetime.now().replace(hour=int(tsp[0]),
minute=int(tsp[1]),
second=int(tsp[2]))
elif re.match(match2, at_time):
self._delay = None # Use start time instead of delay.
dtsp = at_time.split(" ")
dsp = dtsp[0].split("-")
tsp = dtsp[1].split(":")
self._start_at = datetime.now().replace(month=int(dsp[0]),
day=int(dsp[1]),
hour=int(tsp[0]),
minute=int(tsp[1]),
second=int(tsp[2]))
else:
raise TimeFormatError
return self
def nonperiodic(self, count):
"""See as an non-periodic task.
Args:
count (int): Do the job for a certain number of times.
Returns:
Task: Invoked task instance.
"""
if self._start:
raise OperationFailError("Task is already running.")
if count <= 0:
raise OperationFailError("Number of times must be greater than 0.")
self._is_periodic = False
self._nonperiod_count = count
return self
def periodic(self):
"""See as an periodic task.
Returns:
Task: Invoked task instance.
"""
if self._start:
raise OperationFailError("Task is already running.")
self._is_periodic = True
return self
def period(self, interval):
"""Scheduling periodic task.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
self._periodic_unit = "every"
if isinstance(interval, timedelta):
self._periodic = interval
elif isinstance(interval, int):
self._periodic = timedelta(seconds=interval)
else:
if re.match(r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$', interval):
tsp = interval.split(":")
self._periodic = timedelta(hours=int(tsp[0]),
minutes=int(tsp[1]),
seconds=int(tsp[2]))
else:
raise TimeFormatError
return self
def period_at(self, unit="day", at_time="00:00:00",
week_day="Monday", day=1):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Args:
unit (str): Time unit of the periodic task.
Defaults to `day`.
The following unit is available:
1. `day`: Run job everyday.
2. `week`: Run job every week.
3. `month`: Run job every month.
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
            week_day (str): Weekday on which to do the job.
                Defaults to `Monday`.
                This argument is only used if unit is `week`.
                A string should be one of the following values:
                [`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
                `"Friday"`, `"Saturday"`, `"Sunday"`]
            day (int): Day of the month on which to do the job.
                Defaults to 1.
                This argument is only used if unit is `month`.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
time_pattern = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
week_day_list = {
"Monday": 0,
"Tuesday": 1,
"Wednesday": 2,
"Thursday": 3,
"Friday": 4,
"Saturday": 5,
"Sunday": 6
}
if unit == "day":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
elif unit == "week":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
if week_day not in week_day_list:
raise TimeFormatError
self._at_week_day = week_day_list[week_day]
elif unit == "month":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
if day not in range(1, 32):
raise TimeFormatError
self._at_day = day
else:
raise TimeFormatError
return self
def period_day_at(self, at_time="00:00:00"):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs everyday.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="day", at_time=at_time)
return self
def period_week_at(self, at_time="00:00:00", week_day="Monday"):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs every week.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
            week_day (str): Weekday on which to do the job.
                Defaults to `Monday`.
                A string should be one of the following values:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="week", at_time=at_time, week_day=week_day)
return self
def period_month_at(self, at_time="00:00:00", day=1):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs every month.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
day (int): Day to do the job.
Defaults to 1.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="month", at_time=at_time, day=day)
return self
def _set_next_run_init(self):
# First time the job run at.
if self._periodic_unit == "every":
self._next_run = datetime.now()
elif self._periodic_unit == "day":
self._set_next_run_init_day()
elif self._periodic_unit == "week":
self._set_next_run_init_week()
elif self._periodic_unit == "month":
self._set_next_run_init_month()
def _set_next_run_init_day(self):
run_time = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if run_time < datetime.now():
self._next_run = run_time + timedelta(days=1)
else:
self._next_run = run_time
def _set_next_run_init_week(self):
tmp_runtime = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
now_weekday = tmp_runtime.date().weekday()
if now_weekday < self._at_week_day:
tmp_runtime += timedelta(days=self._at_week_day-now_weekday)
elif now_weekday > self._at_week_day:
tmp_runtime += timedelta(days=7+self._at_week_day-now_weekday)
else:
if tmp_runtime < datetime.now():
tmp_runtime += timedelta(days=7)
self._next_run = tmp_runtime
def _set_next_run_init_month(self):
try:
tmp_runtime = datetime.now().replace(day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if datetime.now().day > self._at_day:
if tmp_runtime.month == 12:
tmp_runtime = tmp_runtime.replace(year=tmp_runtime.year+1,
month=1)
else:
try:
tmp_runtime = tmp_runtime.replace(month=(tmp_runtime
.month)+1)
except ValueError:
# Because day is out of range in next month.
tmp_runtime = tmp_runtime.replace(month=(tmp_runtime
.month)+2)
elif datetime.now().day == self._at_day:
if tmp_runtime < datetime.now():
if tmp_runtime.month == 12:
tmp_runtime = tmp_runtime.replace(year=(tmp_runtime
.year)+1,
month=1)
else:
try:
tmp_runtime = (tmp_runtime
.replace(month=tmp_runtime.month+1))
except ValueError:
# Because day is out of range in next month.
tmp_runtime = (tmp_runtime
.replace(month=tmp_runtime.month+2))
self._next_run = tmp_runtime
except ValueError:
# Because day is out of range in this month.
self._next_run = datetime.now().replace(month=(datetime
.now()
.month)+1,
day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
def _set_next_run(self):
if self._periodic_unit == "every":
self._set_next_run_every()
elif self._periodic_unit == "day":
self._set_next_run_day()
elif self._periodic_unit == "week":
self._set_next_run_week()
elif self._periodic_unit == "month":
self._set_next_run_month()
def _set_next_run_every(self):
if self._ignore_skipped:
next_ = self._next_run + self._periodic
if next_ < datetime.now():
rate = (datetime.now() - self._next_run) / self._periodic
next_ = self._next_run + math.ceil(rate) * self._periodic
if next_ == datetime.now():
next_ += self._periodic
self._next_run = next_
else:
self._next_run += self._periodic
def _set_next_run_day(self):
if self._ignore_skipped:
next_ = self._next_run + timedelta(days=1)
if next_ < datetime.now():
# Record current datetime to avoid 23:59:XX situation.
time_now = datetime.now()
next_ = next_.replace(month=time_now.month,
day=time_now.day)
if next_ <= datetime.now():
next_ += timedelta(days=1)
self._next_run = next_
else:
self._next_run += timedelta(days=1)
def _set_next_run_week(self):
if self._ignore_skipped:
next_ = self._next_run + timedelta(days=7)
if next_ < datetime.now():
next_ = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
weekday_ = next_.date().weekday()
if weekday_ < self._at_week_day:
next_ += timedelta(days=self._at_week_day-weekday_)
elif weekday_ > self._at_week_day:
next_ += timedelta(days=7+self._at_week_day-weekday_)
else:
if next_ < datetime.now():
next_ += timedelta(days=7)
if next_ <= datetime.now():
next_ += timedelta(days=7)
self._next_run = next_
else:
self._next_run += timedelta(days=7)
def _set_next_run_month(self):
if self._ignore_skipped:
if self._next_run.month == 12:
next_ = self._next_run.replace(year=self._next_run.year+1,
month=1)
else:
try:
next_ = self._next_run.replace(month=(self
._next_run
.month)+1)
except ValueError:
# Because day is out of range in next month.
next_ = self._next_run.replace(month=(self
._next_run
.month)+2)
if next_ < datetime.now():
try:
next_ = datetime.now().replace(day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if datetime.now().day > self._at_day:
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next month.
next_ = next_.replace(month=next_.month+2)
elif datetime.now().day == self._at_day:
if next_ < datetime.now():
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next
# month.
next_ = next_.replace(month=next_.month+2)
except ValueError:
next_ = datetime.now().replace(month=(datetime
.now()
.month)+1,
day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if next_ <= datetime.now():
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next month.
next_ = next_.replace(month=next_.month+2)
self._next_run = next_
else:
if self._next_run.month == 12:
self._next_run = self._next_run.replace(year=(self
._next_run
.year)+1,
month=1)
else:
try:
month_next = self._next_run.month+1
self._next_run = self._next_run.replace(month=month_next)
except ValueError:
# Because day is out of range in next month.
month_next = self._next_run.month+2
self._next_run = self._next_run.replace(month=month_next)
def _next_run_at(self):
if self._next_run is None:
self._set_next_run_init()
else:
self._set_next_run()
def start(self):
"""Start the Task's activity."""
if not self._periodic_unit:
raise OperationFailError("Please set period first.")
self._start = True
# Set start at by delay time
if self._delay:
self._start_at = datetime.now() + self._delay
super().start()
def stop(self):
"""Stop the Task's activity."""
if not self._start:
raise OperationFailError("Task is not running.")
self._start = False
self._stop_task = True
def pause(self):
"""Pause the Task's activity.
        Works only if the task is registered in a :class:`ScheduleManager`.
"""
if not self._start:
raise OperationFailError("Task is not running.")
if not self._manager:
raise OperationFailError("Register task into "
"ScheduleManager first.")
self._start = False
self._stop_task = True
self._pause_task = True
def _action_after_finish(self):
# Remove task from manager
if self._manager:
# Keep ScheduleManager instance
manager = self._manager
manager.unregister(self.name)
if self._pause_task:
                # A Thread-based object can only be started once, so to realize
                # the pause action we create a new task with the same job and
                # register it after deleting the current task.
kwargs = None if self._kwargs == {} else self._kwargs
# New task
new_task = manager.register_task(name=self.name,
job=self._target,
args=self._args,
kwargs=kwargs)
new_task.set_tags(self.tag)
# schedule task
if self._periodic_unit == "every":
new_task.period(self._periodic)
else:
ref_week = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday",
None: None
}
time_str = "{}:{}:{}".format(str(self._at_time[0]),
str(self._at_time[1]),
str(self._at_time[2]))
new_task.period_at(unit=self._periodic_unit,
at_time=time_str,
week_day=ref_week[self._at_week_day],
day=self._at_day)
if not self._is_periodic:
new_task.nonperiodic(self._nonperiod_count)
if self._delay:
new_task.delay(self._start_at - datetime.now())
elif self._start_at:
if datetime.now() < self._start_at:
new_task.start_at(self._start_at)
def run(self):
"""Representing the Task's activity.
DO NOT CALL DIRECTLY.
"""
if not self._start:
raise OperationFailError("Use Task.start() instead.")
# Modified from :meth:`Thread.run`.
try:
# Delay or start at.
if self._start_at:
while not self._stop_task:
if datetime.now() >= self._start_at:
break
time.sleep(self.CHECK_INTERVAL)
self._next_run_at()
while not self._stop_task:
if datetime.now() >= self._next_run:
self._target(*self._args, **self._kwargs)
self._next_run_at()
if not self._is_periodic:
self._nonperiod_count -= 1
if self._nonperiod_count <= 0:
self._stop_task = True
break
time.sleep(self.CHECK_INTERVAL)
finally:
self._action_after_finish()
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
class TaskGroup:
"""Task group.
A set of tasks.
"""
def __init__(self, tasks=None):
"""Constructor
Args:
tasks (iterable): Task list.
"""
if not tasks:
self._tasks = list()
else:
self._tasks = list()
if isinstance(tasks, list):
self._tasks = tasks[:]
else:
for task in tasks:
self._tasks.append(task)
def __repr__(self):
return ("TaskGroup<("
"Tasks: {task_count}"
")>").format(task_count=len(self._tasks))
def __contains__(self, task):
"""Returns True if task is in the group."""
return task in self._tasks
def __iter__(self):
"""Iterate over tasks."""
return iter(self._tasks)
def __add__(self, other):
if isinstance(other, TaskGroup):
task_list = self._tasks + other._tasks
return TaskGroup(task_list)
return NotImplemented
@property
def count(self):
"""int: Number of tasks contained in the group."""
return len(self._tasks)
def set_manager(self, manager=None):
"""Change schedule manager of all tasks.
Task will be unregistered from old manager if it has been registered
in a manager.
Args:
            manager (ScheduleManager): An existing schedule manager object.
Set None to create new schedule manager.
Returns:
ScheduleManager: Invoked ScheduleManager instance.
Raises:
TaskNameDuplicateError: There is a duplicate task name.
"""
if not manager:
manager = ScheduleManager()
else:
for task in self._tasks:
if task.name in manager:
error = "Duplicate task name <{}>.".format(task.name)
raise TaskNameDuplicateError(error)
for task in self._tasks:
if task.manager:
task.manager.unregister(name=task.name)
manager.register(task)
return manager
def add_tag(self, tag):
"""Add tag to tasks.
Args:
tag (obj): Tag.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.add_tag(tag)
return self
def add_tags(self, tags):
"""Add a list of tags to tasks.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.add_tags(tags)
return self
def remove_tag(self, tag):
"""Remove tag from tasks.
Args:
tag (obj): Tag.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.remove_tag(tag)
return self
def remove_tags(self, tags):
"""Remove a list of tags from tasks.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.remove_tags(tags)
return self
def set_tags(self, tags):
"""Set tag list to tasks.
Replace old tag list.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.set_tags(tags)
return self
def delay(self, interval=None):
"""Delay task start time.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Or set None to cancel task delay time.
Defaults to None.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.delay(interval)
return self
def start_at(self, at_time):
"""Set task start time.
Specify a particular time that the job should be start.
Args:
at_time (Union[str, datetime]): Start time.
A string or :obj:`datetime`.
A string can be in one of the following formats:
[`HH:MM:SS`, `mm-dd HH:MM:SS`].
Or set None to cancel task start time.
Defaults to None.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.start_at(at_time)
return self
def nonperiodic(self, count):
"""See as non-periodic tasks.
Args:
count (int): Do the job for a certain number of times.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.nonperiodic(count)
return self
def periodic(self):
"""See as periodic tasks.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.periodic()
return self
def period(self, interval):
"""Scheduling periodic tasks.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int
in seconds.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period(interval)
return self
def period_at(self,
unit="day", at_time="00:00:00",
week_day="Monday", day=1):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Args:
unit (str): Time unit of the periodic task.
Defaults to `day`.
The following unit is available:
1. `day`: Run job everyday.
2. `week`: Run job every week.
3. `month`: Run job every month.
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
            week_day (str): Weekday on which to do the job.
                Defaults to `Monday`.
                This argument is only used if unit is `week`.
                A string should be one of the following values:
                [`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
                `"Friday"`, `"Saturday"`, `"Sunday"`]
            day (int): Day of the month on which to do the job.
                Defaults to 1.
                This argument is only used if unit is `month`.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_at(unit=unit,
at_time=at_time,
week_day=week_day,
day=day)
return self
def period_day_at(self, at_time="00:00:00"):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs everyday.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_day_at(at_time=at_time)
return self
def period_week_at(self, at_time="00:00:00", week_day="Monday"):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs every week.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
            week_day (str): Weekday on which to do the job.
                Defaults to `Monday`.
                A string should be one of the following values:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_week_at(at_time=at_time, week_day=week_day)
return self
def period_month_at(self, at_time="00:00:00", day=1):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs every month.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
day (int): Day to do the job.
Defaults to 1.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_month_at(at_time=at_time, day=day)
return self
def start(self):
"""Start the Tasks' activity."""
for task in self._tasks:
task.start()
def stop(self):
"""Stop the Tasks' activity."""
for task in self._tasks:
task.stop()
def pause(self):
"""Pause the Tasks' activity.
        Works only if the tasks are registered in a :class:`ScheduleManager`.
"""
new_task_list = list()
for task in self._tasks:
manager = task.manager
task_name = task.name
task.pause()
while task.manager is not None:
time.sleep(1)
if manager:
new_task_list.append(manager.task(task_name))
self._tasks = new_task_list[:]
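# Hedged usage sketch (not part of the original module): registering a short
# periodic task with the manager and letting it finish. `hello` is a
# hypothetical job function.
if __name__ == "__main__":
    def hello():
        print("hello at", datetime.now())
    manager = ScheduleManager()
    task = manager.register_task(job=hello, name="hello-task")
    task.period(5).nonperiodic(3)  # run every 5 seconds, three times in total
    task.start()
    task.join()  # Task is a Thread, so we can wait for it to finish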
|
/schedule_manager-0.1.1-py3-none-any.whl/schedule_manager/manager.py
| 0.801081 | 0.154983 |
manager.py
|
pypi
|
from datetime import datetime
class Lesson:
__current_string = ''
def __init__(self, string_to_parse):
self.discipline = ''
self.room = ''
self.building = '0'
self.groups = [Lesson.get_course(string_to_parse[:6].strip())]
self.__current_string = string_to_parse[6:].strip()
self.is_dop = False
self.subgroup = 0
self.__get_one_more_group_if_have()
self.__split_cabinet_room_from_str() \
.__get_attr_from_discipline(). \
__split_room()
def __get_one_more_group_if_have(self):
"""
        If one discipline has two groups, capture the second group as well.
:return: Lesson
"""
if self.__current_string[0].isdigit():
group_to_add = self.__current_string[:6].strip()
self.groups.append(Lesson.get_course(group_to_add))
self.__current_string = self.__current_string[6:].strip()
return self
def __split_cabinet_room_from_str(self):
"""
        To split the string correctly, find where the room part starts (using the last space).
:return:
"""
place_space_from_end = self.__current_string.rfind(" ")
if place_space_from_end != -1:
self.discipline = self.__current_string[:place_space_from_end + 1].strip()
self.room = self.__current_string[place_space_from_end + 1:].strip()
return self
def __split_room(self):
"""
        Split the room into room number and building.
:return:
"""
if "/" in self.room:
self.building, self.room = self.room.split("/")
self.room = self.room.replace("'", "")
return self
def __get_attr_from_discipline(self):
"""
        Extract information about the discipline: is it an extra (ДОП) lesson? Which subgroup?
:return:
"""
discipline = self.discipline.replace('[СДО]', '').replace("[]", '')
if Lesson.has_numbers(discipline):
if discipline[1] == 'О':
self.subgroup = 0
else:
self.subgroup = discipline[1]
discipline = discipline[4:]
if 'ДОП' in discipline:
discipline = discipline.replace("ДОП", '').strip()
self.is_dop = True
self.discipline = discipline
return self
@staticmethod
def has_numbers(inputString):
return any(char.isdigit() for char in inputString)
@staticmethod
def get_course(group_name):
group_data = {"group": group_name, 'year': "2222", "course": "0"}
today = datetime.now()
year_end = today.year
year_start = year_end - 10
for i in range(year_start+1, year_end+1, 1):
year = str(i)
if (year[-1] == group_name[1]):
course = year_end - i + 1
group_data = {"group": group_name, "year": year, "course": course}
return group_data
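# Hedged usage sketch (not part of the original module): parsing one
# hypothetical timetable row. The exact portal string layout is assumed here:
# a 6-character group code, the discipline name, then "building/room".
if __name__ == "__main__":
    lesson = Lesson("22И-09 Информатика 1/204")
    print(lesson.groups, lesson.discipline, lesson.building, lesson.room)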
|
/schedule_parser_portal_petrocollege-0.0.6.tar.gz/schedule_parser_portal_petrocollege-0.0.6/src/Lesson.py
| 0.571169 | 0.25389 |
Lesson.py
|
pypi
|
import time
from abc import ABCMeta, abstractmethod
DEFAULT_INTERVAL = 5.0
class Scheduled_poller(object):
"""
    Base class for classes that poll a task regularly, with a constant minimum time interval between each poll.
    Warning: Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task
    (so the polling interval might be longer than polling_interval_secs).
ToDo: An alternative name might be Scheduled_task
"""
__metaclass__ = ABCMeta
def __init__(self):
"""
Construct a new Poller object (Poller is an abstract class)
"""
self.running = False
self.polling_interval_secs = DEFAULT_INTERVAL
@abstractmethod
def do_work(self):
"""
Perform the work to be done, during each poll (aka 'scheduled task')
        :raises This procedure must be overridden or it will raise a NotImplementedError
"""
raise NotImplementedError("Must override method: do_work")
def start(self, polling_interval_secs=DEFAULT_INTERVAL):
"""
Start (or re-start) the poller. This will run the do_work procedure every self.polling_interval_secs seconds
If the do_work procedure takes longer than polling_interval_secs, the next poll will take place as
soon as the task has finished:
Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task.
:param polling_interval_secs: time interval (seconds) between scheduled runs.
        :raises polling_interval_secs must be greater than 0 or a ValueError will be raised.
:type polling_interval_secs: float
"""
if polling_interval_secs <= 0.0:
raise ValueError("polling_interval_secs must be greater than 0")
else:
self.polling_interval_secs = polling_interval_secs
self.running = True
while self.running:
            start = time.perf_counter()  # time.clock() was removed in Python 3.8
            self.do_work()
            work_duration = time.perf_counter() - start
time.sleep(max(0, self.polling_interval_secs - work_duration))
def stop(self):
"""
        Stop the poller if it is running. If it is not running, do nothing.
"""
self.running = False
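# Hedged usage sketch (not part of the original module): a minimal concrete
# poller that prints a heartbeat and stops itself after three polls so the
# demo stays finite.
if __name__ == "__main__":
    class HeartbeatPoller(Scheduled_poller):
        def __init__(self):
            super().__init__()
            self.ticks = 0
        def do_work(self):
            self.ticks += 1
            print("tick", self.ticks)
            if self.ticks >= 3:
                self.stop()
    HeartbeatPoller().start(polling_interval_secs=1.0)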
|
/scheduled_poller-1.3-py3-none-any.whl/scheduled_poller/scheduled_poller.py
| 0.708818 | 0.307839 |
scheduled_poller.py
|
pypi
|
import time
from abc import ABCMeta, abstractmethod
DEFAULT_INTERVAL = 5.0
class Poller(object):
"""
    Base class for classes that poll a task regularly, with a constant minimum time interval between each poll.
    Warning: Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task
    (so the polling interval might be longer than polling_interval_secs).
ToDo: An alternative name might be ScheduledTask
"""
__metaclass__ = ABCMeta
def __init__(self):
"""
Construct a new Poller object (Poller is an abstract class)
"""
self.running = False
        self.polling_interval_secs = DEFAULT_INTERVAL
@abstractmethod
def do_work(self):
"""
Perform the work to be done, during each poll (aka 'scheduled task')
        :raises This procedure must be overridden or it will raise a NotImplementedError
"""
raise NotImplementedError("Must override method: do_work")
def start(self, polling_interval_secs=DEFAULT_INTERVAL):
"""
Start (or re-start) the poller. This will run the do_work procedure every self.polling_interval_secs seconds
If the do_work procedure takes longer than polling_interval_secs, the next poll will take place as
soon as the task has finished:
Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task.
:param polling_interval_secs: time interval (seconds) between scheduled runs.
        :raises polling_interval_secs must be greater than 0 or a ValueError will be raised.
:type polling_interval_secs: float
"""
        if polling_interval_secs <= 0.0:
            raise ValueError("polling_interval_secs must be greater than 0")
        self.polling_interval_secs = polling_interval_secs
        self.running = True
while self.running:
            start = time.perf_counter()  # time.clock() was removed in Python 3.8
            self.do_work()
            work_duration = time.perf_counter() - start
time.sleep(max(0, self.polling_interval_secs - work_duration))
def stop(self):
"""
        Stop the poller if it is running. If it is not running, do nothing.
"""
self.running = False
|
/scheduled_poller-1.3-py3-none-any.whl/scheduled_poller/poller.py
| 0.699357 | 0.3295 |
poller.py
|
pypi
|
import time
from abc import ABCMeta, abstractmethod
DEFAULT_INTERVAL = 5.0
class Poller(object):
"""
    Base class for classes that poll a task regularly, with a constant minimum time interval between each poll.
    Warning: Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task
    (so the polling interval might be longer than polling_interval_secs).
ToDo: An alternative name might be ScheduledTask
"""
__metaclass__ = ABCMeta
def __init__(self):
"""
Construct a new Poller object (Poller is an abstract class)
"""
self.running = False
        self.polling_interval_secs = DEFAULT_INTERVAL
@abstractmethod
def do_work(self):
"""
Perform the work to be done, during each poll (aka 'scheduled task')
        :raises This procedure must be overridden or it will raise a NotImplementedError
"""
raise NotImplementedError("Must override method: do_work")
def start(self, polling_interval_secs=DEFAULT_INTERVAL):
"""
Start (or re-start) the poller. This will run the do_work procedure every self.polling_interval_secs seconds
If the do_work procedure takes longer than polling_interval_secs, the next poll will take place as
soon as the task has finished:
Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task.
:param polling_interval_secs: time interval (seconds) between scheduled runs.
        :raises polling_interval_secs must be greater than 0 or a ValueError will be raised.
:type polling_interval_secs: float
"""
        if polling_interval_secs <= 0.0:
            raise ValueError("polling_interval_secs must be greater than 0")
        self.polling_interval_secs = polling_interval_secs
        self.running = True
while self.running:
            start = time.perf_counter()  # time.clock() was removed in Python 3.8
            self.do_work()
            work_duration = time.perf_counter() - start
time.sleep(max(0, self.polling_interval_secs - work_duration))
def stop(self):
"""
        Stop the poller if it is running. If it is not running, do nothing.
"""
self.running = False
|
/scheduled_poller-1.3-py3-none-any.whl/poller/poller.py
| 0.699357 | 0.3295 |
poller.py
|
pypi
|
from datetime import datetime
from enum import Enum
from copy import copy
from .utils import get_biggest_value_less_or_equal_to, get_smallest_value_greater_or_equal_to, last, first, \
weekday_num, weekday_and_num_to_day, num_days_in_month, weekday_and_week_to_day, week_num, max_week_num
class DateTimeHolder:
__slots__ = ['minute', 'hour', 'day', 'day_of_week', 'day_of_week_num', 'month', 'week', 'year']
def __init__(self, minute=None, hour=None, day=None, day_of_week=None, day_of_week_num=None, week=None,
month=None, year=None):
self.minute = minute
self.hour = hour
self.day = day
self.day_of_week = day_of_week
self.day_of_week_num = day_of_week_num
self.week = week
self.month = month
self.year = year
@property
def datetime(self):
if self.day_of_week is not None and self.day_of_week_num is not None:
day = weekday_and_num_to_day(self.year, self.month, self.day_of_week_num, self.day_of_week)
return datetime(self.year, self.month, day, self.hour or 0, self.minute or 0)
elif self.day_of_week is not None and self.week is not None:
day = weekday_and_week_to_day(self.year, self.month, self.week, self.day_of_week)
return datetime(self.year, self.month, day, self.hour or 0, self.minute or 0)
else:
return datetime(self.year, self.month or 1, self.day or 1, self.hour or 0, self.minute or 0)
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
return setattr(self, key, value)
def __copy__(self):
return DateTimeHolder(minute=self.minute, hour=self.hour, day=self.day, day_of_week=self.day_of_week,
day_of_week_num=self.day_of_week_num, week=self.week, month=self.month, year=self.year)
def __lt__(self, other):
return self.datetime < other.datetime
def __gt__(self, other):
return self.datetime > other.datetime
def __eq__(self, other):
return self.datetime == other.datetime
def __le__(self, other):
return self.datetime <= other.datetime
def __ge__(self, other):
return self.datetime >= other.datetime
class TaskStrategy(Enum):
days_of_month = 0 # 1-31
days_of_week = 1 # Sun-Sat + week number
days_of_week_num = 2 # Sun-Sat + weekday number
class DayStrategyFraction(Enum):
minute = 0
hour = 1
day = 2
month = 3
year = 4
class DayOfWeekStrategyFraction(Enum):
minute = 0
hour = 1
day_of_week = 2
week = 3
month = 4
year = 5
class DayOfWeekNumStrategyFraction(Enum):
minute = 0
hour = 1
day_of_week = 2
day_of_week_num = 3
month = 4
year = 5
class ScheduledTask:
def __init__(self, minutes=None, hours=None, days=None, days_of_week=None, days_of_week_num=None, weeks=None,
months=None, years=None, max_iterations=100):
if days_of_week is not None and days_of_week_num is not None:
self.strategy = TaskStrategy.days_of_week_num
self.fractions = DayOfWeekNumStrategyFraction
self.candidates = [minutes or range(0, 60), hours or range(0, 24), days_of_week or range(0, 7),
days_of_week_num or range(0, 5), months or range(1, 13), years or range(0, 9999)]
elif days_of_week is not None or weeks is not None:
self.strategy = TaskStrategy.days_of_week
self.fractions = DayOfWeekStrategyFraction
self.candidates = [minutes or range(0, 60), hours or range(0, 24), days_of_week or range(0, 7),
weeks or range(0, 6), months or range(1, 13), years or range(0, 9999)]
else:
self.strategy = TaskStrategy.days_of_month
self.fractions = DayStrategyFraction
self.candidates = [minutes or range(0, 60), hours or range(0, 24), days or range(1, 32),
months or range(1, 13), years or range(0, 9999)]
self.highest_fraction = last([f for f in self.fractions])
# Settings
self.max_iterations = max_iterations
def _datetimeholder_valid(self, datetimeholder: DateTimeHolder, fraction: Enum):
"""Check if date time holder is valid for current fraction
i.e. if fraction is days, check if current day exists in the month
"""
# Check min value
if self.strategy == TaskStrategy.days_of_month:
min_value = 1 if fraction in [self.fractions.day, self.fractions.month, self.fractions.year] else 0
else:
min_value = 1 if fraction in [self.fractions.month, self.fractions.year] else 0
if datetimeholder[fraction.name] < min_value:
return False
# Check if day exceeds number of days in that month
if self.strategy == TaskStrategy.days_of_month and fraction == self.fractions.day:
n_days_in_month = num_days_in_month(datetimeholder.year, datetimeholder.month)
if datetimeholder.day > n_days_in_month:
return False
# Check if day of week number exceeds number of day of weeks for this month
if self.strategy == TaskStrategy.days_of_week_num and fraction == self.fractions.day_of_week_num:
# Since we don't know what day of week we are validating,
# assume that this number can't be more than max week number
if datetimeholder.day_of_week_num > max_week_num(datetimeholder.year, datetimeholder.month):
return False
# Check if day of week and day of week number exceeds maximum day of week number for this month
if self.strategy == TaskStrategy.days_of_week_num and fraction == self.fractions.day_of_week:
day = weekday_and_num_to_day(datetimeholder.year, datetimeholder.month, datetimeholder.day_of_week_num,
datetimeholder.day_of_week)
n_days_in_month = num_days_in_month(datetimeholder.year, datetimeholder.month)
if day > n_days_in_month:
return False
# Check if month has n weeks
if self.strategy == TaskStrategy.days_of_week and fraction == self.fractions.week:
if datetimeholder.week > max_week_num(datetimeholder.year, datetimeholder.month):
return False
        # Check if the weekday and week number combination is valid for this month
if self.strategy == TaskStrategy.days_of_week and fraction == self.fractions.day_of_week:
day = weekday_and_week_to_day(datetimeholder.year, datetimeholder.month, datetimeholder.week,
datetimeholder.day_of_week)
n_days_in_month = num_days_in_month(datetimeholder.year, datetimeholder.month)
if day is None:
return False
if day > n_days_in_month:
return False
# All checks are passed
return True
def _datetimeholders_equal(self, a: DateTimeHolder, b: DateTimeHolder, from_fraction: Enum):
"""Partially check a and b date time holders for equality, starting with fraction.
For example, if the fraction is DAY, compare only DAY, MONTH and YEAR
"""
return all([a[self.fractions(fv).name] == b[self.fractions(fv).name] for fv
in range(from_fraction.value, self.highest_fraction.value+1)])
def _datetimeholders_compare(self, a: DateTimeHolder, b: DateTimeHolder, from_fraction: Enum):
"""Partially compare a and b date time holders, starting with fraction.
For example, if the fraction is DAY, compare only DAY, MONTH and YEAR
"""
_a = DateTimeHolder()
_b = DateTimeHolder()
for fraction_value in range(from_fraction.value, self.highest_fraction.value+1):
fraction = self.fractions(fraction_value)
_a[fraction.name] = a[fraction.name]
_b[fraction.name] = b[fraction.name]
if _a > _b:
return 1
elif _a == _b:
return 0
else:
return -1
def _increase_fraction(self, result: DateTimeHolder, fraction: Enum, increment: int, current: DateTimeHolder):
"""Increase fraction on the datetimeholder
:param result:Value to increase
:param fraction:Fraction to increase
:param current:Original value - used to reset if we can't increase
:return:Number of fractions increased (to know from which to recalculate)
"""
# If candidates are range, perform step-aware increment
if type(self.candidates[fraction.value]) == list:
new_value = result[fraction.name] + increment
elif type(self.candidates[fraction.value]) == range:
new_value = result[fraction.name] + increment * self.candidates[fraction.value].step
else:
raise ValueError("candidate must be of type list or range")
datetimeholder_increased = copy(result)
datetimeholder_increased[fraction.name] = new_value
if increment > 0: # 1
new_value = get_smallest_value_greater_or_equal_to(self.candidates[fraction.value],
datetimeholder_increased[fraction.name])
in_range = new_value is not None
else: # -1
new_value = get_biggest_value_less_or_equal_to(self.candidates[fraction.value],
datetimeholder_increased[fraction.name])
in_range = new_value is not None
if self._datetimeholder_valid(datetimeholder_increased, fraction) and in_range:
result[fraction.name] = new_value
return 1
else:
if fraction == self.highest_fraction:
raise ValueError("Can't increase fraction - current " + self.highest_fraction +
" is " + result[fraction.value])
result[fraction.name] = current[fraction.name]
return 1 + self._increase_fraction(result, self.fractions(fraction.value + 1), increment, current)
def get_next_time(self, current_datetime: datetime = None):
"""Returns next task execution time nearest to the given datetime
"""
if current_datetime is None:
current_datetime = datetime.utcnow()
if self.strategy == TaskStrategy.days_of_month:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day=current_datetime.day, month=current_datetime.month, year=current_datetime.year)
elif self.strategy == TaskStrategy.days_of_week:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
week=week_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
else:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
day_of_week_num=weekday_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
result = self._get_next_time(current)
return result.datetime
def get_previous_time(self, current_datetime: datetime = None):
"""Returns previous task execution time nearest to the given datetime
"""
if current_datetime is None:
current_datetime = datetime.utcnow()
if self.strategy == TaskStrategy.days_of_month:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day=current_datetime.day, month=current_datetime.month, year=current_datetime.year)
elif self.strategy == TaskStrategy.days_of_week:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
week=week_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
else:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
day_of_week_num=weekday_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
result = self._get_previous_time(current)
return result.datetime
def _get_next_time(self, current: DateTimeHolder):
"""Calculates next task time using current
"""
result = DateTimeHolder()
fraction_value = self.highest_fraction.value
i = 0
while fraction_value != -1: # From year to minute
i += 1
if i > self.max_iterations: # Max iteration check
raise ValueError("maximum number of iterations exceeded. You found a bug with scheduledtask. Dump: " +
"candidates: {}, ".format(self.candidates) +
"current: {}, max_iterations: {}".format(current, self.max_iterations))
fraction = self.fractions(fraction_value)
if fraction is self.highest_fraction \
or self._datetimeholders_equal(result, current, self.fractions(fraction_value+1)):
result[fraction.name] = get_smallest_value_greater_or_equal_to(self.candidates[fraction_value],
current[fraction.name])
else:
result[fraction.name] = first(self.candidates[fraction_value])
if result[fraction.name] is None \
or not self._datetimeholder_valid(result, fraction) \
or not self._datetimeholders_compare(result, current, fraction) > -1: # In case with day_of_week_num
if fraction == self.highest_fraction:
                    return None  # Can't find highest fraction match; the event will never occur in the future
                # Increase the next higher fraction on the result, then recalculate starting one fraction below it
fraction_value += self._increase_fraction(result, self.fractions(fraction_value + 1), +1, current) - 1
continue
fraction_value -= 1
return result
def _get_previous_time(self, current: DateTimeHolder):
"""Calculates previous task time using current
"""
result = DateTimeHolder()
fraction_value = self.highest_fraction.value
i = 0
while fraction_value != -1: # From year to minute
i += 1
if i > self.max_iterations: # Max iteration check
raise ValueError("maximum number of iterations exceeded. You found a bug with scheduledtask. Dump: " +
"candidates: {}, ".format(self.candidates) +
"current: {}, max_iterations: {}".format(current, self.max_iterations))
fraction = self.fractions(fraction_value)
if fraction is self.highest_fraction \
or self._datetimeholders_equal(result, current, self.fractions(fraction_value + 1)):
result[fraction.name] = get_biggest_value_less_or_equal_to(self.candidates[fraction_value],
current[fraction.name])
else:
result[fraction.name] = last(self.candidates[fraction_value])
if result[fraction.name] is None \
or not self._datetimeholder_valid(result, fraction) \
or not self._datetimeholders_compare(result, current, fraction) < 1: # In case with day_of_week_num
if fraction == self.highest_fraction:
return None # Can't find highest fraction match, event never happened in the past
# Decrease higher fractions on result datetime, recalculate starting from that fraction-1
fraction_value += self._increase_fraction(result, self.fractions(fraction_value + 1), -1, current) - 1
continue
fraction_value -= 1
return result
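# --- Hedged usage sketch (not part of the original module): schedule a task for
# 09:30 on the 1st and 15th of each month and query the nearest occurrences.
# The dates in the comments are expectations, not asserted output.
if __name__ == "__main__":
    task = ScheduledTask(minutes=[30], hours=[9], days=[1, 15])
    now = datetime(2021, 5, 26, 3, 0)
    print(task.get_next_time(now))      # should resolve to 2021-06-01 09:30
    print(task.get_previous_time(now))  # should resolve to 2021-05-15 09:30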
|
/scheduled_task-1.0.1.tar.gz/scheduled_task-1.0.1/scheduled_task/timeconverter.py
| 0.850453 | 0.391988 |
timeconverter.py
|
pypi
|
from datetime import datetime
from calendar import monthrange
def get_biggest_value_less_or_equal_to(iter: list or range, value):
"""Returns the biggest element from the list that is less or equal to the value. Return None if not found
"""
if type(iter) == list:
i = [x for x in iter if x <= value]
return max(i) if i else None
elif type(iter) == range:
if value in range(iter.start, iter.stop): # Value lies within this range, return step-aware value
return value - ((value - iter.start) % iter.step)
        elif value > iter.stop-1:  # value is greater than the range, return its (step-aware) last element
            return last(iter)
else: # value is less than range, return None
return None
else:
raise ValueError("iter must be of type list or range")
def get_smallest_value_greater_or_equal_to(iter: list or range, value):
"""Returns the smallest element from the list that is greater or equal to the value. Return None if not found
"""
if type(iter) == list:
i = [x for x in iter if x >= value]
return min(i) if i else None
elif type(iter) == range:
if value in range(iter.start, iter.stop): # Value lies within this range, return step-aware value
return value + (iter.step - ((value - iter.start) % iter.step)) % iter.step
elif value < iter.start: # Value is less than range start, return start
return iter.start
else: # Value is greater than range, return None
return None
else:
raise ValueError("iter must be of type list or range")
def last(iter: list or range):
"""Returns the last element from the list or range
"""
if type(iter) == list:
return iter[len(iter)-1]
elif type(iter) == range:
return iter.stop - (iter.stop - 1 - iter.start) % iter.step - 1 # Step-aware last element
else:
raise ValueError("iter must be of type list or range")
def first(iter: list or range):
"""Returns first element from the list or range
"""
if type(iter) == list:
return iter[0]
elif type(iter) == range:
return iter.start
else:
raise ValueError("iter must be of type list or range")
def num_days_in_month(year: int, month: int):
return monthrange(year, month)[1]
def weekday_num(dt: datetime):
"""Returns number of weekday in the current month. I.e. if Tuesday is first in this month, returns 0
"""
return int((dt.day - 1)/7)
def weekday_and_num_to_day(year: int, month: int, weekday_number: int, weekday: int):
"""Converts current year, month, weekday and weekday number into the day of month
"""
dt_first = datetime(year, month, 1)
dt_first_weekday = dt_first.weekday()
return 1 - dt_first_weekday + weekday + ((0 if weekday >= dt_first_weekday else 1) + weekday_number) * 7
def weekday_and_week_to_day(year: int, month: int, week: int, weekday: int):
"""Converts current year, month, weekday and week number into the day of month
"""
dt_first = datetime(year, month, 1)
dt_first_weekday = dt_first.weekday()
result = week * 7 + weekday - dt_first_weekday + 1
if result < 1 or result > num_days_in_month(year, month):
return None
else:
return result
def week_num(dt: datetime):
"""Returns week number of the given day
"""
dt_first = dt.replace(day=1)
dt_first_weekday = dt_first.weekday()
return int((dt.day + dt_first_weekday - 1) / 7)
def max_week_num(year: int, month: int):
"""Returns number of weeks (Monday to Friday) that month contains
"""
# The same thing as week number for the last day of month
return week_num(datetime(year, month, num_days_in_month(year, month)))
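# --- Hedged examples (not part of the original module) showing the step-aware
# helpers above; the expected values follow from the arithmetic in each function.
if __name__ == "__main__":
    print(get_smallest_value_greater_or_equal_to(range(0, 60, 5), 13))  # 15
    print(get_biggest_value_less_or_equal_to(range(0, 60, 5), 13))      # 10
    print(last(range(0, 60, 5)))       # 55
    print(first(range(0, 60, 5)))      # 0
    print(num_days_in_month(2021, 2))  # 28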
|
/scheduled_task-1.0.1.tar.gz/scheduled_task-1.0.1/scheduled_task/utils.py
| 0.817028 | 0.667751 |
utils.py
|
pypi
|
import os
import argparse
import xmltodict
import pandas as pd
import re
class CLIHandler:
def __init__(self):
self.columns = [
"task_path",
"task_name",
"enabled",
"hidden",
"triggers",
"exec_command",
"exec_args",
"schedule_time",
]
self.default_values_sort_by = ["task_path", "task_name"]
self.trigger_choices = [
"EventTrigger",
"TimeTrigger",
"LogonTrigger",
"BootTrigger",
"CalendarTrigger",
"SessionStateChangeTrigger",
"RegistrationTrigger",
]
self.output_format_choices = ["html", "json", "csv"]
self.default_value_output_format = "html"
parser = self.init_argparser()
self.args = parser.parse_args()
self.check_if_path_is_dir()
self.parsed_scheduled_task_output = self.parse_scheduled_task_output()
self.show_output()
if self.args.output:
self.store_output()
def init_argparser(self):
parser = argparse.ArgumentParser(
prog="Scheduled Tasks Reader",
description="Get Overview of Scheduled Tasks from the relevant registry files.",
)
parser.add_argument("dir_of_registry_files", help="Path to the directory containing the relevant registry files")
parser.add_argument(
"-o", "--output", help="Store output at specified location. It will overwrite existing files!"
)
parser.add_argument(
"-of",
"--output_format",
choices=self.output_format_choices,
metavar="",
default=self.default_value_output_format,
help=f"Define output format. Default value is: {self.default_value_output_format}.Allowed values are: {self.output_format_choices}",
)
parser.add_argument(
"-n",
"--task_names",
nargs="+",
help="Filter for array of one or more names of scheduled task (separated by space).",
)
parser.add_argument(
"-p",
"--task_paths",
nargs="+",
help="Filter for array of one or more paths of scheduled task (separated by space).",
)
parser.add_argument(
"-s",
"--sort_by",
nargs="+",
choices=self.columns,
metavar="",
default=self.default_values_sort_by,
help=f"Sort by array of one or more attributes of scheduled task (separated by space). Default values are: {self.default_values_sort_by}.Allowed values are: {self.columns}",
)
parser.add_argument(
"-t",
"--task_triggers",
nargs="+",
choices=self.trigger_choices,
metavar="",
help=f"Filter for array of one or more trigger types of scheduled task (separated by space). Allowed values are: {self.trigger_choices}",
)
parser.add_argument("--table_terminal_output", action="store_true", help="Show the output as a table, needs a wide Terminal.")
parser.add_argument("--only_hidden", action="store_true", help="Show only the hidden scheduled tasks")
parser.add_argument(
"--raw_data",
action="store_true",
help="Append the raw data from the scheduled tasks parsed from the xmls to the normal output.",
)
parser.add_argument("--version", action="version", version="%(prog)s 0.1")
return parser
def check_if_path_is_dir(self):
if not os.path.isdir(self.args.dir_of_registry_files):
raise ValueError(f"'{self.args.dir_of_registry_files}' is not a valid path of a directory")
def parse_scheduled_task_output(self):
schedule_task_parser = ScheduledTaskParser(self.args.dir_of_registry_files)
data_frame = pd.DataFrame(schedule_task_parser.scheduled_tasks)
data_frame = data_frame.sort_values(by=self.args.sort_by)
data_frame = self.filter_data_frame(data_frame)
return data_frame
def filter_data_frame(self, data_frame):
if self.args.only_hidden:
data_frame = data_frame[data_frame.hidden == True]
if self.args.task_paths:
data_frame = data_frame[data_frame.task_path.isin(self.args.task_paths)]
if self.args.task_names:
data_frame = data_frame[data_frame.task_name.isin(self.args.task_names)]
if self.args.task_triggers:
data_frame = data_frame[
data_frame.triggers.apply(
lambda triggers: any(trigger in self.args.task_triggers for trigger in triggers)
)
]
if self.args.raw_data:
data_frame = data_frame.join(pd.io.json.json_normalize(data_frame["task_data"]))
del data_frame["task_data"]
else:
data_frame = data_frame[self.columns]
return data_frame
def show_output(self):
pd.set_option("display.max_columns", None)
pd.set_option("display.expand_frame_repr", False)
pd.set_option("max_colwidth", -1)
pd.set_option("colheader_justify", "left")
if self.args.table_terminal_output:
print(self.parsed_scheduled_task_output.to_string(index=False))
else:
for task in self.parsed_scheduled_task_output.iterrows():
print(task[1].to_string())
print("===========================")
def store_output(self):
output_format = self.args.output_format
with open(self.args.output, "w") as output_file:
if output_format == "html":
this_directory = os.path.abspath(os.path.dirname(__file__))
html_template_path = os.path.join(this_directory, "html_template.html")
with open(html_template_path, "r", encoding="UTF-8") as html_template:
html_template_content = html_template.read()
html_content = html_template_content.format(
data=self.parsed_scheduled_task_output.to_html(table_id="dataframe", index=False)
)
output_file.write(html_content)
elif output_format == "json":
output_file.write(self.parsed_scheduled_task_output.to_json())
elif output_format == "csv":
output_file.write(self.parsed_scheduled_task_output.to_csv())
class ScheduleTimeParser:
def __init__(self, task_data, CalendarTrigger=True):
self.attributes = {
"schedule": None,
"dayInterval": None,
"daysOfWeek": None,
"weeksInterval": None,
"daysOfMonth": None,
"months": None,
"calenderTrigger": CalendarTrigger,
"task_data": task_data,
"executionLimit": None,
"duration": None,
"interval": None,
"stopAtEnd": None,
}
def set_time_day(self, task_data):
self.attributes["schedule"] = "ScheduleByDay"
if "DaysInterval" in task_data:
self.attributes["dayInterval"] = task_data["DaysInterval"]
def set_time_week(self, task_data):
self.attributes["schedule"] = "ScheduleByWeek"
if "WeeksInterval" in task_data:
self.attributes["weeksInterval"] = task_data["WeeksInterval"]
if "DaysOfWeek" in task_data:
self.attributes["daysOfWeek"] = list(task_data["DaysOfWeek"].keys())
def set_time_month(self, task_data):
self.attributes["schedule"] = "ScheduleByMonth"
if "DaysOfMonth" in task_data:
self.attributes["daysOfMonth"] = list(task_data["DaysOfMonth"].keys())
if "Months" in task_data:
self.attributes["months"] = list(task_data["Months"].keys())
def select_set_time(self, schedule, task_data):
if schedule == "ScheduleByDay":
self.set_time_day(task_data)
elif schedule == "ScheduleByWeek":
self.set_time_week(task_data)
elif schedule == "ScheduleByMonth":
self.set_time_month(task_data)
def set_trigger_time(self):
if "ExecutionTimeLimit" in self.attributes["task_data"]:
self.attributes["executionLimit"] = self.attributes["task_data"]["ExecutionTimeLimit"]
if "Repetition" in self.attributes["task_data"]:
if "Duration" in self.attributes["task_data"]["Repetition"]:
self.attributes["duration"] = self.attributes["task_data"]["Repetition"]["duration"]
if "Interval" in self.attributes["task_data"]["Repetition"]:
self.attributes["interval"] = self.attributes["task_data"]["Repetition"]["Interval"]
if "StopAtDurationEnd" in self.attributes["task_data"]["Repetition"]:
self.attributes["stopAtEnd"] = self.attributes["task_data"]["Repetition"]["StopAtDurationEnd"]
def get_schedule_time(self):
if self.attributes["calenderTrigger"]:
pattern = "(?P<schedule>ScheduleBy.*)"
for tag in self.attributes["task_data"]:
match = re.match(pattern, tag)
if match:
schedule = match.group("schedule")
self.select_set_time(schedule, self.attributes["task_data"][schedule])
elif not self.attributes["calenderTrigger"]:
self.set_trigger_time()
def return_information(self):
self.get_schedule_time()
res = {}
self.attributes["calenderTrigger"] = None
for attribute, value in self.attributes.items():
if value and attribute != "task_data":
res[attribute] = value
return res
class ScheduledTaskParser:
def __init__(self, dir_path):
self.scheduled_task_reader = ScheduledTaskReader(dir_path)
self.scheduled_tasks = self.scheduled_task_reader.scheduled_tasks
self.add_additional_information()
def add_additional_information(self):
for index, schedule_task in enumerate(self.scheduled_tasks):
schedule_task_data = schedule_task["task_data"]
enabled = self.get_enabled(schedule_task_data)
self.scheduled_tasks[index]["enabled"] = enabled
self.scheduled_tasks[index]["schedule_time"] = self.get_schedule_time(schedule_task_data)
self.scheduled_tasks[index]["hidden"] = self.get_hidden_flag(schedule_task_data)
self.scheduled_tasks[index]["triggers"] = self.get_triggers(schedule_task_data)
self.scheduled_tasks[index]["exec_command"] = self.get_exec_command(schedule_task_data)
self.scheduled_tasks[index]["exec_args"] = self.get_exec_args(schedule_task_data)
def get_enabled(self, task_data):
return "Enabled" in task_data["Settings"] and task_data["Settings"]["Enabled"] == "true"
def get_schedule_time(self, task_data):
if "Triggers" in task_data and task_data["Triggers"]:
if "CalendarTrigger" in task_data["Triggers"]:
if (
"Enabled" in task_data["Triggers"]["CalendarTrigger"]
and task_data["Triggers"]["CalendarTrigger"]["Enabled"] == "true"
) or "Enabled" not in task_data["Triggers"]["CalendarTrigger"]:
schedule_time = ScheduleTimeParser(task_data["Triggers"]["CalendarTrigger"], True)
return schedule_time.return_information()
if "TimeTrigger" in task_data["Triggers"]:
if (
"Enabled" in task_data["Triggers"]["TimeTrigger"]
and task_data["Triggers"]["TimeTrigger"]["Enabled"] == "true"
) or "Enabled" not in task_data["Triggers"]["TimeTrigger"]:
schedule_time = ScheduleTimeParser(task_data["Triggers"]["TimeTrigger"], False)
return schedule_time.return_information()
return "N/A"
def get_hidden_flag(self, task_data):
if "Hidden" in task_data["Settings"]:
return task_data["Settings"]["Hidden"] == "true"
return False
def get_triggers(self, task_data):
triggers = []
if "Triggers" in task_data and task_data["Triggers"]:
for trigger, data in task_data["Triggers"].items():
if data and "Enabled" in data and data["Enabled"] == "true":
triggers.append(trigger)
elif data and "Enabled" not in data:
triggers.append(trigger)
elif not data:
triggers.append(trigger)
return triggers
def get_exec_command(self, task_data):
if "Actions" in task_data and "Exec" in task_data["Actions"] and "Command" in task_data["Actions"]["Exec"]:
return task_data["Actions"]["Exec"]["Command"]
return ""
def get_exec_args(self, task_data):
if "Actions" in task_data and "Exec" in task_data["Actions"] and "Arguments" in task_data["Actions"]["Exec"]:
return task_data["Actions"]["Exec"]["Arguments"]
return ""
class ScheduledTaskReader:
def __init__(self, dir_path):
self.dir_path = dir_path
self.scheduled_tasks = self.get_scheduled_tasks()
def get_scheduled_tasks(self):
"""iterate through every file in the directory and call get_scheduled_task_information"""
scheduled_tasks = []
for path, subdirs, files in os.walk(self.dir_path):
for task_name in files:
scheduled_tasks.append(self.get_scheduled_task_information(path, task_name))
return scheduled_tasks
def get_scheduled_task_information(self, path, task_name):
full_path = os.path.join(path, task_name)
with open(full_path, "r", encoding="utf-16") as file:
task_data = xmltodict.parse(file.read())["Task"]
task_path = os.path.relpath(path, self.dir_path)
return {"task_path": task_path, "task_name": task_name, "task_data": task_data}
def main():
CLIHandler()
if __name__ == "__main__":
main()
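# --- Hedged usage note (not part of the original script): example invocations.
# The directory ./registry_tasks is a placeholder for a folder containing the
# exported scheduled-task XML files.
#
#   python scheduled_tasks_reader.py ./registry_tasks
#   python scheduled_tasks_reader.py ./registry_tasks -of json -o tasks.json
#   python scheduled_tasks_reader.py ./registry_tasks --only_hidden -t BootTrigger LogonTrigger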
|
/scheduled_tasks_reader-0.1.1.tar.gz/scheduled_tasks_reader-0.1.1/scheduled_tasks_reader.py
| 0.571049 | 0.215691 |
scheduled_tasks_reader.py
|
pypi
|
from pathlib import Path
from typing import Optional
from scheduler_tools.types import PrefDict
class PrefectPreferences:
"""
This class handles reading of a ~/.prefect/ssh.json file. This file has settings for the
name of the gateway, the username to authenticate with, the path to the local ssh identity
file.
"""
def __init__(self, prefs: PrefDict):
"""
:param prefs:
"""
        p_localfolder = Path(prefs['localfolder'])
        if not p_localfolder.exists():
            p_localfolder.mkdir(parents=True)
        self._path = p_localfolder
        self._data = prefs
def default_path(self) -> Path:
return self._path
@property
def gateway_url(self):
return self._data['gateway']['url']
@property
def username(self):
return self._data['gateway']['user']
@property
def identity_file(self):
return self._data['gateway']['identityfile']
@property
def known_hosts(self):
return Path('~/.ssh/known_hosts').expanduser()
def write_ssh_pid(self, pid):
with open(str(self.ssh_pid_path()), 'w') as fp:
fp.write(str(pid))
    def read_ssh_pid(self) -> Optional[str]:
pid = None
if self.ssh_pid_path().expanduser().exists():
pid = open(str(self.ssh_pid_path().expanduser()), 'r').read()
return pid
def remove_ssh_pid(self):
if self.ssh_pid_path().exists():
self.ssh_pid_path().unlink()
def ssh_pid_path(self):
return self.default_path().expanduser() / "ssh_pid.txt"
def cluster_job_id_path(self):
return self.default_path().expanduser() / "cluster_job_id.txt"
    def read_prefect_job_id(self) -> Optional[str]:
job_id = None
if self.cluster_job_id_path().exists():
job_id = open(str(self.cluster_job_id_path().expanduser()), 'r').read()
return job_id
def write_prefect_job_id(self, job_id):
print(f"jobid: {job_id}")
with open(str(self.cluster_job_id_path().expanduser()), 'w') as fp:
fp.write(str(job_id))
def remove_prefect_job_id(self):
if self.cluster_job_id_path().exists():
self.cluster_job_id_path().unlink()
def cluster_pid_path(self):
# this needs to be made dynamic
return self.default_path().relative_to(Path().home()) / "pidfile"
@property
def local_dask_port(self):
return self._data['dask_port']
@property
def local_dashboard_port(self):
return self._data['dashboard_port']
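# --- Hedged usage sketch (not part of the original module): the expected shape of
# the prefs mapping is inferred from the attribute accesses above; all values are
# illustrative placeholders. Note that __init__ creates prefs['localfolder'] (as
# given, without tilde expansion) when it does not already exist.
if __name__ == "__main__":
    example_prefs = {
        'localfolder': '/tmp/prefect_prefs',      # hypothetical local state folder
        'gateway': {
            'url': 'gateway.example.org',
            'user': 'jdoe',
            'identityfile': '~/.ssh/id_rsa',
        },
        'dask_port': 8786,
        'dashboard_port': 8787,
    }
    # prefs_obj = PrefectPreferences(example_prefs)
    # prefs_obj.gateway_url       -> 'gateway.example.org'
    # prefs_obj.local_dask_port   -> 8786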
|
/scheduler_tools-0.1.5.tar.gz/scheduler_tools-0.1.5/scheduler_tools/PrefectPreferences.py
| 0.722918 | 0.268027 |
PrefectPreferences.py
|
pypi
|
<p align="center">
<a href="https://gitlab.com/DigonIO/scheduler"><img alt="scheduler" src="https://gitlab.com/DigonIO/scheduler/-/raw/master/doc/_assets/logo_name.svg" width="60%"></a>
</p>
<p>A simple in-process python scheduler library with asyncio, threading and timezone support.
Schedule tasks by their time cycles, fixed times, weekdays, dates, weights, offsets and execution
counts and automate Jobs.</p>
[](https://gitlab.com/DigonIO/scheduler)
[](https://github.com/DigonIO/scheduler)
[](https://gitlab.com/DigonIO/scheduler/-/blob/master/LICENSE)
[](https://gitlab.com/DigonIO/scheduler/-/pipelines)
[](https://gitlab.com/DigonIO/scheduler/-/pipelines)
[](https://github.com/psf/black)
[](https://pycqa.github.io/isort/)
[](https://pypi.org/project/scheduler/)
[](https://pypi.org/project/scheduler/)
[](https://pepy.tech/project/scheduler)
[](https://pepy.tech/project/scheduler)
[](https://digon.io/hyd/project/scheduler/t/master)
---
## Features
* Easy and user-friendly in-process Job scheduling
[(Quick Start)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/quick_start.html)
* Asyncio scheduler [(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/asyncio.html)
* Threading scheduler [(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/threading.html)
* Timezone compatibility [(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/timezones.html)
* Passing of parameters
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/parameters.html)
* Job prioritization
* Default linear prioritization
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/job_prioritization.html)
* User definable prioritization functions
[(Guide)](https://digon.io/hyd/project/scheduler/t/master/pages/guides/custom_prioritization.html)
* Job tagging
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/tags.html)
* Job batching
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/job_batching.html)
* Job metadata
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/metrics.html)
* Lightweight
* High test coverage
* [Online documentation](https://digon.io/hyd/project/scheduler/t/master/readme.html)
## Installation
### pip
`scheduler` can be installed directly from the PyPI repositories with:
```bash
pip install scheduler
```
Alternatively install `scheduler` from the `git`
[repository](https://gitlab.com/DigonIO/scheduler) with:
```bash
git clone https://gitlab.com/DigonIO/scheduler.git
cd scheduler
pip install .
```
### Arch Linux
The `PKGBUILD` file can be utilized from the
[Arch Build System](https://wiki.archlinux.org/title/Arch_Build_System).
Download the `PKGBUILD` file and from within the containing folder run
```console
makepkg -i
```
## Example: *How to schedule Jobs*
The following example shows how the `Scheduler` is instantiated and how basic `Job`s are created.
For advanced scheduling examples please visit the online
[documentation](https://digon.io/hyd/project/scheduler/t/master/examples.html).
[//]: # (This example is not directly included in the testing environment. Make sure to also update the corresponding test in tests/test_readme.py when updating the following example.)
```py
import datetime as dt
from scheduler import Scheduler
from scheduler.trigger import Monday, Tuesday
def foo():
print("foo")
schedule = Scheduler()
schedule.cyclic(dt.timedelta(minutes=10), foo)
schedule.minutely(dt.time(second=15), foo)
schedule.hourly(dt.time(minute=30, second=15), foo)
schedule.daily(dt.time(hour=16, minute=30), foo)
schedule.weekly(Monday(), foo)
schedule.weekly(Monday(dt.time(hour=16, minute=30)), foo)
schedule.once(dt.timedelta(minutes=10), foo)
schedule.once(Tuesday(), foo)
schedule.once(dt.datetime(year=2022, month=2, day=15, minute=45), foo)
```
A human readable overview of the scheduled jobs can be created with a simple `print` statement:
```py
print(schedule)
```
```text
max_exec=inf, tzinfo=None, priority_function=linear_priority_function, #jobs=9
type function / alias due at due in attempts weight
-------- ---------------- ------------------- --------- ------------- ------
MINUTELY foo() 2021-05-26 03:55:15 0:00:14 0/inf 1
CYCLIC foo() 2021-05-26 04:05:00 0:09:59 0/inf 1
ONCE foo() 2021-05-26 04:05:00 0:09:59 0/1 1
HOURLY foo() 2021-05-26 04:30:15 0:35:14 0/inf 1
DAILY foo() 2021-05-26 16:30:00 12:34:59 0/inf 1
WEEKLY foo() 2021-05-31 00:00:00 4 days 0/inf 1
WEEKLY foo() 2021-05-31 16:30:00 5 days 0/inf 1
ONCE foo() 2021-06-01 00:00:00 5 days 0/1 1
ONCE foo() 2022-02-15 00:45:00 264 days 0/1 1
```
Executing pending `Job`s periodically can be achieved with a simple loop:
```py
import time
while True:
schedule.exec_jobs()
time.sleep(1)
```
## Documentation
View the API documentation [online](https://digon.io/hyd/project/scheduler/t/master/readme.html).
## Sponsor
<br>
<div align="center">
<a href="https://digon.io">
<img alt="Digon.IO GmbH - IT Dienstleister Wuppertal Softwareentwicklung und Datenwissenschaften" src="https://digon.io/static/landing/img/digon_name_right_grey.svg" width="50%">
</a>
</div>
<br>
<div align="center">
We would like to thank Digon.IO for sponsoring the development of this library.
Digon.IO is building bridges between data science and software development.
They enable companies to automate and accelerate their data-driven processes.
Please visit their website: <a href="https://digon.io/">digon.io</a>
</div>
## License
This free and open source software (FOSS) is published under the [LGPLv3 license](https://www.gnu.org/licenses/lgpl-3.0.en.html).
|
/scheduler-0.8.4.tar.gz/scheduler-0.8.4/README.md
| 0.635562 | 0.959421 |
README.md
|
pypi
|
from timeit import default_timer
__all__ = ["Scheduler"]
class Scheduler(object):
def __init__(self):
self.remove_all()
def add(self, interval, count, callback, *args, **kwargs):
"""
Append a task to the scheduler and return the assigned ID.
Arguments:
interval -- Interval in which the callback will be executed (in seconds).
count -- Maximum number of times the callback will be executed.
The task will be removed after, at least, interval*count seconds.
If count is 0 the callback will be executed infinitely.
callback -- The function to be executed (with *args and **kwargs if any).
"""
if count < 0:
raise ValueError("count must be greater than or equal to 0.")
task = [0, interval, count, callback, args, kwargs]
self._tasks.append(task)
return id(task)
def remove(self, *tasks):
"""
        Remove one or more tasks from the scheduler, identified by the IDs
        returned from add(). Attempting to remove a nonexistent task does
        nothing.
        Example -- Scheduler.remove(task_id_1, task_id_2, ...)
"""
for task in self._tasks:
if id(task) in tasks:
self._tasks.remove(task)
def remove_all(self):
"""Remove all tasks from the scheduler."""
self._tasks = []
def run(self):
completed_tasks = []
for i, task in enumerate(self._tasks):
prev_ticks, interval, count, callback, args, kwargs = task
if default_timer() - prev_ticks >= interval:
callback(*args, **kwargs)
if count > 0:
count -= 1
if count == 0:
# Do not change indices until all tasks
# have been executed.
completed_tasks.append(id(task))
continue
else:
self._tasks[i][2] = count
# Might take a while to execute the callback,
# so get ticks again.
self._tasks[i][0] = default_timer()
self.remove(*completed_tasks)
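# --- Hedged usage sketch (not part of the original module): register two callbacks
# and drive the scheduler from a polling loop. run() only fires callbacks whose
# interval has elapsed, so it has to be called repeatedly.
if __name__ == "__main__":
    import time

    def tick(label):
        print("tick:", label)

    sched = Scheduler()
    sched.add(1.0, 0, tick, "every second, forever")   # count=0 -> unlimited
    limited_id = sched.add(2.0, 3, tick, "at most three times")
    for _ in range(50):
        sched.run()
        time.sleep(0.1)
    sched.remove(limited_id)  # no-op if the task already completed and was removed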
|
/scheduler2-1.0.2a1.zip/scheduler-1.0.0a1/scheduler/scheduler.py
| 0.787523 | 0.252799 |
scheduler.py
|
pypi
|
import argparse
from datetime import datetime
import json
import logging
from schedules_tools import jsondate, discovery
from schedules_tools.converter import ScheduleConverter
from schedules_tools.models import Task, Schedule
import sys
log = logging.getLogger(__name__)
REPORT_NO_CHANGE = ''
REPORT_ADDED = '_added_'
REPORT_REMOVED = '_removed_'
REPORT_CHANGED = '_changed_'
REPORT_PREFIX_MAP = {
REPORT_ADDED: '[+]',
REPORT_REMOVED: '[-]',
REPORT_CHANGED: '[M]',
REPORT_NO_CHANGE: 3 * ' ',
}
NAME_SIM_THRESHOLD = 0.8
TASK_SCORE_THRESHOLD = 0.45
NAME_SIM_WEIGHT = 0.5
TASK_POS_WEIGHT = 0.5
def strings_similarity(str1, str2, winkler=True, scaling=0.1):
"""
Find the Jaro-Winkler distance of 2 strings.
https://en.wikipedia.org/wiki/Jaro-Winkler_distance
:param winkler: add winkler adjustment to the Jaro distance
:param scaling: constant scaling factor for how much the score is adjusted
upwards for having common prefixes. Should not exceed 0.25
"""
if str1 == str2:
return 1.0
def num_of_char_matches(s1, len1, s2, len2):
count = 0
transpositions = 0 # number of matching chars w/ different sequence order
limit = int(max(len1, len2) / 2 - 1)
for i in range(len1):
start = i - limit
if start < 0:
start = 0
end = i + limit + 1
if end > len2:
end = len2
index = s2.find(s1[i], start, end)
if index > -1: # found common char
count += 1
if index != i:
transpositions += 1
return count, transpositions
len1 = len(str1)
len2 = len(str2)
num_of_matches, transpositions = num_of_char_matches(str1, len1, str2, len2)
if num_of_matches == 0:
return 0.0
m = float(num_of_matches)
t = transpositions / 2.0
dj = (m / float(len1) + m / float(len2) + (m - t) / m) / 3.0
if winkler:
length = 0
# length of common prefix at the start of the string (max = 4)
max_length = min(
len1,
len2,
4
)
while length < max_length and str1[length] == str2[length]:
length += 1
return dj + (length * scaling * (1.0 - dj))
return dj
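# --- Hedged illustration (not part of the original module): for the classic
# Jaro-Winkler pair "martha"/"marhta" this implementation yields roughly 0.961
# (Jaro distance ~0.944 plus the Winkler common-prefix bonus), while identical
# strings return exactly 1.0:
#   strings_similarity("martha", "marhta")                 # ~0.961
#   strings_similarity("martha", "marhta", winkler=False)  # ~0.944
#   strings_similarity("same", "same")                     # 1.0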
class ScheduleDiff(object):
result = []
hierarchy_attr = 'tasks'
subtree_hash_attr_name = 'subtree_hash'
""" Default list of attributes used to compare 2 tasks. """
default_tasks_match_attrs = ['name', 'dStart', 'dFinish']
def __init__(self, schedule_a, schedule_b, trim_time=False, extra_compare_attributes=None):
self.schedule_a = schedule_a
self.schedule_b = schedule_b
self.trim_time = trim_time
self.attributes_to_compare = self.default_tasks_match_attrs
if extra_compare_attributes:
# avoid using += to not modify class-level list
self.attributes_to_compare = self.attributes_to_compare + list(extra_compare_attributes)
self.result = self._diff()
def __str__(self):
return self.result_to_str()
def _get_subtree(self, item):
return getattr(item, self.hierarchy_attr)
def result_to_str(self, items=None, level=0):
""" Textual representation of the diff. """
res = ''
if items is None:
items = self.result
schedule = Schedule()
for item in items:
subtree = item['subtree']
state = item['item_state']
if state in [REPORT_CHANGED, REPORT_ADDED]:
task = item['right']
elif state is REPORT_REMOVED:
task = item['left']
else:
task = item['both']
task_obj = Task.load_from_dict(task, schedule)
res += '{} {}{}\n'.format(REPORT_PREFIX_MAP[state], level * ' ', str(task_obj))
if subtree:
res += self.result_to_str(subtree, level + 2)
return res
def _create_report(self,
item_state,
left=None,
right=None,
both=None,
subtree=[],
changed_attrs=[]):
"""
Returns a dictionary representing a possible change.
{
left: Task or None,
right: Task or None,
both: used instead of left and right, when the task are equal,
subtree: List of reports from the child Tasks,
changed_attr: List of changed attributes,
item_state: Type of change
}
"""
if both:
report = {
'both': both.dump_as_dict(recursive=False),
'subtree': subtree,
'changed_attrs': changed_attrs,
'item_state': item_state
}
else:
# No need to keep the whole structure,
# child tasks will be placed in report['tasks']
if left is not None:
left = left.dump_as_dict(recursive=False)
if right is not None:
right = right.dump_as_dict(recursive=False)
report = {
'left': left,
'right': right,
'subtree': subtree,
'changed_attrs': changed_attrs,
'item_state': item_state,
}
return report
def _set_subtree_items_state(self, items, state):
"""
Set the given state recursively on the subtree items
"""
def create_report(item):
kwargs = {
'subtree': self._set_subtree_items_state(self._get_subtree(item), state)
}
if state == REPORT_NO_CHANGE:
kwargs['both'] = item
elif state == REPORT_ADDED:
kwargs['right'] = item
elif state == REPORT_REMOVED:
kwargs['left'] = item
return self._create_report(state, **kwargs)
return [create_report(item) for item in items]
def get_changed_attrs(self, task_a, task_b):
"""
Compare 2 tasks
Uses attributes defined in `self.attributes_to_compare` and subtree hash and
returns a list of atts that don't match.
"""
changed_attributes = [attr for attr in self.attributes_to_compare
if not self._compare_tasks_attributes(task_a, task_b, attr)]
if task_a.get_subtree_hash(self.attributes_to_compare) \
!= task_b.get_subtree_hash(self.attributes_to_compare):
changed_attributes.append(self.subtree_hash_attr_name)
return changed_attributes
def _compare_tasks_attributes(self, task_a, task_b, attr_name):
"""
Compares tasks attributes.
Trims time from datetime objects if self.trim_time is set.
"""
attribute_a = getattr(task_a, attr_name)
attribute_b = getattr(task_b, attr_name)
# no need to compare empty values strictly
if not attribute_a and not attribute_b:
return True
if self.trim_time:
if isinstance(attribute_a, datetime):
attribute_a = attribute_a.date()
if isinstance(attribute_b, datetime):
attribute_b = attribute_b.date()
if isinstance(attribute_a, list):
attribute_a = sorted(attribute_a)
if isinstance(attribute_b, list):
attribute_b = sorted(attribute_b)
return attribute_a == attribute_b
def find_best_match(self, t1, possible_matches, start_at_index=0):
"""
Finds the best match for the given task in the list of possible matches.
Returns the index of the best match and a dict
with a state suggestion and list of changed attrs.
"""
match_index = None
best_match = {
'state': REPORT_REMOVED,
'changes': [],
'name_score': 0,
'score': TASK_SCORE_THRESHOLD
}
if start_at_index > 0:
possible_matches = possible_matches[start_at_index:]
for i, t2 in enumerate(possible_matches, start_at_index):
res = self.eval_tasks(t1, t2, i, name_threshold=best_match['name_score'])
if (res['state'] is REPORT_CHANGED
and res['score'] > best_match['score']):
match_index = i
best_match = res
if res['state'] is REPORT_NO_CHANGE:
match_index = i
best_match = res
break
return match_index, best_match
def _task_position_score(self, index):
return 1.0 / (2 * (index + 1))
def _task_score(self, name_score, position_score):
weight_sum = NAME_SIM_WEIGHT + TASK_POS_WEIGHT
name_score *= NAME_SIM_WEIGHT
position_score *= TASK_POS_WEIGHT
return (name_score + position_score) / weight_sum
def eval_tasks(self, t1, t2, t2_index, name_threshold=NAME_SIM_THRESHOLD):
name_score = 0.0
position_score = 0.0
changed_attrs = self.get_changed_attrs(t1, t2)
# different names
if 'name' in changed_attrs:
t1_subtree = t1.get_subtree_hash(self.attributes_to_compare)
t2_subtree = t2.get_subtree_hash(self.attributes_to_compare)
if t1_subtree and t2_subtree:
if t1_subtree == t2_subtree:
state = REPORT_CHANGED
position_score = 1.0
else:
name_score = strings_similarity(t1.name, t2.name)
if (name_score > name_threshold
and len(changed_attrs) < len(self.attributes_to_compare)):
state = REPORT_CHANGED
position_score = self._task_position_score(t2_index)
else:
state = REPORT_REMOVED
# no subtrees
else:
name_score = strings_similarity(t1.name, t2.name, winkler=False)
if name_score > name_threshold:
state = REPORT_CHANGED
position_score = self._task_position_score(t2_index)
else:
state = REPORT_REMOVED
# names are equal
else:
name_score = 1.0
if (not changed_attrs
or (len(changed_attrs) == 1
and self.subtree_hash_attr_name in changed_attrs)):
state = REPORT_NO_CHANGE
else:
state = REPORT_CHANGED
position_score = 1.0
return {
'state': state,
'changes': changed_attrs,
'name_score': name_score,
'position_score': position_score,
'score': self._task_score(name_score, position_score)
}
def _diff(self, tasks_a=None, tasks_b=None):
if tasks_a is None:
tasks_a = self.schedule_a.tasks
if tasks_b is None:
tasks_b = self.schedule_b.tasks
res = []
last_b_index = 0
# shortcut to create a report for an added task
def report_task_added(index, recursive=True):
task = tasks_b[index]
subtree = self._get_subtree(task)
if recursive:
subtree = self._set_subtree_items_state(subtree, REPORT_ADDED)
return self._create_report(REPORT_ADDED, right=task, subtree=subtree)
for task in tasks_a:
match_index, match = self.find_best_match(task, tasks_b, start_at_index=last_b_index)
report = {}
if match_index is None:
subtree = self._set_subtree_items_state(self._get_subtree(task), REPORT_REMOVED)
report = self._create_report(REPORT_REMOVED, left=task, subtree=subtree)
else:
# ALL elements between last_b_index and match_index => ADDED
res.extend([report_task_added(k) for k in range(last_b_index, match_index)])
# exact match => NO CHANGE
if not match['changes']:
subtree = self._set_subtree_items_state(self._get_subtree(task), match['state'])
report_kwargs = {'both': task, 'subtree': subtree}
# structural change => CHANGED / NO CHANGE
elif self.subtree_hash_attr_name in match['changes']:
# process child tasks
subtree = self._diff(
self._get_subtree(task),
self._get_subtree(tasks_b[match_index])
)
if len(match['changes']) > 1:
report_kwargs = {
'left': task,
'right': tasks_b[match_index],
'subtree': subtree
}
else:
report_kwargs = {
'both': task,
'subtree': subtree
}
# no structural changes => CHANGED
else:
subtree = self._set_subtree_items_state(
self._get_subtree(tasks_b[match_index]), REPORT_NO_CHANGE)
report_kwargs = {
'left': task,
'right': tasks_b[match_index],
'subtree': subtree
}
report = self._create_report(match['state'],
changed_attrs=match['changes'],
**report_kwargs)
last_b_index = match_index + 1
res.append(report)
# remaining tasks => ADDED
res.extend([report_task_added(k) for k in range(last_b_index, len(tasks_b))])
return res
def dump_json(self, **kwargs):
def _encoder(obj):
if isinstance(obj, Task):
return obj.dump_as_dict()
return jsondate._datetime_encoder(obj)
kwargs['default'] = _encoder
return json.dumps(self.result, **kwargs)
def setup_logging(level):
log_format = '%(name)-10s %(levelname)7s: %(message)s'
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(level)
formatter = logging.Formatter(log_format)
sh.setFormatter(formatter)
# setup root logger
inst = logging.getLogger('')
inst.setLevel(level)
inst.addHandler(sh)
def main():
setup_logging(logging.INFO)
parser = argparse.ArgumentParser(
description='Tool to show differences between two schedules.')
parser.add_argument('--simple-diff',
help='Simple comparison between two schedules.',
action='store_true',
default=False)
parser.add_argument(
'--handlers-path',
help='Add python-dot-notation path to discover handlers (needs to '
'be python module), can be called several times '
             '(conflicting names will be overridden - the last '
'implementation will be used)',
action='append',
default=[])
parser.add_argument('--whole-days',
help='Compare just date part of timestamp (will '
'ignore differences in time)',
action='store_true',
default=False)
parser.add_argument('left')
parser.add_argument('right')
args = parser.parse_args()
for path in args.handlers_path:
discovery.search_paths.append(path)
left = ScheduleConverter()
left.import_schedule(args.left)
right = ScheduleConverter()
right.import_schedule(args.right)
if args.simple_diff:
diff_res = left.schedule.diff(right.schedule, whole_days=args.whole_days)
else:
diff_res = ScheduleDiff(left.schedule, right.schedule)
if diff_res:
print(diff_res)
sys.exit(1)
if __name__ == '__main__':
main()
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/diff.py
| 0.547222 | 0.207958 |
diff.py
|
pypi
|
# Test as "python -m schedules_tools.batches.schedule_batch"
import argparse
import re
from schedules_tools.batches.utils import initialize_ss_handler, load_template
from schedules_tools.models import Task
from smartsheet.models import Row, Cell, ObjectValue, PredecessorList, Predecessor, Duration
GA_NAME_REGEX = re.compile(r'^GA( Release)?$')
BATCH_NAME_REGEX = re.compile(r'^Batch update ([0-9]+)')
class BatchError(Exception):
pass
def construct_task(name, duration=None):
"""
makes an instance of schedule_tools Task
"""
task = Task()
task.name = name
task.dStart = None
task.dFinish = None
if duration:
task.duration = duration
elif duration == 0:
task.milestone = True
return task
def build_columns_map(columns):
result = {}
for column in columns:
if not column.title:
continue
column_name = column.title.lower()
if column_name in ('task', 'task name'):
result['name'] = column.index
elif column_name in ('start', 'start date'):
result['start'] = column.index
elif column_name in ('finish', 'due', 'end date'):
result['finish'] = column.index
missing_columns = {'name', 'start', 'finish'} - set(result.keys())
if missing_columns:
raise BatchError(f'Couldn\'t locate required columns: {missing_columns}')
return result
def add_batch(handle, template):
handler = initialize_ss_handler(handle)
columns_map = build_columns_map(handler.sheet.columns)
parsed_rows = list(
map(
lambda x: parse_row(x, columns_map),
handler.sheet.rows
)
)
# finding relevant rows
parent_row = find_parent_row(parsed_rows, template['parent'])
if not parent_row:
raise BatchError(f'Parent row "{template["parent"]}" not found.')
if template.get('first'):
predecessor_row = find_ga_row(parsed_rows)
batch_number = 1
batch_task_export_kwargs = {'to_top': True}
else:
latest_batch_row, latest_batch_number = find_latest_batch_row(
parsed_rows,
parent_row['id']
)
predecessor_row = find_predecessor_row_from_batch(
parsed_rows,
latest_batch_row['id'],
template['predecessor-task-name']
)
batch_number = latest_batch_number + 1
batch_task_export_kwargs = {'sibling_id': latest_batch_row['id']}
batch_name = 'Batch update %d' % batch_number
if 'type' in template:
batch_name = '%s %s' % (batch_name, template['type'])
# adding main batch task
batch_task = construct_task(batch_name)
batch_row_id = handler.export_task(
batch_task,
parent_id=parent_row['id'],
**batch_task_export_kwargs
).id
# exporting batch tasks and mapping them to set dependencies later
# can't set dependencies right away because task
# dependency might not be in the schedule yet
task_id_to_row = {}
for task_id, task_data in template['tasks'].items():
st_task = construct_task(task_data['name'], duration=task_data['duration'])
task_export_row = handler.export_task(st_task, batch_row_id)
task_id_to_row[task_id] = parse_row(task_export_row, columns_map)
# setting dependencies
for task_id, task_data in template['tasks'].items():
if 'dependency' not in task_data:
continue
pred_list = PredecessorList()
pred = Predecessor()
dependency_dict = task_data['dependency']
if dependency_dict['to'] == 'predecessor':
pred.row_id = predecessor_row['id']
else:
pred.row_id = task_id_to_row[int(dependency_dict['to'])]['id']
pred.type = dependency_dict.get('type') or 'FS'
if dependency_dict['lag_amount']:
lag_duration = Duration()
lag_duration.negative = dependency_dict['lag_sign'] == '-'
lag_amount = int(dependency_dict['lag_amount'])
if dependency_dict['lag_type'] == 'd':
lag_duration.days = lag_amount
else:
lag_duration.weeks = lag_amount
pred.lag = lag_duration
pred_list.predecessors = [pred]
dependency_cell = Cell()
dependency_cell.column_id = handler._sheet_columns['predecessors']
dependency_cell.object_value = ObjectValue()
dependency_cell.object_value.object_type = "PREDECESSOR_LIST"
dependency_cell.object_value = pred_list
task_row = task_id_to_row[task_id]
task_update_row = Row()
task_update_row.id = task_row['id']
task_update_row.cells.append(dependency_cell)
handler.client.Sheets.update_rows(
handler.handle,
[task_update_row]
)
def parse_row(row, columns_map):
"""
converts smartsheet row into a dict
"""
row_dict = row.to_dict()
cells = row_dict['cells']
result = {
'id': row_dict['id'],
'row_number': row_dict['rowNumber'],
'parent_id': row_dict.get('parentId'),
'name': cells[columns_map['name']].get('value'),
'date_start': cells[columns_map['start']].get('value'),
'date_finish': cells[columns_map['finish']].get('value'),
}
return result
def find_parent_row(parsed_rows, parent_name):
"""
finds a parent row by a given name
"""
for row in parsed_rows:
task_name = row['name']
if not task_name:
continue
if task_name == parent_name:
return row
return None
def find_latest_batch_row(parsed_rows, batch_parent_row_id):
"""
finds latest batch in the schedule
"""
children_rows = filter(
lambda x: x['parent_id'] == batch_parent_row_id,
parsed_rows
)
latest_batch_row = None
latest_batch_number = None
for row in children_rows:
batch_regex_match = BATCH_NAME_REGEX.match(row['name'])
if batch_regex_match:
batch_number = int(batch_regex_match.groups()[0])
if not latest_batch_number or batch_number > latest_batch_number:
latest_batch_row = row
latest_batch_number = batch_number
return latest_batch_row, latest_batch_number
def find_predecessor_row_from_batch(parsed_rows, batch_row_id, predecessor_name):
"""
finds a relevant predecessor row in a batch
"""
batch_rows = filter(
lambda x: x['parent_id'] == batch_row_id,
parsed_rows
)
for row in batch_rows:
if row['name'] == predecessor_name:
return row
return None
def find_ga_row(parsed_rows):
"""
finds GA in the schedule
"""
for row in parsed_rows:
if GA_NAME_REGEX.match(row['name']):
return row
def main():
parser = argparse.ArgumentParser(
description='Add a batch to SmartSheet schedule',
epilog="""
Requires SmartSheet API token in SMARTSHEET_API_TOKEN env variable.
It's possible to use custom batch templates by specifying BATCHES_TEMPLATE_DIR env variable.
""",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('template',
                        help='template name',
type=str,)
parser.add_argument('handle',
help='SmartSheet handle (URL)',
type=str,)
args = parser.parse_args()
template = load_template(args.template)
add_batch(args.handle, template)
if __name__ == '__main__':
main()
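# --- Hedged usage note (not part of the original module): example invocation with
# a placeholder template name and SmartSheet URL.
#
#   export SMARTSHEET_API_TOKEN=<token>
#   python -m schedules_tools.batches.schedule_batch my_batch_template \
#       "https://app.smartsheet.com/sheets/XXXX"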
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/batches/schedule_batch.py
| 0.667256 | 0.295014 |
schedule_batch.py
|
pypi
|
from schedules_tools.schedule_handlers import ScheduleHandlerBase
import logging
from lxml.html import etree
log = logging.getLogger(__name__)
css = """
a[href=""] {display:none}
table.schedule {
border-collapse: collapse;
}
table.schedule th, table.schedule td {
border: 2px solid black;
padding: 3px 5px;
}
table.schedule th {
background-color: #a5c2ff;
}
table.schedule td {
background-color: #f3ebae;
}
table.schedule td.parent-task {
font-weight: bold;
}
table.schedule td.date {
font-size: 90%;
white-space: nowrap;
text-align: right;
}
table.schedule td.duration {
text-align: right;
}
table.schedule td div.note {
font-size: 80%;
}
"""
class ScheduleHandler_html(ScheduleHandlerBase):
provide_export = True
handle_deps_satisfied = True
default_export_ext = 'html'
def __init__(self, *args, **kwargs):
super(ScheduleHandler_html, self).__init__(*args, **kwargs)
if not self.options.get('date_format', False):
self.options['date_format'] = '%a %Y-%m-%d'
@classmethod
def is_valid_source(cls, handle=None):
return False
    def _export_task(self, e_table, task, hierarchy_parent='',
                     hierarchy_index=''):
        e_tr = etree.SubElement(e_table, 'tr')
        e_td = etree.SubElement(e_tr, 'td')
        curr_hierarchy_index = str(hierarchy_parent)
        if hierarchy_index:
            curr_hierarchy_index += '.' + str(hierarchy_index)
        e_td.text = curr_hierarchy_index
padding = (task.level - 1) * float(self.options.get('html_level_indent', 1))
e_td = etree.SubElement(e_tr, 'td',
style='padding-left: {}em'.format(padding))
e_td.text = task.name
if len(task.tasks):
e_td.attrib['class'] = 'parent-task'
if task.note:
e_note = etree.SubElement(e_td, 'div')
e_note.attrib['class'] = 'note'
e_note.text = task.note
if task.link:
e_div = etree.SubElement(e_td, 'div')
e_link = etree.SubElement(e_div, 'a')
e_link.attrib['href'] = task.link
e_link.text = task.link
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'date'
e_td.text = str(task.dStart.strftime(self.options['date_format']))
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'date'
e_td.text = str(task.dFinish.strftime(self.options['date_format']))
duration = task.dFinish - task.dStart
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'duration'
e_td.text = str(duration.days)
for index, task in enumerate(task.tasks):
self._export_task(e_table, task, curr_hiearchy_index, index + 1)
# Schedule
def export_schedule(self, out_file=None):
e_html = etree.Element('html')
e_head = etree.SubElement(e_html, 'head')
etree.SubElement(e_head, 'meta', charset="utf-8")
if self.options.get('html_title', False):
title = self.options['html_title']
else:
title = self.schedule.name
e_title = etree.SubElement(e_head, 'title')
e_title.text = title
if self.options.get('html_css_href', False):
etree.SubElement(e_head,
'link',
type='text/css',
rel='stylesheet',
href=self.options['html_css_href']
)
else:
e_style = etree.SubElement(e_head, 'style', type='text/css')
e_style.text = css
e_body = etree.SubElement(e_html, 'body')
e_h1 = etree.SubElement(e_body, 'h1')
e_h1.text = title
if self.options.get('html_table_header', False):
e_body.append(etree.fromstring(self.options['html_table_header']))
e_table = etree.SubElement(e_body, 'table', attrib={'align': 'center',
'class': 'schedule'})
e_tr_head = etree.SubElement(e_table, 'tr')
        head_columns = ['Hierarchy Index', 'Name', 'Start', 'End', 'Duration']
for column in head_columns:
e_th_head = etree.SubElement(e_tr_head, 'th')
e_th_head.text = column
for index, task in enumerate(self.schedule.tasks):
self._export_task(e_table, task, index + 1)
if self.options.get('html_table_footer', False):
e_body.append(etree.fromstring(self.options['html_table_footer']))
etree_return = etree.ElementTree(e_html)
if out_file:
etree_return.write(out_file, pretty_print=True, encoding="utf-8",
xml_declaration=False)
        return etree.tostring(etree_return, pretty_print=True, encoding='unicode')
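# Illustrative sketch (not part of the original module): exporting a tiny
# two-task schedule.  The Schedule/Task stand-ins below are hypothetical
# SimpleNamespace objects that carry only the attributes _export_task reads
# (name, level, tasks, note, link, dStart, dFinish).
def _example_export_html(out_file='schedule.html'):
    from datetime import datetime
    from types import SimpleNamespace
    child = SimpleNamespace(name='Write docs', level=2, tasks=[], note='', link='',
                            dStart=datetime(2023, 1, 2), dFinish=datetime(2023, 1, 5))
    root = SimpleNamespace(name='Release', level=1, tasks=[child], note='', link='',
                           dStart=datetime(2023, 1, 1), dFinish=datetime(2023, 1, 10))
    schedule = SimpleNamespace(name='Demo schedule', tasks=[root])
    handler = ScheduleHandler_html(schedule=schedule, options={})
    return handler.export_schedule(out_file)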
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/schedule_handlers/html.py
| 0.548432 | 0.169097 |
html.py
|
pypi
|
from datetime import datetime
import logging
import pytz
log = logging.getLogger(__name__)
# Handle implementation must be in format ScheduleHandler_format
# where 'format' is used as a unique label for the format and
# 'ScheduleHandler' can be whatever.
class ScheduleHandlerBase(object):
handle = None
schedule = None
    # This flag indicates the ability to export the internal intermediate
    # structure (Schedule) into the implementation's format. It's read by
    # ScheduleConverter during autodiscovery and used to provide an accurate
    # help message in the CLI.
# TODO: add provide_import to be complete?
provide_export = False
provide_changelog = False
provide_mtime = False
options = {}
default_export_ext = None
    # Handlers can depend on additional python modules. We don't require
    # users to have all of them installed if they aren't used.
    # This flag indicates that the handler can be fully utilized and that
    # no dependent packages are missing.
handle_deps_satisfied = False
def __init__(self, handle=None, schedule=None, options=dict()):
self.schedule = schedule
self.options = options
# set handle last - there might be custom processing that requires options to already be set
self.handle = handle # 'handle' is source/target of schedule in general
def _write_to_file(self, content, filename):
with open(filename, 'wb') as fp:
fp.write(content.strip().encode('UTF-8'))
def get_handle_mtime(self):
""" Implement only if schedule handler is able to get mtime directly
without storage """
raise NotImplementedError
def handle_modified_since(self, mtime):
""" Return boolean to be able to bypass processing """
# Return False only when able to tell otherwise return True
modified = True
if isinstance(mtime, datetime):
try:
handle_mtime = self.get_handle_mtime()
except NotImplementedError:
                handle_mtime = None
# we're working with TZ naive dates (but in UTC)
if handle_mtime:
if handle_mtime.tzinfo is not None:
handle_mtime = handle_mtime.astimezone(pytz.utc).replace(tzinfo=None)
if handle_mtime <= mtime:
modified = False
return modified
def get_handle_changelog(self):
raise NotImplementedError
# handle - file/link/smartsheet id
def import_schedule(self):
raise NotImplementedError
def export_schedule(self):
raise NotImplementedError
def build_schedule(self):
raise NotImplementedError
@classmethod
def is_valid_source(cls, handle=None):
"""Method returns True, if the specific handler is able to work with
given handle"""
return False
def extract_backup(self, handle=None):
"""Prepare files which need a backup in case of external source"""
return []
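# Illustrative sketch (not part of the original module): a minimal handler
# following the ScheduleHandler_<format> naming convention.  It assumes the
# handle is a plain file path and only shows how get_handle_mtime() feeds
# handle_modified_since(); a real handler would also implement the
# import/export methods.
class ScheduleHandler_example(ScheduleHandlerBase):
    provide_mtime = True
    handle_deps_satisfied = True
    def get_handle_mtime(self):
        import os
        # naive UTC datetime, as expected by handle_modified_since()
        return datetime.utcfromtimestamp(os.path.getmtime(self.handle))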
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/schedule_handlers/__init__.py
| 0.405096 | 0.164516 |
__init__.py
|
pypi
|
import io
import qrcode
from PIL import Image, ImageDraw, ImageFont, ImageOps
from qrcode.image.styledpil import StyledPilImage
from qrcode.image.styles.colormasks import RadialGradiantColorMask
from qrcode.image.styles.moduledrawers import RoundedModuleDrawer
from schedulesy_qrcode.config import FONT_CONF, QR_CONF
def generate(rooms, color, client):
def save_image(image, filename):
output = io.BytesIO()
image.save(output, "png")
output.seek(0)
client.upload(output, filename, "image/png")
output.close()
def single_room(room):
print(f'🎨 Generating {".".join(room["path"])}.{room["name"]} ({room["id"]})')
qr = qrcode.QRCode(error_correction=qrcode.constants.ERROR_CORRECT_H)
qr.add_data(f'{QR_CONF["url"]}/public/{room["id"]}')
image = qr.make_image(
image_factory=StyledPilImage,
module_drawer=RoundedModuleDrawer(),
color_mask=RadialGradiantColorMask(
back_color=(255, 255, 255), center_color=color, edge_color=(0, 0, 0)
),
embeded_image_path=QR_CONF['logo'],
)
header = QR_CONF['header']
def split(a, n):
k, m = divmod(len(a), n)
return (
a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)
)
footer = "\n".join(
[" - ".join(x) for x in list(split(room["path"] + [room["name"]], 3))]
)
expanded_image = ImageOps.expand(image, border=20, fill="white")
        # Define the fonts used to write in the border
big_font = ImageFont.truetype(FONT_CONF['path'], int(FONT_CONF['header']))
small_font = ImageFont.truetype(FONT_CONF['path'], int(FONT_CONF['footer']))
# Instantiate draw object & add desired text
draw_object = ImageDraw.Draw(expanded_image)
draw_object.text(xy=(60, 10), text=header, fill=(0, 0, 0), font=big_font)
draw_object.text(
xy=(60, expanded_image.height - 55),
text=footer,
fill=(0, 0, 0),
font=small_font,
)
bordered = ImageOps.expand(expanded_image, border=10, fill=tuple(color))
# Preview the image
# bordered.show()
# Save the image
# bordered.save(f'out/{room["id"]}.png')
save_image(bordered, f'{room["id"]}.png')
return bordered
images = list(map(single_room, rooms))
w, h = images[0].size
separation = 2
# create big empty image with place for images
new_image = Image.new(
"RGB", (w * 2 + separation, h * 2 + separation), color="white"
)
# put images on new_image
new_image.paste(images[0], (0, 0))
new_image.paste(images[1], (w + separation, 0))
new_image.paste(images[2], (0, h + separation))
new_image.paste(images[3], (w + separation, h + separation))
s_ids = multi(rooms)
# save it
print(f"🎨 Generating {s_ids}")
# new_image.save(f'out/{"-".join(s_ids)}.png')
save_image(new_image, f"{s_ids}.png")
def multi(rooms):
ids = [int(room["id"]) for room in rooms]
ids.sort()
return "-".join(list(map(str, ids)))
|
/schedulesy_qrcode-1.0.3-py3-none-any.whl/schedulesy_qrcode/generate.py
| 0.565539 | 0.263608 |
generate.py
|
pypi
|
from abc import ABC, abstractmethod
import math
class Scheduler(ABC):
"""
Base abstract class for all schedulers
"""
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float):
if start_step >= stop_step:
            raise AttributeError('In the scheduler, start step must be less than stop step!')
if start_value < stop_value:
print('Initializing Scheduler to Ramp Value')
elif start_value > stop_value:
print('Initializing Scheduler to Decay Value')
else:
print('Initializing Scheduler with no effect!')
self._start_step = start_step
self._stop_step = stop_step
self._start_value = start_value
self._stop_value = stop_value
@abstractmethod
def warp_func(self, perc_step: float):
pass
def _get_perc_step(self, step: int):
# get step normalized in 0_1 range
return max(0, min(1, (step - self._start_step) / (self._stop_step - self._start_step)))
def _get_value(self, perc_step: float):
# get value at perc_step
return self._start_value + (self._stop_value - self._start_value) * perc_step
def step(self, step: int):
# step normalized in 0_1 range
perc_step = self._get_perc_step(step)
# warp perc according to scheduler type
perc_step = self.warp_func(perc_step)
return self._get_value(perc_step)
class CosineScheduler(Scheduler):
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float):
super().__init__(start_step, stop_step, start_value, stop_value)
def warp_func(self, perc_step: float):
# warp with cosine
# math.cos(math.pi * perc_step) goes from 1 to -1
# sum 1 and mul 0.5 to normalize
# then reverse since you still want a perc step as output
return 1 - (0.5 * (1. + math.cos(math.pi * perc_step)))
class LinearScheduler(Scheduler):
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float):
super().__init__(start_step, stop_step, start_value, stop_value)
def warp_func(self, perc_step: float):
# Identity warp
return perc_step
class LinearCosineScheduler:
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float, th_step: int):
"""
Linear Warmup Followed by Cosine Decay.
        Learning rate increases from start_step to th_step (0.0 to start_value) and then decays to stop_value.
"""
        if start_value <= stop_value:
            raise AttributeError('The LinearCosineScheduler must decay: start_value must be greater than stop_value!')
        if start_step >= stop_step:
            raise AttributeError('In the scheduler, start step must be less than stop step!')
        if not (start_step < th_step < stop_step):
            raise AttributeError('In the scheduler, threshold step must lie between start and stop steps!')
super().__init__()
self.th_step = th_step
self.linear_wu = LinearScheduler(start_step, th_step, 0, start_value)
self.cosine_decay = CosineScheduler(th_step, stop_step, start_value, stop_value)
def step(self, step: int):
if step < self.th_step:
return self.linear_wu.step(step)
else:
return self.cosine_decay.step(step)
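# Illustrative sketch (not part of the original module): a typical
# learning-rate schedule with 100 linear warmup steps followed by cosine
# decay; the concrete numbers are arbitrary.
def _example_linear_cosine():
    sched = LinearCosineScheduler(start_step=0, stop_step=1000,
                                  start_value=1e-3, stop_value=1e-5, th_step=100)
    lr_first = sched.step(0)     # 0.0 at the very first step
    lr_peak = sched.step(100)    # 1e-3 at the end of warmup
    lr_last = sched.step(1000)   # decays to 1e-5
    return lr_first, lr_peak, lr_last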
|
/scheduling_utils-0.1.2.tar.gz/scheduling_utils-0.1.2/scheduling_utils/schedulers.py
| 0.8758 | 0.304617 |
schedulers.py
|
pypi
|
from typing import Any, Dict, NoReturn, TypeVar, Union, Type, Tuple, Callable, Iterable
ObjType = TypeVar('ObjType')
SchemaType = Union[str, Type, Tuple[Type], Dict[Union[str, Type], Any]]
def _get_type(sch: Dict[Union[str, Type], Any]) -> Any:
return sch[type if type in sch else 'type']
def _default(value: Any) -> Any:
return value() if callable(value) else value
def _on_error(schema: SchemaType, msg: Union[str, Exception]) -> NoReturn:
if isinstance(schema, dict):
msg = schema.get('errmsg', msg)
raise ValueError(msg)
def _validate_const_enum(obj: ObjType, schema: Dict[str, Any], schema_type: str, key: str) -> ObjType:
    if 'value' not in schema:
        _on_error(schema, 'schema for "const"/"enum" must contain "value"')
    if schema_type == 'enum':
        if obj not in schema['value']:
            _on_error(schema, '"{}" is not in enum "{}"'.format(obj, key))
elif obj != schema['value']:
_on_error(schema, '"{}" is not allowed as "{}"'.format(obj, key))
return obj
def _check_dict_key(obj: ObjType, schema: Dict[str, Any], extra: str) -> ObjType:
unex = {i for i in obj if i not in schema['value']}
if unex and not schema.get('unexpected', False):
_on_error(schema, 'Got unexpected keys: "{}" {};'.format('", "'.join([str(i) for i in unex]), extra))
missed = {
i
for i in schema['value']
if i not in obj and (not isinstance(schema['value'][i], dict) or 'default' not in schema['value'][i])
}
if missed:
_on_error(schema, 'expected keys "{}" {}'.format('", "'.join([str(i) for i in missed]), extra))
return {
i: obj[i]
for i in unex
}
def _validate_dicts_value(obj: ObjType, schema: Dict[str, Any], extra: str) -> ObjType:
new_obj = _check_dict_key(obj=obj, schema=schema, extra=extra)
try:
new_obj.update(
{
i: (
_default(schema['value'][i]['default'])
if i not in obj else
_apply(obj=obj[i], schema=schema['value'][i], key=i)
)
for i in schema['value']
}
)
except ValueError as ex:
_on_error(schema, ex)
return new_obj
def _validate_dict(obj: ObjType, schema: Dict[str, Any], extra: str) -> ObjType:
if 'value' in schema:
obj = _validate_dicts_value(obj=obj, schema=schema, extra=extra)
elif 'any_key' in schema:
try:
obj = {i: _apply(obj[i], schema['any_key'], i) for i in obj}
except ValueError as ex:
_on_error(schema, ex)
return obj
def _check_filter(obj: ObjType, func: Union[Callable, Iterable[Callable]]) -> bool:
return all(func(obj) for func in ([func] if callable(func) else func))
def _generic_checks(obj: ObjType, schema: SchemaType, schema_type: Type, extra: str, key: str) -> ObjType:
if not isinstance(obj, schema_type):
_on_error(schema, 'expected type "{}" {} ; got {}'.format(schema_type, extra, type(obj)))
if 'filter' in schema and not _check_filter(obj, schema['filter']):
_on_error(schema, '"{}" not passed filter'.format(key))
if schema.get('blank') is False and not obj:
_on_error(schema, '"{}" is blank'.format(key))
if 'max_length' in schema and len(obj) > schema['max_length']:
_on_error(schema, '"{}" > max_length'.format(key))
if 'min_length' in schema and len(obj) < schema['min_length']:
_on_error(schema, '"{}" < min_length'.format(key))
return obj
def _validate_generic(obj: ObjType, schema: SchemaType, schema_type: Type, key: str, extra: str) -> ObjType:
obj = _generic_checks(obj=obj, schema=schema, schema_type=schema_type, key=key, extra=extra)
if isinstance(schema_type, type) and issubclass(schema_type, (list, tuple)) and 'value' in schema:
try:
obj = schema_type(_apply(i, schema['value'], key=key) for i in obj)
except ValueError as ex:
_on_error(schema, ex)
elif isinstance(schema_type, type) and issubclass(schema_type, dict):
obj = _validate_dict(obj=obj, schema=schema, extra=extra)
return obj
def _validate(obj: ObjType, schema: SchemaType, key: str, extra: str) -> ObjType:
schema_type = _get_type(schema)
if schema_type in {'const', 'enum'}:
return _validate_const_enum(obj=obj, schema=schema, schema_type=schema_type, key=key)
return _validate_generic(obj=obj, schema=schema, schema_type=schema_type, extra=extra, key=key)
def _apply_callable(obj: ObjType, func: Union[Callable, Iterable[Callable]]) -> ObjType:
for func in ([func] if callable(func) else func):
obj = func(obj)
return obj
def _apply(obj: ObjType, schema: SchemaType, key: str) -> ObjType:
extra = ''.join(['for ', key]) if key else ''
if not isinstance(schema, (dict, type, tuple)) and schema not in {'const', 'enum'}:
raise ValueError('schema must be type, dict, tuple or "const"/"enum" {}'.format(extra))
if schema == 'const':
return obj
if isinstance(schema, (type, tuple)):
if isinstance(obj, schema):
return obj
raise ValueError('"{}" is not type of "{}" {}'.format(obj, schema, extra))
if 'pre_call' in schema:
obj = _apply_callable(obj, schema['pre_call'])
obj = _validate(obj=obj, schema=schema, key=key, extra=extra)
if 'post_call' in schema:
obj = _apply_callable(obj, schema['post_call'])
return obj
def validate(obj: ObjType, schema: SchemaType) -> ObjType:
"""
obj - some object
schema - schema_checker
schema ::= type of this object : list/dict/str/int/float (can be tuple of types) or "const"/"enum"
OR
schema ::= dict - {
        type : type of this object : list/tuple/dict/str/int/float (can be tuple of types) or "const"/"enum"
"value" : need for obj type of
- list/tuple - is schema for all elements in list
- dict - dict[key -> schema]
            - const - some value the object must be equal to
- enum - list/set/dict/tuple to check if obj __contains__ in "value"
"any_key" : need for obj type of dict - schema for all keys (ignores if value is set)
"default" : default value if this object does not exists (if callable will be called)
"filter" : any of
- Callable[value -> bool] - if false then raise error
- Iterable[Callable[value -> bool]] - if any of them return false then raise error
"pre_call" : any of
- Callable[value -> value] - will be called before checking type and call filter's functions
- Iterable[Callable[value -> value]] - will call all of them
"post_call" : any of
- Callable[value -> value] - will be called after checking type and call filter's functions
- Iterable[Callable[value -> value]] - will call all of them
"blank" : raise error if value is blank
"max_length" : extra check of length (len)
"min_length" : extra check of length (len)
"unexpected" : allow unexpected keys (for dict)
"errmsg" : will be in ValueError in case of error on this level
}
"""
return _apply(obj, schema, 'Top-level')
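# Illustrative sketch (not part of the original module): a small, hypothetical
# schema exercising nested dict validation, a filter, a default and an enum,
# as described in the docstring above.
def _example_validate():
    schema = {
        'type': dict,
        'value': {
            'name': {'type': str, 'blank': False},
            'age': {'type': int, 'filter': lambda x: x >= 0},
            'role': {'type': 'enum', 'value': {'admin', 'user'}, 'default': 'user'},
        },
    }
    data = validate({'name': 'alice', 'age': 30}, schema)
    # 'role' was absent, so its default is filled in
    assert data == {'name': 'alice', 'age': 30, 'role': 'user'}
    return data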
|
/schema_checker-1.1.1.tar.gz/schema_checker-1.1.1/schema_checker/jschema.py
| 0.629661 | 0.227491 |
jschema.py
|
pypi
|
from typing import List
from pprint import pformat
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.dto import NormalizedSchema
from schema_classification.dto import MappingResultDict
from schema_classification.dmo import FilterIncludeAllOf
from schema_classification.dmo import FilterExcludeOneOf
from schema_classification.dmo import FilterExcludeAllOf
from schema_classification.dmo import FilterIncludeOneOf
from schema_classification.dmo import FilterStartsWith
class FilterMapping(BaseObject):
""" Filter all Invalid Mapping """
def __init__(self,
d_index: NormalizedSchema):
""" Initialize Service
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* use list-of-str for input tokens rather than mapped dict
https://github.com/craigtrim/schema-classification/issues/3
* rename from 'predict-mapping'
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the indexed schema
"""
BaseObject.__init__(self, __name__)
self._include_one_of = FilterIncludeOneOf(d_index).process
self._include_all_of = FilterIncludeAllOf(d_index).process
self._exclude_one_of = FilterExcludeOneOf(d_index).process
self._exclude_all_of = FilterExcludeAllOf(d_index).process
self._startswith = FilterStartsWith(d_index).process
def _process(self,
input_tokens: List[str]) -> MappingResultDict:
m_include_oneof = self._include_one_of(input_tokens)
m_include_allof = self._include_all_of(input_tokens)
m_exclude_oneof = self._exclude_one_of(input_tokens)
m_exclude_allof = self._exclude_all_of(input_tokens)
m_startswith = self._startswith(input_tokens)
return {
'include_one_of': m_include_oneof,
'include_all_of': m_include_allof,
'exclude_one_of': m_exclude_oneof,
'exclude_all_of': m_exclude_allof,
'startswith': m_startswith,
}
def process(self,
input_tokens: List[str]) -> MappingResultDict:
sw = Stopwatch()
results = self._process(input_tokens)
if self.isEnabledForInfo:
self.logger.info('\n'.join([
'Mapping Prediction Completed',
f'\tTotal Time: {str(sw)}',
f'\tTotal Results: {len(results)}']))
if self.isEnabledForDebug and len(results):
self.logger.debug('\n'.join([
'Mapping Prediction Results',
f'{pformat(results)}']))
return results
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/svc/filter_mapping.py
| 0.813313 | 0.242329 |
filter_mapping.py
|
pypi
|
from typing import List
from typing import Dict
from typing import Any
from pprint import pprint
from collections import defaultdict
from baseblock import BaseObject
from schema_classification.dto import ListOfDicts
from schema_classification.dto import NormalizedSchema
from schema_classification.dto import MappingResultDict
class SelectMapping(BaseObject):
""" Choose the Best Mapping """
def __init__(self,
d_filter: dict,
d_index: dict):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* eliminate callback and pass d-index in pursuit of
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* pass d-filter instead of 'mapping'
https://github.com/grafflr/deepnlu/issues/45
:param d_filter:
relevant section of mapping ruleset
:param d_index:
callback to scoring method
"""
BaseObject.__init__(self, __name__)
self._d_filter = d_filter
self._d_index = d_index
def _invalid_names(self) -> set:
""" Join Include and Exclude results to find Candidate Mappings """
invalid_names = set()
        for include_key in ['include_one_of', 'include_all_of', 'startswith']:
            invalid_names.update(self._d_filter[include_key])
        for exclude_key in ['exclude_one_of', 'exclude_all_of']:
            invalid_names.update(self._d_filter[exclude_key])
return invalid_names
def process(self) -> Dict:
invalid_names = self._invalid_names()
d_mapping = self._d_index['mapping']
d_mapping = {
k: d_mapping[k]
for k in d_mapping if k not in invalid_names
}
d_by_score = defaultdict(list)
for classification in d_mapping:
def get_score() -> float:
if 'score' not in d_mapping[classification]:
return 100.0
return 100 + d_mapping[classification]['score']
d_by_score[get_score()].append(classification)
if not len(d_by_score):
return {
'classification': None,
'score': None,
}
max_score = max(d_by_score)
def cleanse() -> str:
max_classification = sorted(d_by_score[max_score])[0]
if '#' in max_classification:
max_classification = max_classification.split('#')[0].strip()
return max_classification
def bounded_score() -> float:
if max_score > 100.0:
return 100.0
if max_score < 0.0:
return 0.0
return max_score
return {
'classification': cleanse(),
'score': bounded_score()
}
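# Illustrative sketch (not part of the original module): with an empty filter
# result and a hypothetical two-entry mapping, both classifications get the
# default score of 100; the tie is broken alphabetically and the '#1' suffix
# is stripped from the winner.
def _example_select_mapping():
    d_index = {'mapping': {
        'GREETING#1': [{'include_one_of': ['hello']}],
        'FAREWELL#1': [{'include_one_of': ['bye']}],
    }}
    empty_filter = {k: [] for k in ('include_one_of', 'include_all_of',
                                    'exclude_one_of', 'exclude_all_of',
                                    'startswith')}
    result = SelectMapping(d_filter=empty_filter, d_index=d_index).process()
    assert result == {'classification': 'FAREWELL', 'score': 100.0}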
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/svc/select_mapping.py
| 0.858896 | 0.316818 |
select_mapping.py
|
pypi
|
from typing import Dict
from baseblock import FileIO
from baseblock import BaseObject
from schema_classification.dmo import IndexScoring
from schema_classification.dmo import IndexExcludeAllOf
from schema_classification.dmo import IndexExcludeOneOf
from schema_classification.dmo import IndexIncludeAllOf
from schema_classification.dmo import IndexIncludeOneOf
from schema_classification.dmo import IndexStartsWith
from schema_classification.dto import RawSchema
from schema_classification.dto import NormalizedSchema
class ReadMapping(BaseObject):
""" Build an in-memory Index over a Dictionary of Classifications """
def __init__(self,
d_schema: Dict):
""" Initialize Manifest Indicer
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/167
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
26-Jul-2022
[email protected]
* remove 'schema-name' and 'absolute-path' as parameters, and instead
pass the full absolute path of a schema file, in pursuit of
https://bast-ai.atlassian.net/browse/COR-12
Updated:
26-Sept-2022
[email protected]
* pass in d-schema as dict rather than a filepath
https://github.com/craigtrim/schema-classification/issues/1
Args:
d_schema (Dict): the schema JSON
"""
BaseObject.__init__(self, __name__)
self._d_index = self._create_index(d_schema)
def _create_index(self,
d_schema: RawSchema) -> NormalizedSchema:
""" Create Index
Args:
d_schema (dict): _description_
Sample Input:
{
'Favorite_Animal_Response#1': [
{
'include_all_of': ['favorite', 'animal']
}
]
}
Raises:
ValueError: _description_
Returns:
InMemoryIndex: _description_
"""
return {
'scoring': IndexScoring(d_schema).process(),
'include_one_of': IndexIncludeOneOf(d_schema).process(),
'include_all_of': IndexIncludeAllOf(d_schema).process(),
'exclude_one_of': IndexExcludeOneOf(d_schema).process(),
'exclude_all_of': IndexExcludeAllOf(d_schema).process(),
'startswith': IndexStartsWith(d_schema).process(),
'mapping': d_schema,
}
def index(self) -> NormalizedSchema:
return self._d_index
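# Illustrative sketch (not part of the original module): indexing the sample
# schema from the docstring above; the raw schema stays available under the
# 'mapping' key while the other keys hold the per-rule-family indices.
def _example_read_mapping():
    d_schema = {
        'Favorite_Animal_Response#1': [
            {'include_all_of': ['favorite', 'animal']}
        ]
    }
    d_index = ReadMapping(d_schema).index()
    assert d_index['mapping'] is d_schema
    return d_index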
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/svc/read_mapping.py
| 0.85814 | 0.214301 |
read_mapping.py
|
pypi
|
from pprint import pprint
from pprint import pformat
from typing import List
from typing import Dict
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.svc import ReadMapping
from schema_classification.svc import FilterMapping
from schema_classification.svc import SelectMapping
from schema_classification.dto import ServiceEvent
class SchemaOrchestrator(BaseObject):
""" Portendo performs Predictive Classification of deepNLU parsed ASTs
This Orchestration sequence requires a pre-written schema for classification
Pre-written schemas are more complex and are capable of nuanced classification
"""
def __init__(self,
d_schema: Dict):
"""Initialize Portendo API
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* make absolute_path a required parameter in pursuit of
https://github.com/grafflr/deepnlu/issues/44
* read classifications from memory (not python files)
https://github.com/grafflr/deepnlu/issues/45
Updated:
13-Jul-2022
[email protected]
* renamed from 'portendo' in pursuit of
https://github.com/grafflr/deepnlu/issues/48
Updated:
26-Jul-2022
[email protected]
* remove 'schema-name' and 'absolute-path' as parameters, and instead
pass the full absolute path of a schema file, in pursuit of
https://bast-ai.atlassian.net/browse/COR-12
* document the schema-file to schema-name mapping convention
https://bast-ai.atlassian.net/browse/COR-13
Updated:
26-Sept-2022
[email protected]
* pass in d-schema as dict rather than a filepath
https://github.com/craigtrim/schema-classification/issues/1
Updated:
30-Nov-2022
[email protected]
* use list-of-str for input tokens rather than mapped dict
https://github.com/craigtrim/schema-classification/issues/3
Args:
d_schema (Dict): the schema JSON
"""
BaseObject.__init__(self, __name__)
self._d_index = ReadMapping(d_schema).index()
def _run(self,
input_tokens: List[str]) -> ServiceEvent:
        # d_filter holds the per-rule filter results that SelectMapping uses
        # to rule out candidate classifications
d_filter = FilterMapping(
self._d_index).process(input_tokens)
mapping = SelectMapping(
d_filter=d_filter,
d_index=self._d_index).process()
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Mapping Completed',
f'\tInput:\n{pformat(input_tokens)}',
f'\tOutput:\n{pformat(mapping)}']))
if not len(mapping):
return {
'result': None,
'tokens': input_tokens
}
return {
'result': mapping,
'tokens': input_tokens
}
def run(self,
input_tokens: List[str]) -> ServiceEvent:
""" Run the Schema Orchestrator on Input Tokens
Args:
input_tokens (list): a flat list of tokens extracted from text
Sample Input:
['network_topology', 'user', 'customer']
Returns:
tuple: the service result
"""
sw = Stopwatch()
svcresult = self._run(input_tokens)
self.logger.info('\n'.join([
'Portendo Schema Orchestrator Completed',
f'\tTotal Time: {str(sw)}',
f'\tResult:\n{pformat(svcresult)}']))
return svcresult
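# Illustrative sketch (not part of the original package): wiring the
# orchestrator to a hypothetical one-rule schema and classifying a token
# list; the exact contents of 'result' depend on the scoring performed by
# SelectMapping.
def _example_orchestrator():
    d_schema = {
        'NETWORK_TOPOLOGY_QUERY#1': [
            {'include_all_of': ['network_topology', 'user']}
        ]
    }
    orchestrator = SchemaOrchestrator(d_schema)
    svcresult = orchestrator.run(['network_topology', 'user', 'customer'])
    return svcresult['result'], svcresult['tokens']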
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/bp/schema_orchestrator.py
| 0.841468 | 0.354685 |
schema_orchestrator.py
|
pypi
|
from typing import Dict
from baseblock import BaseObject
from schema_classification.dto import Markers
from schema_classification.dto import MappingResult
class ConfidenceExcludeAllOf(BaseObject):
""" Determine Confidence Level for Selected Mapping """
def __init__(self,
mapping: Dict,
markers: Markers,
result: MappingResult):
"""
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
:param d_include_oneof:
relevant section of mapping ruleset
"""
BaseObject.__init__(self, __name__)
self._result = result
self._mapping = mapping
self._markers = markers
def _excludeall(self,
confidence: float,
mapping: dict) -> float:
        # do any of these exclusions actually appear among the input markers?
exclusions = set(mapping['exclude_all_of'])
markers = set(self._markers.keys())
        # deduct from the confidence based on how much of the marker set is
        # covered by the exclusions
total_matches = len(exclusions.intersection(markers))
        ratio = round((total_matches / len(markers)) * 100, 0) if markers else 0.0
if ratio > 80:
confidence -= 8
elif ratio > 60:
confidence -= 16
elif ratio > 40:
confidence -= 32
elif ratio > 20:
confidence -= 64
elif ratio > 0:
confidence -= 90
else:
confidence -= 99
self.logger.debug('\n'.join([
'Exclude All Of Confidence',
f'\tExclusions ({len(exclusions)}): {exclusions}',
f'\tMarkers ({len(markers)}): {markers}',
f'\tMatches: {total_matches}',
f'\tRatio: {ratio}']))
return confidence
def process(self) -> float:
confidence = self._result['confidence']
mappings = self._mapping[self._result['classification']]
# at this point, we know the exclusions rule did not apply
for mapping in mappings:
if 'exclude_all_of' in mapping:
confidence = self._excludeall(mapping=mapping,
confidence=confidence)
return confidence
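# Illustrative sketch (not part of the original module): a worked example of
# the ratio buckets above.  Two of the four hypothetical markers hit the
# exclusion list, so the ratio is 50 and the confidence drops by 32.
def _example_confidence_exclude_allof():
    mapping = {'NO_MEETING#1': [{'exclude_all_of': ['schedule', 'meeting']}]}
    markers = {'schedule': 1, 'meeting': 1, 'cancel': 1, 'today': 1}
    result = {'confidence': 100.0, 'classification': 'NO_MEETING#1'}
    dmo = ConfidenceExcludeAllOf(mapping=mapping, markers=markers, result=result)
    assert dmo.process() == 68.0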
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/confidence_exclude_allof.py
| 0.902588 | 0.282468 |
confidence_exclude_allof.py
|
pypi
|
from typing import Set
from typing import List
from typing import Dict
from pprint import pformat
from baseblock import Stopwatch
from baseblock import BaseObject
class FilterStartsWith(BaseObject):
""" Check if Input Text Starts with Value
Reference:
https://github.com/grafflr/graffl-core/issues/264#issuecomment-1089413865
"""
def __init__(self,
d_index: Dict):
""" Change Log
Created:
5-Apr-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/264
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* renamed from 'computer-startswith' and basically rewrite from scratch
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the in-memory schema
"""
BaseObject.__init__(self, __name__)
self._mapping = d_index['mapping']
self._d_startswith = d_index['startswith']
def _coverage(self,
weight: int,
mapping_name: str) -> float:
""" Determine the Coverage """
d_mapping = self._mapping[mapping_name][0]['include_one_of']
total_markers = len(d_mapping)
return round(weight / total_markers, 2)
def process(self,
input_tokens: List) -> Set:
sw = Stopwatch()
d_results = {}
input_text = ' '.join(input_tokens).lower().strip()
for phrase in self._d_startswith:
if input_text.startswith(phrase.lower()):
for mapping in self._d_startswith[phrase]:
d_results[mapping] = {'weight': 100.0, 'coverage': 100.0}
if self.isEnabledForDebug and len(d_results):
self.logger.debug('\n'.join([
'StartsWith Results:',
f'\tTotal Time: {str(sw)}',
f'\t{pformat(d_results)}']))
return d_results
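# Illustrative sketch (not part of the original module): the input tokens are
# joined into lower-cased text and matched against each indexed phrase as a
# prefix; the index below is hypothetical.
def _example_filter_startswith():
    d_index = {
        'mapping': {},
        'startswith': {'create new': ['CREATE_DISCUSSION#1']},
    }
    dmo = FilterStartsWith(d_index)
    results = dmo.process(['Create', 'new', 'discussion'])
    assert results == {'CREATE_DISCUSSION#1': {'weight': 100.0, 'coverage': 100.0}}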
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/filter_startswith.py
| 0.849222 | 0.297866 |
filter_startswith.py
|
pypi
|
from typing import List
from typing import Dict
from pprint import pprint
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.dto import NormalizedSchema
class FilterExcludeAllOf(BaseObject):
""" Filter Classifications using EXCLUDE_ALL_OF Rulesets
- Remove invalid classifications using the 'exclude-all-of' criteria.
- This component returns valid candidates only.
Implementation:
EXCLUDE_ALL_OF has some important nuances that differentiate it from EXCLUDE_ONE_OF
Reference: https://github.com/craigtrim/schema-classification/issues/5
"""
def __init__(self,
d_index: NormalizedSchema):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* use list-of-str for input tokens rather than mapped dict
https://github.com/craigtrim/schema-classification/issues/3
* renamed from 'computer-exclude-one-of' and basically rewrite from scratch
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the in-memory schema
Sample Input (mapping):
{
"ASSIGN_PEER_REVIEW_DISCUSSION#1":[
{
"include_all_of":[
"discussion",
"assign"
],
"include_one_of":[
"review",
"peer_review"
],
"exclude_all_of":[
"create"
]
}
]
}
Sample Input (exclude_all_of):
{
"create": "ASSIGN_PEER_REVIEW_DISCUSSION#1"
}
"""
BaseObject.__init__(self, __name__)
self._mapping = d_index['mapping']
def process(self,
input_tokens: List[str]) -> Dict:
sw = Stopwatch()
invalid_names = []
s_input_tokens = set(input_tokens)
for classification in self._mapping:
for ruleset in self._mapping[classification]:
if 'exclude_all_of' not in ruleset:
continue
exclude_all_of = set(ruleset['exclude_all_of'])
if not len(exclude_all_of):
continue
result = exclude_all_of.intersection(s_input_tokens)
# all the exclusion tokens must be found
if result != exclude_all_of:
continue
invalid_names.append(classification)
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Invalid Classification Found',
f'\tName: {classification}',
f'\tRule Tokens: {exclude_all_of}',
f'\tInput Tokens: {input_tokens}']))
if self.isEnabledForInfo:
self.logger.info('\n'.join([
'Filtering Complete',
f'\tRemoved Classifications: {len(invalid_names)}',
f'\tTotal Time: {str(sw)}']))
return invalid_names
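# Illustrative sketch (not part of the original module): a variation of the
# docstring sample with two exclusion tokens, showing that the classification
# is only removed when every 'exclude_all_of' token appears in the input.
def _example_filter_exclude_allof():
    d_index = {'mapping': {
        'ASSIGN_PEER_REVIEW_DISCUSSION#1': [{
            'include_all_of': ['discussion', 'assign'],
            'include_one_of': ['review', 'peer_review'],
            'exclude_all_of': ['create', 'delete'],
        }]
    }}
    dmo = FilterExcludeAllOf(d_index)
    assert dmo.process(['create', 'discussion']) == []
    assert dmo.process(['create', 'delete']) == ['ASSIGN_PEER_REVIEW_DISCUSSION#1']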
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/filter_exclude_allof.py
| 0.843122 | 0.283763 |
filter_exclude_allof.py
|
pypi
|