id (int64, 0-458k) | file_name (stringlengths 4-119) | file_path (stringlengths 14-227) | content (stringlengths 24-9.96M) | size (int64, 24-9.96M) | language (stringclasses, 1 value) | extension (stringclasses, 14 values) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (stringlengths 7-101) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (stringclasses, 12 values) | repo_extraction_date (stringclasses, 433 values)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
2,287,600 | overview.py | pblottiere_QSA/qsa-api/qsa_api/raster/overview.py | # coding: utf8
import sys
from pathlib import Path
from qgis.core import QgsRasterLayer, Qgis
from ..config import QSAConfig
from ..utils import logger, s3_parse_uri, s3_bucket_upload
class RasterOverview:
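"""
Build external GDAL overviews (.ovr) for a raster layer stored on S3.
The overview file is generated in the local GDAL PAM proxy directory,
uploaded next to the source raster in the S3 bucket, then removed locally.
"""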
def __init__(self, layer: QgsRasterLayer) -> None:
self.layer = layer
def is_valid(self):
return self.layer.dataProvider().hasPyramids()
def build(self) -> (bool, str):
ds = self.layer.source()
# check if rasters stored on S3
if "/vsis3" not in ds:
return False, "Building overviews is only supported for S3 rasters"
# config overviews
self.debug("Build external overviews")
levels = self.layer.dataProvider().buildPyramidList()
for idx, level in enumerate(levels):
levels[idx].setBuild(True)
# build overviews
fmt = Qgis.RasterPyramidFormat.GeoTiff
err = self.layer.dataProvider().buildPyramids(levels, "NEAREST", fmt)
if err:
return False, f"Cannot build overview ({err})"
# search ovr file in GDAL PAM directory
ovrfile = f"{Path(ds).name}.ovr"
ovrpath = next(
QSAConfig().gdal_pam_proxy_dir.glob(f"*{ovrfile}"), None
)
if not ovrpath:
return False, "Cannot find OVR file in GDAL_PAM_PROXY_DIR"
# upload
bucket, subdirs, _ = s3_parse_uri(ds)
dest = Path(subdirs) / ovrfile
rc, msg = s3_bucket_upload(bucket, ovrpath.as_posix(), dest.as_posix())
if not rc:
return False, msg
# clean
self.debug("Remove ovr file in GDAL PAM directory")
ovrpath.unlink()
return True, ""
def debug(self, msg) -> None:
caller = f"{self.__class__.__name__}.{sys._getframe().f_back.f_code.co_name}"
msg = f"[{caller}] {msg}"
logger().debug(msg)
| 1,874 | Python | .py | 47 | 31.553191 | 85 | 0.616234 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,601 | __init__.py | pblottiere_QSA/qsa-api/qsa_api/raster/__init__.py | # coding: utf8
from .overview import RasterOverview
from .renderer import RasterSymbologyRenderer
| 99 | Python | .py | 3 | 31.666667 | 45 | 0.873684 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,602 | processing.py | pblottiere_QSA/qsa-api/qsa_api/api/processing.py | # coding: utf8
from jsonschema import validate
from flask import Blueprint, jsonify, request
from jsonschema.exceptions import ValidationError
from ..utils import logger
from ..project import QSAProject
from ..processing import RasterCalculator, Histogram
from .utils import log_request
processing = Blueprint("processing", __name__)
@processing.post("/raster/calculator/<project>")
def raster_calculator(project: str):
log_request()
try:
schema = {
"type": "object",
"required": ["expression", "output"],
"properties": {
"expression": {"type": "string"},
"output": {"type": "string"},
},
}
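# Illustrative request body (values are hypothetical; the expression uses the
# QGIS raster calculator "layer@band" syntax and the output is an S3 URI):
# {"expression": "\"dem@1\" * 2", "output": "/vsis3/my-bucket/result.tif"}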
data = request.get_json()
try:
validate(data, schema)
except ValidationError as e:
return {"error": e.message}, 415
expression = data["expression"]
output = data["output"]
psql_schema = request.args.get("schema", default="public")
proj = QSAProject(project, psql_schema)
if not proj.exists():
return {"error": "Project doesn't exist"}, 415
calc = RasterCalculator(proj._qgis_project_uri, expression)
if not calc.is_valid():
return {"error": "Invalid expression"}, 415
rc, msg = calc.process(output)
if not rc:
return {
"error": f"Raster calculator failed to process expression ({msg})"
}, 415
return jsonify(rc), 201
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@processing.post("/raster/histogram/<project>/<layer>")
def raster_histogram(project: str, layer: str):
log_request()
try:
schema = {
"type": "object",
"properties": {
"min": {"type": "number"},
"max": {"type": "number"},
"count": {"type": "number"},
},
}
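# Illustrative request body (all fields are optional; count defaults to 1000):
# {"min": 0, "max": 255, "count": 100}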
data = request.get_json()
try:
validate(data, schema)
except ValidationError as e:
return {"error": e.message}, 415
mini = None
if "min" in data:
mini = data["min"]
maxi = None
if "max" in data:
maxi = data["max"]
count = 1000
if "count" in data:
count = data["count"]
psql_schema = request.args.get("schema", default="public")
proj = QSAProject(project, psql_schema)
if proj.exists():
layer_infos = proj.layer(layer)
if layer_infos:
if "type" in layer_infos and layer_infos["type"] != "raster":
return {
"error": "Histogram is available for raster layer only"
}
histo = Histogram(proj._qgis_project_uri, layer)
return jsonify(histo.process(mini, maxi, count)), 201
else:
return {"error": "Layer does not exist"}, 415
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
| 3,235 | Python | .py | 88 | 26.613636 | 82 | 0.551504 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,603 | projects.py | pblottiere_QSA/qsa-api/qsa_api/api/projects.py | # coding: utf8
import shutil
import requests
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from flask import send_file, Blueprint, jsonify, request
from qgis.PyQt.QtCore import QDateTime
from ..wms import WMS
from ..utils import logger
from ..project import QSAProject
from .utils import log_request
projects = Blueprint("projects", __name__)
@projects.get("/")
def projects_list():
log_request()
try:
psql_schema = request.args.get("schema", default="public")
p = []
for project in QSAProject.projects(psql_schema):
p.append(project.name)
return jsonify(p)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>")
def project_info(name: str):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
return jsonify(project.metadata)
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.post("/")
def project_add():
log_request()
try:
schema = {
"type": "object",
"required": ["name", "author"],
"properties": {
"name": {"type": "string"},
"author": {"type": "string"},
"schema": {"type": "string"},
},
}
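# Illustrative request body (values are hypothetical; "schema" is optional):
# {"name": "my_project", "author": "jane", "schema": "public"}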
if request.is_json:
data = request.get_json()
try:
validate(data, schema)
except ValidationError as e:
return {"error": e.message}, 415
name = data["name"]
author = data["author"]
schema = ""
if "schema" in data:
schema = data["schema"]
project = QSAProject(name, schema)
if project.exists():
return {"error": "Project already exists"}
rc, err = project.create(author)
if not rc:
return {"error": err}, 415
return jsonify(True), 201
return {"error": "Request must be JSON"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.delete("/<name>")
def project_del(name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
project.remove()
return jsonify(True), 201
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/styles")
def project_styles(name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
return jsonify(project.styles), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/styles/<style>")
def project_style(name, style):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
infos, err = project.style(style)
if err:
return {"error": err}, 415
else:
return jsonify(infos), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.delete("/<name>/styles/<style>")
def project_del_style(name, style):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
if style in project.styles:
rc, msg = project.remove_style(style)
if not rc:
return {"error": msg}, 415
return jsonify(rc), 201
else:
return {"error": "Style does not exist"}, 415
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.post("/<name>/layers/<layer_name>/style")
def project_layer_update_style(name, layer_name):
log_request()
try:
schema = {
"type": "object",
"required": ["name", "current"],
"properties": {
"name": {"type": "string"},
"current": {"type": "boolean"},
},
}
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
data = request.get_json()
try:
validate(data, schema)
except ValidationError as e:
return {"error": e.message}, 415
current = data["current"]
style_name = data["name"]
rc, msg = project.layer_update_style(
layer_name, style_name, current
)
if not rc:
return {"error": msg}, 415
return jsonify(True), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/layers/<layer_name>/map/url")
def project_layer_map_url(name, layer_name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
getmap = WMS.getmap_url(name, psql_schema, layer_name)
return jsonify({"url": getmap}), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/layers/<layer_name>/map")
def project_layer_map(name, layer_name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
url = WMS.getmap(name, psql_schema, layer_name)
r = requests.get(url, stream=True)
png = "/tmp/map.png"
with open(png, "wb") as out_file:
shutil.copyfileobj(r.raw, out_file)
return send_file(png, mimetype="image/png")
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.post("/<name>/styles")
def project_add_style(name):
log_request()
try:
schema = {
"type": "object",
"required": ["name", "type", "rendering", "symbology"],
"properties": {
"name": {"type": "string"},
"type": {"type": "string"},
"symbology": {"type": "object"},
"rendering": {"type": "object"},
},
}
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
data = request.get_json()
try:
validate(data, schema)
except ValidationError as e:
return {"error": e.message}, 415
rc, err = project.add_style(
data["name"],
data["type"],
data["symbology"],
data["rendering"],
)
if rc:
return jsonify(rc), 201
else:
return {"error": err}, 415
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/styles/default")
def project_default_styles(name: str) -> dict:
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
infos = project.default_styles()
return jsonify(infos), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.post("/<name>/styles/default")
def project_update_default_style(name):
log_request()
try:
schema = {
"type": "object",
"required": ["geometry", "style"],
"properties": {
"geometry": {"type": "string"},
"style": {"type": "string"},
},
}
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
data = request.get_json()
try:
validate(data, schema)
except ValidationError as e:
return {"error": e.message}, 415
project.style_update(data["geometry"], data["style"])
return jsonify(True), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/layers")
def project_layers(name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
return jsonify(project.layers), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.post("/<name>/layers")
def project_add_layer(name):
log_request()
try:
schema = {
"type": "object",
"required": ["name", "datasource", "type"],
"properties": {
"name": {"type": "string"},
"datasource": {"type": "string"},
"crs": {"type": "number"},
"type": {"type": "string"},
"overview": {"type": "boolean"},
"datetime": {"type": "string"},
},
}
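# Illustrative request body (values are hypothetical; "datetime" must match
# the "yyyy-MM-dd HH:mm:ss" format checked below):
# {"name": "dem", "datasource": "/vsis3/my-bucket/dem.tif", "type": "raster",
#  "crs": 4326, "overview": true, "datetime": "2024-01-01 12:00:00"}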
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
data = request.get_json()
try:
validate(data, schema)
except ValidationError as e:
return {"error": e.message}, 415
crs = -1
if "crs" in data:
crs = int(data["crs"])
overview = False
if "overview" in data:
overview = data["overview"]
datetime = None
if "datetime" in data:
# check format "yyyy-MM-dd HH:mm:ss"
datetime = QDateTime.fromString(
data["datetime"], "yyyy-MM-dd HH:mm:ss"
)
if not datetime.isValid():
return {"error": "Invalid datetime"}, 415
rc, err = project.add_layer(
data["datasource"],
data["type"],
data["name"],
crs,
overview,
datetime,
)
if rc:
return jsonify(rc), 201
else:
return {"error": err}, 415
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/layers/<layer_name>")
def project_info_layer(name, layer_name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
layer_infos = project.layer(layer_name)
if layer_infos:
return jsonify(layer_infos), 201
else:
return {"error": "Layer does not exist"}, 415
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.delete("/<name>/layers/<layer_name>")
def project_del_layer(name, layer_name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
if project.layer_exists(layer_name):
rc = project.remove_layer(layer_name)
return jsonify(rc), 201
else:
return {"error": "Layer does not exist"}, 415
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.get("/<name>/cache")
def project_cache(name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
cache_infos, err = project.cache_metadata()
if err:
return {"error": err}, 415
return jsonify(cache_infos), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@projects.post("/<name>/cache/reset")
def project_cache_reset(name):
log_request()
try:
psql_schema = request.args.get("schema", default="public")
project = QSAProject(name, psql_schema)
if project.exists():
rc, err = project.cache_reset()
if err:
return {"error": err}, 415
return jsonify(rc), 201
else:
return {"error": "Project does not exist"}, 415
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
| 14,883 | Python | .py | 410 | 26.207317 | 67 | 0.545492 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,604 | utils.py | pblottiere_QSA/qsa-api/qsa_api/api/utils.py | # coding: utf8
import sys
import inspect
from ..utils import logger
def log_request():
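"""
Log the calling view function and its HTTP method.
The caller's source is inspected and the HTTP verb is read from its route
decorator (e.g. @projects.get(...)), then logged as "[GET] module.function".
"""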
caller_stack = inspect.stack()[1]
caller_fct = caller_stack.function
caller_frame = sys._getframe(1)
caller_mod = inspect.getmodule(caller_frame)
caller_fn = getattr(caller_mod, caller_fct)
req_type = caller_fn.__qualname__.split(".")[0].upper()
source = inspect.getsource(caller_fn)
req_type = source.splitlines()[0].split(".")[1].split("(")[0].upper()
logger().debug(f"[{req_type}] {caller_mod.__name__}.{caller_fct}")
| 553 | Python | .py | 14 | 35.285714 | 73 | 0.671698 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,605 | instances.py | pblottiere_QSA/qsa-api/qsa_api/api/instances.py | # coding: utf8
from flask import Blueprint
from flask import current_app
from datetime import datetime
from ..utils import logger
from .utils import log_request
instances = Blueprint("instances", __name__)
@instances.get("/")
def instances_list():
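"""
List the QGIS Server instances currently registered by the monitor.
Illustrative response shape:
{"servers": [{"id": "...", "ip": "...", "binded": <seconds since the instance connected>}]}
"""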
log_request()
try:
monitor = current_app.config["MONITOR"]
if not monitor:
return {"error": "QGIS Server monitoring is not activated"}, 415
conns = {"servers": []}
for uid in monitor.conns:
info = {}
info["id"] = uid
info["ip"] = monitor.conns[uid].ip
d = datetime.now() - monitor.conns[uid].now
info["binded"] = int(d.total_seconds())
conns["servers"].append(info)
return conns
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@instances.get("/<instance>")
def instances_metadata(instance):
log_request()
try:
monitor = current_app.config["MONITOR"]
if not monitor:
return {"error": "QGIS Server monitoring is not activated"}, 415
if instance not in monitor.conns:
return {"error": "QGIS Server instance is not available"}, 415
return monitor.conns[instance].metadata
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@instances.get("/<instance>/logs")
def instances_logs(instance):
log_request()
try:
monitor = current_app.config["MONITOR"]
if not monitor:
return {"error": "QGIS Server monitoring is not activated"}, 415
if instance not in monitor.conns:
return {"error": "QGIS Server instance is not available"}, 415
return monitor.conns[instance].logs
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@instances.get("/<instance>/stats")
def instances_stats(instance):
log_request()
try:
monitor = current_app.config["MONITOR"]
if not monitor:
return {"error": "QGIS Server monitoring is not activated"}, 415
if instance not in monitor.conns:
return {"error": "QGIS Server instance is not available"}, 415
return monitor.conns[instance].stats
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
| 2,463 | Python | .py | 65 | 30.307692 | 76 | 0.631424 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,606 | symbology.py | pblottiere_QSA/qsa-api/qsa_api/api/symbology.py | # coding: utf8
from flask import Blueprint, jsonify
from qgis.core import (
QgsStyle,
QgsSimpleLineSymbolLayer,
QgsSimpleFillSymbolLayer,
QgsSingleBandGrayRenderer,
QgsMultiBandColorRenderer,
QgsSimpleMarkerSymbolLayer,
QgsSingleBandPseudoColorRenderer,
)
from ..utils import logger
from .utils import log_request
symbology = Blueprint("symbology", __name__)
@symbology.get("/vector/line/single_symbol/line/properties")
def symbology_symbols_line():
log_request()
try:
props = QgsSimpleLineSymbolLayer().properties()
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get("/vector/polygon/single_symbol/fill/properties")
def symbology_symbols_fill():
log_request()
try:
props = QgsSimpleFillSymbolLayer().properties()
props["outline_style"] = (
"solid (no, solid, dash, dot, dash dot, dash dot dot)"
)
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get("/vector/point/single_symbol/marker/properties")
def symbology_symbols_marker():
log_request()
try:
props = QgsSimpleMarkerSymbolLayer().properties()
props["outline_style"] = (
"solid (no, solid, dash, dot, dash dot, dash dot dot)"
)
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get("/vector/rendering/properties")
def symbology_vector_rendering():
log_request()
try:
props = {}
props["opacity"] = 100.0
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get(
f"/raster/{QgsSingleBandGrayRenderer(None, 1).type()}/properties"
)
def symbology_raster_singlebandgray():
log_request()
try:
props = {}
props["gray"] = {"band": 1, "min": 0.0, "max": 1.0}
props["contrast_enhancement"] = {
"algorithm": "NoEnhancement (StretchToMinimumMaximum, NoEnhancement)",
"limits_min_max": "MinMax (MinMax, UserDefined)",
}
props["color_gradient"] = "BlackToWhite (BlackToWhite, WhiteToBlack)"
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get(
f"/raster/{QgsMultiBandColorRenderer(None, 1, 1, 1).type()}/properties"
)
def symbology_raster_multibandcolor():
log_request()
try:
props = {}
props["red"] = {"band": 1, "min": 0.0, "max": 1.0}
props["green"] = {"band": 2, "min": 0.0, "max": 1.0}
props["blue"] = {"band": 3, "min": 0.0, "max": 1.0}
props["contrast_enhancement"] = {
"algorithm": "NoEnhancement (StretchToMinimumMaximum, NoEnhancement)",
"limits_min_max": "MinMax (MinMax, UserDefined)",
}
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get(
f"/raster/{QgsSingleBandPseudoColorRenderer(None, 1).type()}/properties"
)
def symbology_raster_singlebandpseudocolor():
log_request()
try:
ramps = ", ".join(QgsStyle().defaultStyle().colorRampNames())
props = {}
props["band"] = {"band": 1, "min": 0.0, "max": 1.0}
props["ramp"] = {
"name": f"Spectral ({ramps})",
"color1": "0,0,0,255",
"color2": "255,255,255,255",
"stops": "0.2;2,2,11,255:0.8;200,200,110,255",
"interpolation": "Linear (Linear, Discrete, Exact)",
}
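# The "stops" value above is illustrative: it appears to encode ramp stops as
# "<offset>;<r,g,b,a>" entries separated by ":".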
props["contrast_enhancement"] = {
"limits_min_max": "MinMax (MinMax, UserDefined)",
}
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get(
f"/raster/{QgsSingleBandPseudoColorRenderer(None, 1).type()}/ramp/<name>/properties"
)
def symbology_raster_singlebandpseudocolor_ramp_props(name):
log_request()
try:
proper_name = ""
for n in QgsStyle().defaultStyle().colorRampNames():
if n.lower() == name:
proper_name = n
props = {}
ramp = QgsStyle().defaultStyle().colorRamp(proper_name)
if ramp:
props["color1"] = ramp.properties()["color1"].split("rgb")[0]
props["color2"] = ramp.properties()["color2"].split("rgb")[0]
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
@symbology.get("/raster/rendering/properties")
def symbology_raster_rendering():
log_request()
try:
props = {}
props["gamma"] = 1.0
props["brightness"] = 0
props["contrast"] = 0
props["saturation"] = 0
return jsonify(props)
except Exception as e:
logger().exception(str(e))
return {"error": "internal server error"}, 415
| 5,271 | Python | .py | 147 | 28.829932 | 88 | 0.618478 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,607 | histogram.py | pblottiere_QSA/qsa-api/qsa_api/processing/histogram.py | # coding: utf8
from multiprocessing import Process, Manager
from qgis.core import QgsProject, QgsRectangle
class Histogram:
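"""
Compute per-band histograms for a raster layer of a QGIS project.
The computation runs in a separate process (to work around the GDAL/S3
caching issue described below) and returns a dict of the form
{band: {"min": ..., "max": ..., "values": [...]}}.
"""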
def __init__(self, project_uri: str, layer: str) -> None:
self.layer = layer
self.project_uri = project_uri
def process(self, mini, maxi, count) -> dict:
# Some kind of cache is bothering us because when a raster layer is
# added on S3, we cannot open it with GDAL provider later. The
# QgsApplication needs to be restarted... why???
manager = Manager()
out = manager.dict()
p = Process(
target=Histogram._process,
args=(self.project_uri, self.layer, mini, maxi, count, out),
)
p.start()
p.join()
if "histo" in out:
return out["histo"].copy()
return {}
@staticmethod
def _process(
project_uri: str, layer: str, mini, maxi, count, out: dict
) -> None:
project = QgsProject.instance()
project.read(project_uri)
lyr = project.mapLayersByName(layer)[0]
histo = {}
for band in range(lyr.bandCount()):
h = lyr.dataProvider().histogram(
band + 1, count, mini, maxi, QgsRectangle(), 250000
)
histo[band + 1] = {}
histo[band + 1]["min"] = h.minimum
histo[band + 1]["max"] = h.maximum
histo[band + 1]["values"] = h.histogramVector
out["histo"] = histo
| 1,491 | Python | .py | 39 | 29.102564 | 75 | 0.5754 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,608 | raster_calculator.py | pblottiere_QSA/qsa-api/qsa_api/processing/raster_calculator.py | # coding: utf8
import rasterio
import tempfile
from pathlib import Path
from multiprocessing import Process, Manager
from qgis.PyQt.QtCore import QUrl, QUrlQuery
from qgis.analysis import QgsRasterCalcNode
from qgis.core import (
Qgis,
QgsProject,
QgsMapLayer,
QgsRasterPipe,
QgsRasterLayer,
QgsRasterBandStats,
QgsRasterFileWriter,
QgsRasterDataProvider,
QgsContrastEnhancement,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem,
)
from ..utils import s3_bucket_upload, s3_parse_uri, logger
class RasterCalculator:
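"""
Evaluate a QGIS raster calculator expression on the raster layers of a
project, write the result as a GeoTIFF, upload it to S3 and build its
overviews. The expression is evaluated through a "virtualraster" layer
assembled from the layers referenced in the expression.
"""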
def __init__(self, project_uri: str, expression: str) -> None:
self.expression = expression
self.project_uri = project_uri
def process(self, out_uri: str) -> (bool, str):
# Some kind of cache is bothering us because when a raster layer is
# added on S3, we cannot open it with GDAL provider later. The
# QgsApplication needs to be restarted... why???
manager = Manager()
out = manager.dict()
p = Process(
target=RasterCalculator._process,
args=(self.project_uri, self.expression, out_uri, out),
)
p.start()
p.join()
return out["rc"], out["error"]
@staticmethod
def _process(
project_uri: str, expression: str, out_uri: str, out: dict
) -> None:
vuri = RasterCalculator._virtual_uri(project_uri, expression)
if not vuri:
out["rc"] = False
out["error"] = "Failed to build virtual uri"
return
lyr = QgsRasterLayer(vuri, "", "virtualraster")
with tempfile.NamedTemporaryFile(suffix=".tif") as fp:
RasterCalculator._debug("Write temporary raster on disk")
file_writer = QgsRasterFileWriter(fp.name)
pipe = QgsRasterPipe()
pipe.set(lyr.dataProvider().clone())
rc = file_writer.writeRaster(
pipe,
lyr.width(),
lyr.height(),
lyr.extent(),
lyr.crs(),
)
if rc != Qgis.RasterFileWriterResult.Success:
out["rc"] = False
out["error"] = "Failed to write raster"
return
# update nodata
RasterCalculator._update_nodata(fp.name)
# upload
bucket, subdirs, filename = s3_parse_uri(out_uri)
dest = Path(subdirs) / Path(filename)
rc, msg = s3_bucket_upload(bucket, fp.name, dest.as_posix())
if not rc:
out["rc"] = False
out["error"] = msg
return
# build overview
lyr = QgsRasterLayer(fp.name, "", "gdal")
RasterCalculator._debug("Build overview")
fmt = Qgis.RasterPyramidFormat.GeoTiff
levels = lyr.dataProvider().buildPyramidList()
for idx, level in enumerate(levels):
levels[idx].setBuild(True)
err = lyr.dataProvider().buildPyramids(levels, "NEAREST", fmt)
if err:
out["rc"] = False
out["error"] = f"Cannot build overview ({err})"
return
# upload overview
ovr = f"{fp.name}.ovr"
dest = f"{dest.as_posix()}.ovr"
rc, msg = s3_bucket_upload(bucket, ovr, dest)
if not rc:
out["rc"] = False
out["error"] = msg
return
out["rc"] = True
out["error"] = ""
@staticmethod
def _update_nodata(filename: str) -> None:
# check if min is minimumValuePossible for the corresponding type
# if yes, update noDataValue
lyr = QgsRasterLayer(filename, "", "gdal")
stats = lyr.dataProvider().bandStatistics(
1,
QgsRasterBandStats.Min | QgsRasterBandStats.Max,
lyr.extent(),
250000,
)
for t in Qgis.DataType:
if (
stats.minimumValue
== QgsContrastEnhancement.minimumValuePossible(t)
):
RasterCalculator._debug(
f"Set no data value to {stats.minimumValue}"
)
with rasterio.open(filename, "r+") as dataset:
dataset.nodata = stats.minimumValue
break
@staticmethod
def _virtual_uri(project_uri: str, expression: str) -> str:
params = QgsRasterDataProvider.VirtualRasterParameters()
params.formula = expression
params.crs = QgsCoordinateReferenceSystem("EPSG:3857")
project = QgsProject.instance()
project.read(project_uri)
lyr_names = []
extent = None
width = 0
height = 0
params_query = QUrlQuery()
for layer in project.mapLayers().values():
if layer.type() != QgsMapLayer.RasterLayer:
continue
if layer.dataProvider().name() == "virtualraster":
continue
if layer.name() in lyr_names:
continue
if layer.name() not in expression:
continue
transform = QgsCoordinateTransform(
layer.crs(), params.crs, project
)
lyr_extent = transform.transformBoundingBox(layer.extent())
if extent is None:
extent = lyr_extent
else:
extent.combineExtentWith(lyr_extent)
if layer.width() > width:
width = layer.width()
if layer.height() > height:
height = layer.height()
vlayer = QgsRasterDataProvider.VirtualRasterInputLayers()
vlayer.name = layer.name()
vlayer.provider = layer.dataProvider().name()
vlayer.uri = layer.source()
# rInputLayers cannot be set from Python :(
# hack based on QgsRasterDataProvider.encodeVirtualRasterProviderUri
if vlayer.name not in lyr_names:
params_query.addQueryItem(vlayer.name + ":uri", vlayer.uri)
params_query.addQueryItem(
vlayer.name + ":provider", vlayer.provider
)
lyr_names.append(layer.name())
if extent is None:
return ""
params.width = width
params.height = height
params.extent = extent
vuri = QgsRasterDataProvider.encodeVirtualRasterProviderUri(params)
# rInputLayers cannot be set from Python :(
# hack based on QgsRasterDataProvider.encodeVirtualRasterProviderUri
params_uri = QUrl()
params_uri.setQuery(params_query)
params_vuri = str(
QUrl.toPercentEncoding(
str(params_uri.toEncoded(), encoding="utf-8")
),
encoding="utf-8",
)[3:]
return f"{vuri}%26{params_vuri}"
@staticmethod
def _debug(msg: str) -> None:
caller = "raster_calculator"
msg = f"[{caller}] {msg}"
logger().debug(msg)
def is_valid(self) -> bool:
node = QgsRasterCalcNode.parseRasterCalcString(self.expression, "")
if node is None:
return False
return True
| 7,298 | Python | .py | 190 | 27.052632 | 80 | 0.568053 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,609 | __init__.py | pblottiere_QSA/qsa-api/qsa_api/processing/__init__.py | # coding: utf8
from .histogram import Histogram
from .raster_calculator import RasterCalculator
| 97 | Python | .py | 3 | 31 | 47 | 0.860215 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,610 | renderer.py | pblottiere_QSA/qsa-api/qsa_api/vector/renderer.py | # coding: utf8
from pathlib import Path
from qgis.core import (
QgsSymbol,
QgsFeatureRenderer,
QgsReadWriteContext,
)
from qgis.PyQt.QtXml import QDomDocument, QDomNode
RENDERER_TAG_NAME = "renderer-v2" # constant from core/symbology/renderer.h
class VectorSymbologyRenderer:
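"""
Read a QGIS vector style (QML) file and expose its single symbol renderer
as a JSON-serializable dict (symbology properties, symbol type, geometry
and rendering opacity).
"""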
@staticmethod
def style_is_vector(path: Path) -> bool:
with open(path, "r") as file:
if RENDERER_TAG_NAME in file.read():
return True
return False
@staticmethod
def style_to_json(path: Path) -> (dict, str):
doc = QDomDocument()
doc.setContent(open(path.as_posix()).read())
node = QDomNode(doc.firstChild())
renderer_node = node.firstChildElement(RENDERER_TAG_NAME)
renderer = QgsFeatureRenderer.load(
renderer_node, QgsReadWriteContext()
)
if renderer is None:
return {}, f"Internal error: vector style {path} cannot be loaded"
symbol = renderer.symbol()
props = symbol.symbolLayer(0).properties()
opacity = symbol.opacity()
geom = "line"
symbol = QgsSymbol.symbolTypeToString(symbol.type()).lower()
if symbol == "fill":
geom = "polygon"
m = {}
m["name"] = path.stem
m["type"] = "vector"
m["symbology"] = {}
m["symbology"]["type"] = "single_symbol"
m["symbology"]["properties"] = props
m["symbology"]["symbol"] = symbol
m["symbology"]["geometry"] = geom
m["rendering"] = {}
m["rendering"]["opacity"] = opacity
return m, ""
| 1,616 | Python | .py | 45 | 27.888889 | 78 | 0.605788 | pblottiere/QSA | 8 | 4 | 12 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,611 | pipeline.py | APAJanssen_KinaseDocker2/pipeline.py | # Pipeline imports
import argparse
import itertools
import os
import shutil
import time
import zlib
import docker
import pandas as pd
# from pymol import cmd
from rdkit import Chem
from rdkit.Chem import PandasTools
# Extra VinaGPU imports
import datetime
import re
import subprocess as sp
from meeko import MoleculePreparation
from meeko import PDBQTMolecule
from rdkit.Chem import AllChem
'''
Pipeline
'''
class Pipeline:
'''
Pipeline class that runs the docking and scoring.
'''
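# Illustrative usage (all argument values below are hypothetical):
#
# pipeline = Pipeline(run_name='egfr_screen',
#                     smiles_list=['CCOc1ccc2ncnc(Nc3ccccc3)c2c1'],
#                     kinase_families=['TK'],
#                     accessions=['P00533'],
#                     docking_software='vina',  # or 'diffdock'
#                     scoring_algorithm='DNN',
#                     output_path='/path/to/output')
# pipeline.run()
# sdf_path, csv_path = pipeline.get_results_filepath()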
def __init__(self, run_name, smiles_list, kinase_families, accessions, docking_software, scoring_algorithm, output_path):
# Store variables
self.run_name = run_name if not os.path.exists(os.path.join(output_path, run_name, 'results', f'{run_name}_{docking_software}_results.csv')) else run_name + '_copy' # Prevent overwriting existing results
self.smiles_list = smiles_list
self.kinase_families = kinase_families
self.accessions = accessions
self.docking_software = docking_software
self.scoring_algorithm = scoring_algorithm
# Setup paths
self.output_path = os.path.abspath(os.path.join(output_path, self.run_name))
self.pdb_path = os.path.join(self.output_path, 'pdb')
self.docking_path = os.path.join(self.output_path, 'intermediate_input_' + self.docking_software)
self.model_path = os.path.join(self.output_path, 'intermediate_input_' + self.scoring_algorithm)
self.results_path = os.path.join(self.output_path, 'docking_results')
self.setup_folders()
# Load the kinase data and retrieve the KLIFS structures
self.kin_data = pd.read_csv(os.path.join(os.path.dirname(__file__), 'kinase_data.csv'))
self.structures = self.get_structures()
# Setup the base docker container
dev_req = docker.types.DeviceRequest
self.container = None
self.client = docker.from_env()
self.docker_kwargs = dict(image='apajanssen/kinasedocker2',
device_requests=[dev_req(device_ids=['0'], capabilities=[['gpu']])])
def run(self):
'''
Start the pipeline.
'''
# Store processing and docking function based on the docking software
if self.docking_software == 'vina':
preprocess = self.preprocessing_vina
dock = self.dock_vina
else: # diffdock
preprocess = self.preprocessing_diffdock
dock = self.dock_diffdock
self.start_time = time.time() # Start timer
print(f'[{self.get_current_runtime()}] Preprocessing...')
preprocess() # Preprocess the structures and smiles to an input file for the docking software
dock() # Dock everything with the respective docking software
self.preprocessing_dnn() # Preprocess the docking results to an input file for the DNN
self.run_dnn() # Run the DNN on the docking results
self.postprocess_results() # Postprocess the results
self.cleanup() # Cleanup the output folders
print(f'[{self.get_current_runtime()}] Pipeline finished!')
def start_docker_container(self, docker_kwargs): # Default with basic docker_kwargs
'''
Start the docker container.
'''
container = self.client.containers.run(
command='sleep infinity', # Keeps the container running until it is killed
detach=True, # Run container in background
**docker_kwargs)
return container
def remove_docker_container(self):
"""
Stop Vina-GPU docker container
"""
self.container.remove(force=True)
self.container = None
def dock_vina(self, threads=8192, search_depth=10):
'''
Dock the structures with vina.
Threads: number of threads to use for docking (8192 was found to be optimal)
Search depth: Algorithm search depth (10 was found to be optimal)
'''
print(f'[{self.get_current_runtime()}] Docking with Vina...')
print('-'*50)
# Load input data
input_data = pd.read_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_input.csv'))
# Create the Vina runner
vina_gpu = VinaGPU(out_path=self.docking_path)
# Loop over the structures and dock all associated compounds
for i, pdb in enumerate(input_data['klifs_ID'].unique(), 1):
# Get the input data for the current pdb
smiles = input_data[input_data['klifs_ID'] == pdb]['smiles'].tolist()
box_center = self.kin_data[self.kin_data['klifs_ID'] == pdb][['box_center_x', 'box_center_y', 'box_center_z']].values[0]
box_size = self.kin_data[self.kin_data['klifs_ID'] == pdb][['box_size_x', 'box_size_y', 'box_size_z']].values[0]
print(f'[{self.get_current_runtime()}] [VINA] Docking {len(self.smiles_list)} compound(s) in {pdb} ({i}/{len(input_data["klifs_ID"].unique())})')
# Dock the current pdb
vina_gpu.dock(target_pdb_path=os.path.join(self.pdb_path, f'{pdb}.pdb'),
smiles=smiles,
output_subfolder=str(pdb),
box_center=box_center,
box_size=box_size,
threads=threads,
threads_per_call=threads,
num_modes=3, # num poses
search_depth=search_depth)
self.postprocess_vina_output()
print('-'*50)
def dock_diffdock(self):
'''
Dock the structures with diffdock.
'''
print(f'[{self.get_current_runtime()}] Docking with DiffDock...')
print('-'*50)
# Setup folder links in docker container
docker_kwargs = self.docker_kwargs.copy()
docker_kwargs['volumes'] = {self.docking_path: {'bind': '/diffdock/input/data', 'mode': 'rw'},
os.path.join(self.docking_path, 'output'): {'bind': '/diffdock/results', 'mode': 'rw'}}
# Start the docker container
self.container = self.start_docker_container(docker_kwargs)
# Loop over the structures and dock all associated compounds
try:
for i, klifs_id in enumerate(self.structures, 1):
print(f'[{self.get_current_runtime()}] [DIFFDOCK] Docking {len(self.smiles_list)} compound(s) in {klifs_id} ({i}/{len(self.structures)})')
cmd = f'python3 -m inference_JS --protein_ligand_csv input/data/{self.run_name}_{self.docking_software}_input_{klifs_id}.csv --inference_steps 10 --samples_per_complex 3 --batch_size 10 --actual_steps 10 --no_final_step_noise'
_, (stdout, stderr) = self.container.exec_run(cmd=cmd, workdir='/diffdock', demux=True)
except Exception as e:
print(f'[{self.get_current_runtime()}] Error has occurred while docking: {e}')
raise e
except KeyboardInterrupt:
print(f'[{self.get_current_runtime()}] Docking interrupted by user')
finally:
self.remove_docker_container()
self.postprocess_diffdock_output()
print('-'*50)
def run_dnn(self):
'''
Run the DNN on the docking results.
'''
print(f'[{self.get_current_runtime()}] Running DNN...')
# Setup folder links in docker container
docker_kwargs = self.docker_kwargs.copy()
docker_kwargs['volumes'] = {self.model_path: {'bind': '/DNN/DNN_data/input', 'mode': 'rw'},
os.path.join(self.model_path, 'output'): {'bind': '/DNN/results', 'mode': 'rw'}}
# Start the docker container
self.container = self.start_docker_container(docker_kwargs)
# Run the DNN
try:
cmd = f'python3 DNN_eval.py --input_file {self.run_name}_DNN_input.csv --docking_type {self.docking_software}'
_, (stdout, stderr) = self.container.exec_run(cmd=cmd, workdir='/DNN', demux=True)
print(f'[{self.get_current_runtime()}] Determine clashing...')
cmd = f'python3 clashing.py --input_file {self.run_name}_DNN_input.csv'
_, (stdout, stderr) = self.container.exec_run(cmd=cmd, workdir='/DNN', demux=True)
except Exception as e:
print(f'[{self.get_current_runtime()}] Error has occurred while running DNN: {e}')
raise e
except KeyboardInterrupt:
print(f'[{self.get_current_runtime()}] DNN interrupted by user')
finally:
self.remove_docker_container()
def postprocess_vina_output(self):
'''
postprocess the vina output by concatenating everything into one file.
'''
final_data = pd.DataFrame()
folders = os.listdir(os.path.join(self.docking_path, 'output')) # Process everything in the output folder, may cause issues when previous runs are not cleaned up properly
if len(folders) == 0:
raise Exception('No docking results found, something went wrong!')
# Extract the log.tsv files from the output folder tree and append it to the final dataframe
for folder in folders:
folder_path = os.path.join(self.docking_path, 'output', folder)
if os.path.isfile(os.path.join(folder_path, 'log.tsv')):
data = pd.read_csv(os.path.join(folder_path, 'log.tsv'), sep='\t')
final_data = pd.concat([final_data, data], ignore_index=True)
# Change some headers and extract the molblock from the compressed molfile, save the first line as the pose_ID
final_data.rename(columns={'target': 'klifs_ID', 'score': 'vina_score', 'molfile': 'molblock'}, inplace=True)
final_data['klifs_ID'] = final_data['klifs_ID'].astype(int)
final_data['molblock'] = final_data['molblock'].apply(self.decompress)
final_data['pose_ID'] = final_data['molblock'].apply(lambda x: x.split('\n')[0])
final_data = final_data.reindex(columns=['pose_ID', 'klifs_ID', 'smiles', 'vina_score', 'molblock']) # Change Dataframe column order
final_data.to_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_docking_results.csv'), index=False)
def postprocess_diffdock_output(self):
'''
postprocess the diffdock output by concatenating everything into one file.
'''
final_data = pd.DataFrame()
files = os.listdir(os.path.join(self.docking_path, 'output')) # Process everything in the output folder, may cause issues when previous runs are not cleaned up properly
if len(files) == 0:
raise Exception('No docking results found, something went wrong!')
# Append the results from all files to the final dataframe
for filename in files:
file_path = os.path.join(self.docking_path, 'output', filename)
data = pd.read_csv(file_path)
final_data = pd.concat([final_data, data], ignore_index=True)
# Change some headers and extract the molblock from the compressed molfile, save the first line as the pose_ID
final_data['klifs_ID'] = final_data['klifs_ID'].astype(int)
final_data.rename(columns={'SMILES_input': 'smiles_input',
'SMILES_output': 'smiles_output',
'molfile_compressed': 'molblock',
'DiffDock_confidence': 'diffdock_confidence'}, inplace=True)
final_data['molblock'] = final_data['molblock'].apply(self.decompress)
final_data['pose_ID'] = final_data['molblock'].apply(lambda x: x.split('\n')[0])
final_data = final_data.reindex(columns=['pose_ID', 'klifs_ID', 'smiles_input', 'smiles_output', 'diffdock_confidence', 'molblock']) # Change Dataframe column order
final_data.to_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_docking_results.csv'), index=False)
def postprocess_results(self):
'''
Postprocess the results by saving the results to a csv and SDF.
'''
docking_results = pd.read_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_docking_results.csv')) # Original docking results
scoring_results = pd.read_csv(os.path.join(self.model_path, 'output', f'{self.run_name}_{self.scoring_algorithm}_input_results.csv')) # Model scoring results
clashing_results = pd.read_csv(os.path.join(self.model_path, 'output', f'{self.run_name}_{self.scoring_algorithm}_input_clashing.csv')) # Clashing results
docking_col = 'vina_score' if self.docking_software == 'vina' else 'diffdock_confidence' # Determine docking software specific column for dropping
# Merge the results
pose_results = pd.merge(docking_results, scoring_results, on='pose_ID', how='left')
pose_results = pd.merge(pose_results, clashing_results, on='pose_ID', how='left')
pose_results['Molecule'] = pose_results['molblock'].apply(Chem.MolFromMolBlock)
pose_results['Kinase'] = pose_results['klifs_ID'].apply(lambda x: self.kin_data[self.kin_data['klifs_ID'] == x]['kinase'].values[0]) # Retrieves relevant kinase information based on Klifs_ID
pose_results['accession'] = pose_results['klifs_ID'].apply(lambda x: self.kin_data[self.kin_data['klifs_ID'] == x]['accession'].values[0]) # Retrieves relevant kinase information based on Klifs_ID
pose_results.drop(columns=['pose_ID', 'molblock', docking_col], inplace=True) # Drop unnecessary columns
# Rename some columns
if self.docking_software == 'diffdock':
pose_results.rename(columns={'smiles_input': 'SMILES'}, inplace=True)
pose_results.drop(columns=['smiles_output'], inplace=True)
else:
pose_results.rename(columns={'smiles': 'SMILES'}, inplace=True)
pose_results = pose_results.reindex(columns=['SMILES', 'Kinase', 'accession', 'klifs_ID', 'pIC50', 'clash_score', 'Molecule']) # Change Dataframe column order
pose_results['pIC50'] = pose_results['pIC50'].apply(lambda x: round(x, 2)) # Round pIC50 to 2 decimals
# Save pose results to .SDF
PandasTools.WriteSDF(pose_results, os.path.join(self.results_path, f'{self.run_name}_{self.docking_software}_results.sdf'), molColName='Molecule', idName='SMILES', properties=list(pose_results.columns))
# Aggregate pose results
agg_results = pose_results.groupby(['klifs_ID', 'SMILES']).agg({'Kinase': 'first', 'accession': 'first', 'pIC50': 'mean', 'clash_score': 'max'}).reset_index()
agg_results.rename(columns={'pIC50': 'avg_score', 'clash_score': 'clash_score_max'}, inplace=True)
agg_results = agg_results.reindex(columns=['SMILES', 'Kinase', 'accession', 'klifs_ID', 'avg_score', 'clash_score_max']) # Change Dataframe column order
agg_results.to_csv(os.path.join(self.results_path, f'{self.run_name}_{self.docking_software}_results.csv'), index=False) # Save aggregated results to .csv
def preprocessing_vina(self):
'''
Preprocess the structures and smiles to an input file for vina.
The input file is a csv with the following columns:
- klifs_ID
- smiles
The docking box center and size are not written to this file; they are
looked up per KLIFS structure from kinase_data.csv in dock_vina().
'''
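# Illustrative contents of the generated CSV (hypothetical values):
# klifs_ID,smiles
# 3835,CCOc1ccc2ncnc(Nc3ccccc3)c2c1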
structure_smiles = itertools.product(self.structures, self.smiles_list) # Create all combinations of structures and smiles
df = pd.DataFrame(structure_smiles, columns=['klifs_ID', 'smiles'])
df.to_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_input.csv'), index=False)
def preprocessing_diffdock(self):
'''
Preprocess the structures and smiles to an input file for diffdock.
Also save input file per KLIFS to enable a loop in the pipeline.
The input file should be a csv with the following columns:
- complex_name (smiles, since it will dock per klifs)
- protein_path (path to pdb file: input/pdb/{klifs_ID}.pdb)
- ligand_description (smiles)
- protein_sequence (empty)
'''
structure_smiles = itertools.product(self.structures, self.smiles_list) # Create all combinations of structures and smiles
df = pd.DataFrame(structure_smiles, columns=['klifs_ID', 'ligand_description'])
df['complex_name'] = df['ligand_description']
df['protein_path'] = df['klifs_ID'].apply(lambda x: f'input/pdb/{x}.pdb') # Create the path to the pdb file
df['protein_sequence'] = '' # Empty protein sequence, but the input file needs it
df = df.reindex(columns=['complex_name', 'protein_path', 'ligand_description', 'protein_sequence'])
# Split the input file into chunks per klifs_ID
for prot_path in df['protein_path'].unique():
klifs_ID = prot_path.split('/')[-1].split('.')[0]
df[df['protein_path'] == prot_path].to_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_input_{klifs_ID}.csv'), index=False)
df.to_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_input.csv'), index=False)
def preprocessing_dnn(self):
'''
Preprocess the structures and smiles to an input file for the DNN.
Reads docking results, and extracts relevant information.
'''
docking_data = pd.read_csv(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_docking_results.csv'))
dnn_input = docking_data.reindex(columns=['pose_ID', 'klifs_ID', 'molblock'])
dnn_input['klifs_ID'] = dnn_input['klifs_ID'].astype(int)
dnn_input.to_csv(os.path.join(self.model_path, f'{self.run_name}_DNN_input.csv'), index=False)
def get_structures(self):
'''
Get all KLIFS structures for the given kinase families and individual accessions.
'''
structures = []
for family in self.kinase_families:
structures.extend(self.kin_data[self.kin_data['kinasegroup'] == family]['klifs_ID'].tolist())
if self.accessions:
structures.extend(self.kin_data[self.kin_data['accession'].isin(self.accessions)]['klifs_ID'].tolist())
structures = list(set(structures)) # Remove potential duplicates from single accessions being in the same group as was in selection
# Save the pdb files to the output_path/pdb folder
for pdb in structures:
compressed_pdb = self.kin_data[self.kin_data['klifs_ID'] == pdb]['pdb_compressed'].values[0]
pdb_string = self.decompress(compressed_pdb)
with open(os.path.join(self.pdb_path, f'{pdb}.pdb'), 'w') as f:
f.write(pdb_string)
return structures
def setup_folders(self):
'''
Setup the folders for the pipeline.
'''
# Create output folder
os.makedirs(self.output_path, exist_ok=True)
# Create PDB folder
os.makedirs(self.pdb_path, exist_ok=True)
# Create Docking software folders
os.makedirs(self.docking_path, exist_ok=True)
os.makedirs(os.path.join(self.docking_path, 'output'), exist_ok=True)
# Create scoring algorithm folders
if self.scoring_algorithm == 'DNN':
os.makedirs(self.model_path, exist_ok=True)
os.makedirs(os.path.join(self.model_path, 'output'), exist_ok=True)
# Create results folder
os.makedirs(self.results_path, exist_ok=True)
def cleanup(self):
'''
Cleanup the output folders.
'''
if self.docking_software == 'vina':
shutil.rmtree(os.path.join(self.docking_path, 'output'))
elif self.docking_software == 'diffdock':
shutil.rmtree(os.path.join(self.docking_path, 'output'))
# Remove the separate input files for diffdock
for klifs_ID in self.structures:
os.remove(os.path.join(self.docking_path, f'{self.run_name}_{self.docking_software}_input_{klifs_ID}.csv'))
if self.scoring_algorithm == 'DNN':
shutil.rmtree(os.path.join(self.model_path, 'output'))
def decompress(self, compressed):
'''
Decompress a string.
'''
return zlib.decompress(bytes.fromhex(compressed)).decode('utf-8')
def get_results_filepath(self):
'''
Get the filepath to the results file. Returns .sdf and .csv filepaths.
'''
return os.path.join(self.results_path, f'{self.run_name}_{self.docking_software}_results.sdf'), os.path.join(self.results_path, f'{self.run_name}_{self.docking_software}_results.csv')
def get_current_runtime(self):
'''
Get the current runtime of the pipeline.
'''
return str(round(time.time() - self.start_time)) + ' s'
'''
VinaGPU
'''
class VinaGPU():
"""
Class methods for running Vina-GPU docker container
Also contains methods for preparing the ligand and target:
- Ligand preparation via rdkit and meeko
- Target preparation via ADFR Suite and pdb_tools
"""
def __init__(self, docker_image_name='apajanssen/kinasedocker2', devices=['0'], out_path=None):
self.device = 'gpu'
self.device_id = devices
self.out_path = os.path.join(out_path, 'output') if out_path is not None else os.path.join(os.getcwd(), 'output')
# Setup ADFR suite for target preparation
self.adfr_suite_docker_path = '/htd/ADFRsuite-1.0'
# Setup meeko for ligand preparation
self.molecule_preparation = MoleculePreparation(rigid_macrocycles=True)
# Setup VinaGPU docker paths
self.vina_dir = '/vina-gpu-dockerized/vina'
self.docking_dir = self.vina_dir + '/docking'
## Configuration for running the Vina-GPU docker container
# (requires nvidia-docker runtime)
dev_req = docker.types.DeviceRequest # type: ignore
self.container = None
self.client = docker.from_env()
self.docker_kwargs = dict(
image=docker_image_name,
volumes = [f'{self.out_path}:{self.docking_dir}'],
device_requests=[dev_req(device_ids=devices, capabilities=[['gpu']])])
def dock(self, target_pdb_path, smiles=[], output_subfolder='',
box_center=(0,0,0), box_size=(20,20,20), search_depth=3,
threads=256, threads_per_call=256, num_modes=3, clean=True, verbose=False, # num_modes determines number of poses
write_log=True, **kwargs):
"""
Use Vina-GPU docker image to dock ligands (list of SMILES) to the target.
Produces a .pdbqt file for each ligand (with multiple docked orientations).
Parameters:
target_pdb_path (str) : path to target pdb file
smiles: (list(str)) : list of smiles strings
output_subfolder (str), opt : subfolder to save output files
box_center (tuple(float)), opt : coordinates of the active site of the target (x,y,z)=(0,0,0)
box_size (tuple(float)), opt : size of the bounding box around the active site (x,y,z)=(20,20,20)
threads (int), opt : number of threads to use for docking
thread_per_call (int), opt : number of threads to use for each call to Vina
num_modes (int), opt : number of poses to generate for each ligand
clean (bool), opt : remove ligand .pdbqt files after docking
verbose (bool), opt : print docking progress, scores, etc.
write_log (bool), opt : write log file with docking results
Returns:
all_scores (list(list(float))) : list of docking scores for each ligand
"""
assert (len(smiles) > 0), "A list of smiles strings must be provided"
results_path = os.path.join(self.out_path, output_subfolder)
os.makedirs(results_path, exist_ok=True)
# Prepare target .pdbqt file
target_pdbqt_path = self.prepare_target(target_pdb_path, output_path=results_path)
# Ensure no ligands from prior docking run linger (caused issues in loop)
ligand_pdbqt_paths = []
# Prepare ligand .pdbqt files
print('Processing ligands...') if verbose else None
for i, mol in enumerate(smiles):
ligand_pdbqt_path = os.path.join(results_path, f'ligand_{i}.pdbqt')
out_path = self.prepare_ligand(mol, out_path=ligand_pdbqt_path)
if out_path is not None:
ligand_pdbqt_paths.append(ligand_pdbqt_path)
basenames = [os.path.basename(p) for p in ligand_pdbqt_paths] # Ligand basenames (format 'ligand_0.pdbqt')
basenames_docked = [lig.replace('.pdbqt', '_docked.pdbqt') for lig in basenames] # Docked ligand basenames (format 'ligand_0_docked.pdbqt')
ligand_paths_docked = [os.path.join(results_path, p) for p in basenames_docked]
### Start Vina-GPU docker container and dock everything
self.container = self.start_docker_container()
try:
timing, dates = [], []
all_scores = [[0] for i in range(len(smiles))]
target = os.path.splitext(os.path.basename(target_pdb_path))[0]  # strip the file extension to get the target name
for i, ligand_file in enumerate(basenames):
t0 = time.time()
docking_args = dict(
receptor = f'docking/{output_subfolder}/{os.path.basename(target_pdbqt_path)}',
ligand = f'docking/{output_subfolder}/{ligand_file}',
out = f'docking/{output_subfolder}/{basenames_docked[i]}',
center_x = box_center[0],
center_y = box_center[1],
center_z = box_center[2],
size_x = box_size[0],
size_y = box_size[1],
size_z = box_size[2],
thread = threads,
search_depth = search_depth,
thread_per_call = threads_per_call,
num_modes = num_modes)
cmd = './Vina-GPU ' + ' '.join([f'--{k} {v}' for k, v in docking_args.items()])
try:
_, (stdout, stderr) = self.container.exec_run(
cmd=cmd,
workdir=self.vina_dir,
demux=True)
scores = process_stdout(stdout)
if len(scores) > 0 and scores != [None]:
all_scores[i] = scores
timing += [round(time.time() - t0, 2)]
dates += [datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")]
if verbose:
print(f'- {self.device}:{self.device_id} | [{dates[-1]} | t={timing[-1]}s] Docked ligand {i+1}/{len(basenames)} | Affinity values: {all_scores[i]}...')
if write_log:
log_path = os.path.join(results_path, 'log.tsv')
write_to_log(log_path, smiles[i], target, all_scores[i], ligand_paths_docked[i])
if clean: # Remove intermediate files (undocked ligand .pdbqt files)
os.remove(ligand_pdbqt_paths[i])
os.remove(ligand_paths_docked[i])
except Exception as d:
print(d)
except Exception as e:
print(f'Error has occurred while docking ligand {i}: {e, stderr}')
raise e
except KeyboardInterrupt:
print('Docking interrupted by user')
finally:
self.remove_docker_container()
return all_scores
def start_docker_container(self):
"""
Start Vina-GPU docker container (runs until it is killed)
Returns:
docker container object
"""
container = self.client.containers.run(
command='sleep infinity', # Keeps the container running until it is killed
detach=True, # Run container in background
**self.docker_kwargs)
return container
def remove_docker_container(self):
"""
Stop Vina-GPU docker container
"""
self.container.remove(force=True)
self.container = None
def prepare_ligand(self, smiles, out_path=None):
"""
Prepare ligand for docking, return ligand .pdbqt file path
Arguments:
smiles (str) : smiles string
out_path (str) : path to save the .pdbqt file (default: the VinaGPU output folder)
Returns:
path to the ligand .pdbqt file
"""
try:
# Ligand preparation via rdkit and meeko
mol = Chem.MolFromSmiles(smiles) # type: ignore
protonated_ligand = Chem.AddHs(mol) # type: ignore
AllChem.EmbedMolecule(protonated_ligand) # type: ignore
self.molecule_preparation.prepare(protonated_ligand)
# Write to .pdbqt file required by Vina
if out_path is None:
out_path = self.out_path
self.molecule_preparation.write_pdbqt_file(out_path)
except Exception as e:
print(f'Error while preparing ligand: {e}')
out_path = None
return out_path
def prepare_target(self, pdb_path, output_path=None, chain='A'):
"""
Prepare target for docking, return target pdbqt path
Arguments:
pdb_path (str) : path to target .pdb file
            output_path (str) : path to save the .pdbqt file (default: self.out_path)
chain (str) : chain to use for docking (if target is a multi-chain protein)
Returns:
path to the processed target .pdbqt file
"""
## Output filenames
# Prepare target
if pdb_path.endswith('.pdb'): # If target is a .pdb file, convert to .pdbqt
            # Fall back to the default output folder before building any paths (avoids joining with None)
            if output_path is None:
                output_path = self.out_path
            target_pdbqt_path = os.path.join(output_path, os.path.basename(pdb_path).replace('.pdb', '.pdbqt'))
            if not os.path.isfile(target_pdbqt_path):
basename = os.path.basename(pdb_path)
out_file_path = os.path.join(output_path, basename) # This is where the target .pdb file will be saved
shutil.copyfile(pdb_path, out_file_path) # Copy target .pdb file to output folder
chain_basename = basename.replace('.pdb', f'_chain_{chain}.pdb') # Name of the .pdb file with only the selected chain
chain_pdb_path = os.path.join(output_path, chain_basename) # Full path to the .pdb file with only the selected chain
pdbqt_basename = basename.replace('.pdb', '.pdbqt') # Name of the .pdbqt file
target_pdbqt_path = os.path.join(output_path, pdbqt_basename) # Full path to the .pdbqt file
# Processing within the docker container
# Select a single chain in case the target is a multimer
if self.container is None:
self.container = self.start_docker_container()
try:
workdir = self.docking_dir + '/' + os.path.basename(output_path)
cmd = f"bash -c 'pdb_selchain -{chain} {basename} | pdb_delhetatm | \
pdb_tidy > {chain_basename}'"
self.container.exec_run(
cmd=cmd,
workdir=workdir,
demux=True)
## Prepare the target for docking using ADFR Suite 'prepare_receptor' binary
adfr_binary = os.path.join(self.adfr_suite_docker_path, 'bin', 'prepare_receptor')
cmd = f'{adfr_binary} -r {chain_basename} -o {pdbqt_basename} -A checkhydrogens'
self.container.exec_run(
cmd=cmd,
workdir=workdir,
demux=True)
except Exception as e:
print(f'Error while preparing target: {e}')
except KeyboardInterrupt:
print('KeyboardInterrupt')
finally:
self.remove_docker_container()
else:
target_pdbqt_path = None
raise ValueError(f'Invalid file type: {pdb_path}')
return target_pdbqt_path
'''
VinaGPU utils
'''
def run_executable(cmd, shell=True, **kwargs):
""" Run executable command and return output from stdout and stderr """
proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=shell, **kwargs)
stdout, stderr = proc.communicate()
return (stdout, stderr)
def process_stdout(stdout):
""" Processes the stdout of Vina, returns the affinity of each docking orientation. """
affinities = []
is_int = re.compile(r'^\s*\d+\s*$')
for line in stdout.splitlines():
if bool(is_int.match(line.decode('utf-8')[:4])):
orientation_id, affinity, dist1, dist2 = line.split()
affinities += [float(affinity)]
return affinities
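# Worked example (made-up values): given Vina stdout data rows such as
#    1       -8.2          0.000      0.000
#    2       -7.9          1.512      2.104
# the regex above accepts lines whose first characters are a mode number, each matching line
# is split into (mode, affinity, rmsd_lb, rmsd_ub), and the function returns [-8.2, -7.9].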
def compress_string(string):
"""
Compresses a string
Arguments:
string (str) : string to compress
Returns:
compressed (str) : compressed string
"""
return zlib.compress(string.encode('utf-8')).hex()
def decompress_string(compressed):
"""
Decompresses a compressed string
Arguments:
compressed (str) : compressed string
Returns:
string (str) : decompressed string
"""
return zlib.decompress(bytes.fromhex(compressed)).decode('utf-8')
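# Round-trip sketch (illustrative): decompress_string(compress_string(mol_block)) == mol_block.
# This pair keeps the pose mol blocks written to the log compact while staying recoverable.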
def write_to_log(log_path, smiles, target, scores, pdbqt_path=None):
"""
Writes a log file
Arguments:
log_path (str) : path to log file
smiles (str) : SMILES of ligand
target (str) : target name
scores (list) : list of scores
pdbqt_path (str) : path to pdbqt file
"""
# If no log file exists, create one with a header
if not os.path.isfile(log_path):
with open(os.path.join(log_path), 'w') as f:
header = '\t'.join(['smiles', 'target', 'score', 'molfile'])
f.write(header + '\n')
if pdbqt_path is not None: # If a pdbqt file is provided, read it in as PDBQT molecule
with open(pdbqt_path, 'r') as f:
pdbqt = f.read()
pdbqt_mol = PDBQTMolecule(pdbqt, skip_typing=True)
else:
pdbqt_mol = None
# If scores is not a list, make it a list
if not isinstance(scores, list):
scores = [scores]
z = [str(score) for score in scores] # Convert scores to strings
# Write to log file
with open(log_path, 'a') as f:
for i, score in enumerate(z):
if pdbqt_mol:
rdkit_mol = pdbqt_mol[i].export_rdkit_mol()
pose_block = Chem.MolToMolBlock(rdkit_mol)
# Replace header with Smiles_target_VINA_poserank
index = pose_block.find('3D') + 2
title = smiles + f'_{target}_VINA_{i + 1}\n'
pose_block = title + pose_block[index:]
pose_block = compress_string(pose_block)
else:
pose_block = ''
f.write('\t'.join([smiles, target, score, pose_block])+'\n')
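# A resulting log.tsv row looks roughly like (illustrative values, compressed block truncated):
#   CCO <tab> P00533 <tab> -7.9 <tab> 789c4b54c8c9...
# i.e. tab-separated SMILES, target name, score, and the compressed pose block produced above.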
# CLI
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run Pipeline')
parser.add_argument('--run_name', type=str, help='Name of the run', required=True)
parser.add_argument('--smi_file', type=str, help='Path to the SMILES file', required=True)
parser.add_argument('--kinase_families', type=str, nargs='+', choices=['AGC', 'CAMK', 'CK1', 'CMGC', 'Other', 'STE', 'TK', 'TKL'], help='Kinase families to include', required=False)
parser.add_argument('--accessions', type=str, nargs='+', help='Kinase accessions to include', required=False)
parser.add_argument('--docking_engine', type=str, choices=['vina', 'diffdock'], help='Docking engine to use', required=True)
parser.add_argument('--scoring_function', type=str, choices=['DNN'], help='Scoring function to use', default='DNN', required=False)
parser.add_argument('--output_path', type=str, help='Path to the output folder', required=True)
args = parser.parse_args()
# Load SMILES
if not os.path.isfile(args.smi_file):
print('SMILES file not found!')
exit()
with open(args.smi_file, 'r') as f:
smiles = [line.strip() for line in f if line.strip()] # strip whitespace and remove empty lines
# Check if SMILES are valid (RDKit)
for smile in smiles:
mol = Chem.MolFromSmiles(smile)
if not mol:
print(f'Invalid SMILES: {smile}')
exit()
# Check if kinase families or accessions are selected
if not args.kinase_families and not args.accessions:
print('No kinase families or accessions selected!')
exit()
if not args.kinase_families:
args.kinase_families = []
# Load kinase data
kin_data = pd.read_csv('kinase_data.csv')
# Check if accessions are valid
if args.accessions:
for accession in args.accessions:
if accession not in kin_data['accession'].unique():
print('-'*50)
print(f'Invalid accession: {accession}')
print('-'*50)
print('Valid accessions:')
for acc in kin_data['accession'].unique():
print(f'- {acc}')
exit()
# Check if output path exists
if not os.path.isdir(args.output_path):
print(f'{args.output_path} not found!')
exit()
print('Start pipeline...')
pipeline = Pipeline(args.run_name, smiles, args.kinase_families, args.accessions, args.docking_engine, args.scoring_function, args.output_path)
pipeline.run()
| 38,596 | Python | .py | 686 | 45.081633 | 242 | 0.609943 | APAJanssen/KinaseDocker2 | 8 | 3 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,612 | __init__.py | APAJanssen_KinaseDocker2/__init__.py | '''
First two functions are pymol's plugin setup
'''
import logging
def __init_plugin__(app=None):
'''
Add an entry to the PyMOL "Plugin" menu
'''
from pymol.plugins import addmenuitemqt
addmenuitemqt('KinaseDocker\u00b2', run_plugin_gui)
# global reference to avoid garbage collection of the dialog
dialog = None
def run_plugin_gui():
'''
Open custom dialog
'''
global dialog
if dialog is None:
dialog = make_dialog()
dialog.show()
def make_dialog():
'''
This function creates the plugin dialog in which the entire plugin is situated
'''
# Relevant imports inside function to not delay pymol startup
import os
import pandas as pd
from pymol import cmd
from pymol.Qt.utils import loadUi
from pymol.Qt import QtWidgets, QtGui, QtCore
from rdkit import Chem
from rdkit.Chem import AllChem
from .pipeline import Pipeline
class MainWindow(QtWidgets.QDialog):
'''
Main window of the plugin
'''
def __init__(self):
super().__init__()
loadUi(os.path.join(os.path.dirname(__file__), 'docker_tool.ui'), self) # load .ui file
self.setFixedSize(self.size()) # prevent resizing
# Hook signals and slots (roughly in order)
self.smiles_input.textChanged.connect(self.generate_mol)
self.smi_file = None
self.browse_smi.clicked.connect(self.browse_smi_files)
self.select_all.clicked.connect(lambda: [child.setChecked(True) for child in self.findChildren(QtWidgets.QCheckBox)]) # Selects all checkboxes in the window!
self.accessions = None
self.browse_kinase.clicked.connect(self.browse_kinase_table)
self.output_folder = None
self.browse_output.clicked.connect(self.browse_output_folder)
self.load_prev_results.clicked.connect(self.view_results)
self.run.clicked.connect(self.start_process)
def start_process(self):
'''
Start the pipeline
'''
# Get all user input
run_name = self.run_name.text()
smiles = [self.smiles_input.text()]
if not smiles[0] and self.smi_file:
with open(self.smi_file, 'r') as f:
smiles = [line.strip() for line in f if line.strip()] # strip whitespace and remove empty lines
accessions = self.accessions
kinase_families = sorted([child.objectName() for child in self.findChildren(QtWidgets.QCheckBox) if child.isChecked()])
docking_engine = self.docking_engine.currentText().lower()
output_folder = self.output_folder
scoring_algorithm = self.scoring_algorithm.currentText()
# Check if all input is valid:
if not run_name:
QtWidgets.QMessageBox.warning(self, 'Warning', 'No run name specified')
return
if not smiles[0]:
QtWidgets.QMessageBox.warning(self, 'Warning', 'No SMILES selected')
return
if not self.check_smiles(smiles):
return
if not len(kinase_families) and not accessions:
QtWidgets.QMessageBox.warning(self, 'Warning', 'No kinase (families) selected')
return
if not docking_engine:
QtWidgets.QMessageBox.warning(self, 'Warning', 'No docking engine selected')
return
if not output_folder or not os.path.exists(output_folder):
QtWidgets.QMessageBox.warning(self, 'Warning', 'No valid output folder selected')
return
if not scoring_algorithm:
QtWidgets.QMessageBox.warning(self, 'Warning', 'No docking score selected')
return
print('Start pipeline...')
pipeline = Pipeline(run_name, smiles, kinase_families, accessions, docking_engine, scoring_algorithm, output_folder)
pipeline.run()
results_sdf_filepath, _ = pipeline.get_results_filepath() # get results .sdf filepath and ignore the .csv filepath
print('View results...')
self.view_results(results_sdf_filepath)
def view_results(self, results_path=None):
'''
View results in pymol
            This function creates a ResultViewer dialog instance and shows it
'''
cmd.reinitialize()
self.results_view = ResultViewer()
# This check only loads data if called directly from pipeline otherwise loads the empty dialog
if results_path:
self.results_view.load_data(results_path)
self.results_view.show()
def generate_mol(self):
'''
This function dynamically generates a molecule in pymol from the SMILES input
'''
cmd.delete('mol') # delete previous molecule
smiles = self.smiles_input.text()
if smiles:
mol = Chem.MolFromSmiles(smiles)
if mol:
# Add H's and generate 3D coordinates
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol)
AllChem.UFFOptimizeMolecule(mol)
cmd.read_molstr(Chem.MolToMolBlock(mol), 'mol')
def generate_mols(self):
'''
This function generates molecules in pymol from the .smi file input
'''
cmd.delete('mols') # delete previous molecules
smi_file = self.smi_filename.text()
if smi_file:
with open(smi_file, 'r') as f:
for line in f:
line = line.strip()
# Skip empty lines
if not line:
continue
mol = Chem.MolFromSmiles(line)
if mol:
# Add H's and generate 3D coordinates
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol)
AllChem.UFFOptimizeMolecule(mol)
cmd.read_molstr(Chem.MolToMolBlock(mol), 'mols', state=0)
else:
# Immediately warn user if one of their SMILES is invalid
QtWidgets.QMessageBox.warning(self, 'Warning', f'Could not parse {line} in RDKit')
return
def browse_smi_files(self):
'''
Browse for .smi file
'''
filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Select .smi file', QtCore.QDir.rootPath() , '*.smi')
self.smi_file = filename
if filename:
self.smi_filename.setText(filename)
self.smiles_input.setText('') # clear smiles input, because the smiles_input has a higher priority in the pipeline
self.generate_mols()
def browse_output_folder(self):
'''
Browse for output folder
'''
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select output folder', QtCore.QDir.rootPath())
self.output_folder = folder
if folder:
self.output_foldername.setText(folder)
def browse_kinase_table(self):
'''
            This function creates a KinaseSelector dialog instance and opens it modally
'''
dialog = KinaseSelector()
if dialog.exec_() == QtWidgets.QDialog.Accepted: # The exec_() function forces a modal that prevents interaction with other windows
kinase_data = dialog.get_kinases() # get all selected kinases
# Retrieve accessions and update label
kinases, self.accessions = zip(*kinase_data)
label_text = '\n'.join([f'{kinase} ({accession})' for kinase, accession in kinase_data[:3]])
label_text += '\n...' if len(kinase_data) > 3 else ''
self.kinase_label.setText(label_text)
# Add tooltip with all kinases
self.kinase_label.setToolTip('\n'.join([f'{kinase} ({accession})' for kinase, accession in kinase_data]))
def check_smiles(self, smiles):
'''
Check if all SMILES are valid (according to RDKit)
'''
for smile in smiles:
mol = Chem.MolFromSmiles(smile)
if not mol:
QtWidgets.QMessageBox.warning(self, 'Warning', f'Could not parse {smile} in RDKit')
return False
return True
class ResultViewer(QtWidgets.QDialog):
'''
This class creates the results viewer dialog
'''
def __init__(self):
super().__init__()
self.setWindowTitle('Results')
self.setMinimumSize(self.size())
# Setup loading results
self.load_button = QtWidgets.QToolButton()
self.load_button.setText('Load .sdf results...')
self.load_button.clicked.connect(self.browse_results)
self.results_label = QtWidgets.QLabel('No results loaded')
# Setup table
self.table = QtWidgets.QTableWidget()
self.table.setRowCount(0)
self.table.setColumnCount(0)
self.table.setSortingEnabled(False)
self.table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
# Fire function when cell is clicked
self.table.cellClicked.connect(self.cell_clicked)
# Setup buttons
self.exit_button = QtWidgets.QPushButton('Exit')
self.exit_button.clicked.connect(self.accept)
# Create layout
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.load_button)
layout.addWidget(self.results_label)
layout.addWidget(self.table)
button_layout = QtWidgets.QHBoxLayout()
spacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
button_layout.addItem(spacer)
button_layout.addWidget(self.exit_button)
layout.addLayout(button_layout)
self.setLayout(layout)
def load_data(self, results_path):
'''
Load results
'''
# Check if results file exists
if not os.path.exists(results_path):
QtWidgets.QMessageBox.warning(self, 'Warning', 'No valid results file (.sdf) selected')
return
# Check if pdb folder exists for interactive rendering
if not os.path.exists(os.path.join(os.path.dirname(os.path.dirname(results_path)), 'pdb')):
QtWidgets.QMessageBox.warning(self, 'Warning', 'No valid PDB folder found\n \
Did you adhere to the correct folder structure?\n \
Results\n\t|_ results.sdf\npdb\n\t|_ pdb files\n')
return
cmd.reinitialize() # clear pymol, to prevent clashing object names
self.results_path = results_path
self.results_label.setText(f'Showing results from {results_path}')
# Load SDF file into RDKit
molecules = Chem.SDMolSupplier(results_path)
property_names = list(molecules[0].GetPropNames())
all_props = []
# Retrieve properties
for i, mol in enumerate(molecules):
if not mol:
logging.warning(f'Could not parse molecule {i} in RDKit')
continue
try:
name = mol.GetProp('_Name')
properties = [mol, name]
properties += [mol.GetProp(prop) for prop in property_names]
all_props.append(properties)
except KeyError:
logging.warning(f'Could not retrieve properties for molecule {Chem.MolToSmiles(mol)}')
continue
# Create dataframe
self.pose_results = pd.DataFrame(all_props, columns=['Molecule', 'SMILES'] + property_names)
self.pose_results['clash_score'] = self.pose_results['clash_score'].astype(float)
self.pose_results['pIC50'] = self.pose_results['pIC50'].astype(float)
# Get averaged results
self.agg_results = self.pose_results.groupby(['klifs_ID', 'SMILES'], sort=False).agg({'Kinase': 'first', 'accession': 'first', 'pIC50': 'mean', 'clash_score': 'max'}).reset_index()
self.agg_results.rename(columns={'pIC50': 'avg_score', 'clash_score': 'clash_score_max'}, inplace=True)
self.agg_results['avg_score'] = self.agg_results['avg_score'].round(2)
# Create a complex id that goes up in number like complex_0 complex_1 etc.
self.agg_results['complex_ID'] = self.agg_results.index.values
self.agg_results['complex_ID'] = self.agg_results['complex_ID'].apply(lambda x: f'complex_{x}')
self.agg_results = self.agg_results.reindex(columns=['complex_ID', 'SMILES', 'Kinase', 'accession', 'klifs_ID', 'avg_score', 'clash_score_max'])
# Populate table
n_rows = len(self.agg_results)
n_cols = len(self.agg_results.columns)
self.table.setRowCount(n_rows)
self.table.setColumnCount(n_cols)
self.table.setHorizontalHeaderLabels(self.agg_results.columns)
self.table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
for i, row in self.agg_results.iterrows():
for j, col in enumerate(self.agg_results.columns):
item = QtWidgets.QTableWidgetItem(str(row[col]))
self.table.setItem(i, j, item)
if col == 'clash_score_max':
if row[col] > 10:
item.setForeground(QtGui.QColor(255, 0, 0))
self.table.setSortingEnabled(True)
# Show first complex in pymol
self.cell_clicked(0, 0)
self.table.selectRow(0)
def cell_clicked(self, row, col):
'''
This function is called when a cell is clicked in the table.
It loads the corresponding complex in pymol as a separate KLIFS object and a separate complex_{x} object with the poses as states:
- If the complex is already loaded, it will be enabled
- If the complex is not loaded, it will be loaded
'''
cmd.disable('all') # Disable all possible previous complexes
# Get all values from row and put in a dict with corresponding column name
row_values = {self.table.horizontalHeaderItem(i).text(): self.table.item(row, i).text() for i in range(self.table.columnCount())}
klifs = row_values['klifs_ID']
smiles = row_values['SMILES']
existing_objects = cmd.get_names('objects')
# If complex is already loaded, enable it and return
if row_values['complex_ID'] in existing_objects:
cmd.enable(f'{klifs}')
cmd.enable(row_values['complex_ID'])
return
# Get all poses with the same klifs_ID and SMILES
poses = self.pose_results[(self.pose_results['klifs_ID'] == klifs) & (self.pose_results['SMILES'] == smiles)]
# Load PDB as object if not already loaded, otherwise enable it
if klifs not in existing_objects:
pdb_path = os.path.join(os.path.dirname(os.path.dirname(self.results_path)), 'pdb', f'{klifs}.pdb')
cmd.load(pdb_path, object=f'{klifs}')
else:
cmd.enable(f'{klifs}')
# Load poses as states in a separate complex_{x} object
for pose in poses['Molecule']:
cmd.read_molstr(Chem.MolToMolBlock(pose), row_values['complex_ID'], state=0)
def browse_results(self):
'''
Browse for .sdf file
'''
filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Select .sdf file', QtCore.QDir.rootPath() , '*.sdf')
if filename:
self.load_data(filename)
class KinaseSelector(QtWidgets.QDialog):
'''
This class creates the kinase selector dialog
'''
def __init__(self):
super().__init__()
self.setWindowTitle('Select kinase(s)')
self.setMinimumSize(self.size())
# Setup search bar
self.query = QtWidgets.QLineEdit()
self.query.setPlaceholderText("Search...")
self.query.textChanged.connect(self.search)
# Load kinase data
kinases = pd.read_csv(os.path.join(os.path.dirname(__file__), 'kinase_data.csv'), usecols=['kinasegroup', 'kinase', 'accession'])
kinases = kinases.drop_duplicates(keep='first').sort_values(by=['kinasegroup']).reset_index(drop=True) # Sort table by kinasegroup
# Setup table
n_rows = len(kinases)
n_cols = 3
self.table = QtWidgets.QTableWidget()
self.table.setRowCount(n_rows)
self.table.setColumnCount(n_cols)
self.table.setSortingEnabled(True)
self.table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.table.setHorizontalHeaderLabels(['Group', 'Kinase', 'Accession'])
self.table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
# Populate table
for i, row in kinases.iterrows():
accession_checkbox = CheckboxTableWidgetItem(row['kinasegroup'])
self.table.setItem(i, 0, accession_checkbox)
self.table.setItem(i, 1, QtWidgets.QTableWidgetItem(row['kinase']))
self.table.setItem(i, 2, QtWidgets.QTableWidgetItem(row['accession']))
# If row is clicked, toggle the corresponding checkbox (Apparently clicking the checkbox directly can only enable and not disable)
self.table.cellClicked.connect(self.cell_clicked)
# Setup buttons
self.ok = QtWidgets.QPushButton('OK')
self.ok.clicked.connect(self.accept)
self.cancel = QtWidgets.QPushButton('Cancel')
self.cancel.clicked.connect(self.reject)
# Setup labels
self.kinase_counter = QtWidgets.QLabel(f'0 kinases selected')
# Create layout
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.query)
layout.addWidget(self.table)
layout.addWidget(self.kinase_counter)
button_layout = QtWidgets.QHBoxLayout()
button_layout.addWidget(self.cancel)
button_layout.addWidget(self.ok)
layout.addLayout(button_layout)
self.setLayout(layout)
def cell_clicked(self, row, col):
'''
This function is called when a cell is clicked in the table.
It toggles the corresponding checkbox
'''
# Toggle checkbox
self.table.item(row, 0).setCheckState(QtCore.Qt.Checked if self.table.item(row, 0).checkState() == QtCore.Qt.Unchecked else QtCore.Qt.Unchecked)
# Count number of checkboxes in the first column that are checked
num_checked = len(self.get_kinases())
self.kinase_counter.setText(f'{num_checked} kinases selected')
def get_kinases(self):
'''
Get all checked kinases
'''
rows = [i for i in range(self.table.rowCount()) if self.table.item(i, 0).checkState() == QtCore.Qt.Checked]
checked_items = [(self.table.item(row, 1).text(), self.table.item(row, 2).text()) for row in rows] # Extract row values
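            # Returned shape (illustrative entries): [('EGFR', 'P00533'), ('ABL1', 'P00519')]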
return checked_items
def accept(self):
'''
Accept function for the dialog, activates when the OK button is pressed
'''
checked_items = self.get_kinases()
# Check if any kinases are selected
if len(checked_items):
super().accept()
else:
QtWidgets.QMessageBox.warning(self, 'Warning', 'No kinase selected')
def reject(self):
'''
Reject function for the dialog, activates when the Cancel button is pressed
'''
super().reject()
def search(self, s):
'''
            Dynamic search function for the dialog; activates when the search bar is used
'''
# Clear current selection.
self.table.setCurrentItem(None)
if not s:
# Empty string, don't search.
return
matching_items = self.table.findItems(s, QtCore.Qt.MatchContains) # Find all items that contain the search string.
if matching_items:
item = matching_items[0]
self.table.setCurrentItem(item) # Select the first matching item.
class CheckboxTableWidgetItem(QtWidgets.QTableWidgetItem):
'''
This class creates a custom QTableWidgetItem with a checkbox
'''
def __init__(self, text):
super().__init__(text, QtWidgets.QTableWidgetItem.UserType)
# These settings are required to make the checkbox work
self.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
self.setCheckState(QtCore.Qt.Unchecked) # Default state is unchecked
def __lt__(self, other):
'''
This function overrides the __lt__ function which is used when sorting the table.
It ensures that the checkboxes are sorted correctly.
'''
if self.checkState() == other.checkState():
return self.text() < other.text() # If the checkboxes are the same, sort alphabetically
elif self.checkState() == QtCore.Qt.Unchecked:
return False # A checked state is sorted lower than an unchecked state
return True
dialog = MainWindow()
return dialog
| 23,118 | Python | .py | 439 | 38.14123 | 192 | 0.589001 | APAJanssen/KinaseDocker2 | 8 | 3 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,613 | sshcm.py | Gyarbij_SSHConnectionMonitor/sshcm.py | import os
import time
import re
import subprocess
def check_ssh_connections(last_line):
log_files = ['/var/log/auth.log', '/var/log/secure'] # Add other log files if needed
new_last_line = last_line
for log_file in log_files:
if os.path.exists(log_file):
with open(log_file, 'r') as f:
lines = f.readlines()
if not lines:
continue
new_last_line = lines[-1]
                if last_line and last_line in lines:  # fall back to a full scan if the last seen line is gone (e.g. after log rotation)
                    lines = lines[lines.index(last_line) + 1:]
for line in lines:
if 'sshd' in line:
if 'Accepted' in line:
match = re.search(r'Accepted .* for (.*?) from (.*?) port', line)
if match:
user, ip = match.groups()
send_alert(f"A new SSH connection (accepted publickey) from {user}@{ip}!", "SSH Alert")
elif 'Failed password' in line:
match = re.search(r'Failed password for (.*?) from (.*?) port', line)
if match:
user, ip = match.groups()
send_alert(f"Failed SSH login attempt (wrong password) detected from {user}@{ip}!", "SSH Warning")
elif 'Invalid user' in line:
match = re.search(r'Invalid user (.*?) from (.*?) port', line)
if match:
user, ip = match.groups()
send_alert(f"Failed SSH login attempt (invalid user) detected for {user}@{ip}!", "SSH Warning")
# Handle failed key authentication case
elif 'Failed publickey' in line or 'Connection closed by authenticating user' in line:
match = re.search(r'for (.*?) from (.*?) port', line)
if match:
user, ip = match.groups()
send_alert(f"Failed SSH login attempt (incorrect key file) detected for {user}@{ip}!", "SSH Warning")
return new_last_line
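# Typical auth.log lines the regexes above are written against (hosts and users are made up):
#   sshd[1234]: Accepted publickey for alice from 192.0.2.10 port 52514 ssh2
#   sshd[1234]: Failed password for root from 198.51.100.7 port 40022 ssh2
#   sshd[1234]: Invalid user admin from 203.0.113.5 port 53120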
def send_alert(message, title):
print(message)
if is_gui_session():
send_desktop_notification(message, title)
def is_gui_session():
"""Check if the current session is a GUI session"""
return os.environ.get("DISPLAY") is not None
def send_desktop_notification(message, title):
"""Send a desktop notification"""
try:
subprocess.run(['notify-send', title, message], check=True)
except Exception as e:
print(f"Failed to send desktop notification: {e}")
def main():
last_line = None
while True:
last_line = check_ssh_connections(last_line)
time.sleep(10) # Check every 10 seconds
if __name__ == "__main__":
main() | 2,852 | Python | .py | 60 | 33.616667 | 129 | 0.530605 | Gyarbij/SSHConnectionMonitor | 8 | 1 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,614 | .pylintrc-mandatory | OCA_automation/.pylintrc-mandatory |
[MASTER]
load-plugins=pylint_odoo
score=n
[ODOOLINT]
readme-template-url="https://github.com/OCA/maintainer-tools/blob/master/template/module/README.rst"
manifest-required-authors=Odoo Community Association (OCA)
manifest-required-keys=license
manifest-deprecated-keys=description,active
license-allowed=AGPL-3,GPL-2,GPL-2 or any later version,GPL-3,GPL-3 or any later version,LGPL-3
valid-odoo-versions=16.0
[MESSAGES CONTROL]
disable=all
enable=anomalous-backslash-in-string,
api-one-deprecated,
api-one-multi-together,
assignment-from-none,
attribute-deprecated,
class-camelcase,
dangerous-default-value,
dangerous-view-replace-wo-priority,
development-status-allowed,
duplicate-id-csv,
duplicate-key,
duplicate-xml-fields,
duplicate-xml-record-id,
eval-referenced,
eval-used,
incoherent-interpreter-exec-perm,
license-allowed,
manifest-author-string,
manifest-deprecated-key,
manifest-required-author,
manifest-required-key,
manifest-version-format,
method-compute,
method-inverse,
method-required-super,
method-search,
openerp-exception-warning,
pointless-statement,
pointless-string-statement,
print-used,
redundant-keyword-arg,
redundant-modulename-xml,
reimported,
relative-import,
return-in-init,
rst-syntax-error,
sql-injection,
too-few-format-args,
translation-field,
translation-required,
unreachable,
use-vim-comment,
wrong-tabs-instead-of-spaces,
xml-syntax-error,
attribute-string-redundant,
character-not-valid-in-resource-link,
consider-merging-classes-inherited,
context-overridden,
create-user-wo-reset-password,
dangerous-filter-wo-user,
dangerous-qweb-replace-wo-priority,
deprecated-data-xml-node,
deprecated-openerp-xml-node,
duplicate-po-message-definition,
except-pass,
file-not-used,
invalid-commit,
manifest-maintainers-list,
missing-newline-extrafiles,
missing-readme,
missing-return,
odoo-addons-relative-import,
old-api7-method-defined,
po-msgstr-variables,
po-syntax-error,
renamed-field-parameter,
resource-not-exist,
str-format-used,
test-folder-imported,
translation-contains-variable,
translation-positional-used,
unnecessary-utf8-coding-comment,
website-manifest-key-not-valid-uri,
xml-attribute-translatable,
xml-deprecated-qweb-directive,
xml-deprecated-tree-attribute,
external-request-timeout
[REPORTS]
msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}
output-format=colorized
reports=no
| 2,649 | Python | .py | 93 | 24.16129 | 100 | 0.749118 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,615 | .pylintrc | OCA_automation/.pylintrc |
[MASTER]
load-plugins=pylint_odoo
score=n
[ODOOLINT]
readme-template-url="https://github.com/OCA/maintainer-tools/blob/master/template/module/README.rst"
manifest-required-authors=Odoo Community Association (OCA)
manifest-required-keys=license
manifest-deprecated-keys=description,active
license-allowed=AGPL-3,GPL-2,GPL-2 or any later version,GPL-3,GPL-3 or any later version,LGPL-3
valid-odoo-versions=16.0
[MESSAGES CONTROL]
disable=all
# This .pylintrc contains optional AND mandatory checks and is meant to be
# loaded in an IDE to have it check everything, in the hope this will make
# optional checks more visible to contributors who otherwise never look at a
# green travis to see optional checks that failed.
# .pylintrc-mandatory containing only mandatory checks is used the pre-commit
# config as a blocking check.
enable=anomalous-backslash-in-string,
api-one-deprecated,
api-one-multi-together,
assignment-from-none,
attribute-deprecated,
class-camelcase,
dangerous-default-value,
dangerous-view-replace-wo-priority,
development-status-allowed,
duplicate-id-csv,
duplicate-key,
duplicate-xml-fields,
duplicate-xml-record-id,
eval-referenced,
eval-used,
incoherent-interpreter-exec-perm,
license-allowed,
manifest-author-string,
manifest-deprecated-key,
manifest-required-author,
manifest-required-key,
manifest-version-format,
method-compute,
method-inverse,
method-required-super,
method-search,
openerp-exception-warning,
pointless-statement,
pointless-string-statement,
print-used,
redundant-keyword-arg,
redundant-modulename-xml,
reimported,
relative-import,
return-in-init,
rst-syntax-error,
sql-injection,
too-few-format-args,
translation-field,
translation-required,
unreachable,
use-vim-comment,
wrong-tabs-instead-of-spaces,
xml-syntax-error,
attribute-string-redundant,
character-not-valid-in-resource-link,
consider-merging-classes-inherited,
context-overridden,
create-user-wo-reset-password,
dangerous-filter-wo-user,
dangerous-qweb-replace-wo-priority,
deprecated-data-xml-node,
deprecated-openerp-xml-node,
duplicate-po-message-definition,
except-pass,
file-not-used,
invalid-commit,
manifest-maintainers-list,
missing-newline-extrafiles,
missing-readme,
missing-return,
odoo-addons-relative-import,
old-api7-method-defined,
po-msgstr-variables,
po-syntax-error,
renamed-field-parameter,
resource-not-exist,
str-format-used,
test-folder-imported,
translation-contains-variable,
translation-positional-used,
unnecessary-utf8-coding-comment,
website-manifest-key-not-valid-uri,
xml-attribute-translatable,
xml-deprecated-qweb-directive,
xml-deprecated-tree-attribute,
external-request-timeout,
# messages that do not cause the lint step to fail
consider-merging-classes-inherited,
create-user-wo-reset-password,
dangerous-filter-wo-user,
deprecated-module,
file-not-used,
invalid-commit,
missing-manifest-dependency,
missing-newline-extrafiles,
missing-readme,
no-utf8-coding-comment,
odoo-addons-relative-import,
old-api7-method-defined,
redefined-builtin,
too-complex,
unnecessary-utf8-coding-comment
[REPORTS]
msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}
output-format=colorized
reports=no
| 3,512 | Python | .py | 115 | 26.269565 | 100 | 0.753024 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,616 | __init__.py | OCA_automation/automation_oca/__init__.py | from . import controllers
from . import models
from . import wizards
| 69 | Python | .py | 3 | 22 | 25 | 0.818182 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,617 | __manifest__.py | OCA_automation/automation_oca/__manifest__.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "Automation Oca",
"summary": """
Automate actions in threaded models""",
"version": "16.0.1.1.1",
"license": "AGPL-3",
"category": "Automation",
"author": "Dixmit,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/automation",
"depends": ["mail", "link_tracker"],
"data": [
"security/security.xml",
"security/ir.model.access.csv",
"views/menu.xml",
"wizards/automation_configuration_test.xml",
"views/automation_record.xml",
"views/automation_record_step.xml",
"views/automation_configuration_step.xml",
"views/automation_configuration.xml",
"views/link_tracker_clicks.xml",
"views/automation_filter.xml",
"views/automation_tag.xml",
"data/cron.xml",
],
"assets": {
"web.assets_backend": [
"automation_oca/static/src/**/*.js",
"automation_oca/static/src/**/*.xml",
"automation_oca/static/src/**/*.scss",
],
},
"demo": [
"demo/demo.xml",
],
}
| 1,181 | Python | .py | 37 | 24.945946 | 64 | 0.581802 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,618 | automation_record.py | OCA_automation/automation_oca/models/automation_record.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
from collections import defaultdict
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class AutomationRecord(models.Model):
_name = "automation.record"
_description = "Automation Record"
name = fields.Char(compute="_compute_name")
state = fields.Selection(
[("run", "Running"), ("done", "Done")], compute="_compute_state", store=True
)
configuration_id = fields.Many2one(
"automation.configuration", required=True, readonly=True
)
model = fields.Char(index=True, required=False, readonly=True)
resource_ref = fields.Reference(
selection="_selection_target_model",
compute="_compute_resource_ref",
readonly=True,
)
res_id = fields.Many2oneReference(
string="Record",
index=True,
required=False,
readonly=True,
model_field="model",
copy=False,
)
automation_step_ids = fields.One2many(
"automation.record.step", inverse_name="record_id", readonly=True
)
is_test = fields.Boolean()
@api.model
def _selection_target_model(self):
return [
(model.model, model.name)
for model in self.env["ir.model"]
.sudo()
.search([("is_mail_thread", "=", True)])
]
@api.depends("automation_step_ids.state")
def _compute_state(self):
for record in self:
record.state = (
"run"
if record.automation_step_ids.filtered(lambda r: r.state == "scheduled")
else "done"
)
@api.depends("model", "res_id")
def _compute_resource_ref(self):
for record in self:
if record.model and record.model in self.env:
record.resource_ref = "%s,%s" % (record.model, record.res_id or 0)
else:
record.resource_ref = None
@api.depends("res_id", "model")
def _compute_name(self):
for record in self:
record.name = self.env[record.model].browse(record.res_id).display_name
@api.model
def _search(
self,
args,
offset=0,
limit=None,
order=None,
count=False,
access_rights_uid=None,
):
ids = super()._search(
args,
offset=offset,
limit=limit,
order=order,
count=False,
access_rights_uid=access_rights_uid,
)
if self.env.is_system():
# restrictions do not apply to group "Settings"
return len(ids) if count else ids
# TODO highlight orphaned records in UI:
# - self.model + self.res_id are set
# - self.record returns empty recordset
# Remark: self.record is @property, not field
if not ids:
return 0 if count else []
orig_ids = ids
ids = set(ids)
result = []
model_data = defaultdict(
lambda: defaultdict(set)
) # {res_model: {res_id: set(ids)}}
for sub_ids in self._cr.split_for_in_conditions(ids):
self._cr.execute(
"""
SELECT id, res_id, model
FROM "%s"
WHERE id = ANY (%%(ids)s)"""
% self._table,
dict(ids=list(sub_ids)),
)
for eid, res_id, model in self._cr.fetchall():
model_data[model][res_id].add(eid)
for model, targets in model_data.items():
if not self.env[model].check_access_rights("read", False):
continue
recs = self.env[model].browse(list(targets))
missing = recs - recs.exists()
if missing:
for res_id in missing.ids:
_logger.warning(
"Deleted record %s,%s is referenced by automation.record %s",
model,
res_id,
list(targets[res_id]),
)
recs = recs - missing
allowed = (
self.env[model]
.with_context(active_test=False)
._search([("id", "in", recs.ids)])
)
for target_id in allowed:
result += list(targets[target_id])
if len(orig_ids) == limit and len(result) < len(orig_ids):
result.extend(
self._search(
args,
offset=offset + len(orig_ids),
limit=limit,
order=order,
count=count,
access_rights_uid=access_rights_uid,
)[: limit - len(result)]
)
# Restore original ordering
result = [x for x in orig_ids if x in result]
return len(result) if count else list(result)
    def read(self, fields=None, load="_classic_read"):
        """Override to explicitly call check_access_rule, which is not called
        by the ORM. The ORM instead directly fetches ir.rules and applies them."""
self.check_access_rule("read")
return super().read(fields=fields, load=load)
def check_access_rule(self, operation):
"""In order to check if we can access a record, we are checking if we can access
the related document"""
super().check_access_rule(operation)
if self.env.is_superuser():
return
default_checker = self.env["mail.thread"].get_automation_access
by_model_rec_ids = defaultdict(set)
by_model_checker = {}
for exc_rec in self.sudo():
by_model_rec_ids[exc_rec.model].add(exc_rec.res_id)
if exc_rec.model not in by_model_checker:
by_model_checker[exc_rec.model] = getattr(
self.env[exc_rec.model], "get_automation_access", default_checker
)
for model, rec_ids in by_model_rec_ids.items():
records = self.env[model].browse(rec_ids).with_user(self._uid)
checker = by_model_checker[model]
for record in records:
check_operation = checker(
[record.id], operation, model_name=record._name
)
record.check_access_rights(check_operation)
record.check_access_rule(check_operation)
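        # In practice (illustrative): writing an automation.record that points to a res.partner
        # requires write access on that partner, unless the target model relaxes this through its
        # own get_automation_access implementation.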
def write(self, vals):
self.check_access_rule("write")
return super().write(vals)
| 6,619 | Python | .py | 172 | 26.976744 | 88 | 0.544648 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,619 | mail_mail.py | OCA_automation/automation_oca/models/mail_mail.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import re
import markupsafe
import werkzeug.urls
from odoo import api, fields, models, tools
class MailMail(models.Model):
_inherit = "mail.mail"
automation_record_step_id = fields.Many2one("automation.record.step")
@api.model_create_multi
def create(self, values_list):
records = super().create(values_list)
for record in records.filtered("automation_record_step_id"):
record.automation_record_step_id.message_id = record.message_id
return records
def _send_prepare_body(self):
body = super()._send_prepare_body()
if self.automation_record_step_id:
body = self.env["mail.render.mixin"]._shorten_links(body, {}, blacklist=[])
token = self.automation_record_step_id._get_mail_tracking_token()
for match in set(re.findall(tools.URL_REGEX, body)):
href = match[0]
url = match[1]
parsed = werkzeug.urls.url_parse(url, scheme="http")
if parsed.scheme.startswith("http") and parsed.path.startswith("/r/"):
new_href = href.replace(
url,
"%s/au/%s/%s"
% (url, str(self.automation_record_step_id.id), token),
)
body = body.replace(
markupsafe.Markup(href), markupsafe.Markup(new_href)
)
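                    # Illustrative effect (id and token are made up): http://host/r/AbCd becomes
                    # http://host/r/AbCd/au/42/<token>, so the click can be attributed to record step 42.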
body = tools.append_content_to_html(
body,
'<img src="%s"/>'
% self.automation_record_step_id._get_mail_tracking_url(),
plaintext=False,
)
return body
| 1,773 | Python | .py | 40 | 31.95 | 87 | 0.565621 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,620 | mail_thread.py | OCA_automation/automation_oca/models/mail_thread.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, models, tools
class MailThread(models.AbstractModel):
_inherit = "mail.thread"
@api.model
def _routing_handle_bounce(self, email_message, message_dict):
"""We want to mark the bounced email"""
result = super(MailThread, self)._routing_handle_bounce(
email_message, message_dict
)
bounced_msg_id = message_dict.get("bounced_msg_id")
if bounced_msg_id:
self.env["automation.record.step"].search(
[("message_id", "in", bounced_msg_id)]
)._set_mail_bounced()
return result
@api.model
def _message_route_process(self, message, message_dict, routes):
"""Override to update the parent mailing traces. The parent is found
by using the References header of the incoming message and looking for
matching message_id in automation.record.step."""
if routes:
thread_references = (
message_dict["references"] or message_dict["in_reply_to"]
)
msg_references = tools.mail_header_msgid_re.findall(thread_references)
if msg_references:
records = self.env["automation.record.step"].search(
[("message_id", "in", msg_references)]
)
records._set_mail_open()
records._set_mail_reply()
return super(MailThread, self)._message_route_process(
message, message_dict, routes
)
@api.model
def get_automation_access(self, doc_ids, operation, model_name=False):
"""Retrieve access policy.
The behavior is similar to `mail.thread` and `mail.message`
and it relies on the access rules defines on the related record.
The behavior can be customized on the related model
by defining `_automation_record_access`.
By default `write`, otherwise the custom permission is returned.
"""
DocModel = self.env[model_name] if model_name else self
create_allow = getattr(DocModel, "_automation_record_access", "write")
if operation in ["write", "unlink"]:
check_operation = "write"
elif operation == "create" and create_allow in [
"create",
"read",
"write",
"unlink",
]:
check_operation = create_allow
elif operation == "create":
check_operation = "write"
else:
check_operation = operation
return check_operation
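    # Illustrative override on a concrete model (model and value are hypothetical):
    #
    #     class ResPartner(models.Model):
    #         _inherit = "res.partner"
    #         _automation_record_access = "read"  # creating automation records only requires read access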
| 2,639 | Python | .py | 61 | 33.065574 | 82 | 0.607629 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,621 | automation_configuration_step.py | OCA_automation/automation_oca/models/automation_configuration_step.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from collections import defaultdict
import babel.dates
from dateutil.relativedelta import relativedelta
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.osv import expression
from odoo.tools import get_lang
from odoo.tools.safe_eval import safe_eval
class AutomationConfigurationStep(models.Model):
_name = "automation.configuration.step"
_description = "Automation Steps"
_order = "trigger_interval_hours ASC"
name = fields.Char(required=True)
configuration_id = fields.Many2one(
"automation.configuration", required=True, auto_join=True
)
domain = fields.Char(
required=True, default="[]", help="Filter to apply specifically"
)
applied_domain = fields.Char(
compute="_compute_applied_domain",
recursive=True,
)
parent_id = fields.Many2one("automation.configuration.step", ondelete="cascade")
model_id = fields.Many2one(related="configuration_id.model_id")
model = fields.Char(related="model_id.model")
child_ids = fields.One2many(
"automation.configuration.step", inverse_name="parent_id"
)
step_type = fields.Selection(
[("mail", "Mail"), ("action", "Server Action"), ("activity", "Activity")],
required=True,
default="mail",
)
step_icon = fields.Char(compute="_compute_step_info")
step_name = fields.Char(compute="_compute_step_info")
trigger_interval_hours = fields.Integer(
compute="_compute_trigger_interval_hours", store=True
)
trigger_interval = fields.Integer()
trigger_interval_type = fields.Selection(
[("hours", "Hours"), ("days", "Days")], required=True, default="hours"
)
allow_expiry = fields.Boolean(compute="_compute_allow_expiry")
expiry = fields.Boolean(compute="_compute_expiry", store=True, readonly=False)
expiry_interval = fields.Integer()
expiry_interval_type = fields.Selection(
[("hours", "Hours"), ("days", "Days")], required=True, default="hours"
)
trigger_type = fields.Selection(
selection="_trigger_type_selection",
required=True,
default="start",
)
trigger_child_types = fields.Json(compute="_compute_trigger_child_types")
trigger_type_data = fields.Json(compute="_compute_trigger_type_data")
mail_author_id = fields.Many2one(
"res.partner", required=True, default=lambda r: r.env.user.id
)
mail_template_id = fields.Many2one(
"mail.template", domain="[('model_id', '=', model_id)]"
)
server_action_id = fields.Many2one(
"ir.actions.server", domain="[('model_id', '=', model_id)]"
)
activity_type_id = fields.Many2one(
"mail.activity.type",
string="Activity",
domain="['|', ('res_model', '=', False), ('res_model', '=', model)]",
compute="_compute_activity_info",
readonly=False,
store=True,
ondelete="restrict",
)
activity_summary = fields.Char(
"Summary", compute="_compute_activity_info", readonly=False, store=True
)
activity_note = fields.Html(
"Note", compute="_compute_activity_info", readonly=False, store=True
)
activity_date_deadline_range = fields.Integer(
string="Due Date In",
compute="_compute_activity_info",
readonly=False,
store=True,
)
activity_date_deadline_range_type = fields.Selection(
[("days", "Days"), ("weeks", "Weeks"), ("months", "Months")],
string="Due type",
default="days",
compute="_compute_activity_info",
readonly=False,
store=True,
)
activity_user_type = fields.Selection(
[("specific", "Specific User"), ("generic", "Generic User From Record")],
compute="_compute_activity_info",
readonly=False,
store=True,
help="""Use 'Specific User' to always assign the same user on the next activity.
Use 'Generic User From Record' to specify the field name of the user
to choose on the record.""",
)
activity_user_id = fields.Many2one(
"res.users",
string="Responsible",
compute="_compute_activity_info",
readonly=False,
store=True,
)
activity_user_field_id = fields.Many2one(
"ir.model.fields",
"User field name",
compute="_compute_activity_info",
readonly=False,
store=True,
)
parent_position = fields.Integer(
compute="_compute_parent_position", recursive=True, store=True
)
graph_data = fields.Json(compute="_compute_graph_data")
graph_done = fields.Integer(compute="_compute_total_graph_data")
graph_error = fields.Integer(compute="_compute_total_graph_data")
@api.onchange("trigger_type")
def _onchange_trigger_type(self):
if self.trigger_type == "start":
# Theoretically, only start allows no parent, so we will keep it this way
self.parent_id = False
########################################
# Graph computed fields ################
########################################
@api.depends()
def _compute_graph_data(self):
total = self.env["automation.record.step"].read_group(
[
("configuration_step_id", "in", self.ids),
(
"processed_on",
">=",
fields.Date.context_today(self) + relativedelta(days=-14),
),
("is_test", "=", False),
],
["configuration_step_id"],
["configuration_step_id", "processed_on:day"],
lazy=False,
)
done = self.env["automation.record.step"].read_group(
[
("configuration_step_id", "in", self.ids),
(
"processed_on",
">=",
fields.Date.context_today(self) + relativedelta(days=-14),
),
("state", "=", "done"),
("is_test", "=", False),
],
["configuration_step_id"],
["configuration_step_id", "processed_on:day"],
lazy=False,
)
now = fields.Datetime.now()
date_map = {
babel.dates.format_datetime(
now + relativedelta(days=i - 14),
format="dd MMM yyy",
tzinfo=self._context.get("tz", None),
locale=get_lang(self.env).code,
): 0
for i in range(0, 15)
}
result = defaultdict(
lambda: {"done": date_map.copy(), "error": date_map.copy()}
)
for line in total:
result[line["configuration_step_id"][0]]["error"][
line["processed_on:day"]
] += line["__count"]
for line in done:
result[line["configuration_step_id"][0]]["done"][
line["processed_on:day"]
] += line["__count"]
result[line["configuration_step_id"][0]]["error"][
line["processed_on:day"]
] -= line["__count"]
for record in self:
graph_info = dict(result[record.id])
record.graph_data = {
"error": [
{"x": key[:-5], "y": value, "name": key}
for (key, value) in graph_info["error"].items()
],
"done": [
{"x": key[:-5], "y": value, "name": key}
for (key, value) in graph_info["done"].items()
],
}
@api.depends()
def _compute_total_graph_data(self):
for record in self:
record.graph_done = self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", record.id),
("state", "=", "done"),
("is_test", "=", False),
]
)
record.graph_error = self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", record.id),
("state", "in", ["expired", "rejected", "error", "cancel"]),
("is_test", "=", False),
]
)
@api.depends("step_type")
def _compute_activity_info(self):
for to_reset in self.filtered(lambda act: act.step_type != "activity"):
to_reset.activity_summary = False
to_reset.activity_note = False
to_reset.activity_date_deadline_range = False
to_reset.activity_date_deadline_range_type = False
to_reset.activity_user_type = False
to_reset.activity_user_id = False
to_reset.activity_user_field_id = False
for activity in self.filtered(lambda act: act.step_type == "activity"):
if not activity.activity_date_deadline_range_type:
activity.activity_date_deadline_range_type = "days"
if not activity.activity_user_id:
activity.activity_user_id = self.env.user.id
@api.depends("trigger_interval", "trigger_interval_type")
def _compute_trigger_interval_hours(self):
for record in self:
record.trigger_interval_hours = record._get_trigger_interval_hours()
def _get_trigger_interval_hours(self):
if self.trigger_interval_type == "days":
return self.trigger_interval * 24
return self.trigger_interval
@api.depends("parent_id", "parent_id.parent_position", "trigger_type")
def _compute_parent_position(self):
for record in self:
record.parent_position = (
(record.parent_id.parent_position + 1) if record.parent_id else 0
)
@api.depends(
"domain", "configuration_id.domain", "parent_id", "parent_id.applied_domain"
)
def _compute_applied_domain(self):
for record in self:
eval_context = record.configuration_id._get_eval_context()
record.applied_domain = expression.AND(
[
safe_eval(record.domain, eval_context),
safe_eval(
(record.parent_id and record.parent_id.applied_domain)
or record.configuration_id.domain,
eval_context,
),
]
)
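            # e.g. (illustrative) a step domain [("partner_id", "!=", False)] under a configuration
            # domain [("state", "=", "draft")] yields ["&", ("partner_id", "!=", False), ("state", "=", "draft")]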
@api.model
def _trigger_type_selection(self):
return [
(trigger_id, trigger.get("name", trigger_id))
for trigger_id, trigger in self._trigger_types().items()
]
@api.model
def _trigger_types(self):
"""
        This function will return a dictionary that maps trigger_types to their configurations.
        Each trigger_type can contain:
        - name (Required field)
        - step_type: List of parent step types this trigger can follow.
          If it is not set, it will work for all step types,
          otherwise only for the ones on the list
        - color: Color of the icon
        - icon: Icon to show
        - message_configuration: Message to show on the step configuration
        - allow_expiry: True if it allows expiration of the activity
        - message: Message to show on the record while the expected date is not yet defined
"""
return {
"start": {
"name": _("start of workflow"),
"step_type": [],
"message_configuration": False,
"message": False,
"allow_parent": True,
},
"after_step": {
"name": _("execution of another step"),
"color": "text-success",
"icon": "fa fa-code-fork fa-rotate-180 fa-flip-vertical",
"message_configuration": False,
"message": False,
},
"mail_open": {
"name": _("Mail opened"),
"allow_expiry": True,
"step_type": ["mail"],
"color": "text-success",
"icon": "fa fa-envelope-open-o",
"message_configuration": _("Opened after"),
"message": _("Not opened yet"),
},
"mail_not_open": {
"name": _("Mail not opened"),
"step_type": ["mail"],
"color": "text-danger",
"icon": "fa fa-envelope-open-o",
"message_configuration": _("Not opened within"),
"message": False,
},
"mail_reply": {
"name": _("Mail replied"),
"allow_expiry": True,
"step_type": ["mail"],
"color": "text-success",
"icon": "fa fa-reply",
"message_configuration": _("Replied after"),
"message": _("Not replied yet"),
},
"mail_not_reply": {
"name": _("Mail not replied"),
"step_type": ["mail"],
"color": "text-danger",
"icon": "fa fa-reply",
"message_configuration": _("Not replied within"),
"message": False,
},
"mail_click": {
"name": _("Mail clicked"),
"allow_expiry": True,
"step_type": ["mail"],
"color": "text-success",
"icon": "fa fa-hand-pointer-o",
"message_configuration": _("Clicked after"),
"message": _("Not clicked yet"),
},
"mail_not_clicked": {
"name": _("Mail not clicked"),
"step_type": ["mail"],
"color": "text-danger",
"icon": "fa fa-hand-pointer-o",
"message_configuration": _("Not clicked within"),
"message": False,
},
"mail_bounce": {
"name": _("Mail bounced"),
"allow_expiry": True,
"step_type": ["mail"],
"color": "text-danger",
"icon": "fa fa-exclamation-circle",
"message_configuration": _("Bounced after"),
"message": _("Not bounced yet"),
},
"activity_done": {
"name": _("Activity has been finished"),
"step_type": ["activity"],
"color": "text-success",
"icon": "fa fa-clock-o",
"message_configuration": _("After finished"),
"message": _("Activity not done"),
},
"activity_not_done": {
"name": _("Activity has not been finished"),
"allow_expiry": True,
"step_type": ["activity"],
"color": "text-danger",
"icon": "fa fa-clock-o",
"message_configuration": _("Not finished within"),
"message": False,
},
}
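    # Extension sketch (illustrative, the trigger type and its values are hypothetical): an inheriting
    # module can add its own trigger by overriding _trigger_types and injecting a new key, e.g.
    #     res = super()._trigger_types()
    #     res["sms_click"] = {"name": _("SMS clicked"), "step_type": ["sms"], "icon": "fa fa-mobile"}
    #     return res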
@api.model
def _step_icons(self):
"""
        This function will return a dictionary that maps step types to icons
"""
return {
"mail": "fa fa-envelope",
"activity": "fa fa-clock-o",
"action": "fa fa-cogs",
}
@api.depends("step_type")
def _compute_step_info(self):
step_icons = self._step_icons()
step_name_map = dict(self._fields["step_type"].selection)
for record in self:
record.step_icon = step_icons.get(record.step_type, "")
record.step_name = step_name_map.get(record.step_type, "")
@api.depends("trigger_type")
def _compute_trigger_type_data(self):
trigger_types = self._trigger_types()
for record in self:
record.trigger_type_data = trigger_types[record.trigger_type]
@api.depends("trigger_type")
def _compute_allow_expiry(self):
trigger_types = self._trigger_types()
for record in self:
record.allow_expiry = trigger_types[record.trigger_type].get(
"allow_expiry", False
)
@api.depends("trigger_type")
def _compute_expiry(self):
trigger_types = self._trigger_types()
for record in self:
record.expiry = (
trigger_types[record.trigger_type].get("allow_expiry", False)
and record.expiry
)
@api.depends("step_type")
def _compute_trigger_child_types(self):
trigger_types = self._trigger_types()
for record in self:
trigger_child_types = {}
for trigger_type_id, trigger_type in trigger_types.items():
if "step_type" not in trigger_type:
# All are allowed
trigger_child_types[trigger_type_id] = trigger_type
elif record.step_type in trigger_type["step_type"]:
trigger_child_types[trigger_type_id] = trigger_type
record.trigger_child_types = trigger_child_types
def _check_configuration(self):
trigger_conf = self._trigger_types()[self.trigger_type]
if not self.parent_id and not trigger_conf.get("allow_parent"):
raise ValidationError(
_("%s configurations needs a parent") % trigger_conf["name"]
)
if (
self.parent_id
and "step_type" in trigger_conf
and self.parent_id.step_type not in trigger_conf["step_type"]
):
step_types = dict(self._fields["step_type"].selection)
raise ValidationError(
_("To use a %(name)s trigger type we need a parent of type %(parents)s")
% {
"name": trigger_conf["name"],
"parents": ",".join(
[
name
for step_type, name in step_types.items()
if step_type in trigger_conf["step_type"]
]
),
}
)
@api.constrains("parent_id", "parent_id.step_type", "trigger_type")
def _check_parent_configuration(self):
for record in self:
record._check_configuration()
def _get_record_activity_scheduled_date(self):
if self.trigger_type in [
"mail_open",
"mail_bounce",
"mail_click",
"mail_not_clicked",
"mail_reply",
"mail_not_reply",
"activity_done",
]:
return False
return fields.Datetime.now() + relativedelta(
**{self.trigger_interval_type: self.trigger_interval}
)
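    # Illustration (comment added for clarity, not in the original module): with
    # trigger_interval=2 and trigger_interval_type="days", a time-based step is
    # scheduled at now + 2 days, whereas the event-based triggers listed above
    # return False and only get scheduled later, when the parent step fires the
    # matching event (see automation.record.step._activate()).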
def _get_expiry_date(self):
if not self.expiry:
return False
return fields.Datetime.now() + relativedelta(
**{self.expiry_interval_type: self.expiry_interval}
)
def _create_record_activity_vals(self, record, **kwargs):
return {
"configuration_step_id": self.id,
"expiry_date": self._get_expiry_date(),
"scheduled_date": self._get_record_activity_scheduled_date(),
**kwargs,
}
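    # Example of the values produced for one automation.record.step (illustrative
    # figures only): {"configuration_step_id": 7, "expiry_date": False,
    # "scheduled_date": datetime(2024, 5, 1, 10, 0)}. parent_id and record_id are
    # passed through **kwargs by automation.record.step._fill_childs() when child
    # steps are created.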
| 19,321 | Python | .py | 481 | 28.546778 | 92 | 0.531179 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,622 | automation_filter.py | OCA_automation/automation_oca/models/automation_filter.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class AutomationFilter(models.Model):
_name = "automation.filter"
_description = "Automation Filter"
name = fields.Char(required=True)
model_id = fields.Many2one(
"ir.model",
domain=[("is_mail_thread", "=", True)],
required=True,
ondelete="cascade",
help="Model where the configuration is applied",
)
model = fields.Char(related="model_id.model")
domain = fields.Char(required=True, default="[]", help="Filter to apply")
@api.onchange("model_id")
def _onchange_model(self):
self.domain = []
| 706 | Python | .py | 19 | 31.473684 | 77 | 0.658358 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,623 | automation_tag.py | OCA_automation/automation_oca/models/automation_tag.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from random import randint
from odoo import api, fields, models
class AutomationTag(models.Model):
_name = "automation.tag"
_description = "Automation Tag"
@api.model
def _get_default_color(self):
return randint(1, 11)
name = fields.Char(required=True)
color = fields.Integer(default=lambda r: r._get_default_color())
active = fields.Boolean(default=True)
| 488 | Python | .py | 13 | 33.230769 | 68 | 0.720085 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,624 | __init__.py | OCA_automation/automation_oca/models/__init__.py | from . import automation_configuration
from . import automation_configuration_step
from . import automation_record
from . import automation_record_step
from . import mail_mail
from . import mail_thread
from . import link_tracker
from . import automation_filter
from . import automation_tag
from . import mail_activity
| 318 | Python | .py | 10 | 30.8 | 43 | 0.831169 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,625 | link_tracker.py | OCA_automation/automation_oca/models/link_tracker.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class LinkTrackerClick(models.Model):
_inherit = "link.tracker.click"
automation_record_step_id = fields.Many2one("automation.record.step")
automation_configuration_step_id = fields.Many2one(
related="automation_record_step_id.configuration_step_id", store=True
)
automation_configuration_id = fields.Many2one(
related="automation_record_step_id.configuration_id", store=True
)
@api.model
def add_click(self, code, automation_record_step_id=False, **route_values):
if automation_record_step_id:
tracker_code = self.env["link.tracker.code"].search([("code", "=", code)])
if not tracker_code:
return None
ip = route_values.get("ip", False)
if self.search_count(
[
(
"automation_record_step_id",
"=",
automation_record_step_id,
),
("link_id", "=", tracker_code.link_id.id),
("ip", "=", ip),
]
):
return None
route_values["link_id"] = tracker_code.link_id.id
click_values = self._prepare_click_values_from_route(
automation_record_step_id=automation_record_step_id, **route_values
)
click = self.create(click_values)
click.automation_record_step_id._set_mail_open()
click.automation_record_step_id._set_mail_clicked()
return click
return super().add_click(code, **route_values)
| 1,745 | Python | .py | 40 | 31.5 | 86 | 0.567647 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,626 | mail_activity.py | OCA_automation/automation_oca/models/mail_activity.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class MailActivity(models.Model):
_inherit = "mail.activity"
automation_record_step_id = fields.Many2one("automation.record.step")
def _action_done(self, *args, **kwargs):
if self.automation_record_step_id:
self.automation_record_step_id._set_activity_done()
return super()._action_done(*args, **kwargs)
| 470 | Python | .py | 10 | 41.5 | 73 | 0.705495 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,627 | automation_configuration.py | OCA_automation/automation_oca/models/automation_configuration.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from collections import defaultdict
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.tools.safe_eval import (
datetime as safe_datetime,
dateutil as safe_dateutil,
safe_eval,
time as safe_time,
)
class AutomationConfiguration(models.Model):
_name = "automation.configuration"
_description = "Automation Configuration"
_inherit = ["mail.thread"]
name = fields.Char(required=True)
active = fields.Boolean(default=True)
tag_ids = fields.Many2many("automation.tag")
company_id = fields.Many2one("res.company")
domain = fields.Char(
required=True,
default="[]",
compute="_compute_domain",
help="""
Filter to apply
    The following special variables can be used in the filter:
* datetime
* dateutil
* time
* user
* ref """,
)
editable_domain = fields.Char(
required=True,
default="[]",
help="""Filter to apply
    The following special variables can be used in the filter:
* datetime
* dateutil
* time
* user
* ref """,
)
model_id = fields.Many2one(
"ir.model",
domain=[("is_mail_thread", "=", True)],
required=True,
ondelete="cascade",
help="Model where the configuration is applied",
)
filter_id = fields.Many2one("automation.filter")
filter_domain = fields.Binary(compute="_compute_filter_domain")
model = fields.Char(related="model_id.model")
field_id = fields.Many2one(
"ir.model.fields",
domain="[('model_id', '=', model_id), "
"('ttype', 'in', ['char', 'selection', 'integer', 'text', 'many2one'])]",
help="Used to avoid duplicates",
)
is_periodic = fields.Boolean(
help="Mark it if you want to make the execution periodic"
)
    # The intended flow of states is:
    # draft -> periodic -> done -> draft (for periodic execution)
    #       -> ondemand -> done -> draft (for on demand execution)
state = fields.Selection(
[
("draft", "Draft"),
("periodic", "Periodic"),
("ondemand", "On demand"),
("done", "Done"),
],
default="draft",
required=True,
group_expand="_group_expand_states",
)
automation_step_ids = fields.One2many(
"automation.configuration.step", inverse_name="configuration_id"
)
automation_direct_step_ids = fields.One2many(
"automation.configuration.step",
inverse_name="configuration_id",
domain=[("parent_id", "=", False)],
)
record_test_count = fields.Integer(compute="_compute_record_test_count")
record_count = fields.Integer(compute="_compute_record_count")
record_done_count = fields.Integer(compute="_compute_record_count")
record_run_count = fields.Integer(compute="_compute_record_count")
activity_mail_count = fields.Integer(compute="_compute_activity_count")
activity_action_count = fields.Integer(compute="_compute_activity_count")
click_count = fields.Integer(compute="_compute_click_count")
next_execution_date = fields.Datetime(compute="_compute_next_execution_date")
@api.depends("filter_id.domain", "filter_id", "editable_domain")
def _compute_domain(self):
for record in self:
record.domain = (
record.filter_id and record.filter_id.domain
) or record.editable_domain
@api.depends()
def _compute_click_count(self):
data = self.env["link.tracker.click"].read_group(
[("automation_configuration_id", "in", self.ids)],
[],
["automation_configuration_id"],
lazy=False,
)
mapped_data = {d["automation_configuration_id"][0]: d["__count"] for d in data}
for record in self:
record.click_count = mapped_data.get(record.id, 0)
@api.depends()
def _compute_activity_count(self):
data = self.env["automation.record.step"].read_group(
[
("configuration_id", "in", self.ids),
("state", "=", "done"),
("is_test", "=", False),
],
[],
["configuration_id", "step_type"],
lazy=False,
)
mapped_data = defaultdict(lambda: {})
for d in data:
mapped_data[d["configuration_id"][0]][d["step_type"]] = d["__count"]
for record in self:
record.activity_mail_count = mapped_data[record.id].get("mail", 0)
record.activity_action_count = mapped_data[record.id].get("action", 0)
@api.depends()
def _compute_record_count(self):
data = self.env["automation.record"].read_group(
[("configuration_id", "in", self.ids), ("is_test", "=", False)],
[],
["configuration_id", "state"],
lazy=False,
)
mapped_data = defaultdict(lambda: {})
for d in data:
mapped_data[d["configuration_id"][0]][d["state"]] = d["__count"]
for record in self:
record.record_done_count = mapped_data[record.id].get("done", 0)
record.record_run_count = mapped_data[record.id].get("periodic", 0)
record.record_count = sum(mapped_data[record.id].values())
@api.depends()
def _compute_record_test_count(self):
data = self.env["automation.record"].read_group(
[("configuration_id", "in", self.ids), ("is_test", "=", True)],
[],
["configuration_id"],
lazy=False,
)
mapped_data = {d["configuration_id"][0]: d["__count"] for d in data}
for record in self:
record.record_test_count = mapped_data.get(record.id, 0)
@api.depends("model_id")
def _compute_filter_domain(self):
for record in self:
record.filter_domain = (
[] if not record.model_id else [("model_id", "=", record.model_id.id)]
)
@api.depends("state")
def _compute_next_execution_date(self):
for record in self:
if record.state == "periodic":
record.next_execution_date = self.env.ref(
"automation_oca.cron_configuration_run"
).nextcall
else:
record.next_execution_date = False
@api.onchange("filter_id")
def _onchange_filter(self):
self.model_id = self.filter_id.model_id
@api.onchange("model_id")
def _onchange_model(self):
self.editable_domain = []
self.filter_id = False
self.field_id = False
self.automation_step_ids = [(5, 0, 0)]
def start_automation(self):
self.ensure_one()
if self.state != "draft":
raise ValidationError(_("State must be in draft in order to start"))
self.state = "periodic" if self.is_periodic else "ondemand"
def done_automation(self):
self.ensure_one()
self.state = "done"
def back_to_draft(self):
self.ensure_one()
self.state = "draft"
def cron_automation(self):
for record in self.search([("state", "=", "periodic")]):
record.run_automation()
def _get_eval_context(self):
"""Prepare the context used when evaluating python code
:returns: dict -- evaluation context given to safe_eval
"""
return {
"ref": self.env.ref,
"user": self.env.user,
"time": safe_time,
"datetime": safe_datetime,
"dateutil": safe_dateutil,
}
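    # Example (illustrative): with this context, an editable_domain such as
    #   "[('create_date', '>=', datetime.datetime.now()
    #      - dateutil.relativedelta.relativedelta(days=7))]"
    # can be evaluated by safe_eval() in _get_automation_records_to_create().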
def _get_automation_records_to_create(self):
"""
We will find all the records that fulfill the domain but don't have a record created.
        Also, we need to check the unicity field if one is defined.
In order to do this, we will add some extra joins on the query of the domain
"""
eval_context = self._get_eval_context()
domain = safe_eval(self.domain, eval_context)
Record = self.env[self.model_id.model]
if self.company_id and "company_id" in Record._fields:
            # If a company is defined, we only add the filter when the record
            # model has a company field
domain += [("company_id", "=", self.company_id.id)]
query = Record._where_calc(domain)
alias = query.left_join(
query._tables[Record._table],
"id",
"automation_record",
"res_id",
"automation_record",
"{rhs}.model = %s AND {rhs}.configuration_id = %s AND "
"({rhs}.is_test IS NULL OR NOT {rhs}.is_test)",
(Record._name, self.id),
)
query.add_where("{}.id is NULL".format(alias))
if self.field_id:
# In case of unicity field defined, we need to add this
# left join to find already created records
linked_tab = query.left_join(
query._tables[Record._table],
self.field_id.name,
Record._table,
self.field_id.name,
"linked",
)
alias2 = query.left_join(
linked_tab,
"id",
"automation_record",
"res_id",
"automation_record_linked",
"{rhs}.model = %s AND {rhs}.configuration_id = %s AND "
"({rhs}.is_test IS NULL OR NOT {rhs}.is_test)",
(Record._name, self.id),
)
query.add_where("{}.id is NULL".format(alias2))
from_clause, where_clause, params = query.get_sql()
            # We also need to group by the unicity field in order to avoid
            # duplicates when several matching records are created between two
            # executions (the first one has priority)
query_str = "SELECT {} FROM {} WHERE {}{}{}{} GROUP BY {}".format(
", ".join([f'MIN("{next(iter(query._tables))}".id) as id']),
from_clause,
where_clause or "TRUE",
(" ORDER BY %s" % self.order) if query.order else "",
(" LIMIT %d" % self.limit) if query.limit else "",
(" OFFSET %d" % self.offset) if query.offset else "",
"%s.%s" % (query._tables[Record._table], self.field_id.name),
)
else:
query_str, params = query.select()
self.env.cr.execute(query_str, params)
return Record.browse([r[0] for r in self.env.cr.fetchall()])
def run_automation(self):
self.ensure_one()
if self.state not in ["periodic", "ondemand"]:
return
records = self.env["automation.record"]
for record in self._get_automation_records_to_create():
records |= self._create_record(record)
records.automation_step_ids._trigger_activities()
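    # Summary of the run flow (comment added for clarity, not in the original
    # module): the "automation_oca.cron_configuration_run" cron calls
    # cron_automation(), which runs every periodic configuration;
    # run_automation() creates one automation.record per matching record together
    # with its first-level steps, and _trigger_activities() then schedules the
    # cron triggers that will execute those steps.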
def _create_record(self, record, **kwargs):
return self.env["automation.record"].create(
self._create_record_vals(record, **kwargs)
)
def _create_record_vals(self, record, **kwargs):
return {
**kwargs,
"res_id": record.id,
"model": record._name,
"configuration_id": self.id,
"automation_step_ids": [
(0, 0, activity._create_record_activity_vals(record))
for activity in self.automation_direct_step_ids
],
}
def _group_expand_states(self, states, domain, order):
"""
This is used to show all the states on the kanban view
"""
return [key for key, _val in self._fields["state"].selection]
def save_filter(self):
self.ensure_one()
self.filter_id = self.env["automation.filter"].create(
{
"name": self.name,
"domain": self.editable_domain,
"model_id": self.model_id.id,
}
)
| 12,086 | Python | .py | 301 | 30.219269 | 93 | 0.565572 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,628 | automation_record_step.py | OCA_automation/automation_oca/models/automation_record_step.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import threading
import traceback
from io import StringIO
import werkzeug.urls
from dateutil.relativedelta import relativedelta
from odoo import _, api, fields, models, tools
from odoo.tools.safe_eval import safe_eval
class AutomationRecordStep(models.Model):
_name = "automation.record.step"
_description = "Activities done on the record"
_order = "scheduled_date ASC"
name = fields.Char(related="configuration_step_id.name")
record_id = fields.Many2one("automation.record", required=True, ondelete="cascade")
configuration_step_id = fields.Many2one(
"automation.configuration.step", required=True
)
configuration_id = fields.Many2one(
related="configuration_step_id.configuration_id",
store=True,
)
step_type = fields.Selection(related="configuration_step_id.step_type", store=True)
scheduled_date = fields.Datetime(readonly=True)
expiry_date = fields.Datetime(readonly=True)
processed_on = fields.Datetime(readonly=True)
parent_id = fields.Many2one("automation.record.step", readonly=True)
child_ids = fields.One2many("automation.record.step", inverse_name="parent_id")
trigger_type = fields.Selection(related="configuration_step_id.trigger_type")
trigger_type_data = fields.Json(compute="_compute_trigger_type_data")
step_icon = fields.Char(compute="_compute_step_info")
step_name = fields.Char(compute="_compute_step_info")
state = fields.Selection(
[
("scheduled", "Scheduled"),
("done", "Done"),
("expired", "Expired"),
("rejected", "Rejected"),
("error", "Error"),
("cancel", "Cancelled"),
],
default="scheduled",
readonly=True,
)
error_trace = fields.Text(readonly=True)
parent_position = fields.Integer(
compute="_compute_parent_position", recursive=True, store=True
)
# Mailing fields
message_id = fields.Char(readonly=True)
mail_status = fields.Selection(
[
("sent", "Sent"),
("open", "Opened"),
("bounce", "Bounced"),
("reply", "Replied"),
],
readonly=True,
)
mail_clicked_on = fields.Datetime(readonly=True)
mail_replied_on = fields.Datetime(readonly=True)
mail_opened_on = fields.Datetime(readonly=True)
activity_done_on = fields.Datetime(readonly=True)
is_test = fields.Boolean(related="record_id.is_test", store=True)
step_actions = fields.Json(compute="_compute_step_actions")
@api.depends("trigger_type")
def _compute_trigger_type_data(self):
trigger_types = self.env["automation.configuration.step"]._trigger_types()
for record in self:
record.trigger_type_data = trigger_types[record.trigger_type]
@api.depends("parent_id", "parent_id.parent_position")
def _compute_parent_position(self):
for record in self:
record.parent_position = (
(record.parent_id.parent_position + 1) if record.parent_id else 0
)
@api.depends("step_type")
def _compute_step_info(self):
step_icons = self.env["automation.configuration.step"]._step_icons()
step_name_map = dict(
self.env["automation.configuration.step"]._fields["step_type"].selection
)
for record in self:
record.step_icon = step_icons.get(record.step_type, "")
record.step_name = step_name_map.get(record.step_type, "")
def _check_to_execute(self):
if (
self.configuration_step_id.trigger_type == "mail_not_open"
and self.parent_id.mail_status in ["open", "reply"]
):
return False
if (
self.configuration_step_id.trigger_type == "mail_not_reply"
and self.parent_id.mail_status == "reply"
):
return False
if (
self.configuration_step_id.trigger_type == "mail_not_clicked"
and self.parent_id.mail_clicked_on
):
return False
if (
self.configuration_step_id.trigger_type == "activity_not_done"
and self.parent_id.activity_done_on
):
return False
return True
def run(self, trigger_activity=True):
self.ensure_one()
if self.state != "scheduled":
return self.browse()
if (
self.record_id.resource_ref is None
or not self.record_id.resource_ref.filtered_domain(
safe_eval(self.configuration_step_id.applied_domain)
)
or not self._check_to_execute()
):
self.write({"state": "rejected", "processed_on": fields.Datetime.now()})
return self.browse()
try:
result = getattr(self, "_run_%s" % self.configuration_step_id.step_type)()
self.write({"state": "done", "processed_on": fields.Datetime.now()})
if result:
childs = self._fill_childs()
if trigger_activity:
childs._trigger_activities()
return childs
except Exception:
buff = StringIO()
traceback.print_exc(file=buff)
traceback_txt = buff.getvalue()
self.write(
{
"state": "error",
"error_trace": traceback_txt,
"processed_on": fields.Datetime.now(),
}
)
return self.browse()
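    # Step lifecycle (summary comment, not part of the original file):
    # scheduled -> done when executed, rejected when the domain or trigger check
    # fails, error when an exception is raised, expired via _expiry(), or cancel
    # via cancel().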
def _fill_childs(self, **kwargs):
return self.create(
[
activity._create_record_activity_vals(
self.record_id.resource_ref,
parent_id=self.id,
record_id=self.record_id.id,
**kwargs
)
for activity in self.configuration_step_id.child_ids
]
)
def _run_activity(self):
record = self.env[self.record_id.model].browse(self.record_id.res_id)
vals = {
"summary": self.configuration_step_id.activity_summary or "",
"note": self.configuration_step_id.activity_note or "",
"activity_type_id": self.configuration_step_id.activity_type_id.id,
"automation_record_step_id": self.id,
}
if self.configuration_step_id.activity_date_deadline_range > 0:
range_type = self.configuration_step_id.activity_date_deadline_range_type
vals["date_deadline"] = fields.Date.context_today(self) + relativedelta(
**{range_type: self.configuration_step_id.activity_date_deadline_range}
)
user = False
if self.configuration_step_id.activity_user_type == "specific":
user = self.configuration_step_id.activity_user_id
elif self.configuration_step_id.activity_user_type == "generic":
user = record[self.configuration_step_id.activity_user_field_id.name]
if user:
vals["user_id"] = user.id
record.activity_schedule(**vals)
return True
def _run_mail(self):
author_id = self.configuration_step_id.mail_author_id.id
composer_values = {
"author_id": author_id,
"record_name": False,
"model": self.record_id.model,
"composition_mode": "mass_mail",
"template_id": self.configuration_step_id.mail_template_id.id,
"automation_record_step_id": self.id,
}
res_ids = [self.record_id.res_id]
composer = (
self.env["mail.compose.message"]
.with_context(active_ids=res_ids)
.create(composer_values)
)
composer.write(
composer._onchange_template_id(
self.configuration_step_id.mail_template_id.id,
"mass_mail",
self.record_id.model,
self.record_id.res_id,
)["value"]
)
# composer.body =
extra_context = self._run_mail_context()
composer = composer.with_context(active_ids=res_ids, **extra_context)
# auto-commit except in testing mode
auto_commit = not getattr(threading.current_thread(), "testing", False)
        if not self.is_test:
            # For test records we skip the actual sending; we only want to check
            # that the mail generation works
            composer._action_send_mail(auto_commit=auto_commit)
self.mail_status = "sent"
return True
def _get_mail_tracking_token(self):
return tools.hmac(self.env(su=True), "automation_oca", self.id)
def _get_mail_tracking_url(self):
return werkzeug.urls.url_join(
self.get_base_url(),
"automation_oca/track/%s/%s/blank.gif"
% (self.id, self._get_mail_tracking_token()),
)
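    # Example of a generated tracking URL (illustrative host and values):
    #   https://example.com/automation_oca/track/42/<hmac token>/blank.gif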
def _run_mail_context(self):
return {}
def _run_action(self):
self.configuration_step_id.server_action_id.with_context(
active_model=self.record_id.model,
active_ids=[self.record_id.res_id],
).run()
return True
def _cron_automation_steps(self):
childs = self.browse()
for activity in self.search(
[
("state", "=", "scheduled"),
("scheduled_date", "<=", fields.Datetime.now()),
]
):
childs |= activity.run(trigger_activity=False)
childs._trigger_activities()
self.search(
[
("state", "=", "scheduled"),
("expiry_date", "!=", False),
("expiry_date", "<=", fields.Datetime.now()),
]
)._expiry()
def _trigger_activities(self):
        # Creates a cron trigger.
        # Glue modules could use queue_job for a more fine-grained approach,
        # but a cron trigger fulfills the need here
for date in set(self.mapped("scheduled_date")):
if date:
self.env["ir.cron.trigger"].create(
{
"call_at": date,
"cron_id": self.env.ref("automation_oca.cron_step_execute").id,
}
)
def _expiry(self):
self.write({"state": "expired", "processed_on": fields.Datetime.now()})
def cancel(self):
self.filtered(lambda r: r.state == "scheduled").write(
{"state": "cancel", "processed_on": fields.Datetime.now()}
)
def _activate(self):
todo = self.filtered(lambda r: not r.scheduled_date)
for record in todo:
config = record.configuration_step_id
record.scheduled_date = fields.Datetime.now() + relativedelta(
**{config.trigger_interval_type: config.trigger_interval}
)
todo._trigger_activities()
def _set_activity_done(self):
self.write({"activity_done_on": fields.Datetime.now()})
self.child_ids.filtered(
lambda r: r.trigger_type == "activity_done"
and not r.scheduled_date
and r.state == "scheduled"
)._activate()
def _set_mail_bounced(self):
self.write({"mail_status": "bounce"})
self.child_ids.filtered(
lambda r: r.trigger_type == "mail_bounce"
and not r.scheduled_date
and r.state == "scheduled"
)._activate()
def _set_mail_open(self):
self.filtered(lambda t: t.mail_status not in ["open", "reply"]).write(
{"mail_status": "open", "mail_opened_on": fields.Datetime.now()}
)
self.child_ids.filtered(
lambda r: r.trigger_type
in ["mail_open", "mail_not_reply", "mail_not_clicked"]
and not r.scheduled_date
and r.state == "scheduled"
)._activate()
def _set_mail_clicked(self):
self.filtered(lambda t: not t.mail_clicked_on).write(
{"mail_clicked_on": fields.Datetime.now()}
)
self.child_ids.filtered(
lambda r: r.trigger_type == "mail_click"
and not r.scheduled_date
and r.state == "scheduled"
)._activate()
def _set_mail_reply(self):
self.filtered(lambda t: t.mail_status != "reply").write(
{"mail_status": "reply", "mail_replied_on": fields.Datetime.now()}
)
self.child_ids.filtered(
lambda r: r.trigger_type == "mail_reply"
and not r.scheduled_date
and r.state == "scheduled"
)._activate()
@api.depends("state")
def _compute_step_actions(self):
for record in self:
record.step_actions = record._get_step_actions()
def _get_step_actions(self):
"""
This should return a list of dictionaries that will have the following keys:
- icon: Icon to show (fontawesome icon like fa fa-clock-o)
- name: name of the action to show (translatable value)
- done: if the action succeeded (boolean)
- color: Color to show when done (text-success, text-danger...)
"""
if self.step_type == "activity":
return [
{
"icon": "fa fa-clock-o",
"name": _("Activity Done"),
"done": bool(self.activity_done_on),
"color": "text-success",
}
]
if self.step_type == "mail":
return [
{
"icon": "fa fa-envelope",
"name": _("Sent"),
"done": bool(self.mail_status and self.mail_status != "bounced"),
"color": "text-success",
},
{
"icon": "fa fa-envelope-open-o",
"name": _("Opened"),
"done": bool(
self.mail_status and self.mail_status in ["reply", "open"]
),
"color": "text-success",
},
{
"icon": "fa fa-hand-pointer-o",
"name": _("Clicked"),
"done": bool(self.mail_status and self.mail_clicked_on),
"color": "text-success",
},
{
"icon": "fa fa-reply",
"name": _("Replied"),
"done": bool(self.mail_status and self.mail_status == "reply"),
"color": "text-success",
},
{
"icon": "fa fa-exclamation-circle",
"name": _("Bounced"),
"done": bool(self.mail_status and self.mail_status == "bounce"),
"color": "text-danger",
},
]
return []
| 14,947 | Python | .py | 366 | 29.437158 | 87 | 0.554983 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,629 | main.py | OCA_automation/automation_oca/controllers/main.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import base64
from werkzeug.exceptions import NotFound
from odoo import http, tools
from odoo.http import Response, request
from odoo.tools import consteq
class AutomationOCAController(http.Controller):
# ------------------------------------------------------------
# TRACKING
# ------------------------------------------------------------
@http.route(
"/automation_oca/track/<int:record_id>/<string:token>/blank.gif",
type="http",
auth="public",
)
def automation_oca_mail_open(self, record_id, token, **post):
"""Email tracking. Blank item added.
We will return the blank item allways, but we will make the request only if
the data is correct"""
if consteq(
token,
tools.hmac(request.env(su=True), "automation_oca", record_id),
):
request.env["automation.record.step"].sudo().browse(
record_id
)._set_mail_open()
response = Response()
response.mimetype = "image/gif"
response.data = base64.b64decode(
b"R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=="
# This is the code of a blank small image
)
return response
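    # The tracked URL served by the route above is built by
    # automation.record.step._get_mail_tracking_url() and is presumably embedded
    # as a tracking pixel in the outgoing mail body.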
@http.route(
"/r/<string:code>/au/<int:record_id>/<string:token>", type="http", auth="public"
)
def automation_oca_redirect(self, code, record_id, token, **post):
        # don't assume geoip is set; it is part of the website module,
        # which this module doesn't depend on
country_code = request.geoip.get("country_code")
automation_record_step_id = False
if consteq(
token,
tools.hmac(request.env(su=True), "automation_oca", record_id),
):
automation_record_step_id = record_id
request.env["link.tracker.click"].sudo().add_click(
code,
ip=request.httprequest.remote_addr,
country_code=country_code,
automation_record_step_id=automation_record_step_id,
)
redirect_url = request.env["link.tracker"].get_url_from_code(code)
if not redirect_url:
raise NotFound()
return request.redirect(redirect_url, code=301, local=False)
| 2,355 | Python | .py | 57 | 32.807018 | 88 | 0.59607 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,630 | test_automation_mail.py | OCA_automation/automation_oca/tests/test_automation_mail.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import tools
from odoo.tests.common import Form, HttpCase
from odoo.addons.mail.tests.common import MockEmail
from .common import AutomationTestCase
MAIL_TEMPLATE = """Return-Path: <[email protected]>
To: {to}
cc: {cc}
Received: by mail1.openerp.com (Postfix, from userid 10002)
id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST)
From: {email_from}
Subject: {subject}
MIME-Version: 1.0
Content-Type: multipart/alternative;
boundary="----=_Part_4200734_24778174.1344608186754"
Date: Fri, 10 Aug 2012 14:16:26 +0000
Message-ID: {msg_id}
{extra}
------=_Part_4200734_24778174.1344608186754
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: quoted-printable
I would gladly answer to your mass mailing !
--
Your Dear Customer
------=_Part_4200734_24778174.1344608186754
Content-Type: text/html; charset=utf-8
Content-Transfer-Encoding: quoted-printable
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>=20
<meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8" />
</head>=20
<body style=3D"margin: 0; padding: 0; background: #ffffff;-webkit-text-size-adjust: 100%;">=20
<p>I would gladly answer to your mass mailing !</p>
<p>--<br/>
Your Dear Customer
<p>
</body>
</html>
------=_Part_4200734_24778174.1344608186754--
"""
class TestAutomationMail(AutomationTestCase, MockEmail, HttpCase):
def test_activity_execution(self):
"""
We will check the execution of the tasks and that we cannot execute them again
"""
activity = self.create_mail_activity()
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
messages_01 = self.partner_01.message_ids
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertEqual(1, len(record_activity))
self.assertEqual("done", record_activity.state)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(self.partner_01.message_ids - messages_01)
def test_bounce(self):
"""
Now we will check the execution of scheduled activities"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_bounce"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
parsed_bounce_values = {
"email_from": "[email protected]",
"to": "[email protected]",
"message_id": tools.generate_tracking_message_id("MailTest"),
"bounced_partner": self.env["res.partner"].sudo(),
"bounced_message": self.env["mail.message"].sudo(),
"bounced_email": "",
"bounced_msg_id": [record_activity.message_id],
}
record_activity.invalidate_recordset()
self.assertFalse(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-exclamation-circle"
]
)
self.env["mail.thread"]._routing_handle_bounce(False, parsed_bounce_values)
self.assertEqual("bounce", record_activity.mail_status)
self.assertTrue(record_child_activity.scheduled_date)
record_activity.invalidate_recordset()
self.assertTrue(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-exclamation-circle"
]
)
def test_reply(self):
"""
Now we will check the execution of scheduled activities"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_reply"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
record_activity.invalidate_recordset()
self.assertFalse(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-reply"
]
)
self.gateway_mail_reply_wrecord(
MAIL_TEMPLATE, self.partner_01, use_in_reply_to=True
)
self.assertEqual("reply", record_activity.mail_status)
self.assertTrue(record_child_activity.scheduled_date)
record_activity.invalidate_recordset()
self.assertTrue(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-reply"
]
)
def test_no_reply(self):
"""
        Now we will check the not-reply validation. Remember:
        if the mail is not opened, the scheduled date of the child task will be False
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_not_reply"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
self.url_open(record_activity._get_mail_tracking_url())
self.assertEqual("open", record_activity.mail_status)
self.assertTrue(record_child_activity.scheduled_date)
self.gateway_mail_reply_wrecord(
MAIL_TEMPLATE, self.partner_01, use_in_reply_to=True
)
self.assertEqual("reply", record_activity.mail_status)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual("rejected", record_child_activity.state)
def test_open(self):
"""
Now we will check the execution of scheduled activities"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_open"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
record_activity.invalidate_recordset()
self.assertFalse(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-envelope-open-o"
]
)
self.url_open(record_activity._get_mail_tracking_url())
self.assertEqual("open", record_activity.mail_status)
self.assertTrue(record_child_activity.scheduled_date)
record_activity.invalidate_recordset()
self.assertTrue(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-envelope-open-o"
]
)
def test_open_wrong_code(self):
"""
        We want to ensure that the tracking token is checked on the call
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_open"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
self.url_open(
"/automation_oca/track/%s/INVENTED_CODE/blank.gif" % record_activity.id
)
self.assertEqual("sent", record_activity.mail_status)
self.assertFalse(record_child_activity.scheduled_date)
def test_no_open(self):
"""
Now we will check the not open validation when it is not opened (should be executed)
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_not_open"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertTrue(record_child_activity.scheduled_date)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual("done", record_child_activity.state)
def test_no_open_rejected(self):
"""
Now we will check the not open validation when it was already opened (rejection)
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_not_open"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertTrue(record_child_activity.scheduled_date)
self.url_open(record_activity._get_mail_tracking_url())
self.assertEqual("open", record_activity.mail_status)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual("rejected", record_child_activity.state)
def test_click(self):
"""
Now we will check the execution of scheduled activities that should happen
after a click
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_click"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.env["link.tracker"].search(
[("url", "=", "https://www.twitter.com")]
).unlink()
self.configuration.start_automation()
self.assertEqual(0, self.configuration.click_count)
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.configuration.invalidate_recordset()
self.assertEqual(0, self.configuration.click_count)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
self.url_open(record_activity._get_mail_tracking_url())
self.assertEqual("open", record_activity.mail_status)
self.configuration.invalidate_recordset()
self.assertEqual(0, self.configuration.click_count)
self.assertFalse(record_child_activity.scheduled_date)
record_activity.invalidate_recordset()
self.assertFalse(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-hand-pointer-o"
]
)
tracker = self.env["link.tracker"].search(
[("url", "=", "https://www.twitter.com")]
)
self.assertTrue(tracker)
self.url_open(
"/r/%s/au/%s/%s"
% (
tracker.code,
record_activity.id,
record_activity._get_mail_tracking_token(),
)
)
self.assertEqual("open", record_activity.mail_status)
self.assertEqual(
1,
self.env["link.tracker.click"].search_count(
[
("automation_record_step_id", "=", record_activity.id),
("link_id", "=", tracker.id),
]
),
)
record_activity.invalidate_recordset()
self.assertTrue(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-hand-pointer-o"
]
)
self.assertTrue(record_child_activity.scheduled_date)
self.configuration.invalidate_recordset()
self.assertEqual(1, self.configuration.click_count)
# Now we will check that a second click does not generate a second log
self.url_open(
"/r/%s/au/%s/%s"
% (
tracker.code,
record_activity.id,
record_activity._get_mail_tracking_token(),
)
)
self.assertEqual(
1,
self.env["link.tracker.click"].search_count(
[
("automation_record_step_id", "=", record_activity.id),
("link_id", "=", tracker.id),
]
),
)
self.configuration.invalidate_recordset()
self.assertEqual(1, self.configuration.click_count)
def test_click_wrong_url(self):
"""
        Now we will check that no click is logged when the clicked URL is malformed.
        That can happen because we add a validation token to the URL.
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_click"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
tracker = self.env["link.tracker"].search(
[("url", "=", "https://www.twitter.com")]
)
self.assertTrue(tracker)
self.url_open(
"/r/%s/au/%s/1234"
% (
tracker.code,
record_activity.id,
)
)
self.assertEqual("sent", record_activity.mail_status)
self.assertFalse(record_child_activity.scheduled_date)
# Now we check the case where the code is not found
tracker.unlink()
self.url_open(
"/r/%s/au/%s/%s"
% (
tracker.code,
record_activity.id,
record_activity._get_mail_tracking_token(),
)
)
self.assertEqual("sent", record_activity.mail_status)
self.assertFalse(record_child_activity.scheduled_date)
def test_no_click(self):
"""
Checking the not clicked validation when it is not clicked (should be executed)
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_not_clicked"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
self.url_open(record_activity._get_mail_tracking_url())
self.assertEqual("open", record_activity.mail_status)
self.assertTrue(record_child_activity.scheduled_date)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual("done", record_child_activity.state)
def test_no_click_rejected(self):
"""
Checking the not clicked validation when it was already clicked
"""
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(
parent_id=activity.id, trigger_type="mail_not_clicked"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
with self.mock_mail_gateway():
self.env["automation.record.step"]._cron_automation_steps()
self.assertSentEmail(self.env.user.partner_id, [self.partner_01])
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual("sent", record_activity.mail_status)
self.assertTrue(record_child_activity)
self.assertFalse(record_child_activity.scheduled_date)
self.url_open(record_activity._get_mail_tracking_url())
self.assertEqual("open", record_activity.mail_status)
self.assertTrue(record_child_activity.scheduled_date)
tracker = self.env["link.tracker"].search(
[("url", "=", "https://www.twitter.com")]
)
self.url_open(
"/r/%s/au/%s/%s"
% (
tracker.code,
record_activity.id,
record_activity._get_mail_tracking_token(),
)
)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual("rejected", record_child_activity.state)
def test_is_test_behavior(self):
"""
We want to ensure that no mails are sent on tests
"""
self.create_mail_activity()
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
with Form(
self.env["automation.configuration.test"].with_context(
default_configuration_id=self.configuration.id,
                default_model=self.configuration.model,
)
) as f:
self.assertTrue(f.resource_ref)
f.resource_ref = "%s,%s" % (self.partner_01._name, self.partner_01.id)
wizard = f.save()
wizard_action = wizard.test_record()
record = self.env[wizard_action["res_model"]].browse(wizard_action["res_id"])
self.assertTrue(record)
self.assertEqual("scheduled", record.automation_step_ids.state)
self.assertFalse(record.automation_step_ids.mail_status)
with self.mock_mail_gateway():
record.automation_step_ids.run()
self.assertNotSentEmail()
self.assertEqual("sent", record.automation_step_ids.mail_status)
| 24,704 | Python | .py | 545 | 35.388991 | 95 | 0.616962 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,631 | __init__.py | OCA_automation/automation_oca/tests/__init__.py | from . import test_automation_action
from . import test_automation_activity
from . import test_automation_base
from . import test_automation_mail
from . import test_automation_security
| 185 | Python | .py | 5 | 36 | 38 | 0.833333 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,632 | test_automation_activity.py | OCA_automation/automation_oca/tests/test_automation_activity.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests import Form
from .common import AutomationTestCase
class TestAutomationActivity(AutomationTestCase):
def test_activity_execution(self):
"""
We will check the execution of activity tasks (generation of an activity)
"""
activity = self.create_activity_action()
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertFalse(self.partner_01.activity_ids)
self.env["automation.record.step"]._cron_automation_steps()
self.assertTrue(self.partner_01.activity_ids)
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertEqual(
record_activity, self.partner_01.activity_ids.automation_record_step_id
)
self.assertFalse(record_activity.activity_done_on)
record_activity.invalidate_recordset()
self.assertFalse(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-clock-o"
]
)
self.partner_01.activity_ids.action_feedback()
self.assertTrue(record_activity.activity_done_on)
record_activity.invalidate_recordset()
self.assertTrue(
[
step
for step in record_activity.step_actions
if step["done"] and step["icon"] == "fa fa-clock-o"
]
)
def test_activity_execution_child(self):
"""
        We will check that the child task (activity_done) is only scheduled
        after the activity is done
"""
activity = self.create_activity_action()
child_activity = self.create_server_action(
parent_id=activity.id, trigger_type="activity_done"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.env["automation.record.step"]._cron_automation_steps()
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual(
record_activity, self.partner_01.activity_ids.automation_record_step_id
)
self.assertFalse(record_activity.activity_done_on)
self.assertFalse(record_child_activity.scheduled_date)
self.partner_01.activity_ids.action_feedback()
self.assertTrue(record_activity.activity_done_on)
self.assertTrue(record_child_activity.scheduled_date)
def test_activity_execution_not_done_child_done(self):
"""
        We will check that a task with the activity_not_done trigger is not
        executed if the activity has been done
"""
activity = self.create_activity_action()
child_activity = self.create_server_action(
parent_id=activity.id, trigger_type="activity_not_done"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.env["automation.record.step"]._cron_automation_steps()
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual(
record_activity, self.partner_01.activity_ids.automation_record_step_id
)
self.assertFalse(record_activity.activity_done_on)
self.assertTrue(record_child_activity.scheduled_date)
self.partner_01.activity_ids.action_feedback()
self.assertTrue(record_activity.activity_done_on)
self.assertTrue(record_child_activity.scheduled_date)
self.assertEqual("scheduled", record_child_activity.state)
record_child_activity.run()
self.assertEqual("rejected", record_child_activity.state)
def test_activity_execution_not_done_child_not_done(self):
"""
        We will check that a task with the activity_not_done trigger is executed
        if the activity has not been done
"""
activity = self.create_activity_action()
child_activity = self.create_server_action(
parent_id=activity.id, trigger_type="activity_not_done"
)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.env["automation.record.step"]._cron_automation_steps()
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
record_child_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", child_activity.id)]
)
self.assertEqual(
record_activity, self.partner_01.activity_ids.automation_record_step_id
)
self.assertFalse(record_activity.activity_done_on)
self.assertTrue(record_child_activity.scheduled_date)
self.assertEqual("scheduled", record_child_activity.state)
record_child_activity.run()
self.assertEqual("done", record_child_activity.state)
def test_compute_default_values(self):
activity = self.create_server_action()
self.assertFalse(activity.activity_user_id)
with Form(activity) as f:
f.step_type = "activity"
f.activity_type_id = self.activity_type
self.assertTrue(activity.activity_user_id)
with Form(activity) as f:
f.step_type = "action"
f.server_action_id = self.action
self.assertFalse(activity.activity_user_id)
| 6,385 | Python | .py | 136 | 37.330882 | 87 | 0.642846 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,633 | test_automation_action.py | OCA_automation/automation_oca/tests/test_automation_action.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from .common import AutomationTestCase
class TestAutomationAction(AutomationTestCase):
def test_activity_execution(self):
"""
We will check the execution of the tasks and that we cannot execute them again
"""
activity = self.create_server_action()
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertTrue(self.partner_01.comment)
self.assertTrue(self.partner_02.comment)
self.env["automation.record.step"]._cron_automation_steps()
self.assertFalse(self.partner_01.comment)
self.assertTrue(self.partner_02.comment)
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertEqual(1, len(record_activity))
self.assertEqual("done", record_activity.state)
self.partner_01.comment = "My comment"
# We check that the action is not executed again
record_activity.run()
self.assertFalse(record_activity.step_actions)
self.assertTrue(self.partner_01.comment)
def test_child_execution_filters(self):
"""
        We will create a task that triggers two more tasks, each filtered with an
        extra domain.
        The child tasks should only be created after the first one is finished.
        Also, if one is rejected, the subsequent tasks will not be created.
TASK 1 ---> TASK 1_1 (only for partner 1) --> TASK 1_1_1
---> TASK 1_2 (only for partner 2) --> TASK 1_2_1
In this case, the task 1_1_1 will only be generated for partner 1 and task 1_2_1
for partner 2
"""
self.configuration.editable_domain = "[('id', 'in', [%s, %s])]" % (
self.partner_01.id,
self.partner_02.id,
)
activity_1 = self.create_server_action()
activity_1_1 = self.create_server_action(
parent_id=activity_1.id, domain="[('id', '=', %s)]" % self.partner_01.id
)
activity_1_2 = self.create_server_action(
parent_id=activity_1.id, domain="[('id', '=', %s)]" % self.partner_02.id
)
activity_1_1_1 = self.create_server_action(parent_id=activity_1_1.id)
activity_1_2_1 = self.create_server_action(parent_id=activity_1_2.id)
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertEqual(
0,
self.env["automation.record.step"].search_count(
[
(
"configuration_step_id",
"in",
(
activity_1_1
| activity_1_2
| activity_1_1_1
| activity_1_2_1
).ids,
)
]
),
)
self.assertTrue(self.partner_01.comment)
self.assertTrue(self.partner_02.comment)
self.env["automation.record.step"]._cron_automation_steps()
self.assertFalse(self.partner_01.comment)
self.assertFalse(self.partner_02.comment)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_1.id),
("record_id.res_id", "=", self.partner_01.id),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_2.id),
("record_id.res_id", "=", self.partner_01.id),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_1.id),
("record_id.res_id", "=", self.partner_02.id),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_2.id),
("record_id.res_id", "=", self.partner_02.id),
]
),
)
self.assertEqual(
0,
self.env["automation.record.step"].search_count(
[
(
"configuration_step_id",
"in",
(activity_1_1_1 | activity_1_2_1).ids,
)
]
),
)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_1.id),
("record_id.res_id", "=", self.partner_01.id),
("state", "=", "done"),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_2.id),
("record_id.res_id", "=", self.partner_01.id),
("state", "=", "rejected"),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_1.id),
("record_id.res_id", "=", self.partner_02.id),
("state", "=", "rejected"),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_2.id),
("record_id.res_id", "=", self.partner_02.id),
("state", "=", "done"),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_1_1.id),
("record_id.res_id", "=", self.partner_01.id),
]
),
)
self.assertEqual(
0,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_2_1.id),
("record_id.res_id", "=", self.partner_01.id),
]
),
)
self.assertEqual(
0,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_1_1.id),
("record_id.res_id", "=", self.partner_02.id),
]
),
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[
("configuration_step_id", "=", activity_1_2_1.id),
("record_id.res_id", "=", self.partner_02.id),
]
),
)
| 7,606 | Python | .py | 199 | 24.39196 | 88 | 0.472909 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,634 | test_automation_security.py | OCA_automation/automation_oca/tests/test_automation_security.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests.common import users
from odoo.addons.mail.tests.common import mail_new_test_user
from .common import AutomationTestCase
class TestAutomationSecurity(AutomationTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Removing rules in order to check only what we expect
cls.env["ir.rule"].search(
[("model_id", "=", cls.env.ref("base.model_res_partner").id)]
).toggle_active()
cls.user_automation_01 = mail_new_test_user(
cls.env,
login="user_automation_01",
name="User automation 01",
email="[email protected]",
company_id=cls.env.user.company_id.id,
notification_type="inbox",
groups="base.group_user,automation_oca.group_automation_user",
)
cls.user_automation_02 = mail_new_test_user(
cls.env,
login="user_automation_02",
name="User automation 01",
email="[email protected]",
company_id=cls.env.user.company_id.id,
notification_type="inbox",
groups="base.group_user,automation_oca.group_automation_user",
)
cls.group_1 = cls.env["res.groups"].create(
{
"name": "G1",
"users": [(4, cls.user_automation_01.id)],
"rule_groups": [
(
0,
0,
{
"name": "Rule 01",
"model_id": cls.env.ref("base.model_res_partner").id,
"domain_force": "[('id', '!=', %s)]" % cls.partner_01.id,
},
)
],
}
)
cls.group_2 = cls.env["res.groups"].create(
{
"name": "G2",
"users": [(4, cls.user_automation_02.id)],
"rule_groups": [
(
0,
0,
{
"name": "Rule 01",
"model_id": cls.env.ref("base.model_res_partner").id,
"domain_force": "[('id', '!=', %s)]" % cls.partner_02.id,
},
)
],
}
)
cls.configuration.editable_domain = [
("id", "in", (cls.partner_01 | cls.partner_02).ids)
]
cls.configuration.start_automation()
cls.env["automation.configuration"].cron_automation()
@users("user_automation_01")
def test_security_01(self):
record = self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
self.assertEqual(1, len(record))
self.assertEqual(self.partner_02, record.resource_ref)
@users("user_automation_02")
def test_security_02(self):
record = self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
self.assertEqual(1, len(record))
self.assertEqual(self.partner_01, record.resource_ref)
@users("user_automation_01")
def test_security_deleted_record(self):
original_record = self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
self.partner_02.unlink()
record = self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
self.assertFalse(record)
self.assertTrue(original_record)
self.assertFalse(original_record.read())
| 3,836 | Python | .py | 96 | 26.947917 | 85 | 0.511659 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,635 | test_automation_base.py | OCA_automation/automation_oca/tests/test_automation_base.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from datetime import datetime
from freezegun import freeze_time
from odoo.exceptions import ValidationError
from odoo.tests import Form
from odoo.tools.safe_eval import safe_eval
from .common import AutomationTestCase
class TestAutomationBase(AutomationTestCase):
def test_no_cron_no_start(self):
"""
        We want to check that the system does not generate records until the
        periodic configuration has been started
"""
self.env["automation.configuration"].cron_automation()
self.assertEqual(
0,
self.env["automation.record"].search_count(
[("configuration_id", "=", self.configuration.id)]
),
)
self.configuration.run_automation()
self.assertEqual(
0,
self.env["automation.record"].search_count(
[("configuration_id", "=", self.configuration.id)]
),
)
def test_no_cron_on_demand(self):
"""
        We want to check that the system does not generate records using cron
        for on-demand configurations, but allows manual execution
"""
self.configuration.is_periodic = False
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertEqual(
0,
self.env["automation.record"].search_count(
[("configuration_id", "=", self.configuration.id)]
),
)
self.configuration.run_automation()
self.assertNotEqual(
0,
self.env["automation.record"].search_count(
[("configuration_id", "=", self.configuration.id)]
),
)
def test_next_execution_date(self):
with freeze_time("2022-01-01"):
self.assertFalse(self.configuration.next_execution_date)
self.env.ref(
"automation_oca.cron_configuration_run"
).nextcall = datetime.now()
self.configuration.start_automation()
self.assertEqual(
self.configuration.next_execution_date, datetime(2022, 1, 1, 0, 0, 0)
)
def test_cron_no_duplicates(self):
"""
We want to check that the records are generated only once, not twice
"""
self.create_server_action()
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
record = self.env["automation.record"].search(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_01.id),
]
)
self.assertEqual(
1,
self.env["automation.record"].search_count(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_01.id),
]
),
)
self.assertEqual(
1,
self.env["automation.record"].search_count(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_02.id),
]
),
)
self.env["automation.configuration"].cron_automation()
self.assertEqual(
1,
self.env["automation.record"].search_count(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_01.id),
]
),
)
self.assertEqual(
1,
self.env["automation.record"].search_count(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_02.id),
]
),
)
record = self.env["automation.record"].search(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_01.id),
]
)
self.assertEqual(
1,
self.env["automation.record.step"].search_count(
[("record_id", "=", record.id)]
),
)
def test_filter(self):
"""
        We want to check that records are only generated for
        the records that fulfill the domain filter
"""
self.create_server_action()
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertEqual(
1,
self.env["automation.record"].search_count(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_01.id),
]
),
)
self.assertEqual(
0,
self.env["automation.record"].search_count(
[
("configuration_id", "=", self.configuration.id),
("res_id", "=", self.partner_02.id),
]
),
)
def test_exception(self):
"""
        Check that the error is raised properly and the full error trace is stored
"""
activity = self.create_server_action(server_action_id=self.error_action.id)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
record = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertFalse(record.error_trace)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual(record.state, "error")
self.assertTrue(record.error_trace)
def test_record_resource_information(self):
"""
        Check the computed fields of the record
"""
self.create_server_action(server_action_id=self.error_action.id)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
record = self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
self.assertEqual(self.partner_01.display_name, record.display_name)
self.assertEqual(self.partner_01, record.resource_ref)
record.model = "unexistent.model"
self.assertFalse(record.resource_ref)
def test_expiry(self):
"""
Testing that expired actions are not executed
"""
activity = self.create_server_action(expiry=True, trigger_interval=1)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertEqual("scheduled", record_activity.state)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual("expired", record_activity.state)
def test_cancel(self):
"""
Testing that cancelled actions are not executed
"""
activity = self.create_server_action()
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertEqual("scheduled", record_activity.state)
record_activity.cancel()
self.assertEqual("cancel", record_activity.state)
self.env["automation.record.step"]._cron_automation_steps()
self.assertEqual("cancel", record_activity.state)
def test_counter(self):
"""
Check the counter function
"""
self.create_server_action(server_action_id=self.error_action.id)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.assertEqual(0, self.configuration.record_count)
self.assertEqual(0, self.configuration.record_test_count)
self.env["automation.configuration"].cron_automation()
self.configuration.invalidate_recordset()
self.assertEqual(1, self.configuration.record_count)
self.assertEqual(0, self.configuration.record_test_count)
def test_start_configuration_twice_exception(self):
"""
Check that we cannot start automation twice
"""
self.configuration.start_automation()
with self.assertRaises(ValidationError):
self.configuration.start_automation()
def test_state_automation_management(self):
"""
Testing the change of state
Draft -> Run -> Stop -> Draft
"""
self.configuration.start_automation()
self.assertEqual(self.configuration.state, "periodic")
self.configuration.done_automation()
self.assertEqual(self.configuration.state, "done")
self.env["automation.configuration"].cron_automation()
self.assertFalse(
self.env["automation.record"].search(
[
("configuration_id", "=", self.configuration.id),
]
)
)
self.configuration.back_to_draft()
self.assertEqual(self.configuration.state, "draft")
def test_graph(self):
"""
Checking the graph results.
        We will use 2 parent actions (1 will fail) and a child action of the successful one.
After 2 executions, we should have (1 OK, 0 Errors) for parent and child and
(0 OK, 1 Error) for the failing one.
"""
activity_01 = self.create_server_action()
activity_02 = self.create_server_action(server_action_id=self.error_action.id)
activity_03 = self.create_mail_activity()
child_activity = self.create_server_action(parent_id=activity_01.id)
self.configuration.editable_domain = "[('id', '=', %s)]" % self.partner_01.id
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertEqual(0, self.configuration.activity_mail_count)
self.assertEqual(0, self.configuration.activity_action_count)
self.assertEqual(0, activity_01.graph_done)
self.assertEqual(0, activity_01.graph_error)
self.assertEqual(0, sum(d["y"] for d in activity_01.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in activity_01.graph_data["error"]))
self.assertEqual(0, activity_02.graph_done)
self.assertEqual(0, activity_02.graph_error)
self.assertEqual(0, sum(d["y"] for d in activity_02.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in activity_02.graph_data["error"]))
self.assertEqual(0, activity_03.graph_done)
self.assertEqual(0, activity_03.graph_error)
self.assertEqual(0, sum(d["y"] for d in activity_03.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in activity_03.graph_data["error"]))
self.assertEqual(0, child_activity.graph_done)
self.assertEqual(0, child_activity.graph_error)
self.assertEqual(0, sum(d["y"] for d in child_activity.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in child_activity.graph_data["error"]))
self.env["automation.record.step"]._cron_automation_steps()
self.configuration.invalidate_recordset()
self.assertEqual(1, self.configuration.activity_mail_count)
self.assertEqual(1, self.configuration.activity_action_count)
activity_01.invalidate_recordset()
self.assertEqual(1, activity_01.graph_done)
self.assertEqual(0, activity_01.graph_error)
self.assertEqual(1, sum(d["y"] for d in activity_01.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in activity_01.graph_data["error"]))
activity_02.invalidate_recordset()
self.assertEqual(0, activity_02.graph_done)
self.assertEqual(1, activity_02.graph_error)
self.assertEqual(0, sum(d["y"] for d in activity_02.graph_data["done"]))
self.assertEqual(1, sum(d["y"] for d in activity_02.graph_data["error"]))
activity_03.invalidate_recordset()
self.assertEqual(1, activity_03.graph_done)
self.assertEqual(0, activity_03.graph_error)
self.assertEqual(1, sum(d["y"] for d in activity_03.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in activity_03.graph_data["error"]))
child_activity.invalidate_recordset()
self.assertEqual(0, child_activity.graph_done)
self.assertEqual(0, child_activity.graph_error)
self.assertEqual(0, sum(d["y"] for d in child_activity.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in child_activity.graph_data["error"]))
self.env["automation.record.step"]._cron_automation_steps()
self.configuration.invalidate_recordset()
self.assertEqual(1, self.configuration.activity_mail_count)
self.assertEqual(2, self.configuration.activity_action_count)
activity_01.invalidate_recordset()
self.assertEqual(1, activity_01.graph_done)
self.assertEqual(0, activity_01.graph_error)
self.assertEqual(1, sum(d["y"] for d in activity_01.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in activity_01.graph_data["error"]))
activity_02.invalidate_recordset()
self.assertEqual(0, activity_02.graph_done)
self.assertEqual(1, activity_02.graph_error)
self.assertEqual(0, sum(d["y"] for d in activity_02.graph_data["done"]))
self.assertEqual(1, sum(d["y"] for d in activity_02.graph_data["error"]))
activity_03.invalidate_recordset()
self.assertEqual(1, activity_03.graph_done)
self.assertEqual(0, activity_03.graph_error)
self.assertEqual(1, sum(d["y"] for d in activity_03.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in activity_03.graph_data["error"]))
child_activity.invalidate_recordset()
self.assertEqual(1, child_activity.graph_done)
self.assertEqual(0, child_activity.graph_error)
self.assertEqual(1, sum(d["y"] for d in child_activity.graph_data["done"]))
self.assertEqual(0, sum(d["y"] for d in child_activity.graph_data["error"]))
def test_schedule_date_computation_hours(self):
with freeze_time("2022-01-01"):
activity = self.create_server_action(trigger_interval=1)
self.assertEqual(1, activity.trigger_interval_hours)
self.configuration.editable_domain = (
"[('id', '=', %s)]" % self.partner_01.id
)
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertEqual("scheduled", record_activity.state)
self.assertEqual(
record_activity.scheduled_date, datetime(2022, 1, 1, 1, 0, 0, 0)
)
def test_schedule_date_computation_days(self):
with freeze_time("2022-01-01"):
activity = self.create_server_action(
trigger_interval=1, trigger_interval_type="days"
)
self.assertEqual(24, activity.trigger_interval_hours)
self.configuration.editable_domain = (
"[('id', '=', %s)]" % self.partner_01.id
)
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
record_activity = self.env["automation.record.step"].search(
[("configuration_step_id", "=", activity.id)]
)
self.assertEqual("scheduled", record_activity.state)
self.assertEqual(
record_activity.scheduled_date, datetime(2022, 1, 2, 0, 0, 0, 0)
)
def test_onchange_activity_trigger_type(self):
activity = self.create_mail_activity()
child_activity = self.create_mail_activity(parent_id=activity.id)
self.assertEqual(child_activity.trigger_type, "after_step")
self.assertTrue(child_activity.parent_id)
with Form(child_activity) as f:
f.trigger_type = "mail_bounce"
self.assertTrue(f.parent_id)
def test_onchange_activity_trigger_type_start(self):
activity = self.create_server_action()
child_activity = self.create_server_action(parent_id=activity.id)
self.assertEqual(child_activity.trigger_type, "after_step")
self.assertTrue(child_activity.parent_id)
with Form(child_activity) as f:
f.trigger_type = "start"
self.assertFalse(f.parent_id)
def test_field_not_field_unicity(self):
self.configuration.editable_domain = (
"[('id', 'in', %s)]" % (self.partner_01 | self.partner_02).ids
)
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertEqual(
2,
len(
self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
),
)
def test_field_field_unicity(self):
self.configuration.editable_domain = (
"[('id', 'in', %s)]" % (self.partner_01 | self.partner_02).ids
)
self.configuration.field_id = self.env.ref("base.field_res_partner__email")
self.configuration.start_automation()
self.env["automation.configuration"].cron_automation()
self.assertEqual(
1,
len(
self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
),
)
self.partner_01.email = "t" + self.partner_01.email
self.env["automation.configuration"].cron_automation()
self.assertEqual(
2,
len(
self.env["automation.record"].search(
[("configuration_id", "=", self.configuration.id)]
)
),
)
def test_configuration_filter_domain(self):
domain = [("partner_id", "=", self.partner_01.id)]
self.assertFalse(self.configuration.filter_id)
self.configuration.editable_domain = domain
self.configuration.save_filter()
self.assertTrue(self.configuration.filter_id)
self.assertEqual(self.configuration.model_id, self.configuration.model_id)
domain = [("partner_id", "=", self.partner_02.id)]
self.configuration.invalidate_recordset()
self.assertNotEqual(domain, safe_eval(self.configuration.domain))
self.configuration.filter_id.domain = domain
self.assertEqual(domain, safe_eval(self.configuration.domain))
with Form(self.env["automation.configuration"]) as f:
self.assertFalse(f.filter_domain)
f.name = "My other configuration"
f.filter_id = self.configuration.filter_id
self.assertEqual(f.model_id, self.env.ref("base.model_res_partner"))
self.assertIn(
self.configuration.filter_id,
self.env["automation.filter"].search(f.filter_domain),
)
f.model_id = self.env.ref("base.model_res_users")
self.assertFalse(f.filter_id)
def test_filter_onchange(self):
with Form(self.env["automation.filter"]) as f:
f.name = "My other configuration"
f.model_id = self.env.ref("base.model_res_partner")
f.domain = [("id", "=", 1)]
f.model_id = self.env.ref("base.model_res_users")
self.assertFalse(safe_eval(f.domain))
def test_constrains_mail(self):
activity = self.create_server_action()
with self.assertRaises(ValidationError):
self.create_server_action(parent_id=activity.id, trigger_type="mail_bounce")
def test_constrains_start_with_parent(self):
activity = self.create_server_action()
with self.assertRaises(ValidationError):
self.create_server_action(parent_id=activity.id, trigger_type="start")
def test_constrains_no_start_without_parent(self):
with self.assertRaises(ValidationError):
self.create_server_action(parent_id=False, trigger_type="after_step")
def test_is_test_behavior(self):
"""
        We want to ensure that no mails are sent during tests
"""
self.create_server_action()
with Form(
self.env["automation.configuration.test"].with_context(
default_configuration_id=self.configuration.id,
                default_model=self.configuration.model,
)
) as f:
self.assertTrue(f.resource_ref)
f.resource_ref = "%s,%s" % (self.partner_01._name, self.partner_01.id)
wizard = f.save()
wizard_action = wizard.test_record()
record = self.env[wizard_action["res_model"]].browse(wizard_action["res_id"])
self.assertEqual(self.configuration, record.configuration_id)
self.assertEqual(1, self.configuration.record_test_count)
self.assertEqual(0, self.configuration.record_count)
def test_check_icons(self):
action = self.create_server_action()
mail = self.create_mail_activity()
activity = self.create_activity_action()
self.assertEqual(action.step_icon, "fa fa-cogs")
self.assertEqual(mail.step_icon, "fa fa-envelope")
self.assertEqual(activity.step_icon, "fa fa-clock-o")
def test_trigger_types(self):
action = self.create_server_action()
child = self.create_server_action(parent_id=action.id)
self.assertTrue(action.trigger_type_data["allow_parent"])
self.assertFalse(child.trigger_type_data.get("allow_parent", False))
def test_trigger_childs(self):
action = self.create_server_action()
mail = self.create_mail_activity()
activity = self.create_activity_action()
self.assertEqual(1, len(action.trigger_child_types))
self.assertEqual({"after_step"}, set(action.trigger_child_types.keys()))
self.assertEqual(8, len(mail.trigger_child_types))
self.assertEqual(
{
"after_step",
"mail_open",
"mail_not_open",
"mail_reply",
"mail_not_reply",
"mail_click",
"mail_not_clicked",
"mail_bounce",
},
set(mail.trigger_child_types.keys()),
)
self.assertEqual(3, len(activity.trigger_child_types))
self.assertEqual(
{"after_step", "activity_done", "activity_not_done"},
set(activity.trigger_child_types.keys()),
)
| 23,577 | Python | .py | 512 | 35.302734 | 88 | 0.60475 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,636 | common.py | OCA_automation/automation_oca/tests/common.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class AutomationTestCase(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.env["automation.configuration"].search([]).toggle_active()
cls.action = cls.env["ir.actions.server"].create(
{
"name": "Demo action",
"state": "code",
"model_id": cls.env.ref("base.model_res_partner").id,
"code": "records.write({'comment': env.context.get('key_value')})",
}
)
cls.activity_type = cls.env["mail.activity.type"].create({"name": "DEMO"})
cls.error_action = cls.env["ir.actions.server"].create(
{
"name": "Demo action",
"state": "code",
"model_id": cls.env.ref("base.model_res_partner").id,
"code": "raise UserError('ERROR')",
}
)
cls.template = cls.env["mail.template"].create(
{
"name": "My template",
"model_id": cls.env.ref("base.model_res_partner").id,
"subject": "Subject",
"partner_to": "{{ object.id }}",
"body_html": 'My template <a href="https://www.twitter.com" /> with link',
}
)
cls.partner_01 = cls.env["res.partner"].create(
{"name": "Demo partner", "comment": "Demo", "email": "[email protected]"}
)
cls.partner_02 = cls.env["res.partner"].create(
{"name": "Demo partner 2", "comment": "Demo", "email": "[email protected]"}
)
cls.configuration = cls.env["automation.configuration"].create(
{
"name": "Test configuration",
"model_id": cls.env.ref("base.model_res_partner").id,
"is_periodic": True,
}
)
@classmethod
def create_server_action(cls, parent_id=False, **kwargs):
return cls.env["automation.configuration.step"].create(
{
"name": "Demo activity",
"parent_id": parent_id,
"configuration_id": cls.configuration.id,
"step_type": "action",
"server_action_id": cls.action.id,
"trigger_type": "after_step" if parent_id else "start",
**kwargs,
}
)
@classmethod
def create_activity_action(cls, parent_id=False, **kwargs):
return cls.env["automation.configuration.step"].create(
{
"name": "Demo activity",
"parent_id": parent_id,
"configuration_id": cls.configuration.id,
"step_type": "activity",
"activity_type_id": cls.activity_type.id,
"trigger_type": "after_step" if parent_id else "start",
**kwargs,
}
)
@classmethod
def create_mail_activity(cls, parent_id=False, trigger_type=False, **kwargs):
return cls.env["automation.configuration.step"].create(
{
"name": "Demo activity",
"parent_id": parent_id,
"configuration_id": cls.configuration.id,
"step_type": "mail",
"mail_template_id": cls.template.id,
"trigger_type": trigger_type
or ("after_step" if parent_id else "start"),
**kwargs,
}
)
| 3,558 | Python | .py | 87 | 28.333333 | 90 | 0.5114 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,637 | automation_configuration_test.py | OCA_automation/automation_oca/wizards/automation_configuration_test.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class AutomationConfigurationTest(models.TransientModel):
_name = "automation.configuration.test"
_description = "Test automation configuration"
configuration_id = fields.Many2one("automation.configuration", required=True)
model = fields.Char(related="configuration_id.model")
resource_ref = fields.Reference(
selection="_selection_target_model",
readonly=False,
required=True,
store=True,
compute="_compute_resource_ref",
)
@api.model
def _selection_target_model(self):
return [
(model.model, model.name)
for model in self.env["ir.model"]
.sudo()
.search([("is_mail_thread", "=", True)])
]
@api.depends("model")
def _compute_resource_ref(self):
for record in self:
if record.model and record.model in self.env:
res = self.env[record.model].search([], limit=1)
record.resource_ref = "%s,%s" % (record.model, res.id)
else:
record.resource_ref = None
def test_record(self):
return self.configuration_id._create_record(
self.resource_ref, is_test=True
).get_formview_action()
| 1,363 | Python | .py | 35 | 30.4 | 81 | 0.625 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,638 | mail_compose_message.py | OCA_automation/automation_oca/wizards/mail_compose_message.py | # Copyright 2024 Dixmit
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class MailComposeMessage(models.TransientModel):
_inherit = "mail.compose.message"
automation_record_step_id = fields.Many2one("automation.record.step")
def get_mail_values(self, res_ids):
result = super().get_mail_values(res_ids)
if self.automation_record_step_id:
for res_id in res_ids:
result[res_id][
"automation_record_step_id"
] = self.automation_record_step_id.id
return result
| 612 | Python | .py | 14 | 35.142857 | 73 | 0.655405 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,639 | setup.py | OCA_automation/setup/_metapackage/setup.py | import setuptools
with open('VERSION.txt', 'r') as f:
version = f.read().strip()
setuptools.setup(
name="odoo-addons-oca-automation",
description="Meta package for oca-automation Odoo addons",
version=version,
install_requires=[
'odoo-addon-automation_oca>=16.0dev,<16.1dev',
],
classifiers=[
'Programming Language :: Python',
'Framework :: Odoo',
'Framework :: Odoo :: 16.0',
]
)
| 447 | Python | .py | 16 | 22.8125 | 62 | 0.636364 | OCA/automation | 8 | 9 | 7 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,640 | ocr.py | Zipstack_unstract-sdk/src/unstract/sdk/ocr.py | from abc import ABCMeta
from typing import Optional
from deprecated import deprecated
from unstract.sdk.adapter import ToolAdapter
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.ocr import adapters
from unstract.sdk.adapters.ocr.ocr_adapter import OCRAdapter
from unstract.sdk.constants import LogLevel
from unstract.sdk.exceptions import OCRError
from unstract.sdk.tool.base import BaseTool
class OCR(metaclass=ABCMeta):
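    """Class to handle OCR adapters for Unstract Tools."""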
def __init__(
self,
tool: BaseTool,
adapter_instance_id: Optional[str] = None,
):
self._tool = tool
self._ocr_adapters = adapters
self._adapter_instance_id = adapter_instance_id
self._ocr_instance: OCRAdapter = None
self._initialise(adapter_instance_id)
def _initialise(self, adapter_instance_id):
if self._adapter_instance_id:
self._ocr_instance: OCRAdapter = self._get_ocr()
def _get_ocr(self) -> Optional[OCRAdapter]:
try:
if not self._adapter_instance_id:
raise OCRError("Adapter instance ID not set. " "Initialisation failed")
ocr_config = ToolAdapter.get_adapter_config(
self._tool, self._adapter_instance_id
)
ocr_adapter_id = ocr_config.get(Common.ADAPTER_ID)
if ocr_adapter_id in self._ocr_adapters:
ocr_adapter = self._ocr_adapters[ocr_adapter_id][Common.METADATA][
Common.ADAPTER
]
ocr_metadata = ocr_config.get(Common.ADAPTER_METADATA)
self._ocr_instance = ocr_adapter(ocr_metadata)
return self._ocr_instance
except Exception as e:
self._tool.stream_log(
log=f"Unable to get OCR adapter {self._adapter_instance_id}: {e}",
level=LogLevel.ERROR,
)
return None
def process(
self, input_file_path: str, output_file_path: Optional[str] = None
) -> str:
return self._ocr_instance.process(input_file_path, output_file_path)
@deprecated("Instantiate OCR and call process() instead")
def get_x2text(self, adapter_instance_id: str) -> OCRAdapter:
if not self._ocr_instance:
self._adapter_instance_id = adapter_instance_id
self._ocr_instance: OCRAdapter = self._get_ocr()
return self._ocr_instance
| 2,410 | Python | .py | 55 | 34.490909 | 87 | 0.646055 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,641 | prompt.py | Zipstack_unstract-sdk/src/unstract/sdk/prompt.py | import logging
from typing import Any, Optional
import requests
from requests import ConnectionError, RequestException, Response
from unstract.sdk.constants import LogLevel, PromptStudioKeys, ToolEnv
from unstract.sdk.helper import SdkHelper
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.utils.common_utils import log_elapsed
logger = logging.getLogger(__name__)
class PromptTool:
"""Class to handle prompt service methods for Unstract Tools."""
def __init__(
self,
tool: BaseTool,
prompt_host: str,
prompt_port: str,
is_public_call: bool = False,
) -> None:
"""
Args:
tool (AbstractTool): Instance of AbstractTool
            prompt_host (str): Host of prompt service
            prompt_port (str): Port of prompt service
"""
self.tool = tool
self.base_url = SdkHelper.get_platform_base_url(prompt_host, prompt_port)
self.is_public_call = is_public_call
if not is_public_call:
self.bearer_token = tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY)
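    # Usage sketch (illustrative only; the `tool` instance, host and port below are
    # assumptions, not values defined in this module):
    #   prompt_tool = PromptTool(tool=tool, prompt_host="http://prompt-service",
    #                            prompt_port="3003")
    #   result = prompt_tool.answer_prompt(payload={...})
    #   if result["status"] == "OK":
    #       structured_output = result["structure_output"]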
@log_elapsed(operation="ANSWER_PROMPTS")
def answer_prompt(
self, payload: dict[str, Any], params: Optional[dict[str, str]] = None
) -> dict[str, Any]:
url_path = "answer-prompt"
if self.is_public_call:
url_path = "answer-prompt-public"
return self._post_call(
url_path=url_path,
payload=payload,
params=params,
)
def single_pass_extraction(
self, payload: dict[str, Any], params: Optional[dict[str, str]] = None
) -> dict[str, Any]:
return self._post_call(
url_path="single-pass-extraction",
payload=payload,
params=params,
)
def summarize(
self, payload: dict[str, Any], params: Optional[dict[str, str]] = None
) -> dict[str, Any]:
return self._post_call(url_path="summarize", payload=payload, params=params)
def _post_call(
self,
url_path: str,
payload: dict[str, Any],
params: Optional[dict[str, str]] = None,
) -> dict[str, Any]:
"""Invokes and communicates to prompt service to fetch response for the
prompt.
Args:
url_path (str): URL path to the service endpoint
payload (dict): Payload to send in the request body
params (dict, optional): Query parameters to include in the request
Returns:
dict: Response from the prompt service
Sample Response:
{
"status": "OK",
"error": "",
"cost": 0,
structure_output : {}
}
"""
result: dict[str, Any] = {
"status": "ERROR",
"error": "",
"cost": 0,
"structure_output": "",
}
url: str = f"{self.base_url}/{url_path}"
headers: dict[str, str] = {}
if not self.is_public_call:
headers = {"Authorization": f"Bearer {self.bearer_token}"}
response: Response = Response()
try:
response = requests.post(
url=url, json=payload, params=params, headers=headers
)
response.raise_for_status()
result["status"] = "OK"
result["structure_output"] = response.text
except ConnectionError as connect_err:
msg = "Unable to connect to prompt service. Please contact admin."
self._stringify_and_stream_err(connect_err, msg)
result["error"] = msg
except RequestException as e:
# Extract error information from the response if available
error_message = str(e)
content_type = response.headers.get("Content-Type", "").lower()
if "application/json" in content_type:
response_json = response.json()
if "error" in response_json:
error_message = response_json["error"]
elif response.text:
error_message = response.text
result["error"] = error_message
self.tool.stream_log(
f"Error while fetching response for prompt: {result['error']}",
level=LogLevel.ERROR,
)
return result
def _stringify_and_stream_err(self, err: RequestException, msg: str) -> None:
error_message = str(err)
trace = f"{msg}: {error_message}"
self.tool.stream_log(trace, level=LogLevel.ERROR)
logger.error(trace)
@staticmethod
def get_exported_tool(
tool: BaseTool, prompt_registry_id: str
) -> Optional[dict[str, Any]]:
"""Get exported custom tool by the help of unstract DB tool.
Args:
prompt_registry_id (str): ID of the prompt_registry_id
tool (AbstractTool): Instance of AbstractTool
Required env variables:
PLATFORM_HOST: Host of platform service
PLATFORM_PORT: Port of platform service
"""
platform_host = tool.get_env_or_die(ToolEnv.PLATFORM_HOST)
platform_port = tool.get_env_or_die(ToolEnv.PLATFORM_PORT)
tool.stream_log("Connecting to DB and getting exported tool metadata")
base_url = SdkHelper.get_platform_base_url(platform_host, platform_port)
bearer_token = tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY)
url = f"{base_url}/custom_tool_instance"
query_params = {PromptStudioKeys.PROMPT_REGISTRY_ID: prompt_registry_id}
headers = {"Authorization": f"Bearer {bearer_token}"}
response = requests.get(url, headers=headers, params=query_params)
if response.status_code == 200:
adapter_data: dict[str, Any] = response.json()
tool.stream_log(
"Successfully retrieved metadata for the exported "
f"tool: {prompt_registry_id}"
)
return adapter_data
elif response.status_code == 404:
tool.stream_error_and_exit(
f"Exported tool {prompt_registry_id} is not found"
)
return None
else:
tool.stream_error_and_exit(
f"Error while retrieving tool metadata "
"for the exported tool: "
f"{prompt_registry_id} / {response.reason}"
)
return None
| 6,457 | Python | .py | 158 | 30.424051 | 84 | 0.589107 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,642 | vector_db.py | Zipstack_unstract-sdk/src/unstract/sdk/vector_db.py | import logging
from collections.abc import Sequence
from typing import Any, Optional, Union
from deprecated import deprecated
from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.core.indices.base import IndexType
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import BaseNode, Document
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
VectorStore,
VectorStoreQueryResult,
)
from unstract.sdk.adapter import ToolAdapter
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.vectordb import adapters
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.constants import LogLevel, ToolEnv
from unstract.sdk.embedding import Embedding
from unstract.sdk.exceptions import SdkError, VectorDBError
from unstract.sdk.helper import SdkHelper
from unstract.sdk.platform import PlatformHelper
from unstract.sdk.tool.base import BaseTool
logger = logging.getLogger(__name__)
class VectorDB:
"""Class to handle VectorDB for Unstract Tools."""
vector_db_adapters = adapters
DEFAULT_EMBEDDING_DIMENSION = 1536
EMBEDDING_INSTANCE_ERROR = (
"Vector DB does not have an embedding initialised."
"Migrate to VectorDB instead of deprecated ToolVectorDB "
"and pass in an Embedding to proceed"
)
def __init__(
self,
tool: BaseTool,
adapter_instance_id: Optional[str] = None,
embedding: Optional[Embedding] = None,
):
self._tool = tool
self._adapter_instance_id = adapter_instance_id
self._vector_db_instance = None
self._embedding_instance = None
self._embedding_dimension = VectorDB.DEFAULT_EMBEDDING_DIMENSION
self._initialise(embedding)
def _initialise(self, embedding: Optional[Embedding] = None):
if embedding:
self._embedding_instance = embedding._embedding_instance
self._embedding_dimension = embedding._length
if self._adapter_instance_id:
self._vector_db_instance: Union[
BasePydanticVectorStore, VectorStore
] = self._get_vector_db()
def _get_org_id(self) -> str:
platform_helper = PlatformHelper(
tool=self._tool,
platform_host=self._tool.get_env_or_die(ToolEnv.PLATFORM_HOST),
platform_port=self._tool.get_env_or_die(ToolEnv.PLATFORM_PORT),
)
# fetch org id from bearer token
platform_details = platform_helper.get_platform_details()
if not platform_details:
# Errors are logged by the SDK itself
raise SdkError("Error getting platform details")
account_id = platform_details.get("organization_id")
return account_id
def _get_vector_db(self) -> Union[BasePydanticVectorStore, VectorStore]:
"""Gets an instance of LlamaIndex's VectorStore.
Returns:
Union[BasePydanticVectorStore, VectorStore]: Vector store instance
"""
try:
if not self._adapter_instance_id:
raise VectorDBError(
"Adapter instance ID not set. Initialisation failed"
)
vector_db_config = ToolAdapter.get_adapter_config(
self._tool, self._adapter_instance_id
)
vector_db_adapter_id = vector_db_config.get(Common.ADAPTER_ID)
if vector_db_adapter_id not in self.vector_db_adapters:
raise SdkError(
f"VectorDB adapter not supported : " f"{vector_db_adapter_id}"
)
vector_db_adapter = self.vector_db_adapters[vector_db_adapter_id][
Common.METADATA
][Common.ADAPTER]
vector_db_metadata = vector_db_config.get(Common.ADAPTER_METADATA)
# Adding the collection prefix and embedding type
# to the metadata
if not SdkHelper.is_public_adapter(adapter_id=self._adapter_instance_id):
org = self._get_org_id()
vector_db_metadata[VectorDbConstants.VECTOR_DB_NAME] = org
vector_db_metadata[
VectorDbConstants.EMBEDDING_DIMENSION
] = self._embedding_dimension
self.vector_db_adapter_class = vector_db_adapter(vector_db_metadata)
return self.vector_db_adapter_class.get_vector_db_instance()
except Exception as e:
self._tool.stream_log(
log=f"Unable to get vector_db {self._adapter_instance_id}: {e}",
level=LogLevel.ERROR,
)
raise VectorDBError(f"Error getting vectorDB instance: {e}") from e
def index_document(
self,
documents: Sequence[Document],
chunk_size: int = 1024,
chunk_overlap: int = 128,
show_progress: bool = False,
**index_kwargs,
) -> IndexType:
if not self._embedding_instance:
raise VectorDBError(self.EMBEDDING_INSTANCE_ERROR)
storage_context = self.get_storage_context()
parser = SentenceSplitter.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=self._embedding_instance.callback_manager,
)
return VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
show_progress=show_progress,
embed_model=self._embedding_instance,
transformations=[parser],
callback_manager=self._embedding_instance.callback_manager,
**index_kwargs,
)
@deprecated(version="0.47.0", reason="Use index_document() instead")
def get_vector_store_index_from_storage_context(
self,
documents: Sequence[Document],
storage_context: Optional[StorageContext] = None,
show_progress: bool = False,
callback_manager=None,
**kwargs,
) -> IndexType:
if not self._embedding_instance:
raise VectorDBError(self.EMBEDDING_INSTANCE_ERROR)
parser = kwargs.get("node_parser")
return VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
show_progress=show_progress,
embed_model=self._embedding_instance,
node_parser=parser,
callback_manager=self._embedding_instance.callback_manager,
)
def get_vector_store_index(self, **kwargs: Any) -> VectorStoreIndex:
if not self._embedding_instance:
raise VectorDBError(self.EMBEDDING_INSTANCE_ERROR)
return VectorStoreIndex.from_vector_store(
vector_store=self._vector_db_instance,
embed_model=self._embedding_instance,
callback_manager=self._embedding_instance.callback_manager,
)
def get_storage_context(self) -> StorageContext:
return StorageContext.from_defaults(vector_store=self._vector_db_instance)
def query(self, query) -> VectorStoreQueryResult:
return self._vector_db_instance.query(query=query)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
if not self.vector_db_adapter_class:
raise VectorDBError("Vector DB is not initialised properly")
self.vector_db_adapter_class.delete(
ref_doc_id=ref_doc_id, delete_kwargs=delete_kwargs
)
def add(
self,
ref_doc_id,
nodes: list[BaseNode],
) -> list[str]:
if not self.vector_db_adapter_class:
raise VectorDBError("Vector DB is not initialised properly")
self.vector_db_adapter_class.add(
ref_doc_id=ref_doc_id,
nodes=nodes,
)
def close(self, **kwargs):
if not self.vector_db_adapter_class:
raise VectorDBError("Vector DB is not initialised properly")
self.vector_db_adapter_class.close()
def get_class_name(self) -> str:
"""Gets the class name of the Llama Index Vector DB.
Args:
NA
Returns:
Class name
"""
return self._vector_db_instance.class_name()
@deprecated("Use VectorDB instead of ToolVectorDB")
def get_vector_db(
self, adapter_instance_id: str, embedding_dimension: int
) -> Union[BasePydanticVectorStore, VectorStore]:
if not self._vector_db_instance:
self._adapter_instance_id = adapter_instance_id
self._initialise()
return self._vector_db_instance
# Legacy
ToolVectorDB = VectorDB
| 8,636 | Python | .py | 200 | 33.725 | 85 | 0.653302 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,643 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/constants.py | from enum import Enum
class ToolEnv:
"""Environment variables used by tools.
The 'ToolEnv' class represents a set of environment variables that are
commonly used by tools.
Attributes:
PLATFORM_API_KEY (str): Platform service API key.
PLATFORM_HOST (str): Platform service host.
PLATFORM_PORT (str): Platform service port.
DATA_DIR (str): The environment variable for the tool data directory.
        EXECUTION_BY_TOOL (str): Implicitly set to 1 by the SDK if it's executed
by a tool.
"""
PLATFORM_API_KEY = "PLATFORM_SERVICE_API_KEY"
PLATFORM_HOST = "PLATFORM_SERVICE_HOST"
PLATFORM_PORT = "PLATFORM_SERVICE_PORT"
DATA_DIR = "TOOL_DATA_DIR"
EXECUTION_BY_TOOL = "EXECUTION_BY_TOOL"
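    # These keys are read through the tool helpers elsewhere in this SDK, e.g.
    # (sketch): platform_host = tool.get_env_or_die(ToolEnv.PLATFORM_HOST)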
class ConnectorKeys:
ID = "id"
PROJECT_ID = "project_id"
CONNECTOR_ID = "connector_id"
TOOL_INSTANCE_ID = "tool_instance_id"
CONNECTOR_METADATA = "connector_metadata"
CONNECTOR_TYPE = "connector_type"
class AdapterKeys:
ADAPTER_INSTANCE_ID = "adapter_instance_id"
class PromptStudioKeys:
PROMPT_REGISTRY_ID = "prompt_registry_id"
class ConnectorType:
INPUT = "INPUT"
OUTPUT = "OUTPUT"
class LogType:
LOG = "LOG"
UPDATE = "UPDATE"
class LogStage:
TOOL_RUN = "TOOL_RUN"
class LogState:
"""State of logs INPUT_UPDATE tag for update the FE input component
OUTPUT_UPDATE tag for update the FE output component."""
INPUT_UPDATE = "INPUT_UPDATE"
OUTPUT_UPDATE = "OUTPUT_UPDATE"
class Connector:
FILE_SYSTEM = "FILE_SYSTEM"
DATABASE = "DATABASE"
class Command:
SPEC = "SPEC"
PROPERTIES = "PROPERTIES"
ICON = "ICON"
RUN = "RUN"
VARIABLES = "VARIABLES"
@classmethod
def static_commands(cls) -> set[str]:
return {cls.SPEC, cls.PROPERTIES, cls.ICON, cls.VARIABLES}
class UsageType:
LLM_COMPLETE = "LLM_COMPLETE"
RAG = "RAG"
INDEXER = "INDEXER"
class LogLevel(Enum):
DEBUG = "DEBUG"
INFO = "INFO"
WARN = "WARN"
ERROR = "ERROR"
FATAL = "FATAL"
class PropKey:
"""Keys for the properties.json of tools."""
INPUT = "input"
OUTPUT = "output"
RESULT = "result"
TYPE = "type"
RESTRICTIONS = "restrictions"
MAX_FILE_SIZE = "maxFileSize"
FILE_SIZE_REGEX = r"^(\d+)\s*([KkMmGgTt]B?)$"
ALLOWED_FILE_TYPES = "allowedFileTypes"
FUNCTION_NAME = "functionName"
class OutputType:
JSON = "JSON"
TXT = "TXT"
class ToolExecKey:
OUTPUT_DIR = "COPY_TO_FOLDER"
METADATA_FILE = "METADATA.json"
INFILE = "INFILE"
SOURCE = "SOURCE"
class MetadataKey:
SOURCE_NAME = "source_name"
SOURCE_HASH = "source_hash"
WORKFLOW_ID = "workflow_id"
EXECUTION_ID = "execution_id"
ORG_ID = "organization_id"
TOOL_META = "tool_metadata"
TOOL_NAME = "tool_name"
TOTAL_ELA_TIME = "total_elapsed_time"
ELAPSED_TIME = "elapsed_time"
OUTPUT = "output"
OUTPUT_TYPE = "output_type"
class ToolSettingsKey:
"""A class representing the keys used in the tool settings.
Attributes:
LLM_ADAPTER_ID (str): The key for the LLM adapter ID.
EMBEDDING_ADAPTER_ID (str): The key for the embedding adapter ID.
VECTOR_DB_ADAPTER_ID (str): The key for the vector DB adapter ID.
X2TEXT_ADAPTER_ID (str): The key for the X2Text adapter ID.
"""
LLM_ADAPTER_ID = "llmAdapterId"
EMBEDDING_ADAPTER_ID = "embeddingAdapterId"
VECTOR_DB_ADAPTER_ID = "vectorDbAdapterId"
X2TEXT_ADAPTER_ID = "x2TextAdapterId"
ADAPTER_INSTANCE_ID = "adapter_instance_id"
EMBEDDING_DIMENSION = "embedding_dimension"
RUN_ID = "run_id"
WORKFLOW_ID = "workflow_id"
EXECUTION_ID = "execution_id"
class PublicAdapterKeys:
PUBLIC_LLM_CONFIG = "PUBLIC_LLM_CONFIG"
PUBLIC_EMBEDDING_CONFIG = "PUBLIC_EMBEDDING_CONFIG"
PUBLIC_VECTOR_DB_CONFIG = "PUBLIC_VECTOR_DB_CONFIG"
PUBLIC_X2TEXT_CONFIG = "PUBLIC_X2TEXT_CONFIG"
class MimeType:
PDF = "application/pdf"
TEXT = "text/plain"
| 4,038 | Python | .py | 120 | 28.441667 | 79 | 0.677769 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,644 | embedding.py | Zipstack_unstract-sdk/src/unstract/sdk/embedding.py | from typing import Any, Optional
from deprecated import deprecated
from llama_index.core.base.embeddings.base import Embedding
from llama_index.core.callbacks import CallbackManager as LlamaIndexCallbackManager
from llama_index.core.embeddings import BaseEmbedding
from unstract.sdk.adapter import ToolAdapter
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.embedding import adapters
from unstract.sdk.constants import LogLevel, ToolEnv
from unstract.sdk.exceptions import EmbeddingError, SdkError
from unstract.sdk.helper import SdkHelper
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.utils.callback_manager import CallbackManager
class Embedding:
_TEST_SNIPPET = "Hello, I am Unstract"
MAX_TOKENS = 1024 * 16
embedding_adapters = adapters
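    # Usage sketch (illustrative only; `tool` and the adapter instance id below are
    # assumptions, not values defined in this module):
    #   embedding = Embedding(tool=tool, adapter_instance_id="<embedding-adapter-uuid>")
    #   query_vector = embedding.get_query_embedding("What is the invoice total?")
    #   dimension = embedding._length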
def __init__(
self,
tool: BaseTool,
adapter_instance_id: Optional[str] = None,
usage_kwargs: dict[Any, Any] = {},
):
self._tool = tool
self._adapter_instance_id = adapter_instance_id
self._embedding_instance: BaseEmbedding = None
self._length: int = None
self._usage_kwargs = usage_kwargs
self._initialise()
def _initialise(self):
if self._adapter_instance_id:
self._embedding_instance = self._get_embedding()
self._length: int = self._get_embedding_length()
self._usage_kwargs["adapter_instance_id"] = self._adapter_instance_id
if not SdkHelper.is_public_adapter(adapter_id=self._adapter_instance_id):
platform_api_key = self._tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY)
CallbackManager.set_callback(
platform_api_key=platform_api_key,
model=self._embedding_instance,
kwargs=self._usage_kwargs,
)
def _get_embedding(self) -> BaseEmbedding:
"""Gets an instance of LlamaIndex's embedding object.
Args:
adapter_instance_id (str): UUID of the embedding adapter
Returns:
BaseEmbedding: Embedding instance
"""
try:
if not self._adapter_instance_id:
raise EmbeddingError(
"Adapter instance ID not set. " "Initialisation failed"
)
embedding_config_data = ToolAdapter.get_adapter_config(
self._tool, self._adapter_instance_id
)
embedding_adapter_id = embedding_config_data.get(Common.ADAPTER_ID)
if embedding_adapter_id not in self.embedding_adapters:
raise SdkError(
f"Embedding adapter not supported : " f"{embedding_adapter_id}"
)
embedding_adapter = self.embedding_adapters[embedding_adapter_id][
Common.METADATA
][Common.ADAPTER]
embedding_metadata = embedding_config_data.get(Common.ADAPTER_METADATA)
embedding_adapter_class = embedding_adapter(embedding_metadata)
self._usage_kwargs["provider"] = embedding_adapter_class.get_provider()
return embedding_adapter_class.get_embedding_instance()
except Exception as e:
self._tool.stream_log(
log=f"Error getting embedding: {e}", level=LogLevel.ERROR
)
raise EmbeddingError(f"Error getting embedding instance: {e}") from e
def get_query_embedding(self, query: str) -> Embedding:
return self._embedding_instance.get_query_embedding(query)
def _get_embedding_length(self) -> int:
embedding_list = self._embedding_instance._get_text_embedding(
self._TEST_SNIPPET
)
embedding_dimension = len(embedding_list)
return embedding_dimension
def get_class_name(self) -> str:
"""Gets the class name of the Llama Index Embedding.
Args:
NA
Returns:
Class name
"""
return self._embedding_instance.class_name()
def get_callback_manager(self) -> LlamaIndexCallbackManager:
"""Gets the llama-index callback manager set on the model.
Args:
NA
Returns:
llama-index callback manager
"""
return self._embedding_instance.callback_manager
@deprecated("Use Embedding instead of ToolEmbedding")
def get_embedding_length(self, embedding: BaseEmbedding) -> int:
return self._get_embedding_length()
@deprecated("Use Embedding instead of ToolEmbedding")
def get_embedding(self, adapter_instance_id: str) -> BaseEmbedding:
if not self._embedding_instance:
self._adapter_instance_id = adapter_instance_id
self._initialise()
return self._embedding_instance
# Legacy
ToolEmbedding = Embedding
| 4,856 | Python | .py | 108 | 35.037037 | 86 | 0.65072 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,645 | x2txt.py | Zipstack_unstract-sdk/src/unstract/sdk/x2txt.py | from abc import ABCMeta
from typing import Any, Optional
import pdfplumber
from deprecated import deprecated
from unstract.sdk.adapter import ToolAdapter
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.x2text import adapters
from unstract.sdk.adapters.x2text.constants import X2TextConstants
from unstract.sdk.adapters.x2text.dto import TextExtractionResult
from unstract.sdk.adapters.x2text.llm_whisperer.src import LLMWhisperer
from unstract.sdk.adapters.x2text.llm_whisperer.src.constants import WhispererConfig
from unstract.sdk.adapters.x2text.x2text_adapter import X2TextAdapter
from unstract.sdk.audit import Audit
from unstract.sdk.constants import LogLevel, MimeType, ToolEnv
from unstract.sdk.exceptions import X2TextError
from unstract.sdk.helper import SdkHelper
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.utils import ToolUtils
class X2Text(metaclass=ABCMeta):
def __init__(
self,
tool: BaseTool,
adapter_instance_id: Optional[str] = None,
usage_kwargs: dict[Any, Any] = {},
):
self._tool = tool
self._x2text_adapters = adapters
self._adapter_instance_id = adapter_instance_id
self._x2text_instance: X2TextAdapter = None
self._usage_kwargs = usage_kwargs
self._initialise()
def _initialise(self):
if self._adapter_instance_id:
self._x2text_instance = self._get_x2text()
def _get_x2text(self) -> X2TextAdapter:
try:
if not self._adapter_instance_id:
raise X2TextError(
"Adapter instance ID not set. " "Initialisation failed"
)
x2text_config = ToolAdapter.get_adapter_config(
self._tool, self._adapter_instance_id
)
x2text_adapter_id = x2text_config.get(Common.ADAPTER_ID)
if x2text_adapter_id in self._x2text_adapters:
x2text_adapter = self._x2text_adapters[x2text_adapter_id][
Common.METADATA
][Common.ADAPTER]
x2text_metadata = x2text_config.get(Common.ADAPTER_METADATA)
# Add x2text service host, port and platform_service_key
x2text_metadata[
X2TextConstants.X2TEXT_HOST
] = self._tool.get_env_or_die(X2TextConstants.X2TEXT_HOST)
x2text_metadata[
X2TextConstants.X2TEXT_PORT
] = self._tool.get_env_or_die(X2TextConstants.X2TEXT_PORT)
if not SdkHelper.is_public_adapter(
adapter_id=self._adapter_instance_id
):
x2text_metadata[
X2TextConstants.PLATFORM_SERVICE_API_KEY
] = self._tool.get_env_or_die(
X2TextConstants.PLATFORM_SERVICE_API_KEY
)
self._x2text_instance = x2text_adapter(x2text_metadata)
return self._x2text_instance
except Exception as e:
self._tool.stream_log(
log=f"Unable to get x2text adapter {self._adapter_instance_id}: {e}",
level=LogLevel.ERROR,
)
raise X2TextError(f"Error getting text extractor: {e}") from e
def process(
self,
input_file_path: str,
output_file_path: Optional[str] = None,
**kwargs: dict[Any, Any],
) -> TextExtractionResult:
mime_type = ToolUtils.get_file_mime_type(input_file_path)
text_extraction_result: TextExtractionResult = None
if mime_type == MimeType.TEXT:
with open(input_file_path, encoding="utf-8") as file:
extracted_text = file.read()
text_extraction_result = TextExtractionResult(
extracted_text=extracted_text, extraction_metadata=None
)
        else:
            text_extraction_result = self._x2text_instance.process(
                input_file_path, output_file_path, **kwargs
            )
        # This will be executed each and every time text extraction takes place
self.push_usage_details(input_file_path, mime_type)
return text_extraction_result
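    # Usage sketch (illustrative only; `tool`, the adapter instance id and the file
    # paths below are assumptions, not values defined in this module):
    #   x2text = X2Text(tool=tool, adapter_instance_id="<x2text-adapter-uuid>")
    #   result = x2text.process("/data/in/document.pdf", "/data/out/document.txt")
    #   extracted_text = result.extracted_text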
@deprecated("Instantiate X2Text and call process() instead")
def get_x2text(self, adapter_instance_id: str) -> X2TextAdapter:
if not self._x2text_instance:
self._adapter_instance_id = adapter_instance_id
self._initialise()
return self._x2text_instance
def push_usage_details(self, input_file_path: str, mime_type: str) -> None:
file_size = ToolUtils.get_file_size(input_file_path)
if mime_type == MimeType.PDF:
with pdfplumber.open(input_file_path) as pdf:
# calculate the number of pages
page_count = len(pdf.pages)
if isinstance(self._x2text_instance, LLMWhisperer):
page_count = ToolUtils.calculate_page_count(
self._x2text_instance.config.get(WhispererConfig.PAGES_TO_EXTRACT),
page_count,
)
Audit().push_page_usage_data(
platform_api_key=self._tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY),
file_size=file_size,
file_type=mime_type,
page_count=page_count,
kwargs=self._usage_kwargs,
)
else:
# We are allowing certain image types,and raw texts. We will consider them
# as single page documents as there in no concept of page numbers.
Audit().push_page_usage_data(
platform_api_key=self._tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY),
file_size=file_size,
file_type=mime_type,
page_count=1,
kwargs=self._usage_kwargs,
)
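# Illustrative usage sketch, not part of the SDK: assumes a concrete BaseTool
# subclass instance `tool` and an x2text adapter configured on the platform;
# the UUID and file paths below are placeholders.
#
#   x2text = X2Text(tool=tool, adapter_instance_id="<x2text-adapter-uuid>")
#   result = x2text.process(
#       input_file_path="/data/input/contract.pdf",
#       output_file_path="/data/output/contract.txt",
#   )
#   tool.stream_log(f"Extracted {len(result.extracted_text)} characters")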
| 6,006 | Python | .py | 128 | 35.132813 | 87 | 0.621737 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,646 | cache.py | Zipstack_unstract-sdk/src/unstract/sdk/cache.py | from typing import Any, Optional
import requests
from unstract.sdk.constants import LogLevel
from unstract.sdk.platform import PlatformBase
from unstract.sdk.tool.base import BaseTool
class ToolCache(PlatformBase):
"""Class to handle caching for Unstract Tools.
Notes:
- PLATFORM_SERVICE_API_KEY environment variable is required.
"""
def __init__(
self, tool: BaseTool, platform_host: str, platform_port: int
) -> None:
"""
Args:
tool (AbstractTool): Instance of AbstractTool
platform_host (str): The host of the platform.
platform_port (int): The port of the platform.
Notes:
- PLATFORM_SERVICE_API_KEY environment variable is required.
- The platform_host and platform_port are the host and port of
the platform service.
"""
super().__init__(
tool=tool, platform_host=platform_host, platform_port=platform_port
)
def set(self, key: str, value: str) -> bool:
"""Sets the value for a key in the cache.
Args:
key (str): The key.
value (str): The value.
Returns:
bool: Whether the operation was successful.
"""
url = f"{self.base_url}/cache"
json = {"key": key, "value": value}
headers = {"Authorization": f"Bearer {self.bearer_token}"}
response = requests.post(url, json=json, headers=headers)
if response.status_code == 200:
self.tool.stream_log(f"Successfully cached data for key: {key}")
return True
else:
self.tool.stream_log(
f"Error while caching data for key: {key} / {response.reason}",
level=LogLevel.ERROR,
)
return False
def get(self, key: str) -> Optional[Any]:
"""Gets the value for a key in the cache.
Args:
key (str): The key.
Returns:
str: The value.
"""
url = f"{self.base_url}/cache?key={key}"
headers = {"Authorization": f"Bearer {self.bearer_token}"}
response = requests.get(url, headers=headers)
if response.status_code == 200:
self.tool.stream_log(
f"Successfully retrieved cached data for key: {key}"
)
return response.text
elif response.status_code == 404:
self.tool.stream_log(
f"Data not found for key: {key}", level=LogLevel.WARN
)
return None
else:
self.tool.stream_log(
f"Error while retrieving cached data for key: "
f"{key} / {response.reason}",
level=LogLevel.ERROR,
)
return None
def delete(self, key: str) -> bool:
"""Deletes the value for a key in the cache.
Args:
key (str): The key.
Returns:
bool: Whether the operation was successful.
"""
url = f"{self.base_url}/cache?key={key}"
headers = {"Authorization": f"Bearer {self.bearer_token}"}
response = requests.delete(url, headers=headers)
if response.status_code == 200:
self.tool.stream_log(
f"Successfully deleted cached data for key: {key}"
)
return True
else:
self.tool.stream_log(
"Error while deleting cached data "
f"for key: {key} / {response.reason}",
level=LogLevel.ERROR,
)
return False
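# Illustrative usage sketch, not part of the SDK: assumes a concrete BaseTool
# subclass instance `tool`; the host, port and cache key below are placeholders
# that would normally come from the tool's environment.
#
#   cache = ToolCache(tool=tool, platform_host="http://unstract-platform-service", platform_port=3001)
#   if cache.set("wf-123:state", "indexed"):
#       state = cache.get("wf-123:state")  # -> "indexed"
#       cache.delete("wf-123:state")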
| 3,621 | Python | .py | 96 | 26.958333 | 79 | 0.561073 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,647 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/__init__.py | __version__ = "0.52.0"
def get_sdk_version():
"""Returns the SDK version."""
return __version__
| 106 | Python | .py | 4 | 23 | 34 | 0.6 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,648 | platform.py | Zipstack_unstract-sdk/src/unstract/sdk/platform.py | from typing import Any, Optional
import requests
from unstract.sdk.constants import LogLevel, ToolEnv
from unstract.sdk.helper import SdkHelper
from unstract.sdk.tool.base import BaseTool
class PlatformBase:
"""Base class to handle interactions with Unstract's platform service.
Notes:
- PLATFORM_SERVICE_API_KEY environment variable is required.
"""
def __init__(
self,
tool: BaseTool,
platform_host: str,
platform_port: str,
) -> None:
"""
Args:
tool (AbstractTool): Instance of AbstractTool
platform_host (str): Host of platform service
platform_port (str): Port of platform service
Notes:
- PLATFORM_SERVICE_API_KEY environment variable is required.
"""
self.tool = tool
self.base_url = SdkHelper.get_platform_base_url(platform_host, platform_port)
self.bearer_token = tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY)
class PlatformHelper(PlatformBase):
"""Implementation of `UnstractPlatformBase` to interact with platform
service.
Notes:
- PLATFORM_SERVICE_API_KEY environment variable is required.
"""
def __init__(self, tool: BaseTool, platform_host: str, platform_port: str):
"""Constructor of the implementation of `UnstractPlatformBase`
Args:
tool (AbstractTool): Instance of AbstractTool
platform_host (str): Host of platform service
platform_port (str): Port of platform service
"""
super().__init__(
tool=tool, platform_host=platform_host, platform_port=platform_port
)
def get_platform_details(self) -> Optional[dict[str, Any]]:
"""Obtains platform details associated with the platform key.
Currently helps fetch organization ID related to the key.
Returns:
Optional[dict[str, Any]]: Dictionary containing the platform details
"""
url = f"{self.base_url}/platform_details"
headers = {"Authorization": f"Bearer {self.bearer_token}"}
response = requests.get(url, headers=headers)
if response.status_code != 200:
self.tool.stream_log(
(
"Error while retrieving platform details: "
f"[{response.status_code}] {response.reason}"
),
level=LogLevel.ERROR,
)
return None
else:
platform_details: dict[str, Any] = response.json().get("details")
return platform_details
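# Illustrative usage sketch, not part of the SDK: assumes a concrete BaseTool
# subclass instance `tool` with PLATFORM_SERVICE_API_KEY set; the host, port
# and the "organization_id" key below are placeholders/assumptions.
#
#   helper = PlatformHelper(tool=tool, platform_host="http://unstract-platform-service", platform_port="3001")
#   details = helper.get_platform_details()
#   if details:
#       tool.stream_log(f"Org: {details.get('organization_id')}")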
| 2,602 | Python | .py | 64 | 31.359375 | 85 | 0.631788 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,649 | index.py | Zipstack_unstract-sdk/src/unstract/sdk/index.py | import json
import logging
from typing import Any, Callable, Optional
from deprecated import deprecated
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.vector_stores import (
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
from unstract.sdk.adapter import ToolAdapter
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.no_op.src.no_op_custom_vectordb import (
NoOpCustomVectorDB,
)
from unstract.sdk.adapters.x2text.constants import X2TextConstants
from unstract.sdk.adapters.x2text.dto import TextExtractionResult
from unstract.sdk.adapters.x2text.llm_whisperer.src import LLMWhisperer
from unstract.sdk.constants import LogLevel
from unstract.sdk.embedding import Embedding
from unstract.sdk.exceptions import IndexingError, SdkError
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.utils import ToolUtils
from unstract.sdk.utils.common_utils import log_elapsed
from unstract.sdk.vector_db import VectorDB
from unstract.sdk.x2txt import X2Text
logger = logging.getLogger(__name__)
class Constants:
TOP_K = 5
class Index:
def __init__(self, tool: BaseTool):
# TODO: Inherit from StreamMixin and avoid using BaseTool
self.tool = tool
def query_index(
self,
embedding_instance_id: str,
vector_db_instance_id: str,
doc_id: str,
usage_kwargs: dict[Any, Any] = {},
):
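        """Queries the vector DB for nodes stored against a doc_id.
        Returns the concatenated text of the matching nodes, or None if no
        nodes are found for the given doc_id.
        """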
try:
embedding = Embedding(
tool=self.tool,
adapter_instance_id=embedding_instance_id,
usage_kwargs=usage_kwargs,
)
except SdkError as e:
            self.tool.stream_log(
                f"Error loading {embedding_instance_id}", level=LogLevel.ERROR
            )
raise SdkError(f"Error loading {embedding_instance_id}: {e}")
try:
vector_db = VectorDB(
tool=self.tool,
adapter_instance_id=vector_db_instance_id,
embedding=embedding,
)
except SdkError as e:
self.tool.stream_log(
f"Error loading {vector_db_instance_id}", level=LogLevel.ERROR
)
raise SdkError(f"Error loading {vector_db_instance_id}: {e}")
try:
try:
self.tool.stream_log(f">>> Querying {vector_db_instance_id}...")
self.tool.stream_log(f">>> {doc_id}")
doc_id_eq_filter = MetadataFilter.from_dict(
{
"key": "doc_id",
"operator": FilterOperator.EQ,
"value": doc_id,
}
)
filters = MetadataFilters(filters=[doc_id_eq_filter])
q = VectorStoreQuery(
query_embedding=embedding.get_query_embedding(" "),
doc_ids=[doc_id],
filters=filters,
similarity_top_k=Constants.TOP_K,
)
except Exception as e:
self.tool.stream_log(
f"Error building query {vector_db}: {e}", level=LogLevel.ERROR
)
raise SdkError(f"Error building query {vector_db}: {e}")
n: VectorStoreQueryResult = vector_db.query(query=q)
if len(n.nodes) > 0:
self.tool.stream_log(f"Found {len(n.nodes)} nodes for {doc_id}")
all_text = ""
for node in n.nodes:
all_text += node.get_content()
return all_text
else:
self.tool.stream_log(f"No nodes found for {doc_id}")
return None
finally:
vector_db.close()
@log_elapsed(operation="EXTRACTION")
def extract_text(
self,
x2text_instance_id: str,
file_path: str,
output_file_path: Optional[str] = None,
enable_highlight: bool = False,
usage_kwargs: dict[Any, Any] = {},
process_text: Optional[Callable[[str], str]] = None,
) -> str:
"""Extracts text from a document.
Uses the configured service to perform the extraction
- LLM Whisperer
- Unstructured IO Community / Enterprise
- Llama Parse
Args:
x2text_instance_id (str): UUID of the text extractor
file_path (str): Path to the file
output_file_path (Optional[str], optional): File path to write
the extracted contents into. Defaults to None.
enable_highlight (bool, optional): Flag to provide highlighting metadata.
Defaults to False.
usage_kwargs (dict[Any, Any], optional): Dict to capture usage.
Defaults to {}.
process_text (Optional[Callable[[str], str]], optional): Optional function
to post-process the text. Defaults to None.
Raises:
IndexingError: Errors during text extraction
"""
self.tool.stream_log("Extracting text from input file")
extracted_text = ""
try:
x2text = X2Text(
tool=self.tool,
adapter_instance_id=x2text_instance_id,
usage_kwargs=usage_kwargs,
)
if enable_highlight and isinstance(x2text._x2text_instance, LLMWhisperer):
process_response: TextExtractionResult = x2text.process(
input_file_path=file_path,
output_file_path=output_file_path,
enable_highlight=enable_highlight,
)
whisper_hash_value = process_response.extraction_metadata.whisper_hash
metadata = {X2TextConstants.WHISPER_HASH: whisper_hash_value}
self.tool.update_exec_metadata(metadata)
else:
process_response: TextExtractionResult = x2text.process(
input_file_path=file_path,
output_file_path=output_file_path,
)
extracted_text = process_response.extracted_text
except AdapterError as e:
# Wrapping AdapterErrors with SdkError
raise IndexingError(str(e)) from e
if process_text:
try:
result = process_text(extracted_text)
if isinstance(result, str):
extracted_text = result
else:
logger.warning("'process_text' is expected to return an 'str'")
except Exception as e:
logger.error(f"Error occured inside function 'process_text': {e}")
return extracted_text
@log_elapsed(operation="CHECK_AND_INDEX(overall)")
def index(
self,
tool_id: str,
embedding_instance_id: str,
vector_db_instance_id: str,
x2text_instance_id: str,
file_path: str,
chunk_size: int,
chunk_overlap: int,
reindex: bool = False,
file_hash: Optional[str] = None,
output_file_path: Optional[str] = None,
enable_highlight: bool = False,
usage_kwargs: dict[Any, Any] = {},
process_text: Optional[Callable[[str], str]] = None,
) -> str:
"""Indexes an individual file using the passed arguments.
Args:
tool_id (str): UUID of the tool (workflow_id in case it's called
from workflow)
embedding_instance_id (str): UUID of the embedding service configured
vector_db_instance_id (str): UUID of the vector DB configured
x2text_instance_id (str): UUID of the x2text adapter configured.
This is to extract text from documents.
file_path (str): Path to the file that needs to be indexed.
chunk_size (int): Chunk size to be used for indexing
chunk_overlap (int): Overlap in chunks to be used for indexing
reindex (bool, optional): Flag to denote if document should be
re-indexed if its already indexed. Defaults to False.
file_hash (Optional[str], optional): SHA256 hash of the file.
Defaults to None. If None, the hash is generated.
output_file_path (Optional[str], optional): File path to write
the extracted contents into. Defaults to None.
Returns:
str: A unique ID for the file and indexing arguments combination
"""
doc_id = self.generate_index_key(
vector_db=vector_db_instance_id,
embedding=embedding_instance_id,
x2text=x2text_instance_id,
chunk_size=str(chunk_size),
chunk_overlap=str(chunk_overlap),
file_path=file_path,
file_hash=file_hash,
)
self.tool.stream_log(f"Checking if doc_id {doc_id} exists")
try:
embedding = Embedding(
tool=self.tool,
adapter_instance_id=embedding_instance_id,
usage_kwargs=usage_kwargs,
)
except SdkError as e:
self.tool.stream_log(
f"Error loading {embedding_instance_id}", level=LogLevel.ERROR
)
raise SdkError(f"Error loading {embedding_instance_id}: {e}")
try:
vector_db = VectorDB(
tool=self.tool,
adapter_instance_id=vector_db_instance_id,
embedding=embedding,
)
except SdkError as e:
self.tool.stream_log(
f"Error loading {vector_db_instance_id}", level=LogLevel.ERROR
)
raise SdkError(f"Error loading {vector_db_instance_id}: {e}")
try:
# Checking if document is already indexed against doc_id
doc_id_eq_filter = MetadataFilter.from_dict(
{"key": "doc_id", "operator": FilterOperator.EQ, "value": doc_id}
)
filters = MetadataFilters(filters=[doc_id_eq_filter])
q = VectorStoreQuery(
query_embedding=embedding.get_query_embedding(" "),
doc_ids=[doc_id],
filters=filters,
)
doc_id_found = False
try:
n: VectorStoreQueryResult = vector_db.query(query=q)
if len(n.nodes) > 0:
doc_id_found = True
self.tool.stream_log(f"Found {len(n.nodes)} nodes for {doc_id}")
else:
self.tool.stream_log(f"No nodes found for {doc_id}")
except Exception as e:
self.tool.stream_log(
f"Error querying {vector_db_instance_id}: {e}, proceeding to index",
level=LogLevel.ERROR,
)
if doc_id_found and not reindex:
self.tool.stream_log(f"File was indexed already under {doc_id}")
return doc_id
extracted_text = self.extract_text(
x2text_instance_id=x2text_instance_id,
file_path=file_path,
output_file_path=output_file_path,
enable_highlight=enable_highlight,
usage_kwargs=usage_kwargs,
process_text=process_text,
)
if not extracted_text:
raise IndexingError("No text available to index")
# For No-op adapters, addition of nodes to vectorDB should not happen
# and this has to be handled in the adapter level. But there are a
# few challenges considering callback manager and upstream Llama index
# method invocations. Hence, making this check here and returning
# the doc id to maintain the legacy flow of adapters.
if isinstance(
vector_db.get_vector_db(
adapter_instance_id=vector_db_instance_id, embedding_dimension=1
),
(NoOpCustomVectorDB),
):
return doc_id
self.index_to_vector_db(
vector_db=vector_db,
embedding=embedding,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
doc_id=doc_id,
text_to_idx=extracted_text,
doc_id_found=doc_id_found,
)
return doc_id
finally:
vector_db.close()
@log_elapsed(operation="INDEXING")
def index_to_vector_db(
self,
vector_db: VectorDB,
embedding: Embedding,
chunk_size: int,
chunk_overlap: int,
text_to_idx: str,
doc_id: str,
doc_id_found: bool,
):
self.tool.stream_log("Indexing file...")
full_text = [
{
"section": "full",
"text_contents": text_to_idx,
}
]
# Check if chunking is required
documents = []
for item in full_text:
text = item["text_contents"]
document = Document(
text=text,
doc_id=doc_id,
metadata={"section": item["section"]},
)
document.id_ = doc_id
documents.append(document)
self.tool.stream_log(f"Number of documents: {len(documents)}")
if doc_id_found:
# Delete the nodes for the doc_id
try:
vector_db.delete(ref_doc_id=doc_id)
self.tool.stream_log(f"Deleted nodes for {doc_id}")
except Exception as e:
self.tool.stream_log(
f"Error deleting nodes for {doc_id}: {e}",
level=LogLevel.ERROR,
)
raise SdkError(f"Error deleting nodes for {doc_id}: {e}") from e
try:
if chunk_size == 0:
parser = SentenceSplitter.from_defaults(
chunk_size=len(documents[0].text) + 10,
chunk_overlap=0,
callback_manager=embedding.get_callback_manager(),
)
nodes = parser.get_nodes_from_documents(documents, show_progress=True)
node = nodes[0]
node.embedding = embedding.get_query_embedding(" ")
vector_db.add(doc_id, nodes=[node])
self.tool.stream_log("Added node to vector db")
else:
self.tool.stream_log("Adding nodes to vector db...")
# TODO: Phase 2:
# Post insertion to VDB, use query using doc_id and
# store all the VDB ids to a table against the doc_id
# During deletion for cases where metadata filtering
# does not work, these ids can be used for direct deletion
# This new table will also act like an audit trail for
# all nodes that were added to the VDB by Unstract
# Once this is in place, the overridden implementation
# of prefixing ids with doc_id before adding to VDB
# can be removed
vector_db.index_document(
documents,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
show_progress=True,
)
except Exception as e:
self.tool.stream_log(
f"Error adding nodes to vector db: {e}",
level=LogLevel.ERROR,
)
raise IndexingError(str(e)) from e
self.tool.stream_log("File has been indexed successfully")
return
def generate_index_key(
self,
vector_db: str,
embedding: str,
x2text: str,
chunk_size: str,
chunk_overlap: str,
file_path: Optional[str] = None,
file_hash: Optional[str] = None,
) -> str:
"""Generates a unique ID useful for identifying files during indexing.
Args:
vector_db (str): UUID of the vector DB adapter
embedding (str): UUID of the embedding adapter
x2text (str): UUID of the X2Text adapter
chunk_size (str): Chunk size for indexing
chunk_overlap (str): Chunk overlap for indexing
file_path (Optional[str]): Path to the file that needs to be indexed.
Defaults to None. One of file_path or file_hash needs to be specified.
file_hash (Optional[str], optional): SHA256 hash of the file.
Defaults to None. If None, the hash is generated with file_path.
Returns:
str: Key representing unique ID for a file
"""
if not file_path and not file_hash:
raise ValueError("One of `file_path` or `file_hash` need to be provided")
if not file_hash:
file_hash = ToolUtils.get_hash_from_file(file_path=file_path)
# Whole adapter config is used currently even though it contains some keys
# which might not be relevant to indexing. This is easier for now than
# marking certain keys of the adapter config as necessary.
index_key = {
"file_hash": file_hash,
"vector_db_config": ToolAdapter.get_adapter_config(self.tool, vector_db),
"embedding_config": ToolAdapter.get_adapter_config(self.tool, embedding),
"x2text_config": ToolAdapter.get_adapter_config(self.tool, x2text),
# Typed and hashed as strings since the final hash is persisted
# and this is required to be backward compatible
"chunk_size": str(chunk_size),
"chunk_overlap": str(chunk_overlap),
}
# JSON keys are sorted to ensure that the same key gets hashed even in
# case where the fields are reordered.
hashed_index_key = ToolUtils.hash_str(json.dumps(index_key, sort_keys=True))
return hashed_index_key
@deprecated(version="0.45.0", reason="Use generate_index_key() instead")
def generate_file_id(
self,
tool_id: str,
vector_db: str,
embedding: str,
x2text: str,
chunk_size: str,
chunk_overlap: str,
file_path: Optional[str] = None,
file_hash: Optional[str] = None,
) -> str:
return self.generate_index_key(
vector_db,
embedding,
x2text,
chunk_size,
chunk_overlap,
file_path,
file_hash,
)
def index_file(
self,
tool_id: str,
embedding_type: str,
vector_db: str,
x2text_adapter: str,
file_path: str,
chunk_size: int,
chunk_overlap: int,
reindex: bool = False,
file_hash: Optional[str] = None,
output_file_path: Optional[str] = None,
) -> str:
return self.index(
tool_id=tool_id,
embedding_instance_id=embedding_type,
vector_db_instance_id=vector_db,
x2text_instance_id=x2text_adapter,
file_path=file_path,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
reindex=reindex,
file_hash=file_hash,
output_file_path=output_file_path,
)
@deprecated("Deprecated class and method. Use Index and query_index() instead")
def get_text_from_index(
self, embedding_type: str, vector_db: str, doc_id: str
) -> Optional[str]:
return self.query_index(
embedding_instance_id=embedding_type,
vector_db_instance_id=vector_db,
doc_id=doc_id,
)
# Legacy
ToolIndex = Index
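# Illustrative usage sketch, not part of the SDK: assumes a concrete BaseTool
# subclass instance `tool` and adapter instances configured on the platform;
# all UUIDs, paths and chunking values below are placeholders.
#
#   index = Index(tool=tool)
#   doc_id = index.index(
#       tool_id=tool.workflow_id,
#       embedding_instance_id="<embedding-uuid>",
#       vector_db_instance_id="<vector-db-uuid>",
#       x2text_instance_id="<x2text-uuid>",
#       file_path="/data/input/contract.pdf",
#       chunk_size=1024,
#       chunk_overlap=128,
#   )
#   context = index.query_index(
#       embedding_instance_id="<embedding-uuid>",
#       vector_db_instance_id="<vector-db-uuid>",
#       doc_id=doc_id,
#   )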
| 19,792 | Python | .py | 474 | 29.392405 | 88 | 0.56813 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,650 | helper.py | Zipstack_unstract-sdk/src/unstract/sdk/helper.py | import logging
from unstract.sdk.constants import PublicAdapterKeys
logger = logging.getLogger(__name__)
class SdkHelper:
def __init__(self) -> None:
pass
@staticmethod
def get_platform_base_url(platform_host: str, platform_port: str) -> str:
"""Make base url from host and port.
Args:
platform_host (str): Host of platform service
platform_port (str): Port of platform service
Returns:
str: URL to the platform service
"""
if platform_host[-1] == "/":
return f"{platform_host[:-1]}:{platform_port}"
return f"{platform_host}:{platform_port}"
@staticmethod
def is_public_adapter(adapter_id: str) -> bool:
"""Check if the given adapter_id is one of the public adapter keys.
This method iterates over the attributes of the PublicAdapterKeys class
and checks if the provided adapter_id matches any of the attribute values.
Args:
adapter_id (str): The ID of the adapter to check.
Returns:
bool: True if the adapter_id matches any public adapter key,
False otherwise.
"""
try:
for attr in dir(PublicAdapterKeys):
if getattr(PublicAdapterKeys, attr) == adapter_id:
return True
return False
except Exception as e:
logger.warning(
f"Unable to determine if adapter_id: {adapter_id}"
f"is public or not: {str(e)}"
)
return False
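# Illustrative example, not part of the SDK; the host and port values are
# placeholders.
#
#   SdkHelper.get_platform_base_url("http://unstract-platform-service/", "3001")
#   # -> "http://unstract-platform-service:3001" (a trailing slash on the host is dropped)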
| 1,580 | Python | .py | 40 | 29.45 | 82 | 0.599346 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,651 | exceptions.py | Zipstack_unstract-sdk/src/unstract/sdk/exceptions.py | class SdkError(Exception):
DEFAULT_MESSAGE = "Something went wrong"
def __init__(self, message: str = DEFAULT_MESSAGE):
super().__init__(message)
# Make it user friendly wherever possible
self.message = message
def __str__(self) -> str:
return self.message
class IndexingError(SdkError):
def __init__(self, message: str = ""):
if "404" in message:
message = "Index not found. Please check vector DB settings."
super().__init__(message)
class LLMError(SdkError):
DEFAULT_MESSAGE = "Error ocurred related to LLM"
class EmbeddingError(SdkError):
DEFAULT_MESSAGE = "Error ocurred related to embedding"
class VectorDBError(SdkError):
DEFAULT_MESSAGE = "Error ocurred related to vector DB"
class X2TextError(SdkError):
DEFAULT_MESSAGE = "Error ocurred related to text extractor"
class OCRError(SdkError):
DEFAULT_MESSAGE = "Error ocurred related to OCR"
class RateLimitError(SdkError):
DEFAULT_MESSAGE = "Running into rate limit errors, please try again later"
| 1,071 | Python | .py | 25 | 37.2 | 78 | 0.705825 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,652 | llm.py | Zipstack_unstract-sdk/src/unstract/sdk/llm.py | import logging
import re
from typing import Any, Callable, Optional
from deprecated import deprecated
from llama_index.core.base.llms.types import CompletionResponseGen
from llama_index.core.llms import LLM as LlamaIndexLLM
from llama_index.core.llms import CompletionResponse
from openai import APIError as OpenAIAPIError
from openai import RateLimitError as OpenAIRateLimitError
from unstract.sdk.adapter import ToolAdapter
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.llm import adapters
from unstract.sdk.adapters.llm.exceptions import parse_llm_err
from unstract.sdk.adapters.llm.llm_adapter import LLMAdapter
from unstract.sdk.constants import LogLevel, ToolEnv
from unstract.sdk.exceptions import LLMError, RateLimitError, SdkError
from unstract.sdk.helper import SdkHelper
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.utils.callback_manager import CallbackManager
logger = logging.getLogger(__name__)
class LLM:
"""Interface to handle all LLM interactions."""
json_regex = re.compile(r"\[(?:.|\n)*\]|\{(?:.|\n)*\}")
llm_adapters = adapters
MAX_TOKENS = 1024 * 4
RESPONSE = "response"
def __init__(
self,
tool: BaseTool,
adapter_instance_id: Optional[str] = None,
usage_kwargs: dict[Any, Any] = {},
):
"""
Notes:
- "Azure OpenAI" : Environment variables required
OPENAI_API_KEY,OPENAI_API_BASE, OPENAI_API_VERSION,
OPENAI_API_ENGINE, OPENAI_API_MODEL
Args:
tool (AbstractTool): Instance of AbstractTool
"""
self._tool = tool
self._adapter_instance_id = adapter_instance_id
self._llm_instance: LlamaIndexLLM = None
self._usage_kwargs = usage_kwargs
self._initialise()
def _initialise(self):
if self._adapter_instance_id:
self._llm_instance = self._get_llm(self._adapter_instance_id)
self._usage_kwargs["adapter_instance_id"] = self._adapter_instance_id
if not SdkHelper.is_public_adapter(adapter_id=self._adapter_instance_id):
platform_api_key = self._tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY)
CallbackManager.set_callback(
platform_api_key=platform_api_key,
model=self._llm_instance,
kwargs=self._usage_kwargs,
)
def complete(
self,
prompt: str,
extract_json: bool = True,
        process_text: Optional[
            Callable[[CompletionResponse, re.Pattern], dict[str, Any]]
        ] = None,
**kwargs: Any,
) -> dict[str, Any]:
"""Generates a completion response for the given prompt.
Args:
prompt (str): The input text prompt for generating the completion.
extract_json (bool, optional): If set to True, the response text is
processed using a regex to extract JSON content from it. If no JSON is
found, the text is returned as it is. Defaults to True.
process_text (Optional[Callable[[str], str]], optional): A callable that
processes the generated text and extracts specific information.
Defaults to None.
**kwargs (Any): Additional arguments passed to the completion function.
Returns:
dict[str, Any]: A dictionary containing the result of the completion
and any processed output.
Raises:
LLMError: If an error occurs during the completion process, it will be
raised after being processed by `parse_llm_err`.
"""
try:
response: CompletionResponse = self._llm_instance.complete(prompt, **kwargs)
process_text_output = {}
if process_text:
try:
process_text_output = process_text(response, LLM.json_regex)
if not isinstance(process_text_output, dict):
process_text_output = {}
except Exception as e:
logger.error(f"Error occured inside function 'process_text': {e}")
process_text_output = {}
if extract_json:
match = LLM.json_regex.search(response.text)
if match:
response.text = match.group(0)
return {LLM.RESPONSE: response, **process_text_output}
except Exception as e:
raise parse_llm_err(e) from e
def stream_complete(
self,
prompt: str,
**kwargs: Any,
) -> CompletionResponseGen:
try:
response: CompletionResponseGen = self._llm_instance.stream_complete(
prompt, **kwargs
)
return response
except Exception as e:
raise parse_llm_err(e) from e
def _get_llm(self, adapter_instance_id: str) -> LlamaIndexLLM:
"""Returns the LLM object for the tool.
Returns:
LLM: The LLM object for the tool.
(llama_index.llms.base.LLM)
"""
try:
if not self._adapter_instance_id:
raise LLMError("Adapter instance ID not set. " "Initialisation failed")
llm_config_data = ToolAdapter.get_adapter_config(
self._tool, self._adapter_instance_id
)
llm_adapter_id = llm_config_data.get(Common.ADAPTER_ID)
if llm_adapter_id not in self.llm_adapters:
raise SdkError(f"LLM adapter not supported : " f"{llm_adapter_id}")
llm_adapter = self.llm_adapters[llm_adapter_id][Common.METADATA][
Common.ADAPTER
]
llm_metadata = llm_config_data.get(Common.ADAPTER_METADATA)
llm_adapter_class: LLMAdapter = llm_adapter(llm_metadata)
self._usage_kwargs["provider"] = llm_adapter_class.get_provider()
            llm_instance: LlamaIndexLLM = llm_adapter_class.get_llm_instance()
return llm_instance
except Exception as e:
self._tool.stream_log(
log=f"Unable to get llm instance: {e}", level=LogLevel.ERROR
)
raise LLMError(f"Error getting llm instance: {e}") from e
def get_max_tokens(self, reserved_for_output: int = 0) -> int:
"""Returns the maximum number of tokens that can be used for the LLM.
Args:
reserved_for_output (int): The number of tokens reserved for the
output.
The default is 0.
Returns:
int: The maximum number of tokens that can be used for the LLM.
"""
return self.MAX_TOKENS - reserved_for_output
def set_max_tokens(self, max_tokens: int) -> None:
"""Set the maximum number of tokens that can be used for the LLM.
Args:
max_tokens (int): The number of tokens to be used at the maximum
Returns:
None
"""
self._llm_instance.max_tokens = max_tokens
def get_class_name(self) -> str:
"""Gets the class name of the Llama Index LLM.
Args:
NA
Returns:
Class name
"""
return self._llm_instance.class_name()
def get_model_name(self) -> str:
"""Gets the name of the LLM model.
Args:
NA
Returns:
LLM model name
"""
return self._llm_instance.model
@deprecated("Use LLM instead of ToolLLM")
def get_llm(self, adapter_instance_id: Optional[str] = None) -> LlamaIndexLLM:
if not self._llm_instance:
self._adapter_instance_id = adapter_instance_id
self._initialise()
return self._llm_instance
@classmethod
@deprecated("Instantiate LLM and call complete() instead")
def run_completion(
cls,
llm: LlamaIndexLLM,
platform_api_key: str,
prompt: str,
retries: int = 3,
**kwargs: Any,
) -> Optional[dict[str, Any]]:
# Setup callback manager to collect Usage stats
CallbackManager.set_callback_manager(
platform_api_key=platform_api_key, llm=llm, **kwargs
)
# Removing specific keys from kwargs
new_kwargs = kwargs.copy()
for key in [
"workflow_id",
"execution_id",
"adapter_instance_id",
"run_id",
]:
new_kwargs.pop(key, None)
try:
response: CompletionResponse = llm.complete(prompt, **new_kwargs)
match = cls.json_regex.search(response.text)
if match:
response.text = match.group(0)
return {"response": response}
# TODO: Handle for all LLM providers
except OpenAIAPIError as e:
msg = "OpenAI error: "
msg += e.message
if hasattr(e, "body") and isinstance(e.body, dict) and "message" in e.body:
msg += e.body["message"]
if isinstance(e, OpenAIRateLimitError):
raise RateLimitError(msg)
raise LLMError(msg) from e
# Legacy
ToolLLM = LLM
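# Illustrative usage sketch, not part of the SDK: assumes a concrete BaseTool
# subclass instance `tool` and an LLM adapter configured on the platform; the
# UUID and prompt below are placeholders.
#
#   llm = LLM(tool=tool, adapter_instance_id="<llm-adapter-uuid>")
#   result = llm.complete("Summarise the document in one line.", extract_json=False)
#   completion: CompletionResponse = result[LLM.RESPONSE]
#   tool.stream_log(completion.text)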
| 9,188 | Python | .py | 218 | 31.431193 | 88 | 0.601097 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,653 | adapter.py | Zipstack_unstract-sdk/src/unstract/sdk/adapter.py | import json
from typing import Any, Optional
import requests
from requests.exceptions import ConnectionError, HTTPError
from unstract.sdk.adapters.utils import AdapterUtils
from unstract.sdk.constants import AdapterKeys, LogLevel, ToolEnv
from unstract.sdk.exceptions import SdkError
from unstract.sdk.helper import SdkHelper
from unstract.sdk.platform import PlatformBase
from unstract.sdk.tool.base import BaseTool
class ToolAdapter(PlatformBase):
"""Class to handle Adapters for Unstract Tools.
Notes:
- PLATFORM_SERVICE_API_KEY environment variable is required.
"""
def __init__(
self,
tool: BaseTool,
platform_host: str,
platform_port: str,
) -> None:
"""
Args:
tool (AbstractTool): Instance of AbstractTool
platform_host (str): Host of platform service
platform_port (str): Port of platform service
Notes:
- PLATFORM_SERVICE_API_KEY environment variable is required.
- The platform_host and platform_port are the
host and port of the platform service.
"""
super().__init__(
tool=tool, platform_host=platform_host, platform_port=platform_port
)
def _get_adapter_configuration(
self,
adapter_instance_id: str,
) -> dict[str, Any]:
"""Get Adapter
1. Get the adapter config from platform service
using the adapter_instance_id
Args:
adapter_instance_id (str): Adapter instance ID
Returns:
dict[str, Any]: Config stored for the adapter
"""
url = f"{self.base_url}/adapter_instance"
query_params = {AdapterKeys.ADAPTER_INSTANCE_ID: adapter_instance_id}
headers = {"Authorization": f"Bearer {self.bearer_token}"}
try:
response = requests.get(url, headers=headers, params=query_params)
response.raise_for_status()
adapter_data: dict[str, Any] = response.json()
# Removing name and type to avoid migration for already indexed records
adapter_name = adapter_data.pop("adapter_name", "")
adapter_type = adapter_data.pop("adapter_type", "")
provider = adapter_data.get("adapter_id", "").split("|")[0]
# TODO: Print metadata after redacting sensitive information
self.tool.stream_log(
f"Retrieved config for '{adapter_instance_id}', type: "
f"'{adapter_type}', provider: '{provider}', name: '{adapter_name}'",
level=LogLevel.DEBUG,
)
except ConnectionError:
raise SdkError(
"Unable to connect to platform service, please contact the admin."
)
except HTTPError as e:
default_err = (
"Error while calling the platform service, please contact the admin."
)
msg = AdapterUtils.get_msg_from_request_exc(
err=e, message_key="error", default_err=default_err
)
raise SdkError(f"Error retrieving adapter. {msg}")
return adapter_data
@staticmethod
def get_adapter_config(
tool: BaseTool, adapter_instance_id: str
) -> Optional[dict[str, Any]]:
"""Get adapter spec by the help of unstract DB tool.
This method first checks if the adapter_instance_id matches
any of the public adapter keys. If it matches, the configuration
is fetched from environment variables. Otherwise, it connects to the
platform service to retrieve the configuration.
Args:
tool (AbstractTool): Instance of AbstractTool
adapter_instance_id (str): ID of the adapter instance
Required env variables:
PLATFORM_HOST: Host of platform service
PLATFORM_PORT: Port of platform service
Returns:
dict[str, Any]: Config stored for the adapter
"""
# Check if the adapter ID matches any public adapter keys
if SdkHelper.is_public_adapter(adapter_id=adapter_instance_id):
adapter_metadata_config = tool.get_env_or_die(adapter_instance_id)
adapter_metadata = json.loads(adapter_metadata_config)
return adapter_metadata
platform_host = tool.get_env_or_die(ToolEnv.PLATFORM_HOST)
platform_port = tool.get_env_or_die(ToolEnv.PLATFORM_PORT)
tool.stream_log(
f"Retrieving config from DB for '{adapter_instance_id}'",
level=LogLevel.DEBUG,
)
tool_adapter = ToolAdapter(
tool=tool,
platform_host=platform_host,
platform_port=platform_port,
)
return tool_adapter._get_adapter_configuration(adapter_instance_id)
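# Illustrative usage sketch, not part of the SDK: assumes a concrete BaseTool
# subclass instance `tool` with PLATFORM_HOST, PLATFORM_PORT and
# PLATFORM_SERVICE_API_KEY set; the adapter UUID is a placeholder.
#
#   config = ToolAdapter.get_adapter_config(tool, "<adapter-instance-uuid>")
#   adapter_id = config.get("adapter_id") if config else None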
| 4,833 | Python | .py | 111 | 33.612613 | 85 | 0.6331 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,654 | audit.py | Zipstack_unstract-sdk/src/unstract/sdk/audit.py | from typing import Any, Union
import requests
from llama_index.core.callbacks import CBEventType, TokenCountingHandler
from unstract.sdk.constants import LogLevel, ToolEnv
from unstract.sdk.helper import SdkHelper
from unstract.sdk.tool.stream import StreamMixin
from unstract.sdk.utils.token_counter import TokenCounter
class Audit(StreamMixin):
"""The 'Audit' class is responsible for pushing usage data to the platform
service.
Methods:
- push_usage_data: Pushes the usage data to the platform service.
Attributes:
None
"""
def __init__(self, log_level: LogLevel = LogLevel.INFO) -> None:
super().__init__(log_level)
def push_usage_data(
self,
platform_api_key: str,
token_counter: Union[TokenCountingHandler, TokenCounter] = None,
model_name: str = "",
event_type: CBEventType = None,
kwargs: dict[Any, Any] = None,
) -> None:
"""Pushes the usage data to the platform service.
Args:
platform_api_key (str): The platform API key.
token_counter (TokenCountingHandler, optional): The token counter
object. Defaults to None.
model_name (str, optional): The name of the model.
Defaults to "".
event_type (CBEventType, optional): The type of the event. Defaults
to None.
**kwargs: Optional keyword arguments.
workflow_id (str, optional): The ID of the workflow.
Defaults to "".
execution_id (str, optional): The ID of the execution. Defaults
to "".
adapter_instance_id (str, optional): The adapter instance ID.
Defaults to "".
run_id (str, optional): The run ID. Defaults to "".
Returns:
None
Raises:
requests.RequestException: If there is an error while pushing the
usage details.
"""
platform_host = self.get_env_or_die(ToolEnv.PLATFORM_HOST)
platform_port = self.get_env_or_die(ToolEnv.PLATFORM_PORT)
base_url = SdkHelper.get_platform_base_url(
platform_host=platform_host, platform_port=platform_port
)
bearer_token = platform_api_key
workflow_id = kwargs.get("workflow_id", "")
execution_id = kwargs.get("execution_id", "")
adapter_instance_id = kwargs.get("adapter_instance_id", "")
run_id = kwargs.get("run_id", "")
provider = kwargs.get("provider", "")
llm_usage_reason = ""
if event_type == "llm":
llm_usage_reason = kwargs.get("llm_usage_reason", "")
data = {
"workflow_id": workflow_id,
"execution_id": execution_id,
"adapter_instance_id": adapter_instance_id,
"run_id": run_id,
"usage_type": event_type,
"llm_usage_reason": llm_usage_reason,
"model_name": model_name,
"provider": provider,
"embedding_tokens": token_counter.total_embedding_token_count,
"prompt_tokens": token_counter.prompt_llm_token_count,
"completion_tokens": token_counter.completion_llm_token_count,
"total_tokens": token_counter.total_llm_token_count,
}
url = f"{base_url}/usage"
headers = {"Authorization": f"Bearer {bearer_token}"}
try:
response = requests.post(url, headers=headers, json=data, timeout=30)
if response.status_code != 200:
self.stream_log(
log=(
"Error while pushing usage details: "
f"{response.status_code} {response.reason}",
),
level=LogLevel.ERROR,
)
else:
self.stream_log(
f"Successfully pushed usage details, {data}", level=LogLevel.DEBUG
)
except requests.RequestException as e:
self.stream_log(
log=f"Error while pushing usage details: {e}",
level=LogLevel.ERROR,
)
finally:
if isinstance(token_counter, TokenCountingHandler):
token_counter.reset_counts()
def push_page_usage_data(
self,
platform_api_key: str,
page_count: int,
file_size: int,
file_type: str,
kwargs: dict[Any, Any] = None,
) -> None:
platform_host = self.get_env_or_die(ToolEnv.PLATFORM_HOST)
platform_port = self.get_env_or_die(ToolEnv.PLATFORM_PORT)
run_id = kwargs.get("run_id", "")
file_name = kwargs.get("file_name", "")
base_url = SdkHelper.get_platform_base_url(
platform_host=platform_host, platform_port=platform_port
)
bearer_token = platform_api_key
url = f"{base_url}/page-usage"
headers = {"Authorization": f"Bearer {bearer_token}"}
data = {
"page_count": page_count,
"file_name": file_name,
"file_size": file_size,
"file_type": file_type,
"run_id": run_id,
}
try:
response = requests.post(url, headers=headers, json=data, timeout=30)
if response.status_code != 200:
self.stream_log(
log=(
"Error while pushing page usage details: "
f"{response.status_code} {response.reason}",
),
level=LogLevel.ERROR,
)
else:
self.stream_log(
"Successfully pushed page usage details", level=LogLevel.DEBUG
)
except requests.RequestException as e:
self.stream_log(
log=f"Error while pushing page usage details: {e}",
level=LogLevel.ERROR,
)
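# Illustrative usage sketch, not part of the SDK: assumes a valid platform API
# key and a populated token counter; the model name and IDs are placeholders.
#
#   Audit().push_usage_data(
#       platform_api_key=platform_api_key,
#       token_counter=token_counter,
#       model_name="<model-name>",
#       event_type=CBEventType.LLM,
#       kwargs={"workflow_id": "<wf-id>", "execution_id": "<exec-id>", "run_id": "<run-id>"},
#   )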
| 5,996 | Python | .py | 144 | 29.659722 | 86 | 0.561825 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,655 | validator.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/validator.py | import re
from json import JSONDecodeError
from pathlib import Path
from typing import Any
from jsonschema import Draft202012Validator, ValidationError, validators
from unstract.sdk.constants import MetadataKey, PropKey
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.tool.mime_types import EXT_MIME_MAP
from unstract.sdk.utils import ToolUtils
def extend_with_default(validator_class: Any) -> Any:
"""Extend a JSON schema validator class with a default value functionality.
Parameters:
- validator_class (Any): The JSON schema validator class to be extended.
Returns:
- Any: The extended JSON schema validator class.
Example:
extend_with_default(Draft202012Validator)
"""
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(
validator: Any, properties: Any, instance: Any, schema: Any
) -> Any:
for property_, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property_, subschema["default"])
yield from validate_properties(
validator,
properties,
instance,
schema,
)
return validators.extend(
validator_class,
{"properties": set_defaults},
)
# Helps validate a JSON against a schema and applies missing key's defaults too.
DefaultsGeneratingValidator = extend_with_default(Draft202012Validator)
class ToolValidator:
"""Class to validate a tool and its configuration before its executed with
an input."""
def __init__(self, tool: BaseTool) -> None:
self.tool = tool
props = self.tool.properties
self.restrictions = props.get(PropKey.RESTRICTIONS)
def validate_pre_execution(
self, settings: dict[str, Any]
) -> dict[str, Any]:
"""Performs validation before the tool executes on the input file.
Args:
settings (dict[str, Any]): Settings configured for the tool
Returns:
dict[str, Any]: Settings JSON for a tool (filled with defaults)
"""
input_file = Path(self.tool.get_input_file())
if not input_file.is_file():
self.tool.stream_error_and_exit(
f"Input file not found: {input_file}"
)
self._validate_restrictions(input_file)
self._validate_settings_and_fill_defaults(settings)
# Call tool's validation hook to execute custom validation
self.tool.validate(str(input_file), settings)
return settings
def _validate_restrictions(self, input_file: Path) -> None:
"""Validates the restrictions mentioned in the tool's PROPERTIES.
Args:
input_file (Path): Path object to the input file to be validated
"""
self._validate_file_size(input_file)
self._validate_file_type(input_file)
def _validate_settings_and_fill_defaults(
self, tool_settings: dict[str, Any]
) -> None:
"""Validates and obtains settings for a tool.
Validation is done against the tool's settings based
on its declared SPEC. Validator also fills in the missing defaults.
Args:
tool_settings (dict[str, Any]): Tool settings to validate
"""
try:
spec_schema = self.tool.spec
DefaultsGeneratingValidator(spec_schema).validate(tool_settings)
except JSONDecodeError as e:
self.tool.stream_error_and_exit(
f"Settings are not a valid JSON: {str(e)}"
)
except ValidationError as e:
self.tool.stream_error_and_exit(f"Invalid settings: {str(e)}")
def _validate_file_size(self, input_file: Path) -> None:
"""Validates the input file size against the max allowed size set in
the tool's PROPERTIES.
Raises:
RuntimeError: File size exceeds max allowed size
"""
max_file_size = self.restrictions.get(PropKey.MAX_FILE_SIZE)
max_size_in_bytes = self._parse_size_string(max_file_size)
self.tool.stream_log(
f"Checking input file size... (max file size: {max_file_size})"
)
file_size = input_file.stat().st_size
self.tool.stream_log(
f"Input file size: {self._human_readable_size(file_size)}"
)
if file_size > max_size_in_bytes:
source_name = self.tool.get_exec_metadata.get(
MetadataKey.SOURCE_NAME
)
self.tool.stream_error_and_exit(
f"File {source_name} exceeds the maximum "
f"allowed size of {max_file_size}"
)
def _human_readable_size(self, num: float, suffix: str = "B") -> str:
"""Gets the human readable size for a file,
Args:
num (int): Size in bytes to parse
suffix (str, optional): _description_. Defaults to "B".
Returns:
str: Human readable size
"""
for unit in ("", "K", "M", "G", "T"):
if abs(num) < 1024.0:
return f"{num:3.1f}{unit}{suffix}"
num /= 1024.0
return f"{num:.1f}{suffix}"
def _parse_size_string(self, size_string: str) -> int:
"""Parses the size string for validation.
Args:
size_string (str): Size string to be parsed
Raises:
ValueError: Invalid size format
Returns:
int: Size in bytes
"""
size_match = re.match(PropKey.FILE_SIZE_REGEX, size_string)
if not size_match:
self.tool.stream_error_and_exit(
f"Invalid size string format: {size_string}"
)
size, unit = size_match.groups()
size_in_bytes = int(size)
if unit.upper() == "KB":
size_in_bytes *= 1024
elif unit.upper() == "MB":
size_in_bytes *= 1024 * 1024
elif unit.upper() == "GB":
size_in_bytes *= 1024 * 1024 * 1024
elif unit.upper() == "TB":
size_in_bytes *= 1024 * 1024 * 1024 * 1024
return size_in_bytes
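    # Worked example (illustrative): a PROPERTIES value of "10MB" parses to
    # 10 * 1024 * 1024 = 10485760 bytes, and "2GB" to 2 * 1024 ** 3 bytes.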
def _validate_file_type(self, input_file: Path) -> None:
"""Validate the input file type against the allowed types mentioned in
tool's PROPERTIES.
Args:
input_file (Path): Path obj of input file to validate
Raises:
RuntimeError: If file type is not supported by the tool
"""
self.tool.stream_log("Checking input file type...")
allowed_exts: list[str] = self.restrictions.get(
PropKey.ALLOWED_FILE_TYPES
)
allowed_exts = [allowed_type.lower() for allowed_type in allowed_exts]
if "*" in allowed_exts:
self.tool.stream_log("Skipping check, tool allows all file types")
return
allowed_mimes = []
for ext in allowed_exts:
if ext not in EXT_MIME_MAP:
self.tool.stream_error_and_exit(
f"{ext} mentioned in tool PROPERTIES is not supported"
)
allowed_mimes.append(EXT_MIME_MAP[ext])
input_file_mime = ToolUtils.get_file_mime_type(input_file=input_file)
self.tool.stream_log(f"Input file MIME: {input_file_mime}")
if input_file_mime not in allowed_mimes:
self.tool.stream_error_and_exit(
f"File type of {input_file_mime} is not supported by"
" the tool, check its PROPERTIES for a list of supported types"
)
| 7,568 | Python | .py | 179 | 32.553073 | 80 | 0.61372 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,656 | executor.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/executor.py | import argparse
import logging
import shutil
from json import loads
from pathlib import Path
from typing import Any
from unstract.sdk import get_sdk_version
from unstract.sdk.constants import Command
from unstract.sdk.tool.base import BaseTool
from unstract.sdk.tool.validator import ToolValidator
logger = logging.getLogger(__name__)
class ToolExecutor:
"""Takes care of executing a tool's intended command."""
def __init__(self, tool: BaseTool) -> None:
self.tool = tool
def execute(self, args: argparse.Namespace) -> None:
"""Executes the tool with the passed arguments.
Args:
args (argparse.Namespace): Parsed arguments to execute with
"""
        command = args.command.upper()
if command in Command.static_commands():
self.tool.handle_static_command(command)
elif command == Command.RUN:
self.execute_run(args=args)
else:
self.tool.stream_error_and_exit(f"Command {command} not supported")
def _setup_for_run(self) -> None:
"""Helps initialize tool execution for RUN command."""
shutil.rmtree(self.tool.get_output_dir(), ignore_errors=True)
Path(self.tool.get_output_dir()).mkdir(parents=True, exist_ok=True)
def execute_run(self, args: argparse.Namespace) -> None:
"""Executes the tool's RUN command.
Args:
args (argparse.Namespace): Parsed arguments to execute with
"""
if args.settings is None:
self.tool.stream_error_and_exit("--settings are required for RUN command")
settings: dict[str, Any] = loads(args.settings)
tool_name = self.tool.properties["displayName"]
tool_version = self.tool.properties["toolVersion"]
self.tool.stream_log(
f"Running tool '{tool_name}:{tool_version}' with "
f"Workflow ID: {self.tool.workflow_id}, "
f"Execution ID: {self.tool.execution_id}, "
f"SDK Version: {get_sdk_version()}"
)
self._setup_for_run()
validator = ToolValidator(self.tool)
settings = validator.validate_pre_execution(settings=settings)
self.tool.stream_log(
f"Executing for file: {self.tool.get_exec_metadata['source_name']}, "
f"with tool settings: {settings}"
)
try:
self.tool.run(
settings=settings,
input_file=self.tool.get_input_file(),
output_dir=self.tool.get_output_dir(),
)
except Exception as e:
msg = f"Error while running tool '{tool_name}': {str(e)}"
logger.error(msg, stack_info=True, exc_info=True)
self.tool.stream_error_and_exit(msg)
# TODO: Call tool method to validate if output was written
| 2,826 | Python | .py | 65 | 34.6 | 86 | 0.638251 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,657 | mixin.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/mixin.py | import logging
from typing import Any
from unstract.sdk.utils import ToolUtils
logger = logging.getLogger(__name__)
class ToolConfigHelper:
"""Helper class to handle static commands for tools."""
@staticmethod
def spec(spec_file: str = "config/spec.json") -> dict[str, Any]:
"""Returns the JSON schema for the tool settings.
Args:
spec_file (str): The path to the JSON schema file.
The default is config/spec.json.
Returns:
str: The JSON schema of the tool.
"""
return ToolUtils.load_json(spec_file)
@staticmethod
def properties(
properties_file: str = "config/properties.json",
) -> dict[str, Any]:
"""Returns the properties of the tool.
Args:
properties_file (str): The path to the properties file.
The default is config/properties.json.
Returns:
str: The properties of the tool.
"""
return ToolUtils.load_json(properties_file)
@staticmethod
def variables(
variables_file: str = "config/runtime_variables.json",
) -> dict[str, Any]:
"""Returns the JSON schema of the runtime variables.
Args:
variables_file (str): The path to the JSON schema file.
The default is config/runtime_variables.json.
Returns:
str: The JSON schema for the runtime variables.
"""
try:
return ToolUtils.load_json(variables_file)
# Allow runtime variables definition to be optional
except FileNotFoundError:
logger.info("No runtime variables defined for tool")
return {}
@staticmethod
def icon(icon_file: str = "config/icon.svg") -> str:
"""Returns the icon of the tool.
Args:
icon_file (str): The path to the icon file.
The default is config/icon.svg.
Returns:
str: The icon of the tool.
"""
with open(icon_file, encoding="utf-8") as f:
icon = f.read()
return icon
| 2,098 | Python | .py | 57 | 27.789474 | 68 | 0.606509 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,658 | stream.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/stream.py | import datetime
import json
import os
from typing import Any
from deprecated import deprecated
from unstract.sdk.constants import Command, LogLevel, LogStage, ToolEnv
from unstract.sdk.utils import ToolUtils
class StreamMixin:
"""Helper class for streaming Unstract tool commands.
A utility class to make writing Unstract tools easier. It provides
methods to stream the JSON schema, properties, icon, log messages,
cost, single step messages, and results using the Unstract protocol
to stdout.
"""
def __init__(self, log_level: LogLevel = LogLevel.INFO, **kwargs) -> None:
"""
Args:
log_level (LogLevel): The log level for filtering of log messages.
The default is INFO.
Allowed values are DEBUG, INFO, WARN, ERROR, and FATAL.
"""
self.log_level = log_level
self._exec_by_tool = ToolUtils.str_to_bool(
os.environ.get(ToolEnv.EXECUTION_BY_TOOL, "False")
)
super().__init__(**kwargs)
def stream_log(
self,
log: str,
level: LogLevel = LogLevel.INFO,
stage: str = LogStage.TOOL_RUN,
**kwargs: Any,
) -> None:
"""Streams a log message using the Unstract protocol LOG to stdout.
Args:
log (str): The log message.
level (LogLevel): The log level. The default is INFO.
Allowed values are DEBUG, INFO, WARN, ERROR, and FATAL.
            stage (str): Stage of the run from LogStage. Defaults to LogStage.TOOL_RUN.
Returns:
None
"""
levels = [
LogLevel.DEBUG,
LogLevel.INFO,
LogLevel.WARN,
LogLevel.ERROR,
LogLevel.FATAL,
]
if levels.index(level) < levels.index(self.log_level):
return
record = {
"type": "LOG",
"stage": stage,
"level": level.value,
"log": log,
"emitted_at": datetime.datetime.now().isoformat(),
**kwargs,
}
print(json.dumps(record))
def stream_error_and_exit(self, message: str) -> None:
"""Stream error log and exit.
Args:
message (str): Error message
"""
self.stream_log(message, level=LogLevel.ERROR)
if self._exec_by_tool:
exit(1)
else:
raise RuntimeError("RuntimeError from SDK, check the above log for details")
def get_env_or_die(self, env_key: str) -> str:
"""Returns the value of an env variable.
If its empty or None, raises an error and exits
Args:
env_key (str): Key to retrieve
Returns:
str: Value of the env
"""
env_value = os.environ.get(env_key)
if env_value is None or env_value == "":
self.stream_error_and_exit(f"Env variable '{env_key}' is required")
return env_value
@staticmethod
def stream_spec(spec: str) -> None:
"""Streams JSON schema of the tool using the Unstract protocol SPEC to
stdout.
Args:
spec (str): The JSON schema of the tool.
Typically returned by the spec() method.
Returns:
None
"""
record = {
"type": "SPEC",
"spec": spec,
"emitted_at": datetime.datetime.now().isoformat(),
}
print(json.dumps(record))
@staticmethod
def stream_properties(properties: str) -> None:
"""Streams the properties of the tool using the Unstract protocol
PROPERTIES to stdout.
Args:
properties (str): The properties of the tool.
Typically returned by the properties() method.
Returns:
None
"""
record = {
"type": "PROPERTIES",
"properties": properties,
"emitted_at": datetime.datetime.now().isoformat(),
}
print(json.dumps(record))
@staticmethod
def stream_variables(variables: str) -> None:
"""Streams JSON schema of the tool's variables using the Unstract
protocol VARIABLES to stdout.
Args:
variables (str): The tool's runtime variables.
Typically returned by the spec() method.
Returns:
None
"""
record = {
"type": Command.VARIABLES,
"variables": variables,
"emitted_at": datetime.datetime.now().isoformat(),
}
print(json.dumps(record))
@staticmethod
def stream_icon(icon: str) -> None:
"""Streams the icon of the tool using the Unstract protocol ICON to
stdout.
Args:
icon (str): The icon of the tool.
Typically returned by the icon() method.
Returns:
None
"""
record = {
"type": "ICON",
"icon": icon,
"emitted_at": datetime.datetime.now().isoformat(),
}
print(json.dumps(record))
@staticmethod
def stream_update(message: str, state: str, **kwargs: Any) -> None:
"""Streams a log message using the Unstract protocol UPDATE to stdout.
Args:
message (str): The log message.
            state (str): State of the update from LogState.
"""
record = {
"type": "UPDATE",
"state": state,
"message": message,
"emitted_at": datetime.datetime.now().isoformat(),
**kwargs,
}
print(json.dumps(record))
@staticmethod
@deprecated(version="0.4.4", reason="Unused in workflow execution")
def stream_cost(cost: float, cost_units: str, **kwargs: Any) -> None:
"""Streams the cost of the tool using the Unstract protocol COST to
stdout.
Args:
cost (float): The cost of the tool.
cost_units (str): The cost units of the tool.
**kwargs: Additional keyword arguments to include in the record.
Returns:
None
"""
record = {
"type": "COST",
"cost": cost,
"cost_units": cost_units,
"emitted_at": datetime.datetime.now().isoformat(),
**kwargs,
}
print(json.dumps(record))
@staticmethod
@deprecated(version="0.4.4", reason="Unused in workflow execution")
def stream_single_step_message(message: str, **kwargs: Any) -> None:
"""Streams a single step message using the Unstract protocol
SINGLE_STEP_MESSAGE to stdout.
Args:
message (str): The single step message.
**kwargs: Additional keyword arguments to include in the record.
Returns:
None
"""
record = {
"type": "SINGLE_STEP_MESSAGE",
"message": message,
"emitted_at": datetime.datetime.now().isoformat(),
**kwargs,
}
print(json.dumps(record))
@staticmethod
@deprecated(version="0.4.4", reason="Use `BaseTool.write_to_result()` instead")
def stream_result(result: dict[Any, Any], **kwargs: Any) -> None:
"""Streams the result of the tool using the Unstract protocol RESULT to
stdout.
Args:
result (dict): The result of the tool. Refer to the
Unstract protocol for the format of the result.
**kwargs: Additional keyword arguments to include in the record.
Returns:
None
"""
record = {
"type": "RESULT",
"result": result,
"emitted_at": datetime.datetime.now().isoformat(),
**kwargs,
}
print(json.dumps(record))
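# Illustrative example, not part of the SDK, of the record stream_log() prints
# to stdout; the timestamp is a placeholder and the stage/level values assume
# the defaults of LogStage.TOOL_RUN and LogLevel.INFO.
#
#   {"type": "LOG", "stage": "TOOL_RUN", "level": "INFO",
#    "log": "Indexing file...", "emitted_at": "2024-01-01T00:00:00.000000"}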
| 7,741 | Python | .py | 218 | 25.619266 | 88 | 0.566297 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,659 | mime_types.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/mime_types.py | # flake8: noqa
"""Contains a mapping of file extension to MIME types.
Multiple extensions can be mapped to the same MIME type. Sourced from
http://svn.apache.org/repos/asf/httpd/httpd/trunk/docs/conf/mime.types
"""
EXT_MIME_MAP = {
"123": "application/vnd.lotus-1-2-3",
"3dml": "text/vnd.in3d.3dml",
"3ds": "image/x-3ds",
"3g2": "video/3gpp2",
"3gp": "video/3gpp",
"7z": "application/x-7z-compressed",
"aab": "application/x-authorware-bin",
"aac": "audio/x-aac",
"aam": "application/x-authorware-map",
"aas": "application/x-authorware-seg",
"abw": "application/x-abiword",
"ac": "application/pkix-attr-cert",
"acc": "application/vnd.americandynamics.acc",
"ace": "application/x-ace-compressed",
"acu": "application/vnd.acucobol",
"acutc": "application/vnd.acucorp",
"adp": "audio/adpcm",
"aep": "application/vnd.audiograph",
"afm": "application/x-font-type1",
"afp": "application/vnd.ibm.modcap",
"ahead": "application/vnd.ahead.space",
"ai": "application/postscript",
"aif": "audio/x-aiff",
"aifc": "audio/x-aiff",
"aiff": "audio/x-aiff",
"air": "application/vnd.adobe.air-application-installer-package+zip",
"ait": "application/vnd.dvb.ait",
"ami": "application/vnd.amiga.ami",
"apk": "application/vnd.android.package-archive",
"appcache": "text/cache-manifest",
"application": "application/x-ms-application",
"apr": "application/vnd.lotus-approach",
"arc": "application/x-freearc",
"asc": "application/pgp-signature",
"asf": "video/x-ms-asf",
"asm": "text/x-asm",
"aso": "application/vnd.accpac.simply.aso",
"asx": "video/x-ms-asf",
"atc": "application/vnd.acucorp",
"atom": "application/atom+xml",
"atomcat": "application/atomcat+xml",
"atomsvc": "application/atomsvc+xml",
"atx": "application/vnd.antix.game-component",
"au": "audio/basic",
"avi": "video/x-msvideo",
"aw": "application/applixware",
"azf": "application/vnd.airzip.filesecure.azf",
"azs": "application/vnd.airzip.filesecure.azs",
"azw": "application/vnd.amazon.ebook",
"bat": "application/x-msdownload",
"bcpio": "application/x-bcpio",
"bdf": "application/x-font-bdf",
"bdm": "application/vnd.syncml.dm+wbxml",
"bed": "application/vnd.realvnc.bed",
"bh2": "application/vnd.fujitsu.oasysprs",
"bin": "application/octet-stream",
"blb": "application/x-blorb",
"blorb": "application/x-blorb",
"bmi": "application/vnd.bmi",
"bmp": "image/bmp",
"book": "application/vnd.framemaker",
"box": "application/vnd.previewsystems.box",
"boz": "application/x-bzip2",
"bpk": "application/octet-stream",
"btif": "image/prs.btif",
"bz2": "application/x-bzip2",
"bz": "application/x-bzip",
"c11amc": "application/vnd.cluetrust.cartomobile-config",
"c11amz": "application/vnd.cluetrust.cartomobile-config-pkg",
"c4d": "application/vnd.clonk.c4group",
"c4f": "application/vnd.clonk.c4group",
"c4g": "application/vnd.clonk.c4group",
"c4p": "application/vnd.clonk.c4group",
"c4u": "application/vnd.clonk.c4group",
"cab": "application/vnd.ms-cab-compressed",
"caf": "audio/x-caf",
"cap": "application/vnd.tcpdump.pcap",
"car": "application/vnd.curl.car",
"cat": "application/vnd.ms-pki.seccat",
"cb7": "application/x-cbr",
"cba": "application/x-cbr",
"cbr": "application/x-cbr",
"cbt": "application/x-cbr",
"cbz": "application/x-cbr",
"cct": "application/x-director",
"cc": "text/x-c",
"ccxml": "application/ccxml+xml",
"cdbcmsg": "application/vnd.contact.cmsg",
"cdf": "application/x-netcdf",
"cdkey": "application/vnd.mediastation.cdkey",
"cdmia": "application/cdmi-capability",
"cdmic": "application/cdmi-container",
"cdmid": "application/cdmi-domain",
"cdmio": "application/cdmi-object",
"cdmiq": "application/cdmi-queue",
"cdx": "chemical/x-cdx",
"cdxml": "application/vnd.chemdraw+xml",
"cdy": "application/vnd.cinderella",
"cer": "application/pkix-cert",
"cfs": "application/x-cfs-compressed",
"cgm": "image/cgm",
"chat": "application/x-chat",
"chm": "application/vnd.ms-htmlhelp",
"chrt": "application/vnd.kde.kchart",
"cif": "chemical/x-cif",
"cii": "application/vnd.anser-web-certificate-issue-initiation",
"cil": "application/vnd.ms-artgalry",
"cla": "application/vnd.claymore",
"class": "application/java-vm",
"clkk": "application/vnd.crick.clicker.keyboard",
"clkp": "application/vnd.crick.clicker.palette",
"clkt": "application/vnd.crick.clicker.template",
"clkw": "application/vnd.crick.clicker.wordbank",
"clkx": "application/vnd.crick.clicker",
"clp": "application/x-msclip",
"cmc": "application/vnd.cosmocaller",
"cmdf": "chemical/x-cmdf",
"cml": "chemical/x-cml",
"cmp": "application/vnd.yellowriver-custom-menu",
"cmx": "image/x-cmx",
"cod": "application/vnd.rim.cod",
"com": "application/x-msdownload",
"conf": "text/plain",
"cpio": "application/x-cpio",
"cpp": "text/x-c",
"cpt": "application/mac-compactpro",
"crd": "application/x-mscardfile",
"crl": "application/pkix-crl",
"crt": "application/x-x509-ca-cert",
"cryptonote": "application/vnd.rig.cryptonote",
"csh": "application/x-csh",
"csml": "chemical/x-csml",
"csp": "application/vnd.commonspace",
"css": "text/css",
"cst": "application/x-director",
"csv": "text/csv",
"c": "text/x-c",
"cu": "application/cu-seeme",
"curl": "text/vnd.curl",
"cww": "application/prs.cww",
"cxt": "application/x-director",
"cxx": "text/x-c",
"dae": "model/vnd.collada+xml",
"daf": "application/vnd.mobius.daf",
"dart": "application/vnd.dart",
"dataless": "application/vnd.fdsn.seed",
"davmount": "application/davmount+xml",
"dbk": "application/docbook+xml",
"dcr": "application/x-director",
"dcurl": "text/vnd.curl.dcurl",
"dd2": "application/vnd.oma.dd2+xml",
"ddd": "application/vnd.fujixerox.ddd",
"deb": "application/x-debian-package",
"def": "text/plain",
"deploy": "application/octet-stream",
"der": "application/x-x509-ca-cert",
"dfac": "application/vnd.dreamfactory",
"dgc": "application/x-dgc-compressed",
"dic": "text/x-c",
"dir": "application/x-director",
"dis": "application/vnd.mobius.dis",
"dist": "application/octet-stream",
"distz": "application/octet-stream",
"djv": "image/vnd.djvu",
"djvu": "image/vnd.djvu",
"dll": "application/x-msdownload",
"dmg": "application/x-apple-diskimage",
"dmp": "application/vnd.tcpdump.pcap",
"dms": "application/octet-stream",
"dna": "application/vnd.dna",
"doc": "application/msword",
"docm": "application/vnd.ms-word.document.macroenabled.12",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"dot": "application/msword",
"dotm": "application/vnd.ms-word.template.macroenabled.12",
"dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
"dp": "application/vnd.osgi.dp",
"dpg": "application/vnd.dpgraph",
"dra": "audio/vnd.dra",
"dsc": "text/prs.lines.tag",
"dssc": "application/dssc+der",
"dtb": "application/x-dtbook+xml",
"dtd": "application/xml-dtd",
"dts": "audio/vnd.dts",
"dtshd": "audio/vnd.dts.hd",
"dump": "application/octet-stream",
"dvb": "video/vnd.dvb.file",
"dvi": "application/x-dvi",
"dwf": "model/vnd.dwf",
"dwg": "image/vnd.dwg",
"dxf": "image/vnd.dxf",
"dxp": "application/vnd.spotfire.dxp",
"dxr": "application/x-director",
"ecelp4800": "audio/vnd.nuera.ecelp4800",
"ecelp7470": "audio/vnd.nuera.ecelp7470",
"ecelp9600": "audio/vnd.nuera.ecelp9600",
"ecma": "application/ecmascript",
"edm": "application/vnd.novadigm.edm",
"edx": "application/vnd.novadigm.edx",
"efif": "application/vnd.picsel",
"ei6": "application/vnd.pg.osasli",
"elc": "application/octet-stream",
"emf": "application/x-msmetafile",
"eml": "message/rfc822",
"emma": "application/emma+xml",
"emz": "application/x-msmetafile",
"eol": "audio/vnd.digital-winds",
"eot": "application/vnd.ms-fontobject",
"eps": "application/postscript",
"epub": "application/epub+zip",
"es3": "application/vnd.eszigno3+xml",
"esa": "application/vnd.osgi.subsystem",
"esf": "application/vnd.epson.esf",
"et3": "application/vnd.eszigno3+xml",
"etx": "text/x-setext",
"eva": "application/x-eva",
"evy": "application/x-envoy",
"exe": "application/x-msdownload",
"exi": "application/exi",
"ext": "application/vnd.novadigm.ext",
"ez2": "application/vnd.ezpix-album",
"ez3": "application/vnd.ezpix-package",
"ez": "application/andrew-inset",
"f4v": "video/x-f4v",
"f77": "text/x-fortran",
"f90": "text/x-fortran",
"fbs": "image/vnd.fastbidsheet",
"fcdt": "application/vnd.adobe.formscentral.fcdt",
"fcs": "application/vnd.isac.fcs",
"fdf": "application/vnd.fdf",
"fe_launch": "application/vnd.denovo.fcselayout-link",
"fg5": "application/vnd.fujitsu.oasysgp",
"fgd": "application/x-director",
"fh4": "image/x-freehand",
"fh5": "image/x-freehand",
"fh7": "image/x-freehand",
"fhc": "image/x-freehand",
"fh": "image/x-freehand",
"fig": "application/x-xfig",
"flac": "audio/x-flac",
"fli": "video/x-fli",
"flo": "application/vnd.micrografx.flo",
"flv": "video/x-flv",
"flw": "application/vnd.kde.kivio",
"flx": "text/vnd.fmi.flexstor",
"fly": "text/vnd.fly",
"fm": "application/vnd.framemaker",
"fnc": "application/vnd.frogans.fnc",
"for": "text/x-fortran",
"fpx": "image/vnd.fpx",
"frame": "application/vnd.framemaker",
"fsc": "application/vnd.fsc.weblaunch",
"fst": "image/vnd.fst",
"ftc": "application/vnd.fluxtime.clip",
"f": "text/x-fortran",
"fti": "application/vnd.anser-web-funds-transfer-initiation",
"fvt": "video/vnd.fvt",
"fxp": "application/vnd.adobe.fxp",
"fxpl": "application/vnd.adobe.fxp",
"fzs": "application/vnd.fuzzysheet",
"g2w": "application/vnd.geoplan",
"g3": "image/g3fax",
"g3w": "application/vnd.geospace",
"gac": "application/vnd.groove-account",
"gam": "application/x-tads",
"gbr": "application/rpki-ghostbusters",
"gca": "application/x-gca-compressed",
"gdl": "model/vnd.gdl",
"geo": "application/vnd.dynageo",
"gex": "application/vnd.geometry-explorer",
"ggb": "application/vnd.geogebra.file",
"ggs": "application/vnd.geogebra.slides",
"ggt": "application/vnd.geogebra.tool",
"ghf": "application/vnd.groove-help",
"gif": "image/gif",
"gim": "application/vnd.groove-identity-message",
"gml": "application/gml+xml",
"gmx": "application/vnd.gmx",
"gnumeric": "application/x-gnumeric",
"gph": "application/vnd.flographit",
"gpx": "application/gpx+xml",
"gqf": "application/vnd.grafeq",
"gqs": "application/vnd.grafeq",
"gram": "application/srgs",
"gramps": "application/x-gramps-xml",
"gre": "application/vnd.geometry-explorer",
"grv": "application/vnd.groove-injector",
"grxml": "application/srgs+xml",
"gsf": "application/x-font-ghostscript",
"gtar": "application/x-gtar",
"gtm": "application/vnd.groove-tool-message",
"gtw": "model/vnd.gtw",
"gv": "text/vnd.graphviz",
"gxf": "application/gxf",
"gxt": "application/vnd.geonext",
"h261": "video/h261",
"h263": "video/h263",
"h264": "video/h264",
"hal": "application/vnd.hal+xml",
"hbci": "application/vnd.hbci",
"hdf": "application/x-hdf",
"hh": "text/x-c",
"hlp": "application/winhlp",
"hpgl": "application/vnd.hp-hpgl",
"hpid": "application/vnd.hp-hpid",
"hps": "application/vnd.hp-hps",
"hqx": "application/mac-binhex40",
"h": "text/x-c",
"htke": "application/vnd.kenameaapp",
"html": "text/html",
"htm": "text/html",
"hvd": "application/vnd.yamaha.hv-dic",
"hvp": "application/vnd.yamaha.hv-voice",
"hvs": "application/vnd.yamaha.hv-script",
"i2g": "application/vnd.intergeo",
"icc": "application/vnd.iccprofile",
"ice": "x-conference/x-cooltalk",
"icm": "application/vnd.iccprofile",
"ico": "image/x-icon",
"ics": "text/calendar",
"ief": "image/ief",
"ifb": "text/calendar",
"ifm": "application/vnd.shana.informed.formdata",
"iges": "model/iges",
"igl": "application/vnd.igloader",
"igm": "application/vnd.insors.igm",
"igs": "model/iges",
"igx": "application/vnd.micrografx.igx",
"iif": "application/vnd.shana.informed.interchange",
"imp": "application/vnd.accpac.simply.imp",
"ims": "application/vnd.ms-ims",
"ink": "application/inkml+xml",
"inkml": "application/inkml+xml",
"install": "application/x-install-instructions",
"in": "text/plain",
"iota": "application/vnd.astraea-software.iota",
"ipfix": "application/ipfix",
"ipk": "application/vnd.shana.informed.package",
"irm": "application/vnd.ibm.rights-management",
"irp": "application/vnd.irepository.package+xml",
"iso": "application/x-iso9660-image",
"itp": "application/vnd.shana.informed.formtemplate",
"ivp": "application/vnd.immervision-ivp",
"ivu": "application/vnd.immervision-ivu",
"jad": "text/vnd.sun.j2me.app-descriptor",
"jam": "application/vnd.jam",
"jar": "application/java-archive",
"java": "text/x-java-source",
"jisp": "application/vnd.jisp",
"jlt": "application/vnd.hp-jlyt",
"jnlp": "application/x-java-jnlp-file",
"joda": "application/vnd.joost.joda-archive",
"jpeg": "image/jpeg",
"jpe": "image/jpeg",
"jpg": "image/jpeg",
"jpgm": "video/jpm",
"jpgv": "video/jpeg",
"jpm": "video/jpm",
"json": "application/json",
"jsonml": "application/jsonml+json",
"js": "text/javascript",
"kar": "audio/midi",
"karbon": "application/vnd.kde.karbon",
"kfo": "application/vnd.kde.kformula",
"kia": "application/vnd.kidspiration",
"kml": "application/vnd.google-earth.kml+xml",
"kmz": "application/vnd.google-earth.kmz",
"kne": "application/vnd.kinar",
"knp": "application/vnd.kinar",
"kon": "application/vnd.kde.kontour",
"kpr": "application/vnd.kde.kpresenter",
"kpt": "application/vnd.kde.kpresenter",
"kpxx": "application/vnd.ds-keypoint",
"ksp": "application/vnd.kde.kspread",
"ktr": "application/vnd.kahootz",
"ktx": "image/ktx",
"ktz": "application/vnd.kahootz",
"kwd": "application/vnd.kde.kword",
"kwt": "application/vnd.kde.kword",
"lasxml": "application/vnd.las.las+xml",
"latex": "application/x-latex",
"lbd": "application/vnd.llamagraphics.life-balance.desktop",
"lbe": "application/vnd.llamagraphics.life-balance.exchange+xml",
"les": "application/vnd.hhe.lesson-player",
"lha": "application/x-lzh-compressed",
"link66": "application/vnd.route66.link66+xml",
"list3820": "application/vnd.ibm.modcap",
"listafp": "application/vnd.ibm.modcap",
"list": "text/plain",
"lnk": "application/x-ms-shortcut",
"log": "text/plain",
"lostxml": "application/lost+xml",
"lrf": "application/octet-stream",
"lrm": "application/vnd.ms-lrm",
"ltf": "application/vnd.frogans.ltf",
"lvp": "audio/vnd.lucent.voice",
"lwp": "application/vnd.lotus-wordpro",
"lzh": "application/x-lzh-compressed",
"m13": "application/x-msmediaview",
"m14": "application/x-msmediaview",
"m1v": "video/mpeg",
"m21": "application/mp21",
"m2a": "audio/mpeg",
"m2v": "video/mpeg",
"m3a": "audio/mpeg",
"m3u8": "application/vnd.apple.mpegurl",
"m3u": "audio/x-mpegurl",
"m4a": "audio/mp4",
"m4u": "video/vnd.mpegurl",
"m4v": "video/x-m4v",
"ma": "application/mathematica",
"mads": "application/mads+xml",
"mag": "application/vnd.ecowin.chart",
"maker": "application/vnd.framemaker",
"man": "text/troff",
"mar": "application/octet-stream",
"mathml": "application/mathml+xml",
"mb": "application/mathematica",
"mbk": "application/vnd.mobius.mbk",
"mbox": "application/mbox",
"mc1": "application/vnd.medcalcdata",
"mcd": "application/vnd.mcd",
"mcurl": "text/vnd.curl.mcurl",
"mdb": "application/x-msaccess",
"mdi": "image/vnd.ms-modi",
"mesh": "model/mesh",
"meta4": "application/metalink4+xml",
"metalink": "application/metalink+xml",
"me": "text/troff",
"mets": "application/mets+xml",
"mfm": "application/vnd.mfmp",
"mft": "application/rpki-manifest",
"mgp": "application/vnd.osgeo.mapguide.package",
"mgz": "application/vnd.proteus.magazine",
"mid": "audio/midi",
"midi": "audio/midi",
"mie": "application/x-mie",
"mif": "application/vnd.mif",
"mime": "message/rfc822",
"mj2": "video/mj2",
"mjp2": "video/mj2",
"mjs": "text/javascript",
"mk3d": "video/x-matroska",
"mka": "audio/x-matroska",
"mks": "video/x-matroska",
"mkv": "video/x-matroska",
"mlp": "application/vnd.dolby.mlp",
"mmd": "application/vnd.chipnuts.karaoke-mmd",
"mmf": "application/vnd.smaf",
"mmr": "image/vnd.fujixerox.edmics-mmr",
"mng": "video/x-mng",
"mny": "application/x-msmoney",
"mobi": "application/x-mobipocket-ebook",
"mods": "application/mods+xml",
"movie": "video/x-sgi-movie",
"mov": "video/quicktime",
"mp21": "application/mp21",
"mp2a": "audio/mpeg",
"mp2": "audio/mpeg",
"mp3": "audio/mpeg",
"mp4a": "audio/mp4",
"mp4s": "application/mp4",
"mp4": "video/mp4",
"mp4v": "video/mp4",
"mpc": "application/vnd.mophun.certificate",
"mpeg": "video/mpeg",
"mpe": "video/mpeg",
"mpg4": "video/mp4",
"mpga": "audio/mpeg",
"mpg": "video/mpeg",
"mpkg": "application/vnd.apple.installer+xml",
"mpm": "application/vnd.blueice.multipass",
"mpn": "application/vnd.mophun.application",
"mpp": "application/vnd.ms-project",
"mpt": "application/vnd.ms-project",
"mpy": "application/vnd.ibm.minipay",
"mqy": "application/vnd.mobius.mqy",
"mrc": "application/marc",
"mrcx": "application/marcxml+xml",
"mscml": "application/mediaservercontrol+xml",
"mseed": "application/vnd.fdsn.mseed",
"mseq": "application/vnd.mseq",
"msf": "application/vnd.epson.msf",
"msh": "model/mesh",
"msi": "application/x-msdownload",
"msl": "application/vnd.mobius.msl",
"ms": "text/troff",
"msty": "application/vnd.muvee.style",
"mts": "model/vnd.mts",
"mus": "application/vnd.musician",
"musicxml": "application/vnd.recordare.musicxml+xml",
"mvb": "application/x-msmediaview",
"mwf": "application/vnd.mfer",
"mxf": "application/mxf",
"mxl": "application/vnd.recordare.musicxml",
"mxml": "application/xv+xml",
"mxs": "application/vnd.triscape.mxs",
"mxu": "video/vnd.mpegurl",
"n3": "text/n3",
"nb": "application/mathematica",
"nbp": "application/vnd.wolfram.player",
"nc": "application/x-netcdf",
"ncx": "application/x-dtbncx+xml",
"nfo": "text/x-nfo",
"n-gage": "application/vnd.nokia.n-gage.symbian.install",
"ngdat": "application/vnd.nokia.n-gage.data",
"nitf": "application/vnd.nitf",
"nlu": "application/vnd.neurolanguage.nlu",
"nml": "application/vnd.enliven",
"nnd": "application/vnd.noblenet-directory",
"nns": "application/vnd.noblenet-sealer",
"nnw": "application/vnd.noblenet-web",
"npx": "image/vnd.net-fpx",
"nsc": "application/x-conference",
"nsf": "application/vnd.lotus-notes",
"ntf": "application/vnd.nitf",
"nzb": "application/x-nzb",
"oa2": "application/vnd.fujitsu.oasys2",
"oa3": "application/vnd.fujitsu.oasys3",
"oas": "application/vnd.fujitsu.oasys",
"obd": "application/x-msbinder",
"obj": "application/x-tgif",
"oda": "application/oda",
"odb": "application/vnd.oasis.opendocument.database",
"odc": "application/vnd.oasis.opendocument.chart",
"odf": "application/vnd.oasis.opendocument.formula",
"odft": "application/vnd.oasis.opendocument.formula-template",
"odg": "application/vnd.oasis.opendocument.graphics",
"odi": "application/vnd.oasis.opendocument.image",
"odm": "application/vnd.oasis.opendocument.text-master",
"odp": "application/vnd.oasis.opendocument.presentation",
"ods": "application/vnd.oasis.opendocument.spreadsheet",
"odt": "application/vnd.oasis.opendocument.text",
"oga": "audio/ogg",
"ogg": "audio/ogg",
"ogv": "video/ogg",
"ogx": "application/ogg",
"omdoc": "application/omdoc+xml",
"onepkg": "application/onenote",
"onetmp": "application/onenote",
"onetoc2": "application/onenote",
"onetoc": "application/onenote",
"opf": "application/oebps-package+xml",
"opml": "text/x-opml",
"oprc": "application/vnd.palm",
"opus": "audio/ogg",
"org": "application/vnd.lotus-organizer",
"osf": "application/vnd.yamaha.openscoreformat",
"osfpvg": "application/vnd.yamaha.openscoreformat.osfpvg+xml",
"otc": "application/vnd.oasis.opendocument.chart-template",
"otf": "font/otf",
"otg": "application/vnd.oasis.opendocument.graphics-template",
"oth": "application/vnd.oasis.opendocument.text-web",
"oti": "application/vnd.oasis.opendocument.image-template",
"otp": "application/vnd.oasis.opendocument.presentation-template",
"ots": "application/vnd.oasis.opendocument.spreadsheet-template",
"ott": "application/vnd.oasis.opendocument.text-template",
"oxps": "application/oxps",
"oxt": "application/vnd.openofficeorg.extension",
"p10": "application/pkcs10",
"p12": "application/x-pkcs12",
"p7b": "application/x-pkcs7-certificates",
"p7c": "application/pkcs7-mime",
"p7m": "application/pkcs7-mime",
"p7r": "application/x-pkcs7-certreqresp",
"p7s": "application/pkcs7-signature",
"p8": "application/pkcs8",
"pas": "text/x-pascal",
"paw": "application/vnd.pawaafile",
"pbd": "application/vnd.powerbuilder6",
"pbm": "image/x-portable-bitmap",
"pcap": "application/vnd.tcpdump.pcap",
"pcf": "application/x-font-pcf",
"pcl": "application/vnd.hp-pcl",
"pclxl": "application/vnd.hp-pclxl",
"pct": "image/x-pict",
"pcurl": "application/vnd.curl.pcurl",
"pcx": "image/x-pcx",
"pdb": "application/vnd.palm",
"pdf": "application/pdf",
"pfa": "application/x-font-type1",
"pfb": "application/x-font-type1",
"pfm": "application/x-font-type1",
"pfr": "application/font-tdpfr",
"pfx": "application/x-pkcs12",
"pgm": "image/x-portable-graymap",
"pgn": "application/x-chess-pgn",
"pgp": "application/pgp-encrypted",
"pic": "image/x-pict",
"pkg": "application/octet-stream",
"pki": "application/pkixcmp",
"pkipath": "application/pkix-pkipath",
"plb": "application/vnd.3gpp.pic-bw-large",
"plc": "application/vnd.mobius.plc",
"plf": "application/vnd.pocketlearn",
"pls": "application/pls+xml",
"pml": "application/vnd.ctc-posml",
"png": "image/png",
"pnm": "image/x-portable-anymap",
"portpkg": "application/vnd.macports.portpkg",
"pot": "application/vnd.ms-powerpoint",
"potm": "application/vnd.ms-powerpoint.template.macroenabled.12",
"potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
"ppam": "application/vnd.ms-powerpoint.addin.macroenabled.12",
"ppd": "application/vnd.cups-ppd",
"ppm": "image/x-portable-pixmap",
"pps": "application/vnd.ms-powerpoint",
"ppsm": "application/vnd.ms-powerpoint.slideshow.macroenabled.12",
"ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
"ppt": "application/vnd.ms-powerpoint",
"pptm": "application/vnd.ms-powerpoint.presentation.macroenabled.12",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
"pqa": "application/vnd.palm",
"prc": "application/x-mobipocket-ebook",
"pre": "application/vnd.lotus-freelance",
"prf": "application/pics-rules",
"ps": "application/postscript",
"psb": "application/vnd.3gpp.pic-bw-small",
"psd": "image/vnd.adobe.photoshop",
"psf": "application/x-font-linux-psf",
"pskcxml": "application/pskc+xml",
"p": "text/x-pascal",
"ptid": "application/vnd.pvi.ptid1",
"pub": "application/x-mspublisher",
"pvb": "application/vnd.3gpp.pic-bw-var",
"pwn": "application/vnd.3m.post-it-notes",
"pya": "audio/vnd.ms-playready.media.pya",
"pyv": "video/vnd.ms-playready.media.pyv",
"qam": "application/vnd.epson.quickanime",
"qbo": "application/vnd.intu.qbo",
"qfx": "application/vnd.intu.qfx",
"qps": "application/vnd.publishare-delta-tree",
"qt": "video/quicktime",
"qwd": "application/vnd.quark.quarkxpress",
"qwt": "application/vnd.quark.quarkxpress",
"qxb": "application/vnd.quark.quarkxpress",
"qxd": "application/vnd.quark.quarkxpress",
"qxl": "application/vnd.quark.quarkxpress",
"qxt": "application/vnd.quark.quarkxpress",
"ra": "audio/x-pn-realaudio",
"ram": "audio/x-pn-realaudio",
"rar": "application/x-rar-compressed",
"ras": "image/x-cmu-raster",
"rcprofile": "application/vnd.ipunplugged.rcprofile",
"rdf": "application/rdf+xml",
"rdz": "application/vnd.data-vision.rdz",
"rep": "application/vnd.businessobjects",
"res": "application/x-dtbresource+xml",
"rgb": "image/x-rgb",
"rif": "application/reginfo+xml",
"rip": "audio/vnd.rip",
"ris": "application/x-research-info-systems",
"rl": "application/resource-lists+xml",
"rlc": "image/vnd.fujixerox.edmics-rlc",
"rld": "application/resource-lists-diff+xml",
"rm": "application/vnd.rn-realmedia",
"rmi": "audio/midi",
"rmp": "audio/x-pn-realaudio-plugin",
"rms": "application/vnd.jcp.javame.midlet-rms",
"rmvb": "application/vnd.rn-realmedia-vbr",
"rnc": "application/relax-ng-compact-syntax",
"roa": "application/rpki-roa",
"roff": "text/troff",
"rp9": "application/vnd.cloanto.rp9",
"rpss": "application/vnd.nokia.radio-presets",
"rpst": "application/vnd.nokia.radio-preset",
"rq": "application/sparql-query",
"rs": "application/rls-services+xml",
"rsd": "application/rsd+xml",
"rss": "application/rss+xml",
"rtf": "application/rtf",
"rtx": "text/richtext",
"s3m": "audio/s3m",
"saf": "application/vnd.yamaha.smaf-audio",
"sbml": "application/sbml+xml",
"sc": "application/vnd.ibm.secure-container",
"scd": "application/x-msschedule",
"scm": "application/vnd.lotus-screencam",
"scq": "application/scvp-cv-request",
"scs": "application/scvp-cv-response",
"scurl": "text/vnd.curl.scurl",
"sda": "application/vnd.stardivision.draw",
"sdc": "application/vnd.stardivision.calc",
"sdd": "application/vnd.stardivision.impress",
"sdkd": "application/vnd.solent.sdkm+xml",
"sdkm": "application/vnd.solent.sdkm+xml",
"sdp": "application/sdp",
"sdw": "application/vnd.stardivision.writer",
"see": "application/vnd.seemail",
"seed": "application/vnd.fdsn.seed",
"sema": "application/vnd.sema",
"semd": "application/vnd.semd",
"semf": "application/vnd.semf",
"ser": "application/java-serialized-object",
"setpay": "application/set-payment-initiation",
"setreg": "application/set-registration-initiation",
"sfd-hdstx": "application/vnd.hydrostatix.sof-data",
"sfs": "application/vnd.spotfire.sfs",
"sfv": "text/x-sfv",
"sgi": "image/sgi",
"sgl": "application/vnd.stardivision.writer-global",
"sgml": "text/sgml",
"sgm": "text/sgml",
"sh": "application/x-sh",
"shar": "application/x-shar",
"shf": "application/shf+xml",
"sid": "image/x-mrsid-image",
"sig": "application/pgp-signature",
"sil": "audio/silk",
"silo": "model/mesh",
"sis": "application/vnd.symbian.install",
"sisx": "application/vnd.symbian.install",
"sit": "application/x-stuffit",
"sitx": "application/x-stuffitx",
"skd": "application/vnd.koan",
"skm": "application/vnd.koan",
"skp": "application/vnd.koan",
"skt": "application/vnd.koan",
"sldm": "application/vnd.ms-powerpoint.slide.macroenabled.12",
"sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
"slt": "application/vnd.epson.salt",
"sm": "application/vnd.stepmania.stepchart",
"smf": "application/vnd.stardivision.math",
"smi": "application/smil+xml",
"smil": "application/smil+xml",
"smv": "video/x-smv",
"smzip": "application/vnd.stepmania.package",
"snd": "audio/basic",
"snf": "application/x-font-snf",
"so": "application/octet-stream",
"spc": "application/x-pkcs7-certificates",
"spf": "application/vnd.yamaha.smaf-phrase",
"spl": "application/x-futuresplash",
"spot": "text/vnd.in3d.spot",
"spp": "application/scvp-vp-response",
"spq": "application/scvp-vp-request",
"spx": "audio/ogg",
"sql": "application/x-sql",
"src": "application/x-wais-source",
"srt": "application/x-subrip",
"sru": "application/sru+xml",
"srx": "application/sparql-results+xml",
"ssdl": "application/ssdl+xml",
"sse": "application/vnd.kodak-descriptor",
"ssf": "application/vnd.epson.ssf",
"ssml": "application/ssml+xml",
"st": "application/vnd.sailingtracker.track",
"stc": "application/vnd.sun.xml.calc.template",
"std": "application/vnd.sun.xml.draw.template",
"s": "text/x-asm",
"stf": "application/vnd.wt.stf",
"sti": "application/vnd.sun.xml.impress.template",
"stk": "application/hyperstudio",
"stl": "application/vnd.ms-pki.stl",
"str": "application/vnd.pg.format",
"stw": "application/vnd.sun.xml.writer.template",
"sub": "text/vnd.dvb.subtitle",
"sus": "application/vnd.sus-calendar",
"susp": "application/vnd.sus-calendar",
"sv4cpio": "application/x-sv4cpio",
"sv4crc": "application/x-sv4crc",
"svc": "application/vnd.dvb.service",
"svd": "application/vnd.svd",
"svg": "image/svg+xml",
"svgz": "image/svg+xml",
"swa": "application/x-director",
"swf": "application/x-shockwave-flash",
"swi": "application/vnd.aristanetworks.swi",
"sxc": "application/vnd.sun.xml.calc",
"sxd": "application/vnd.sun.xml.draw",
"sxg": "application/vnd.sun.xml.writer.global",
"sxi": "application/vnd.sun.xml.impress",
"sxm": "application/vnd.sun.xml.math",
"sxw": "application/vnd.sun.xml.writer",
"t3": "application/x-t3vm-image",
"taglet": "application/vnd.mynfc",
"tao": "application/vnd.tao.intent-module-archive",
"tar": "application/x-tar",
"tcap": "application/vnd.3gpp2.tcap",
"tcl": "application/x-tcl",
"teacher": "application/vnd.smart.teacher",
"tei": "application/tei+xml",
"teicorpus": "application/tei+xml",
"tex": "application/x-tex",
"texi": "application/x-texinfo",
"texinfo": "application/x-texinfo",
"text": "text/plain",
"tfi": "application/thraud+xml",
"tfm": "application/x-tex-tfm",
"tga": "image/x-tga",
"thmx": "application/vnd.ms-officetheme",
"tiff": "image/tiff",
"tif": "image/tiff",
"tmo": "application/vnd.tmobile-livetv",
"torrent": "application/x-bittorrent",
"tpl": "application/vnd.groove-tool-template",
"tpt": "application/vnd.trid.tpt",
"tra": "application/vnd.trueapp",
"trm": "application/x-msterminal",
"tr": "text/troff",
"tsd": "application/timestamped-data",
"tsv": "text/tab-separated-values",
"ttc": "font/collection",
"t": "text/troff",
"ttf": "font/ttf",
"ttl": "text/turtle",
"twd": "application/vnd.simtech-mindmapper",
"twds": "application/vnd.simtech-mindmapper",
"txd": "application/vnd.genomatix.tuxedo",
"txf": "application/vnd.mobius.txf",
"txt": "text/plain",
"u32": "application/x-authorware-bin",
"udeb": "application/x-debian-package",
"ufd": "application/vnd.ufdl",
"ufdl": "application/vnd.ufdl",
"ulx": "application/x-glulx",
"umj": "application/vnd.umajin",
"unityweb": "application/vnd.unity",
"uoml": "application/vnd.uoml+xml",
"uris": "text/uri-list",
"uri": "text/uri-list",
"urls": "text/uri-list",
"ustar": "application/x-ustar",
"utz": "application/vnd.uiq.theme",
"uu": "text/x-uuencode",
"uva": "audio/vnd.dece.audio",
"uvd": "application/vnd.dece.data",
"uvf": "application/vnd.dece.data",
"uvg": "image/vnd.dece.graphic",
"uvh": "video/vnd.dece.hd",
"uvi": "image/vnd.dece.graphic",
"uvm": "video/vnd.dece.mobile",
"uvp": "video/vnd.dece.pd",
"uvs": "video/vnd.dece.sd",
"uvt": "application/vnd.dece.ttml+xml",
"uvu": "video/vnd.uvvu.mp4",
"uvva": "audio/vnd.dece.audio",
"uvvd": "application/vnd.dece.data",
"uvvf": "application/vnd.dece.data",
"uvvg": "image/vnd.dece.graphic",
"uvvh": "video/vnd.dece.hd",
"uvvi": "image/vnd.dece.graphic",
"uvvm": "video/vnd.dece.mobile",
"uvvp": "video/vnd.dece.pd",
"uvvs": "video/vnd.dece.sd",
"uvvt": "application/vnd.dece.ttml+xml",
"uvvu": "video/vnd.uvvu.mp4",
"uvv": "video/vnd.dece.video",
"uvvv": "video/vnd.dece.video",
"uvvx": "application/vnd.dece.unspecified",
"uvvz": "application/vnd.dece.zip",
"uvx": "application/vnd.dece.unspecified",
"uvz": "application/vnd.dece.zip",
"vcard": "text/vcard",
"vcd": "application/x-cdlink",
"vcf": "text/x-vcard",
"vcg": "application/vnd.groove-vcard",
"vcs": "text/x-vcalendar",
"vcx": "application/vnd.vcx",
"vis": "application/vnd.visionary",
"viv": "video/vnd.vivo",
"vob": "video/x-ms-vob",
"vor": "application/vnd.stardivision.writer",
"vox": "application/x-authorware-bin",
"vrml": "model/vrml",
"vsd": "application/vnd.visio",
"vsf": "application/vnd.vsf",
"vss": "application/vnd.visio",
"vst": "application/vnd.visio",
"vsw": "application/vnd.visio",
"vtu": "model/vnd.vtu",
"vxml": "application/voicexml+xml",
"w3d": "application/x-director",
"wad": "application/x-doom",
"wasm": "application/wasm",
"wav": "audio/x-wav",
"wax": "audio/x-ms-wax",
"wbmp": "image/vnd.wap.wbmp",
"wbs": "application/vnd.criticaltools.wbs+xml",
"wbxml": "application/vnd.wap.wbxml",
"wcm": "application/vnd.ms-works",
"wdb": "application/vnd.ms-works",
"wdp": "image/vnd.ms-photo",
"weba": "audio/webm",
"webm": "video/webm",
"webp": "image/webp",
"wg": "application/vnd.pmi.widget",
"wgt": "application/widget",
"wks": "application/vnd.ms-works",
"wma": "audio/x-ms-wma",
"wmd": "application/x-ms-wmd",
"wmf": "application/x-msmetafile",
"wmlc": "application/vnd.wap.wmlc",
"wmlsc": "application/vnd.wap.wmlscriptc",
"wmls": "text/vnd.wap.wmlscript",
"wml": "text/vnd.wap.wml",
"wm": "video/x-ms-wm",
"wmv": "video/x-ms-wmv",
"wmx": "video/x-ms-wmx",
"wmz": "application/x-ms-wmz",
"woff2": "font/woff2",
"woff": "font/woff",
"wpd": "application/vnd.wordperfect",
"wpl": "application/vnd.ms-wpl",
"wps": "application/vnd.ms-works",
"wqd": "application/vnd.wqd",
"wri": "application/x-mswrite",
"wrl": "model/vrml",
"wsdl": "application/wsdl+xml",
"wspolicy": "application/wspolicy+xml",
"wtb": "application/vnd.webturbo",
"wvx": "video/x-ms-wvx",
"x32": "application/x-authorware-bin",
"x3db": "model/x3d+binary",
"x3dbz": "model/x3d+binary",
"x3d": "model/x3d+xml",
"x3dv": "model/x3d+vrml",
"x3dvz": "model/x3d+vrml",
"x3dz": "model/x3d+xml",
"xaml": "application/xaml+xml",
"xap": "application/x-silverlight-app",
"xar": "application/vnd.xara",
"xbap": "application/x-ms-xbap",
"xbd": "application/vnd.fujixerox.docuworks.binder",
"xbm": "image/x-xbitmap",
"xdf": "application/xcap-diff+xml",
"xdm": "application/vnd.syncml.dm+xml",
"xdp": "application/vnd.adobe.xdp+xml",
"xdssc": "application/dssc+xml",
"xdw": "application/vnd.fujixerox.docuworks",
"xenc": "application/xenc+xml",
"xer": "application/patch-ops-error+xml",
"xfdf": "application/vnd.adobe.xfdf",
"xfdl": "application/vnd.xfdl",
"xht": "application/xhtml+xml",
"xhtml": "application/xhtml+xml",
"xhvml": "application/xv+xml",
"xif": "image/vnd.xiff",
"xla": "application/vnd.ms-excel",
"xlam": "application/vnd.ms-excel.addin.macroenabled.12",
"xlc": "application/vnd.ms-excel",
"xlf": "application/x-xliff+xml",
"xlm": "application/vnd.ms-excel",
"xls": "application/vnd.ms-excel",
"xlsb": "application/vnd.ms-excel.sheet.binary.macroenabled.12",
"xlsm": "application/vnd.ms-excel.sheet.macroenabled.12",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xlt": "application/vnd.ms-excel",
"xltm": "application/vnd.ms-excel.template.macroenabled.12",
"xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
"xlw": "application/vnd.ms-excel",
"xm": "audio/xm",
"xml": "application/xml",
"xo": "application/vnd.olpc-sugar",
"xop": "application/xop+xml",
"xpi": "application/x-xpinstall",
"xpl": "application/xproc+xml",
"xpm": "image/x-xpixmap",
"xpr": "application/vnd.is-xpr",
"xps": "application/vnd.ms-xpsdocument",
"xpw": "application/vnd.intercon.formnet",
"xpx": "application/vnd.intercon.formnet",
"xsl": "application/xml",
"xslt": "application/xslt+xml",
"xsm": "application/vnd.syncml+xml",
"xspf": "application/xspf+xml",
"xul": "application/vnd.mozilla.xul+xml",
"xvm": "application/xv+xml",
"xvml": "application/xv+xml",
"xwd": "image/x-xwindowdump",
"xyz": "chemical/x-xyz",
"xz": "application/x-xz",
"yang": "application/yang",
"yin": "application/yin+xml",
"z1": "application/x-zmachine",
"z2": "application/x-zmachine",
"z3": "application/x-zmachine",
"z4": "application/x-zmachine",
"z5": "application/x-zmachine",
"z6": "application/x-zmachine",
"z7": "application/x-zmachine",
"z8": "application/x-zmachine",
"zaz": "application/vnd.zzazz.deck+xml",
"zip": "application/zip",
"zir": "application/vnd.zul",
"zirz": "application/vnd.zul",
"zmm": "application/vnd.handheld-entertainment+xml",
}
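# A minimal lookup sketch over the map above; the file name and the
# "application/octet-stream" fallback are illustrative assumptions.
#
#   from pathlib import Path
#   ext = Path("report.pdf").suffix.lstrip(".").lower()
#   mime = EXT_MIME_MAP.get(ext, "application/octet-stream")  # "application/pdf"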
| 38,700 | Python | .py | 994 | 33.960765 | 88 | 0.644079 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,660 | base.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/base.py | import datetime
import json
from abc import ABC, abstractmethod
from json import JSONDecodeError, loads
from pathlib import Path
from typing import Any, Union
from unstract.sdk.constants import (
Command,
LogLevel,
MetadataKey,
PropKey,
ToolEnv,
ToolExecKey,
)
from unstract.sdk.tool.mixin import ToolConfigHelper
from unstract.sdk.tool.parser import ToolArgsParser
from unstract.sdk.tool.stream import StreamMixin
from unstract.sdk.utils import ToolUtils
class BaseTool(ABC, StreamMixin):
"""Abstract class for Unstract tools."""
def __init__(self, log_level: LogLevel = LogLevel.INFO) -> None:
"""Creates an UnstractTool.
Args:
log_level (str): Log level for the tool
Can be one of INFO, DEBUG, WARN, ERROR, FATAL.
"""
self.start_time = datetime.datetime.now()
super().__init__(log_level=log_level)
self.properties = ToolConfigHelper.properties()
self.spec = ToolConfigHelper.spec()
self.variables = ToolConfigHelper.variables()
self.workflow_id = ""
self.execution_id = ""
self.org_id = ""
self._exec_metadata = {}
@classmethod
def from_tool_args(cls, args: list[str]) -> "BaseTool":
"""Builder method to create a tool from args passed to a tool.
        Refer to the tool's README to know more about the possible args.
Args:
args (List[str]): Arguments passed to a tool
Returns:
            BaseTool: Tool instance created from the passed args
"""
parsed_args = ToolArgsParser.parse_args(args)
tool = cls(log_level=parsed_args.log_level)
if parsed_args.command not in Command.static_commands():
tool._exec_metadata = tool._get_exec_metadata()
tool.workflow_id = tool._exec_metadata.get(MetadataKey.WORKFLOW_ID)
tool.execution_id = tool._exec_metadata.get(MetadataKey.EXECUTION_ID)
tool.org_id = tool._exec_metadata.get(MetadataKey.ORG_ID)
return tool
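    # A hedged usage sketch: `MyTool` is a hypothetical concrete subclass of
    # BaseTool, and METADATA.json is assumed to exist under TOOL_DATA_DIR so
    # that the workflow/execution IDs can be resolved.
    #
    #   tool = MyTool.from_tool_args(["--command", "RUN", "--log-level", "DEBUG"])
    #   print(tool.workflow_id, tool.execution_id)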
def elapsed_time(self) -> float:
"""Returns the elapsed time since the tool was created."""
return (datetime.datetime.now() - self.start_time).total_seconds()
def handle_static_command(self, command: str) -> None:
"""Handles a static command.
Used to handle commands that do not require any processing. Currently,
the only supported static commands are
SPEC, PROPERTIES, VARIABLES and ICON.
This is used by the Unstract SDK to handle static commands.
It is not intended to be used by the tool. The tool
stub will automatically handle static commands.
Args:
command (str): The static command.
Returns:
None
"""
if command == Command.SPEC:
self.stream_spec(ToolUtils.json_to_str(self.spec))
elif command == Command.PROPERTIES:
self.stream_properties(ToolUtils.json_to_str(self.properties))
elif command == Command.ICON:
self.stream_icon(ToolConfigHelper.icon())
elif command == Command.VARIABLES:
self.stream_variables(ToolUtils.json_to_str(self.variables))
else:
raise ValueError(f"Unknown command {command}")
def _get_data_dir(self) -> Path:
"""Gets the TOOL_DATA_DIR that houses the input and output files of
tool execution.
Returns:
Path: Path object of the TOOL_DATA_DIR that's configured.
"""
data_dir = self.get_env_or_die(ToolEnv.DATA_DIR)
base_path = Path(data_dir)
if not base_path.exists():
self.stream_error_and_exit(f"{data_dir} does not exist")
if not base_path.is_dir():
self.stream_error_and_exit(f"{data_dir} is not a directory")
return base_path.absolute()
def _get_file_from_data_dir(self, file_to_get: str, raise_err: bool = False) -> str:
base_path: Path = self._get_data_dir()
file_path = base_path / file_to_get
if raise_err and not file_path.exists():
self.stream_error_and_exit(f"{file_to_get} is missing in TOOL_DATA_DIR")
return str(file_path)
def get_source_file(self) -> str:
"""Gets the absolute path to the workflow execution's input file
(SOURCE).
Returns:
str: Absolute path to the source file
"""
return self._get_file_from_data_dir(ToolExecKey.SOURCE, raise_err=True)
def get_input_file(self) -> str:
"""Gets the absolute path to the input file that's meant for the tool
being run (INFILE).
Returns:
str: Absolute path to the input file
"""
return self._get_file_from_data_dir(ToolExecKey.INFILE, raise_err=True)
def get_output_dir(self) -> str:
"""Get the absolute path to the output folder where the tool needs to
place its output file. This is where the tool writes its output files
that need to be copied into the destination (COPY_TO_FOLDER path).
Returns:
str: Absolute path to the output directory.
"""
base_path: Path = self._get_data_dir()
return str(base_path / ToolExecKey.OUTPUT_DIR)
@property
def get_exec_metadata(self) -> dict[str, Any]:
"""Getter for `exec_metadata` of the tool.
Returns:
dict[str, Any]: Contents of METADATA.json
"""
return self._exec_metadata
def _get_exec_metadata(self) -> dict[str, Any]:
"""Retrieve the contents from METADATA.json present in the data
directory. This file contains metadata for the tool execution.
Returns:
dict[str, Any]: Contents of METADATA.json
"""
base_path: Path = self._get_data_dir()
metadata_path = base_path / ToolExecKey.METADATA_FILE
metadata_json = {}
try:
with open(metadata_path, encoding="utf-8") as f:
metadata_json = loads(f.read())
except JSONDecodeError as e:
self.stream_error_and_exit(f"JSON decode error for {metadata_path}: {e}")
except FileNotFoundError:
self.stream_error_and_exit(f"Metadata file not found at {metadata_path}")
except OSError as e:
self.stream_error_and_exit(f"OS Error while opening {metadata_path}: {e}")
return metadata_json
def _write_exec_metadata(self, metadata: dict[str, Any]) -> None:
"""Helps write the `METADATA.JSON` file.
Args:
metadata (dict[str, Any]): Metadata to write
"""
base_path: Path = self._get_data_dir()
metadata_path = base_path / ToolExecKey.METADATA_FILE
with metadata_path.open("w", encoding="utf-8") as f:
f.write(ToolUtils.json_to_str(metadata))
def _update_exec_metadata(self) -> None:
"""Updates the execution metadata after a tool executes.
        Currently overwrites most of the keys with the latest tool
executed.
"""
tool_metadata = {
MetadataKey.TOOL_NAME: self.properties[PropKey.FUNCTION_NAME],
MetadataKey.ELAPSED_TIME: self.elapsed_time(),
MetadataKey.OUTPUT_TYPE: self.properties[PropKey.RESULT][PropKey.TYPE],
}
if MetadataKey.TOTAL_ELA_TIME not in self._exec_metadata:
self._exec_metadata[MetadataKey.TOTAL_ELA_TIME] = self.elapsed_time()
else:
self._exec_metadata[MetadataKey.TOTAL_ELA_TIME] += self.elapsed_time()
if MetadataKey.TOOL_META not in self._exec_metadata:
self._exec_metadata[MetadataKey.TOOL_META] = [tool_metadata]
else:
self._exec_metadata[MetadataKey.TOOL_META].append(tool_metadata)
self._write_exec_metadata(metadata=self._exec_metadata)
def update_exec_metadata(self, metadata: dict[str, Any]) -> None:
"""Helps update the execution metadata with the provided metadata
dictionary.
This method iterates over the key-value pairs in the input metadata dictionary
and updates the internal `_exec_metadata` dictionary of the tool instance
accordingly. It then writes the updated metadata to the `METADATA.json`
file in the tool's data directory.
Args:
metadata (dict[str, Any]): A dictionary containing the metadata
key-value pairs to update in the execution metadata.
Returns:
None
"""
for key, value in metadata.items():
self._exec_metadata[key] = value
self._write_exec_metadata(metadata=self._exec_metadata)
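    # A small sketch of recording custom metadata from within a tool's run();
    # the key "processed_pages" is an illustrative assumption.
    #
    #   self.update_exec_metadata({"processed_pages": 12})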
def write_tool_result(self, data: Union[str, dict[str, Any]]) -> None:
"""Helps write contents of the tool result into TOOL_DATA_DIR.
Args:
data (Union[str, dict[str, Any]]): Data to be written
"""
output_type = self.properties[PropKey.RESULT][PropKey.TYPE]
        if output_type == PropKey.OutputType.JSON and not isinstance(data, dict):
# TODO: Validate JSON type output with output schema as well
self.stream_error_and_exit(
f"Expected result to have type {PropKey.OutputType.JSON} "
f"but {type(data)} is passed"
)
# TODO: Review if below is necessary
result = {
"workflow_id": self.workflow_id,
"elapsed_time": self.elapsed_time(),
"output": data,
}
self.stream_result(result)
self._update_exec_metadata()
json_data = json.dumps(data)
# INFILE is overwritten for next tool to run
input_file_path: Path = Path(self.get_input_file())
with input_file_path.open("w", encoding="utf-8") as f:
f.write(json_data)
def validate(self, input_file: str, settings: dict[str, Any]) -> None:
"""Override to implement custom validation for the tool.
Args:
input_file (str): Path to the file that will be used for execution.
settings (dict[str, Any]): Settings configured for the tool.
Defaults initialized through the tool's SPEC.
"""
pass
@abstractmethod
def run(
self,
settings: dict[str, Any],
input_file: str,
output_dir: str,
) -> None:
"""Implements RUN command for the tool.
Args:
settings (dict[str, Any]): Settings for the tool
input_file (str): Path to the input file
output_dir (str): Path to write the tool output to which gets copied
into the destination
"""
pass
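# A minimal, hedged sketch of a concrete tool built on this base class. The
# name `EchoTool` and the settings key "uppercase" are illustrative; a real
# tool would declare them in its SPEC and PROPERTIES files.
#
#   class EchoTool(BaseTool):
#       def run(self, settings, input_file, output_dir):
#           with open(input_file, encoding="utf-8") as f:
#               text = f.read()
#           if settings.get("uppercase"):
#               text = text.upper()
#           self.write_tool_result({"text": text})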
| 10,668 | Python | .py | 238 | 35.445378 | 88 | 0.630393 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,661 | parser.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/parser.py | import argparse
from typing import Optional
from dotenv import find_dotenv, load_dotenv
from unstract.sdk.constants import LogLevel
class ToolArgsParser:
"""Class to help with parsing arguments to a tool."""
@staticmethod
def parse_args(args_to_parse: list[str]) -> argparse.Namespace:
"""Helps parse arguments to a tool.
Args:
args_to_parse (List[str]): Command line arguments received by a tool
Returns:
argparse.Namespace: Parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--command", type=str, help="Command to execute", required=True
)
parser.add_argument(
"--settings", type=str, help="Settings to be used", required=False
)
parser.add_argument(
"--log-level",
type=LogLevel,
help="Log level",
required=False,
default=LogLevel.ERROR,
)
parser.add_argument(
"--env",
type=str,
help="Env file to load environment from",
required=False,
default=find_dotenv(usecwd=True),
)
parsed_args = parser.parse_args(args_to_parse)
ToolArgsParser.load_environment(parsed_args.env)
return parsed_args
@staticmethod
def load_environment(path: Optional[str] = None) -> None:
"""Loads env variables with python-dotenv.
Args:
path (Optional[str], optional): Path to the env file to load.
Defaults to None.
"""
if path is None:
load_dotenv()
else:
load_dotenv(path)
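# A hedged example of parsing tool arguments directly; the argument values are
# illustrative. Environment variables from the resolved .env file are loaded as
# a side effect of parse_args().
#
#   ns = ToolArgsParser.parse_args(["--command", "SPEC"])
#   assert ns.command == "SPEC"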
| 1,696 | Python | .py | 49 | 25.102041 | 80 | 0.592186 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,662 | entrypoint.py | Zipstack_unstract-sdk/src/unstract/sdk/tool/entrypoint.py | from unstract.sdk.tool.base import BaseTool
from unstract.sdk.tool.executor import ToolExecutor
from unstract.sdk.tool.parser import ToolArgsParser
class ToolEntrypoint:
"""Class that contains methods for the entrypoint for a tool."""
@staticmethod
def launch(tool: BaseTool, args: list[str]) -> None:
"""Entrypoint function for a tool.
It parses the arguments passed to a tool and executes
the intended command.
Args:
            tool (BaseTool): Tool to execute
args (List[str]): Arguments passed to a tool
"""
parsed_args = ToolArgsParser.parse_args(args)
executor = ToolExecutor(tool=tool)
executor.execute(parsed_args)
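# A tool's main module typically wires this up roughly as below; `MyTool` is a
# hypothetical BaseTool subclass and sys.argv[1:] carries the CLI arguments.
#
#   import sys
#   from my_tool import MyTool
#
#   if __name__ == "__main__":
#       ToolEntrypoint.launch(tool=MyTool(), args=sys.argv[1:])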
| 721 | Python | .py | 17 | 35.235294 | 68 | 0.695279 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,663 | tool_utils.py | Zipstack_unstract-sdk/src/unstract/sdk/utils/tool_utils.py | import json
from hashlib import md5, sha256
from pathlib import Path
from typing import Any
import magic
class ToolUtils:
"""Class containing utility methods."""
@staticmethod
def hash_str(string_to_hash: Any, hash_method: str = "sha256") -> str:
"""Computes the hash for a given input string.
Useful to hash strings needed for caching and other purposes.
Hash method defaults to "md5"
Args:
string_to_hash (str): String to be hashed
hash_method (str): Hash hash_method to use, supported ones
- "md5"
Returns:
str: Hashed string
"""
if hash_method == "md5":
if isinstance(string_to_hash, bytes):
return str(md5(string_to_hash).hexdigest())
return str(md5(string_to_hash.encode()).hexdigest())
elif hash_method == "sha256":
if isinstance(string_to_hash, (bytes, bytearray)):
return str(sha256(string_to_hash).hexdigest())
return str(sha256(string_to_hash.encode()).hexdigest())
else:
raise ValueError(f"Unsupported hash_method: {hash_method}")
@staticmethod
def get_hash_from_file(file_path: str) -> str:
"""Computes the hash for a file.
Uses sha256 to compute the file hash through a buffered read.
Args:
file_path (str): Path to file that needs to be hashed
Returns:
str: SHA256 hash of the file
"""
h = sha256()
b = bytearray(128 * 1024)
mv = memoryview(b)
with open(file_path, "rb", buffering=0) as f:
while n := f.readinto(mv):
h.update(mv[:n])
return str(h.hexdigest())
@staticmethod
def load_json(file_to_load: str) -> dict[str, Any]:
"""Loads and returns a JSON from a file.
Args:
file_to_load (str): Path to the file containing JSON
Returns:
dict[str, Any]: The JSON loaded from file
"""
with open(file_to_load, encoding="utf-8") as f:
loaded_json: dict[str, Any] = json.load(f)
return loaded_json
@staticmethod
def json_to_str(json_to_dump: dict[str, Any]) -> str:
"""Helps convert the JSON to a string. Useful for dumping the JSON to a
file.
Args:
json_to_dump (dict[str, Any]): Input JSON to dump
Returns:
str: String representation of the JSON
"""
compact_json = json.dumps(json_to_dump, separators=(",", ":"))
return compact_json
@staticmethod
def get_file_mime_type(input_file: Path) -> str:
"""Gets the file MIME type for an input file. Uses libmagic to perform
the same.
Args:
input_file (Path): Path object of the input file
Returns:
str: MIME type of the file
"""
input_file_mime = ""
with open(input_file, mode="rb") as input_file_obj:
sample_contents = input_file_obj.read(100)
input_file_mime = magic.from_buffer(sample_contents, mime=True)
input_file_obj.seek(0)
return input_file_mime
@staticmethod
def get_file_size(input_file: Path) -> int:
"""Gets the file size in bytes for an input file.
Args:
input_file (Path): Path object of the input file
Returns:
            int: Size of the file in bytes
"""
with open(input_file, mode="rb") as input_file_obj:
input_file_obj.seek(0, 2) # Move the cursor to the end of the file
file_length = (
input_file_obj.tell()
) # Get the current position of the cursor, which is the file length
input_file_obj.seek(0)
return file_length
@staticmethod
def str_to_bool(string: str) -> bool:
"""String value of boolean to boolean.
Useful while parsing envs to bool.
Args:
string (str): value like "true", "True" etc..
Returns:
bool
"""
return string.lower() == "true"
# Used the same function from LLM Whisperer
@staticmethod
def calculate_page_count(
pages_string: str, max_page: int = 0, min_page: int = 1
) -> int:
"""Calculates the total number of pages based on the input string of
page numbers or ranges.
Parses the input 'pages_string' to extract individual page numbers or
ranges separated by commas.
Supports ranges like '1-5' or open-ended ranges like '4-'.
The 'max_page' parameter defines the upper limit for page numbers.
The 'min_page' parameter defines the lower limit for page numbers.
Args:
pages_string (str): String containing page numbers or ranges
separated by commas
max_page (int): Upper limit for page numbers (default is 0)
min_page (int): Lower limit for page numbers (default is 1)
Returns:
int: Total count of individual pages extracted from the input string
"""
if not pages_string:
return max_page
pages_list: list[int] = []
parts = pages_string.split(",")
for part in parts:
part = part.strip()
if "-" in part:
if part.startswith("-"): # e.g., "-5"
end = int(part[1:])
end = min(end, max_page)
pages_list.extend(range(min_page, end + 1))
elif part.endswith("-"): # e.g., "4-"
start = int(part[:-1])
if start < 0:
start = 0
if max_page is None:
raise ValueError(
"max_page must be defined for open-ended ranges like '4-'"
)
pages_list.extend(range(start, max_page + 1))
else: # e.g., "1-5"
start, end = map(int, part.split("-"))
if start < 0:
start = 0
if end > max_page:
end = max_page
pages_list.extend(range(start, end + 1))
else:
pages_list.append(int(part))
return len(pages_list)
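    # Worked examples of the parsing rules above (values are illustrative):
    #
    #   ToolUtils.calculate_page_count("1-3,7", max_page=10)  # -> 4
    #   ToolUtils.calculate_page_count("4-", max_page=6)      # -> 3 (pages 4, 5, 6)
    #   ToolUtils.calculate_page_count("", max_page=10)       # -> 10 (all pages)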
| 6,382 | Python | .py | 157 | 29.318471 | 86 | 0.553672 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,664 | common_utils.py | Zipstack_unstract-sdk/src/unstract/sdk/utils/common_utils.py | import functools
import logging
import time
import uuid
from unstract.sdk.constants import LogLevel
logger = logging.getLogger(__name__)
class CommonUtils:
@staticmethod
def generate_uuid() -> str:
"""Class method to get uuid."""
return str(uuid.uuid4())
# Mapping from python log level to Unstract counterpart
PY_TO_UNSTRACT_LOG_LEVEL = {
logging.DEBUG: LogLevel.DEBUG,
logging.INFO: LogLevel.INFO,
logging.WARNING: LogLevel.WARN,
logging.ERROR: LogLevel.ERROR,
}
def log_elapsed(operation):
"""Adds an elapsed time log.
Args:
operation (str): Operation being measured
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
try:
result = func(*args, **kwargs)
finally:
end_time = time.time()
elapsed_time = end_time - start_time
logger.info(f"Time taken for '{operation}': {elapsed_time:.3f}s")
return result
return wrapper
return decorator
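# A small sketch of the decorator in use; `parse_document` is a hypothetical
# function, and the elapsed time is logged at INFO level once it returns.
#
#   @log_elapsed(operation="EXTRACTION")
#   def parse_document(path: str) -> str:
#       ...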
| 1,106 | Python | .py | 36 | 23.722222 | 81 | 0.63327 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,665 | callback_manager.py | Zipstack_unstract-sdk/src/unstract/sdk/utils/callback_manager.py | import logging
from typing import Callable, Optional, Union
import tiktoken
from deprecated import deprecated
from llama_index.core.callbacks import CallbackManager as LlamaIndexCallbackManager
from llama_index.core.callbacks import TokenCountingHandler
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.llms import LLM
from unstract.sdk.utils.usage_handler import UsageHandler
logger = logging.getLogger(__name__)
class CallbackManager:
"""Class representing the CallbackManager to manage callbacks.
    Use this over the default service context of llama index.
This class supports a tokenizer, token counter,
usage handler, and callback manager.
Attributes:
None
Methods:
set_callback_manager: Returns a standard callback manager
Example:
        callback_manager = CallbackManager.set_callback_manager(
            platform_api_key="abc",
            llm=llm,
            embedding=embedding)
"""
@staticmethod
def set_callback(
platform_api_key: str,
model: Union[LLM, BaseEmbedding],
kwargs,
) -> None:
"""Sets the standard callback manager for the llm. This is to be called
explicitly whenever there is a need for the callback handling defined
here as handlers is to be invoked.
Parameters:
llm (LLM): The LLM type
Returns:
CallbackManager type of llama index
Example:
UNCallbackManager.set_callback_manager(
platform_api_key: "abc",
llm=llm,
embedding=embedding
)
"""
# Nothing to do if callback manager is already set for the instance
if (
model
and model.callback_manager
and len(model.callback_manager.handlers) > 0
):
return
model.callback_manager = CallbackManager.get_callback_manager(
model, platform_api_key, kwargs
)
@staticmethod
def get_callback_manager(
model: Union[LLM, BaseEmbedding],
platform_api_key: str,
kwargs,
) -> LlamaIndexCallbackManager:
llm = None
embedding = None
handler_list = []
if isinstance(model, LLM):
llm = model
usage_handler = UsageHandler(
platform_api_key=platform_api_key,
llm_model=llm,
embed_model=embedding,
kwargs=kwargs,
)
handler_list.append(usage_handler)
elif isinstance(model, BaseEmbedding):
embedding = model
# Get a tokenizer
tokenizer = CallbackManager.get_tokenizer(model)
token_counter = TokenCountingHandler(tokenizer=tokenizer, verbose=True)
usage_handler = UsageHandler(
token_counter=token_counter,
platform_api_key=platform_api_key,
llm_model=llm,
embed_model=embedding,
kwargs=kwargs,
)
handler_list.append(token_counter)
handler_list.append(usage_handler)
callback_manager: LlamaIndexCallbackManager = LlamaIndexCallbackManager(
handlers=handler_list
)
return callback_manager
@staticmethod
def get_tokenizer(
model: Optional[Union[LLM, BaseEmbedding, None]],
fallback_tokenizer: Callable[[str], list] = tiktoken.encoding_for_model(
"gpt-3.5-turbo"
).encode,
) -> Callable[[str], list]:
"""Returns a tokenizer function based on the provided model.
Args:
model (Optional[Union[LLM, BaseEmbedding]]): The model to use for
tokenization.
Returns:
Callable[[str], List]: The tokenizer function.
Raises:
OSError: If an error occurs while loading the tokenizer.
"""
try:
if isinstance(model, LLM):
model_name: str = model.metadata.model_name
            elif isinstance(model, BaseEmbedding):
                model_name = model.model_name
            else:  # No model provided, use the fallback tokenizer
                return fallback_tokenizer
tokenizer: Callable[[str], list] = tiktoken.encoding_for_model(
model_name
).encode
return tokenizer
except (KeyError, ValueError) as e:
logger.warning(str(e))
return fallback_tokenizer
@staticmethod
@deprecated("Use set_callback() instead")
def set_callback_manager(
platform_api_key: str,
llm: Optional[LLM] = None,
embedding: Optional[BaseEmbedding] = None,
**kwargs,
) -> LlamaIndexCallbackManager:
callback_manager: LlamaIndexCallbackManager = LlamaIndexCallbackManager()
if llm:
CallbackManager.set_callback(platform_api_key, model=llm, **kwargs)
callback_manager = llm.callback_manager
if embedding:
CallbackManager.set_callback(platform_api_key, model=embedding, **kwargs)
callback_manager = embedding.callback_manager
return callback_manager
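    # A hedged sketch of attaching the usage callbacks to a model before use;
    # the platform key value, the `llm` instance and the kwargs payload are
    # illustrative assumptions.
    #
    #   CallbackManager.set_callback(
    #       platform_api_key="unstract-platform-key",
    #       model=llm,
    #       kwargs={"run_id": "run-1"},
    #   )
    #   # llm.callback_manager now includes a UsageHandler that reports usage.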
| 5,158 | Python | .py | 135 | 27.777778 | 85 | 0.617247 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,666 | usage_handler.py | Zipstack_unstract-sdk/src/unstract/sdk/utils/usage_handler.py | from typing import Any, Optional
from llama_index.core.callbacks import CBEventType, TokenCountingHandler
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.llms import LLM
from unstract.sdk.audit import Audit
from unstract.sdk.constants import LogLevel
from unstract.sdk.tool.stream import StreamMixin
from unstract.sdk.utils.token_counter import TokenCounter
class UsageHandler(StreamMixin, BaseCallbackHandler):
"""UsageHandler class is a subclass of BaseCallbackHandler and is
responsible for handling usage events in the LLM or Embedding models. It
provides methods for starting and ending traces, as well as handling event
starts and ends.
Attributes:
- token_counter (TokenCountingHandler): The token counter object used
to count tokens in the LLM or Embedding models.
- llm_model (LLM): The LLM model object.
- embed_model (BaseEmbedding): The embedding model object.
- workflow_id (str): The ID of the workflow.
- execution_id (str): The ID of the execution.
- event_starts_to_ignore (Optional[list[CBEventType]]): A list of event
types to ignore at the start.
- event_ends_to_ignore (Optional[list[CBEventType]]): A list of event
types to ignore at the end.
- verbose (bool): A flag indicating whether to print verbose output.
"""
def __init__(
self,
platform_api_key: str,
token_counter: Optional[TokenCountingHandler] = None,
llm_model: LLM = None,
embed_model: BaseEmbedding = None,
event_starts_to_ignore: Optional[list[CBEventType]] = None,
event_ends_to_ignore: Optional[list[CBEventType]] = None,
verbose: bool = False,
log_level: LogLevel = LogLevel.INFO,
kwargs: dict[Any, Any] = None,
) -> None:
        self.kwargs = kwargs.copy() if kwargs else {}
self._verbose = verbose
self.token_counter = token_counter
self.llm_model = llm_model
self.embed_model = embed_model
self.platform_api_key = platform_api_key
super().__init__(
log_level=log_level, # StreamMixin's args
event_starts_to_ignore=event_starts_to_ignore or [],
event_ends_to_ignore=event_ends_to_ignore or [],
)
def start_trace(self, trace_id: Optional[str] = None) -> None:
return
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[dict[str, list[str]]] = None,
) -> None:
return
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
kwargs: dict[Any, Any] = None,
) -> str:
return event_id
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[dict[str, Any]] = None,
event_id: str = "",
kwargs: dict[Any, Any] = None,
) -> None:
"""Push the usage of LLM or Embedding to platform service."""
if (
event_type == CBEventType.LLM
and event_type not in self.event_ends_to_ignore
and payload is not None
):
model_name = self.llm_model.metadata.model_name
            # Need to push the data via the platform service
self.stream_log(
log=f"Pushing llm usage for model {model_name}", level=LogLevel.DEBUG
)
llm_token_counter: TokenCounter = TokenCounter.get_llm_token_counts(payload)
Audit(log_level=self.log_level).push_usage_data(
platform_api_key=self.platform_api_key,
token_counter=llm_token_counter,
event_type=event_type,
model_name=self.llm_model.metadata.model_name,
kwargs=self.kwargs,
)
elif (
event_type == CBEventType.EMBEDDING
and event_type not in self.event_ends_to_ignore
and payload is not None
):
model_name = self.embed_model.model_name
            # Need to push the data via the platform service
self.stream_log(
log=f"Pushing embedding usage for model {model_name}",
level=LogLevel.DEBUG,
)
Audit(log_level=self.log_level).push_usage_data(
platform_api_key=self.platform_api_key,
token_counter=self.token_counter,
event_type=event_type,
model_name=self.embed_model.model_name,
kwargs=self.kwargs,
)
self.token_counter.reset_counts()
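# Illustrative sketch (not part of the original module): wiring the handler
# onto an LLM's callback manager. MockLLM and the dummy platform key are
# placeholders; a real deployment would pass an actual model and key.
if __name__ == "__main__":
    from llama_index.core.callbacks import CallbackManager
    from llama_index.core.llms import MockLLM
    llm = MockLLM()
    token_counter = TokenCountingHandler()
    usage_handler = UsageHandler(
        platform_api_key="dummy-platform-key",
        token_counter=token_counter,
        llm_model=llm,
        kwargs={},
    )
    llm.callback_manager = CallbackManager([token_counter, usage_handler])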
| 4,762 | Python | .py | 112 | 32.866071 | 88 | 0.621901 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,667 | token_counter.py | Zipstack_unstract-sdk/src/unstract/sdk/utils/token_counter.py | from typing import Any, Union
from llama_index.core.callbacks.schema import EventPayload
from llama_index.core.llms import ChatResponse, CompletionResponse
class Constants:
DEFAULT_TOKEN_COUNT = 0
class TokenCounter:
prompt_llm_token_count: int
completion_llm_token_count: int
total_llm_token_count: int = 0
total_embedding_token_count: int = 0
def __init__(self, input_tokens, output_tokens):
self.prompt_llm_token_count = input_tokens
self.completion_llm_token_count = output_tokens
self.total_llm_token_count = (
self.prompt_llm_token_count + self.completion_llm_token_count
)
    # TODO: Add unit test cases for the following function
    # for ease of maintenance
@staticmethod
def get_llm_token_counts(payload: dict[str, Any]):
prompt_tokens = Constants.DEFAULT_TOKEN_COUNT
completion_tokens = Constants.DEFAULT_TOKEN_COUNT
if EventPayload.PROMPT in payload:
response = payload.get(EventPayload.COMPLETION)
(
prompt_tokens,
completion_tokens,
) = TokenCounter._get_tokens_from_response(response)
elif EventPayload.MESSAGES in payload:
response = payload.get(EventPayload.RESPONSE)
if response:
(
prompt_tokens,
completion_tokens,
) = TokenCounter._get_tokens_from_response(response)
token_counter = TokenCounter(
input_tokens=prompt_tokens,
output_tokens=completion_tokens,
)
return token_counter
@staticmethod
def _get_tokens_from_response(
response: Union[CompletionResponse, ChatResponse, dict]
) -> tuple[int, int]:
"""Get the token counts from a raw response."""
prompt_tokens, completion_tokens = 0, 0
if isinstance(response, CompletionResponse) or isinstance(
response, ChatResponse
):
raw_response = response.raw
if not isinstance(raw_response, dict):
raw_response = dict(raw_response)
usage = raw_response.get("usage", None)
if usage is None:
if (
hasattr(response, "additional_kwargs")
and "prompt_tokens" in response.additional_kwargs
):
usage = response.additional_kwargs
elif hasattr(response, "raw"):
completion_raw = response.raw
if ("_raw_response" in completion_raw) and hasattr(
completion_raw["_raw_response"], "usage_metadata"
):
usage = completion_raw["_raw_response"].usage_metadata
prompt_tokens = usage.prompt_token_count
completion_tokens = usage.candidates_token_count
return prompt_tokens, completion_tokens
elif "inputTextTokenCount" in completion_raw:
prompt_tokens = completion_raw["inputTextTokenCount"]
if "results" in completion_raw:
result_list: list = completion_raw["results"]
if len(result_list) > 0:
result: dict = result_list[0]
if "tokenCount" in result:
completion_tokens = result.get("tokenCount", 0)
return prompt_tokens, completion_tokens
else:
usage = response.raw
else:
usage = response
if not isinstance(usage, dict):
usage = usage.model_dump()
possible_input_keys = (
"prompt_tokens",
"input_tokens",
"prompt_eval_count",
)
possible_output_keys = (
"completion_tokens",
"output_tokens",
"eval_count",
)
prompt_tokens = 0
for input_key in possible_input_keys:
if input_key in usage:
prompt_tokens = int(usage[input_key])
break
completion_tokens = 0
for output_key in possible_output_keys:
if output_key in usage:
completion_tokens = int(usage[output_key])
break
return prompt_tokens, completion_tokens
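# Illustrative sketch (not part of the original module): deriving token counts
# from a callback payload. The usage numbers in `raw` are made up for the
# example.
if __name__ == "__main__":
    response = CompletionResponse(
        text="Hello!",
        raw={"usage": {"prompt_tokens": 12, "completion_tokens": 3}},
    )
    payload = {EventPayload.PROMPT: "Say hello", EventPayload.COMPLETION: response}
    counter = TokenCounter.get_llm_token_counts(payload)
    print(counter.total_llm_token_count)  # 15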
| 4,376 | Python | .py | 104 | 29.278846 | 79 | 0.573643 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,668 | enums.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/enums.py | from enum import Enum
class AdapterTypes(Enum):
UNKNOWN = "UNKNOWN"
LLM = "LLM"
EMBEDDING = "EMBEDDING"
VECTOR_DB = "VECTOR_DB"
OCR = "OCR"
X2TEXT = "X2TEXT"
| 184 | Python | .py | 8 | 18.75 | 27 | 0.637931 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,669 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/constants.py | class Common:
METADATA = "metadata"
MODULE = "module"
ADAPTER = "adapter"
SRC_FOLDER = "src"
ADAPTER_METADATA = "adapter_metadata"
ICON = "icon"
ADAPTER_ID = "adapter_id"
ADAPTER_TYPE = "adapter_type"
DEFAULT_ERR_MESSAGE = "Something went wrong"
| 282 | Python | .py | 10 | 23.6 | 48 | 0.654412 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,670 | adapterkit.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/adapterkit.py | import logging
from typing import Any
from singleton_decorator import singleton
from unstract.sdk.adapters import AdapterDict
from unstract.sdk.adapters.base import Adapter
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.embedding import adapters as embedding_adapters
from unstract.sdk.adapters.llm import adapters as llm_adapters
from unstract.sdk.adapters.ocr import adapters as ocr_adapters
from unstract.sdk.adapters.vectordb import adapters as vectordb_adapters
from unstract.sdk.adapters.x2text import adapters as x2text_adapters
logger = logging.getLogger(__name__)
# Declaring this class as a Singleton to avoid initialising
# the adapters list every time
@singleton
class Adapterkit:
def __init__(self) -> None:
self._adapters: AdapterDict = (
embedding_adapters
| llm_adapters
| vectordb_adapters
| x2text_adapters
| ocr_adapters
)
@property
def adapters(self) -> AdapterDict:
return self._adapters
def get_adapter_class_by_adapter_id(self, adapter_id: str) -> Adapter:
if adapter_id in self._adapters:
adapter_class: Adapter = self._adapters[adapter_id][
Common.METADATA
][Common.ADAPTER]
return adapter_class
else:
raise RuntimeError(f"Couldn't obtain adapter for {adapter_id}")
def get_adapter_by_id(
self, adapter_id: str, *args: Any, **kwargs: Any
) -> Adapter:
"""Instantiates and returns a adapter.
Args:
adapter_id (str): Identifies adapter to create
Raises:
RuntimeError: If the ID is invalid/adapter is missing
Returns:
Adapter: Concrete impl of the `Adapter` base
"""
adapter_class: Adapter = self.get_adapter_class_by_adapter_id(
adapter_id
)
return adapter_class(*args, **kwargs)
def get_adapters_list(self) -> list[dict[str, Any]]:
adapters = []
for adapter_id, adapter_registry_metadata in self._adapters.items():
m: Adapter = adapter_registry_metadata[Common.METADATA][
Common.ADAPTER
]
_id = m.get_id()
name = m.get_name()
adapter_type = m.get_adapter_type().name
json_schema = m.get_json_schema()
desc = m.get_description()
icon = m.get_icon()
adapters.append(
{
"id": _id,
"name": name,
"class_name": m.__name__,
"description": desc,
"icon": icon,
"adapter_type": adapter_type,
"json_schema": json_schema,
}
)
return adapters
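# Illustrative sketch (not part of the original module): enumerating the
# registered adapters. Instantiating one via get_adapter_by_id() additionally
# requires valid adapter settings, which are omitted here.
if __name__ == "__main__":
    kit = Adapterkit()
    for entry in kit.get_adapters_list():
        print(entry["id"], entry["adapter_type"])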
| 2,837 | Python | .py | 74 | 28.472973 | 76 | 0.605598 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,671 | utils.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/utils.py | from pathlib import Path
import filetype
import magic
from requests import Response
from requests.exceptions import RequestException
from unstract.sdk.adapters.constants import Common
class AdapterUtils:
@staticmethod
def get_msg_from_request_exc(
err: RequestException,
message_key: str,
default_err: str = Common.DEFAULT_ERR_MESSAGE,
) -> str:
"""Gets the message from the RequestException.
Args:
err_response (Response): Error response from the exception
message_key (str): Key from response containing error message
Returns:
str: Error message returned by the server
"""
if hasattr(err, "response"):
err_response: Response = err.response # type: ignore
if err_response.headers["Content-Type"] == "application/json":
err_json = err_response.json()
if message_key in err_json:
return str(err_json[message_key])
elif err_response.headers["Content-Type"] == "text/plain":
return err_response.text # type: ignore
return default_err
@staticmethod
def get_file_mime_type(input_file: Path) -> str:
"""Gets the file MIME type for an input file. Uses libmagic to perform
the same.
Args:
input_file (Path): Path object of the input file
Returns:
str: MIME type of the file
"""
input_file_mime = ""
with open(input_file, mode="rb") as input_file_obj:
sample_contents = input_file_obj.read(100)
input_file_mime = magic.from_buffer(sample_contents, mime=True)
input_file_obj.seek(0)
return input_file_mime
@staticmethod
def guess_extention(input_file_path: str) -> str:
"""Returns the extention of the file passed.
Args:
input_file_path (str): String holding the path
Returns:
str: File extention
"""
input_file_extention = ""
with open(input_file_path, mode="rb") as file_obj:
sample_contents = file_obj.read(100)
file_type = filetype.guess(sample_contents)
input_file_extention = file_type.EXTENSION
return input_file_extention
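# Illustrative sketch (not part of the original module): detecting the MIME
# type of a local file. The file name below is an assumption for demonstration.
if __name__ == "__main__":
    sample = Path("sample.pdf")
    if sample.exists():
        print(AdapterUtils.get_file_mime_type(sample))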
| 2,314 | Python | .py | 58 | 30.413793 | 78 | 0.620321 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,672 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/__init__.py | import logging
from logging import NullHandler
from typing import Any
logging.getLogger(__name__).addHandler(NullHandler())
AdapterDict = dict[str, dict[str, Any]]
| 167 | Python | .py | 5 | 31.8 | 53 | 0.811321 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,673 | base.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/base.py | import logging
from abc import ABC, abstractmethod
from unstract.sdk.adapters.enums import AdapterTypes
logger = logging.getLogger(__name__)
class Adapter(ABC):
def __init__(self, name: str):
self.name = name
@staticmethod
@abstractmethod
def get_id() -> str:
return ""
@staticmethod
@abstractmethod
def get_name() -> str:
return ""
@staticmethod
@abstractmethod
def get_description() -> str:
return ""
@staticmethod
@abstractmethod
def get_icon() -> str:
return ""
@staticmethod
@abstractmethod
def get_json_schema() -> str:
return ""
@staticmethod
@abstractmethod
def get_adapter_type() -> AdapterTypes:
return ""
@abstractmethod
def test_connection(self) -> bool:
"""Override to test connection for a adapter.
Returns:
bool: Flag indicating if the credentials are valid or not
"""
pass
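# Illustrative sketch (not part of the original module): a minimal concrete
# adapter built on the abstract base above. All identifiers and values are
# placeholders, not a real Unstract adapter.
class _EchoAdapter(Adapter):
    @staticmethod
    def get_id() -> str:
        return "echo|00000000-0000-0000-0000-000000000000"
    @staticmethod
    def get_name() -> str:
        return "Echo"
    @staticmethod
    def get_description() -> str:
        return "Example adapter used only for illustration"
    @staticmethod
    def get_icon() -> str:
        return ""
    @staticmethod
    def get_json_schema() -> str:
        return "{}"
    @staticmethod
    def get_adapter_type() -> AdapterTypes:
        return AdapterTypes.UNKNOWN
    def test_connection(self) -> bool:
        return True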
| 985 | Python | .py | 38 | 19.763158 | 69 | 0.629947 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,674 | exceptions.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/exceptions.py | from unstract.sdk.exceptions import SdkError
class AdapterError(SdkError):
pass
class LLMError(AdapterError):
pass
class VectorDBError(AdapterError):
pass
class EmbeddingError(AdapterError):
pass
class ExtractorError(AdapterError):
pass
| 267 | Python | .py | 11 | 20.545455 | 44 | 0.817073 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,675 | registry.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/registry.py | from abc import ABC, abstractmethod
from typing import Any
class AdapterRegistry(ABC):
def __init__(self, name: str):
self.name = name
@staticmethod
@abstractmethod
def register_adapters(adapters: dict[str, Any]) -> None:
pass
| 262 | Python | .py | 9 | 24.222222 | 60 | 0.692 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,676 | constants.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/constants.py | class VectorDbConstants:
VECTOR_DB_NAME = "collection_name"
EMBEDDING_DIMENSION = "embedding_dimension"
DEFAULT_VECTOR_DB_NAME = "unstract"
DEFAULT_EMBEDDING_SIZE = 2
WAIT_TIME = "wait_time"
| 211 | Python | .py | 6 | 30.833333 | 47 | 0.721951 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,677 | vectordb_adapter.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/vectordb_adapter.py | from abc import ABC
from typing import Any, Union
from llama_index.core.schema import BaseNode
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.vector_stores.types import BasePydanticVectorStore, VectorStore
from unstract.sdk.adapters.base import Adapter
from unstract.sdk.adapters.enums import AdapterTypes
class VectorDBAdapter(Adapter, ABC):
def __init__(
self,
name: str,
vector_db_instance: Union[VectorStore, BasePydanticVectorStore],
):
super().__init__(name)
self.name = name
self._vector_db_instance: Union[VectorStore, BasePydanticVectorStore] = (
vector_db_instance
)
@staticmethod
def get_id() -> str:
return ""
@staticmethod
def get_name() -> str:
return ""
@staticmethod
def get_description() -> str:
return ""
@staticmethod
def get_icon() -> str:
return ""
@staticmethod
def get_json_schema() -> str:
return ""
@staticmethod
def get_adapter_type() -> AdapterTypes:
return AdapterTypes.VECTOR_DB
def get_vector_db_instance(
self, vector_db_config: dict[str, Any]
) -> Union[BasePydanticVectorStore, VectorStore]:
"""Instantiate the llama index VectorStore / BasePydanticVectorStore
class.
Returns:
BasePydanticVectorStore / VectorStore:
llama index implementation of the vector store
Raises exceptions for any error
"""
return SimpleVectorStore()
def close(self, **kwargs: Any) -> None:
"""Closes the client connection.
Returns:
None
"""
# Overriding implementations will have the corresponding
# library methods invoked
pass
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete the specified docs.
Returns:
None
"""
# Overriding implementations will have the corresponding
# library methods invoked
        self._vector_db_instance.delete(ref_doc_id=ref_doc_id, **delete_kwargs)
def add(self, ref_doc_id: str, nodes: list[BaseNode]) -> list[str]:
return self._vector_db_instance.add(nodes=nodes)
| 2,330 | Python | .py | 67 | 26.955224 | 85 | 0.64203 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,678 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/__init__.py | from unstract.sdk.adapters import AdapterDict
from unstract.sdk.adapters.vectordb.register import VectorDBRegistry
adapters: AdapterDict = {}
VectorDBRegistry.register_adapters(adapters)
| 189 | Python | .py | 4 | 45.75 | 68 | 0.874317 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,679 | helper.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/helper.py | import logging
import os
from typing import Optional
from llama_index.core import (
MockEmbedding,
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.core.llms import MockLLM
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
logger = logging.getLogger(__name__)
class VectorDBHelper:
@staticmethod
def test_vector_db_instance(
vector_store: Optional[BasePydanticVectorStore],
) -> bool:
try:
if vector_store is None:
return False
storage_context = StorageContext.from_defaults(vector_store=vector_store)
local_path = f"{os.path.dirname(__file__)}/samples/"
# Using mock llm and embedding here.
            # For a custom embedding, the args would be:
# embed_model - InstructorEmbeddings(embed_batch_size=2)
# chunk_size - 512
# llm=None
llm = MockLLM()
embed_model = MockEmbedding(
embed_dim=VectorDbConstants.DEFAULT_EMBEDDING_SIZE
)
index = VectorStoreIndex.from_documents(
# By default, SimpleDirectoryReader discards paths which
# contain one or more parts that are hidden.
# In local, packages could be installed in a venv. This
# means a path can contain a ".venv" in it which will
# then be treated as hidden and subsequently discarded.
documents=SimpleDirectoryReader(
local_path, exclude_hidden=False
).load_data(),
storage_context=storage_context,
llm=llm,
embed_model=embed_model,
)
query_engine = index.as_query_engine(llm=llm)
query_engine.query("What did the author learn?")
return True
except Exception as e:
logger.error(f"Error occured while testing adapter {e}")
raise AdapterError(str(e))
@staticmethod
def get_collection_name(
collection_name_prefix: str,
embedding_dimension: int,
) -> str:
"""
Notes:
This function constructs the collection / table name to store the
documents in the vector db.
If user supplies this field in the config metadata then system
would pick that and append it as prefix to embedding type.
If this does not come as user setting, then system looks for it
in the get_vector_db() argument and append it to embedding type
If it is not there in both places then system appends
"unstract_vector_db" as prefix to embedding type.
If embedding type is not passed in get_vector_db() as arg,
then system ignores appending that
Args:
collection_name_prefix (str): the prefix to be added. If this is
not passed in, then the default DEFAULT_VECTOR_DB_NAME
will be picked up for prefixing
embedding_dimension (str): this will be suffixed.
If this value is not passed in,
then only collection_name_prefix will be returned
Eg. collection_name_prefix -> mock_org
embedding_dimension -> 1536
return value -> mock_org_unstract_1536
collection_name_prefix -> No value
embedding_type -> No value
return value -> unstract_vector_db
"""
vector_db_collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
if embedding_dimension:
vector_db_collection_name = (
vector_db_collection_name + "_" + str(embedding_dimension)
)
if collection_name_prefix:
vector_db_collection_name = (
collection_name_prefix + "_" + vector_db_collection_name
)
logger.info(f"Vector DB name: {vector_db_collection_name}")
return vector_db_collection_name
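# Illustrative sketch (not part of the original module): collection name
# construction as implemented above; "mock_org" and 1536 are example inputs.
if __name__ == "__main__":
    print(VectorDBHelper.get_collection_name("mock_org", 1536))  # mock_org_unstract_1536
    print(VectorDBHelper.get_collection_name(None, None))  # unstract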
| 4,241 | Python | .py | 94 | 33.361702 | 85 | 0.610977 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,680 | register.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/register.py | import logging
import os
from importlib import import_module
from typing import Any
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.registry import AdapterRegistry
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
logger = logging.getLogger(__name__)
class VectorDBRegistry(AdapterRegistry):
@staticmethod
def register_adapters(adapters: dict[str, Any]) -> None:
current_directory = os.path.dirname(os.path.abspath(__file__))
package = "unstract.sdk.adapters.vectordb"
for adapter in os.listdir(current_directory):
adapter_path = os.path.join(current_directory, adapter, Common.SRC_FOLDER)
# Check if the item is a directory and not a
# special directory like __pycache__
if os.path.isdir(adapter_path) and not adapter.startswith("__"):
VectorDBRegistry._build_adapter_list(adapter, package, adapters)
if len(adapters) == 0:
logger.warning("No vectorDB adapter found.")
@staticmethod
def _build_adapter_list(
adapter: str, package: str, adapters: dict[str, Any]
) -> None:
try:
full_module_path = f"{package}.{adapter}.{Common.SRC_FOLDER}"
module = import_module(full_module_path)
metadata = getattr(module, Common.METADATA, {})
if metadata.get("is_active", False):
adapter_class: VectorDBAdapter = metadata[Common.ADAPTER]
adapter_id = adapter_class.get_id()
if not adapter_id or (adapter_id in adapters):
logger.warning(f"Duplicate Id : {adapter_id}")
else:
adapters[adapter_id] = {
Common.MODULE: module,
Common.METADATA: metadata,
}
except ModuleNotFoundError as exception:
logger.warning(f"Unable to import vectorDB adapters : {exception}")
| 1,992 | Python | .py | 41 | 38.073171 | 86 | 0.638046 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,681 | pinecone.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/pinecone/src/pinecone.py | import logging
import os
from typing import Any, Optional
from llama_index.core.schema import BaseNode
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.pinecone import PineconeVectorStore
from pinecone import NotFoundException
from pinecone import Pinecone as LLamaIndexPinecone
from pinecone import PodSpec, ServerlessSpec
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
logger = logging.getLogger(__name__)
class Constants:
API_KEY = "api_key"
ENVIRONMENT = "environment"
NAMESPACE = "namespace"
DIMENSION = 1536
METRIC = "euclidean"
SPECIFICATION = "spec"
SPEC_POD = "pod"
SPEC_SERVERLESS = "serverless"
CLOUD = "cloud"
REGION = "region"
DEFAULT_SPEC_COUNT_VALUE = 1
DEFAULT_POD_TYPE = "p1.x1"
class Pinecone(VectorDBAdapter):
def __init__(self, settings: dict[str, Any]):
self._config = settings
self._client: Optional[LLamaIndexPinecone] = None
self._collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._vector_db_instance = self._get_vector_db_instance()
super().__init__("Pinecone", self._vector_db_instance)
@staticmethod
def get_id() -> str:
return "pinecone|83881133-485d-4ecc-b1f7-0009f96dc74a"
@staticmethod
def get_name() -> str:
return "Pinecone"
@staticmethod
def get_description() -> str:
return "Pinecone VectorDB"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/pinecone.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_vector_db_instance(self) -> BasePydanticVectorStore:
return self._vector_db_instance
def _get_vector_db_instance(self) -> BasePydanticVectorStore:
try:
self._client = LLamaIndexPinecone(
api_key=str(self._config.get(Constants.API_KEY))
)
dimension = self._config.get(
VectorDbConstants.EMBEDDING_DIMENSION,
VectorDbConstants.DEFAULT_EMBEDDING_SIZE,
)
collection_name = VectorDBHelper.get_collection_name(
self._config.get(VectorDbConstants.VECTOR_DB_NAME),
dimension,
)
self._collection_name = collection_name.replace("_", "-").lower()
specification = self._config.get(Constants.SPECIFICATION)
if specification == Constants.SPEC_POD:
environment = self._config.get(Constants.ENVIRONMENT)
spec = PodSpec(
environment=environment,
replicas=Constants.DEFAULT_SPEC_COUNT_VALUE,
shards=Constants.DEFAULT_SPEC_COUNT_VALUE,
pods=Constants.DEFAULT_SPEC_COUNT_VALUE,
pod_type=Constants.DEFAULT_POD_TYPE,
)
elif specification == Constants.SPEC_SERVERLESS:
cloud = self._config.get(Constants.CLOUD)
region = self._config.get(Constants.REGION)
spec = ServerlessSpec(cloud=cloud, region=region)
logger.info(f"Setting up Pinecone spec for {spec}")
try:
self._client.describe_index(name=self._collection_name)
except NotFoundException:
logger.info(
f"Index:{self._collection_name} does not exist. Creating it."
)
self._client.create_index(
name=self._collection_name,
dimension=dimension,
metric=Constants.METRIC,
spec=spec,
)
self.vector_db: BasePydanticVectorStore = PineconeVectorStore(
index_name=self._collection_name,
api_key=str(self._config.get(Constants.API_KEY)),
environment=str(self._config.get(Constants.ENVIRONMENT)),
)
return self.vector_db
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
vector_db = self.get_vector_db_instance()
test_result: bool = VectorDBHelper.test_vector_db_instance(
vector_store=vector_db
)
# Delete the collection that was created for testing
if self._client:
self._client.delete_index(self._collection_name)
return test_result
def close(self, **kwargs: Any) -> None:
# Close connection is not defined for this client
pass
def delete(self, ref_doc_id: str, **delete_kwargs: dict[Any, Any]) -> None:
specification = self._config.get(Constants.SPECIFICATION)
if specification == Constants.SPEC_SERVERLESS:
# To delete all records representing chunks of a single document,
# first list the record IDs based on their common ID prefix,
# and then delete the records by ID:
try:
index = self._client.Index(self._collection_name) # type: ignore
# Get all record having the ref_doc_id and delete them
for ids in index.list(prefix=ref_doc_id):
logger.info(ids)
index.delete(ids=ids)
except Exception as e:
raise AdapterError(str(e))
elif specification == Constants.SPEC_POD:
if self.vector_db.environment == "gcp-starter": # type: ignore
raise AdapterError(
"Re-indexing is not supported on Starter indexes. "
"Use Serverless or paid plan for Pod spec"
)
else:
super().delete(ref_doc_id=ref_doc_id, **delete_kwargs)
def add(
self,
ref_doc_id: str,
nodes: list[BaseNode],
) -> list[str]:
for i, node in enumerate(nodes):
node_id = ref_doc_id + "-" + node.node_id
nodes[i].id_ = node_id
return self.vector_db.add(nodes=nodes)
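# Illustrative sketch (not part of the original module): the settings shape
# this adapter expects. Every value below is a placeholder; running it needs
# real Pinecone credentials.
if __name__ == "__main__":
    settings = {
        "api_key": "<pinecone-api-key>",
        "spec": "serverless",
        "cloud": "aws",
        "region": "us-east-1",
        "collection_name": "demo_org",
        "embedding_dimension": 1536,
    }
    adapter = Pinecone(settings)
    print(adapter.test_connection())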
| 6,372 | Python | .py | 146 | 32.869863 | 81 | 0.616562 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,682 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/pinecone/src/__init__.py | from .pinecone import Pinecone
metadata = {
"name": Pinecone.__name__,
"version": "1.0.0",
"adapter": Pinecone,
"description": "Pinecone VectorDB adapter",
"is_active": True,
}
| 198 | Python | .py | 8 | 21.125 | 47 | 0.640212 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,683 | qdrant.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/qdrant/src/qdrant.py | import logging
import os
from typing import Any, Optional
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
logger = logging.getLogger(__name__)
class Constants:
URL = "url"
API_KEY = "api_key"
class Qdrant(VectorDBAdapter):
def __init__(self, settings: dict[str, Any]):
self._config = settings
self._client: Optional[QdrantClient] = None
self._collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._vector_db_instance = self._get_vector_db_instance()
super().__init__("Qdrant", self._vector_db_instance)
@staticmethod
def get_id() -> str:
return "qdrant|41f64fda-2e4c-4365-89fd-9ce91bee74d0"
@staticmethod
def get_name() -> str:
return "Qdrant"
@staticmethod
def get_description() -> str:
return "Qdrant LLM"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/qdrant.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_vector_db_instance(self) -> BasePydanticVectorStore:
return self._vector_db_instance
def _get_vector_db_instance(self) -> BasePydanticVectorStore:
try:
self._collection_name = VectorDBHelper.get_collection_name(
self._config.get(VectorDbConstants.VECTOR_DB_NAME),
self._config.get(VectorDbConstants.EMBEDDING_DIMENSION),
)
url = self._config.get(Constants.URL)
api_key: Optional[str] = self._config.get(Constants.API_KEY, None)
if api_key:
self._client = QdrantClient(url=url, api_key=api_key)
else:
self._client = QdrantClient(url=url)
vector_db: BasePydanticVectorStore = QdrantVectorStore(
collection_name=self._collection_name,
client=self._client,
url=url,
api_key=api_key,
)
return vector_db
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
vector_db = self.get_vector_db_instance()
test_result: bool = VectorDBHelper.test_vector_db_instance(
vector_store=vector_db
)
# Delete the collection that was created for testing
if self._client is not None:
self._client.delete_collection(self._collection_name)
return test_result
def close(self, **kwargs: Any) -> None:
if self._client:
self._client.close(**kwargs)
| 3,060 | Python | .py | 74 | 32.891892 | 78 | 0.654882 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,684 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/qdrant/src/__init__.py | from .qdrant import Qdrant
metadata = {
"name": Qdrant.__name__,
"version": "1.0.0",
"adapter": Qdrant,
"description": "Qdrant VectorDB adapter",
"is_active": True,
}
| 188 | Python | .py | 8 | 19.875 | 45 | 0.620112 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,685 | weaviate.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/weaviate/src/weaviate.py | import logging
import os
from typing import Any, Optional
import weaviate
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from weaviate.classes.init import Auth
from weaviate.exceptions import UnexpectedStatusCodeException
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
logger = logging.getLogger(__name__)
class Constants:
URL = "url"
API_KEY = "api_key"
class Weaviate(VectorDBAdapter):
def __init__(self, settings: dict[str, Any]):
self._config = settings
self._client: Optional[weaviate.Client] = None
self._collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._vector_db_instance = self._get_vector_db_instance()
super().__init__("Weaviate", self._vector_db_instance)
@staticmethod
def get_id() -> str:
return "weaviate|294e08df-4e4a-40f2-8f0d-9e4940180ccc"
@staticmethod
def get_name() -> str:
return "Weaviate"
@staticmethod
def get_description() -> str:
return "Weaviate VectorDB"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/Weaviate.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_vector_db_instance(self) -> BasePydanticVectorStore:
return self._vector_db_instance
def _get_vector_db_instance(self) -> BasePydanticVectorStore:
try:
collection_name = VectorDBHelper.get_collection_name(
self._config.get(VectorDbConstants.VECTOR_DB_NAME),
self._config.get(VectorDbConstants.EMBEDDING_DIMENSION),
)
            # Capitalise the first letter as Weaviate expects this.
            # LLama-index throws an error if it is not capitalised while
            # using Weaviate
self._collection_name = collection_name.capitalize()
self._client = weaviate.connect_to_weaviate_cloud(
cluster_url=str(self._config.get(Constants.URL)),
auth_credentials=Auth.api_key(str(self._config.get(Constants.API_KEY))),
)
try:
# Class definition object. Weaviate's autoschema
# feature will infer properties when importing.
class_obj = {
"class": self._collection_name,
"vectorizer": "none",
}
                # Create the collection
self._client.collections.create_from_dict(class_obj)
except Exception as e:
if isinstance(e, UnexpectedStatusCodeException):
if "already exists" in e.message:
logger.warning(f"Collection already exists: {e}")
else:
raise e
vector_db: BasePydanticVectorStore = WeaviateVectorStore(
weaviate_client=self._client,
index_name=self._collection_name,
)
return vector_db
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
vector_db = self.get_vector_db_instance()
test_result: bool = VectorDBHelper.test_vector_db_instance(
vector_store=vector_db
)
# Delete the collection that was created for testing
if self._client is not None:
self._client.collections.delete(self._collection_name)
return test_result
def close(self, **kwargs: Any) -> None:
if self._client:
self._client.close(**kwargs)
| 3,946 | Python | .py | 91 | 33.56044 | 88 | 0.642001 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,686 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/weaviate/src/__init__.py | from .weaviate import Weaviate
metadata = {
"name": Weaviate.__name__,
"version": "1.0.0",
"adapter": Weaviate,
"description": "Weaviate VectorDB adapter",
"is_active": True,
}
| 198 | Python | .py | 8 | 21.125 | 47 | 0.640212 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,687 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/postgres/src/__init__.py | from .postgres import Postgres
metadata = {
"name": Postgres.__name__,
"version": "1.0.0",
"adapter": Postgres,
"description": "Postgres VectorDB adapter",
"is_active": True,
}
| 198 | Python | .py | 8 | 21.125 | 47 | 0.640212 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,688 | postgres.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/postgres/src/postgres.py | import os
from typing import Any, Optional
from urllib.parse import quote_plus
import psycopg2
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.postgres import PGVectorStore
from psycopg2._psycopg import connection
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
class Constants:
DATABASE = "database"
HOST = "host"
PASSWORD = "password"
PORT = "port"
USER = "user"
SCHEMA = "schema"
ENABLE_SSL = "enable_ssl"
class Postgres(VectorDBAdapter):
def __init__(self, settings: dict[str, Any]):
self._config = settings
self._client: Optional[connection] = None
self._collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._schema_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._vector_db_instance = self._get_vector_db_instance()
super().__init__("Postgres", self._vector_db_instance)
@staticmethod
def get_id() -> str:
return "postgres|70ab6cc2-e86a-4e5a-896f-498a95022d34"
@staticmethod
def get_name() -> str:
return "Postgres"
@staticmethod
def get_description() -> str:
return "Postgres VectorDB"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/postgres.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_vector_db_instance(self) -> BasePydanticVectorStore:
return self._vector_db_instance
def _get_vector_db_instance(self) -> BasePydanticVectorStore:
try:
encoded_password = quote_plus(str(self._config.get(Constants.PASSWORD)))
dimension = self._config.get(
VectorDbConstants.EMBEDDING_DIMENSION,
VectorDbConstants.DEFAULT_EMBEDDING_SIZE,
)
self._collection_name = VectorDBHelper.get_collection_name(
self._config.get(VectorDbConstants.VECTOR_DB_NAME),
dimension,
)
self._schema_name = self._config.get(
Constants.SCHEMA,
VectorDbConstants.DEFAULT_VECTOR_DB_NAME,
)
vector_db: BasePydanticVectorStore = PGVectorStore.from_params(
database=self._config.get(Constants.DATABASE),
schema_name=self._schema_name,
host=self._config.get(Constants.HOST),
password=encoded_password,
port=str(self._config.get(Constants.PORT)),
user=self._config.get(Constants.USER),
table_name=self._collection_name,
embed_dim=dimension,
)
if self._config.get(Constants.ENABLE_SSL, True):
ssl_mode = "require"
else:
ssl_mode = "disable"
self._client = psycopg2.connect(
database=self._config.get(Constants.DATABASE),
host=self._config.get(Constants.HOST),
user=self._config.get(Constants.USER),
password=self._config.get(Constants.PASSWORD),
port=str(self._config.get(Constants.PORT)),
sslmode=ssl_mode,
)
return vector_db
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
vector_db = self.get_vector_db_instance()
test_result: bool = VectorDBHelper.test_vector_db_instance(
vector_store=vector_db
)
# Delete the collection that was created for testing
if self._client is not None:
self._client.cursor().execute(
f"DROP TABLE IF EXISTS "
f"{self._schema_name}.data_{self._collection_name} CASCADE"
)
self._client.commit()
return test_result
def close(self, **kwargs: Any) -> None:
if self._client:
self._client.close()
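# Illustrative sketch (not part of the original module): the settings shape
# this adapter expects. Every value below is a placeholder; running it needs
# a reachable Postgres instance with the pgvector extension enabled.
if __name__ == "__main__":
    settings = {
        "host": "localhost",
        "port": 5432,
        "database": "vectors",
        "user": "postgres",
        "password": "<password>",
        "schema": "public",
        "enable_ssl": False,
        "collection_name": "demo_org",
        "embedding_dimension": 1536,
    }
    adapter = Postgres(settings)
    print(adapter.test_connection())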
| 4,283 | Python | .py | 103 | 31.631068 | 84 | 0.626622 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,689 | supabase.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/supabase/src/supabase.py | import os
from typing import Any, Optional
from urllib.parse import quote_plus
from llama_index.core.vector_stores.types import VectorStore
from llama_index.vector_stores.supabase import SupabaseVectorStore
from vecs import Client
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
class Constants:
DATABASE = "database"
HOST = "host"
PASSWORD = "password"
PORT = "port"
USER = "user"
COLLECTION_NAME = "base_demo"
class Supabase(VectorDBAdapter):
def __init__(self, settings: dict[str, Any]):
self._config = settings
self._client: Optional[Client] = None
self._collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._vector_db_instance = self._get_vector_db_instance()
super().__init__("Supabase", self._vector_db_instance)
@staticmethod
def get_id() -> str:
return "supabase|e6998e3c-3595-48c0-a190-188dbd803858"
@staticmethod
def get_name() -> str:
return "Supabase"
@staticmethod
def get_description() -> str:
return "Supabase VectorDB"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/supabase.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_vector_db_instance(self) -> VectorStore:
return self._vector_db_instance
def _get_vector_db_instance(self) -> VectorStore:
try:
dimension = self._config.get(
VectorDbConstants.EMBEDDING_DIMENSION,
VectorDbConstants.DEFAULT_EMBEDDING_SIZE,
)
self._collection_name = VectorDBHelper.get_collection_name(
self._config.get(VectorDbConstants.VECTOR_DB_NAME),
self._config.get(
VectorDbConstants.EMBEDDING_DIMENSION,
dimension,
),
)
user = str(self._config.get(Constants.USER))
password = str(self._config.get(Constants.PASSWORD))
encoded_password = quote_plus(str(password))
host = str(self._config.get(Constants.HOST))
port = str(self._config.get(Constants.PORT))
db_name = str(self._config.get(Constants.DATABASE))
postgres_connection_string = (
f"postgresql://{user}:{encoded_password}@{host}:{port}/{db_name}"
)
vector_db: VectorStore = SupabaseVectorStore(
postgres_connection_string=postgres_connection_string,
collection_name=self._collection_name,
dimension=dimension,
)
if vector_db is not None:
self._client = vector_db.client
return vector_db
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
vector_db = self.get_vector_db_instance()
test_result: bool = VectorDBHelper.test_vector_db_instance(
vector_store=vector_db
)
# Delete the collection that was created for testing
if self._client is not None:
self._client.delete_collection(self._collection_name)
return test_result
def close(self, **kwargs: Any) -> None:
if self._client:
self._client.close()
| 3,630 | Python | .py | 88 | 32.022727 | 81 | 0.63755 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,690 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/supabase/src/__init__.py | from .supabase import Supabase
metadata = {
"name": Supabase.__name__,
"version": "1.0.0",
"adapter": Supabase,
"description": "Supabase VectorDB adapter",
"is_active": False,
}
| 199 | Python | .py | 8 | 21.25 | 47 | 0.642105 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,691 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/milvus/src/__init__.py | from .milvus import Milvus
metadata = {
"name": Milvus.__name__,
"version": "1.0.0",
"adapter": Milvus,
"description": "Milvus VectorDB adapter",
"is_active": True,
}
| 188 | Python | .py | 8 | 19.875 | 45 | 0.620112 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,692 | milvus.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/milvus/src/milvus.py | import os
from typing import Any, Optional
from llama_index.core.vector_stores.types import VectorStore
from llama_index.vector_stores.milvus import MilvusVectorStore
from pymilvus import MilvusClient
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
class Constants:
URI = "uri"
TOKEN = "token"
DIM_VALUE = 1536
class Milvus(VectorDBAdapter):
def __init__(self, settings: dict[str, Any]):
self._config = settings
self._client: Optional[MilvusClient] = None
self._collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._vector_db_instance = self._get_vector_db_instance()
super().__init__("Milvus", self._vector_db_instance)
@staticmethod
def get_id() -> str:
return "milvus|3f42f6f9-4b8e-4546-95f3-22ecc9aca442"
@staticmethod
def get_name() -> str:
return "Milvus"
@staticmethod
def get_description() -> str:
return "Milvus VectorDB"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/Milvus.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_vector_db_instance(self) -> VectorStore:
return self._vector_db_instance
def _get_vector_db_instance(self) -> VectorStore:
try:
dimension = self._config.get(
VectorDbConstants.EMBEDDING_DIMENSION,
VectorDbConstants.DEFAULT_EMBEDDING_SIZE,
)
self._collection_name = VectorDBHelper.get_collection_name(
self._config.get(VectorDbConstants.VECTOR_DB_NAME),
dimension,
)
vector_db: VectorStore = MilvusVectorStore(
uri=self._config.get(Constants.URI, ""),
collection_name=self._collection_name,
token=self._config.get(Constants.TOKEN, ""),
dim=dimension,
)
if vector_db is not None:
self._client = vector_db.client
return vector_db
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
vector_db = self.get_vector_db_instance()
test_result: bool = VectorDBHelper.test_vector_db_instance(
vector_store=vector_db
)
# Delete the collection that was created for testing
if self._client is not None:
self._client.drop_collection(self._collection_name)
return test_result
def close(self, **kwargs: Any) -> None:
if self._client:
self._client.close()
| 2,935 | Python | .py | 73 | 31.547945 | 77 | 0.646294 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,693 | no_op_custom_vectordb.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/no_op/src/no_op_custom_vectordb.py | import time
from typing import Any
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import VectorStore, VectorStoreQueryResult
class NoOpCustomVectorDB(VectorStore):
stores_text: bool = True
stores_node: bool = True
is_embedding_query: bool = True
wait_time: float = 0
def __init__(
self,
wait_time: float,
dim: int,
) -> None:
"""Initialize params."""
wait_time = wait_time
dim = dim
def query(self, query, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store."""
node1 = TextNode(text="This is a dummy document.", id_="1")
similarity_scores = [0.9]
embeddings = ["test"] # Dummy embeddings for each node
query_result = VectorStoreQueryResult(
nodes=[node1], similarities=similarity_scores, ids=embeddings
)
time.sleep(self.wait_time)
return query_result
| 964 | Python | .py | 27 | 28.851852 | 84 | 0.649839 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,694 | no_op_vectordb.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/no_op/src/no_op_vectordb.py | import os
import time
from typing import Any
from llama_index.core.schema import BaseNode
from llama_index.core.vector_stores.types import VectorStore
from unstract.sdk.adapters.exceptions import AdapterError
from unstract.sdk.adapters.vectordb.constants import VectorDbConstants
from unstract.sdk.adapters.vectordb.helper import VectorDBHelper
from unstract.sdk.adapters.vectordb.no_op.src.no_op_custom_vectordb import (
NoOpCustomVectorDB,
)
from unstract.sdk.adapters.vectordb.vectordb_adapter import VectorDBAdapter
class NoOpVectorDB(VectorDBAdapter):
def __init__(self, settings: dict[str, Any]):
self._config = settings
self._collection_name: str = VectorDbConstants.DEFAULT_VECTOR_DB_NAME
self._vector_db_instance = self._get_vector_db_instance()
super().__init__("NoOpVectorDB", self._vector_db_instance)
@staticmethod
def get_id() -> str:
return "noOpVectorDb|ca4d6056-4971-4bc8-97e3-9e36290b5bc0"
@staticmethod
def get_name() -> str:
return "No Op VectorDB"
@staticmethod
def get_description() -> str:
return "No Op VectorDB"
@staticmethod
def get_icon() -> str:
return "/icons/adapter-icons/noOpVectorDb.png"
@staticmethod
def get_json_schema() -> str:
f = open(f"{os.path.dirname(__file__)}/static/json_schema.json")
schema = f.read()
f.close()
return schema
def get_vector_db_instance(self) -> VectorStore:
return self._vector_db_instance
def _get_vector_db_instance(self) -> VectorStore:
try:
dimension = self._config.get(
VectorDbConstants.EMBEDDING_DIMENSION,
VectorDbConstants.DEFAULT_EMBEDDING_SIZE,
)
self._collection_name = VectorDBHelper.get_collection_name(
self._config.get(VectorDbConstants.VECTOR_DB_NAME),
dimension,
)
vector_db: VectorStore = NoOpCustomVectorDB(
dim=dimension, wait_time=self._config.get(VectorDbConstants.WAIT_TIME)
)
self._client = vector_db.client
return vector_db
except Exception as e:
raise AdapterError(str(e))
def test_connection(self) -> bool:
time.sleep(self._config.get("wait_time"))
return True
def close(self, **kwargs: Any) -> None:
pass
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
pass
def add(self, ref_doc_id: str, nodes: list[BaseNode]) -> list[str]:
mock_result: list[str] = []
time.sleep(self._config.get("wait_time"))
return mock_result
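# Illustrative sketch (not part of the original module): the no-op adapter can
# stand in for a real vector DB when exercising a pipeline; it only sleeps for
# the configured wait_time.
if __name__ == "__main__":
    adapter = NoOpVectorDB({"wait_time": 0})
    print(adapter.test_connection())  # True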
| 2,675 | Python | .py | 66 | 32.69697 | 86 | 0.661141 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,695 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/vectordb/no_op/src/__init__.py | from unstract.sdk.adapters.vectordb.no_op.src.no_op_vectordb import NoOpVectorDB
metadata = {
"name": NoOpVectorDB.__name__,
"version": "1.0.0",
"adapter": NoOpVectorDB,
"description": "NoOpVectorDB",
"is_active": True,
}
| 243 | Python | .py | 8 | 26.75 | 80 | 0.683761 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,696 | __init__.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/__init__.py | from unstract.sdk.adapters import AdapterDict
from unstract.sdk.adapters.embedding.register import EmbeddingRegistry
adapters: AdapterDict = {}
EmbeddingRegistry.register_adapters(adapters)
| 191 | Python | .py | 4 | 46.5 | 70 | 0.876344 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,697 | helper.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/helper.py | import logging
from typing import Any, Optional
from llama_index.core.embeddings import BaseEmbedding
from unstract.sdk.adapters.exceptions import AdapterError
logger = logging.getLogger(__name__)
class EmbeddingConstants:
DEFAULT_EMBED_BATCH_SIZE = 10
EMBED_BATCH_SIZE = "embed_batch_size"
class EmbeddingHelper:
@staticmethod
def get_embedding_batch_size(config: dict[str, Any]) -> int:
if config.get(EmbeddingConstants.EMBED_BATCH_SIZE) is None:
embedding_batch_size = EmbeddingConstants.DEFAULT_EMBED_BATCH_SIZE
else:
embedding_batch_size = int(
config.get(
EmbeddingConstants.EMBED_BATCH_SIZE,
EmbeddingConstants.DEFAULT_EMBED_BATCH_SIZE,
)
)
return embedding_batch_size
@staticmethod
def test_embedding_instance(embedding: Optional[BaseEmbedding]) -> bool:
try:
if embedding is None:
return False
response = embedding._get_text_embedding("This is a test")
if len(response) != 0:
return True
else:
return False
except Exception as e:
logger.error(f"Error occured while testing adapter {e}")
raise AdapterError(str(e))
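# Illustrative sketch (not part of the original module): reading the batch
# size from adapter settings, falling back to the default of 10.
if __name__ == "__main__":
    print(EmbeddingHelper.get_embedding_batch_size({"embed_batch_size": "25"}))  # 25
    print(EmbeddingHelper.get_embedding_batch_size({}))  # 10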
| 1,323 | Python | .py | 34 | 29.088235 | 78 | 0.636222 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,698 | register.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/register.py | import logging
import os
from importlib import import_module
from typing import Any
from unstract.sdk.adapters.constants import Common
from unstract.sdk.adapters.embedding.embedding_adapter import EmbeddingAdapter
from unstract.sdk.adapters.registry import AdapterRegistry
logger = logging.getLogger(__name__)
class EmbeddingRegistry(AdapterRegistry):
@staticmethod
def register_adapters(adapters: dict[str, Any]) -> None:
current_directory = os.path.dirname(os.path.abspath(__file__))
package = "unstract.sdk.adapters.embedding"
for adapter in os.listdir(current_directory):
adapter_path = os.path.join(current_directory, adapter, Common.SRC_FOLDER)
# Check if the item is a directory and not
# a special directory like __pycache__
if os.path.isdir(adapter_path) and not adapter.startswith("__"):
EmbeddingRegistry._build_adapter_list(adapter, package, adapters)
if len(adapters) == 0:
logger.warning("No embedding adapter found.")
@staticmethod
def _build_adapter_list(
adapter: str, package: str, adapters: dict[str, Any]
) -> None:
try:
full_module_path = f"{package}.{adapter}.{Common.SRC_FOLDER}"
module = import_module(full_module_path)
metadata = getattr(module, Common.METADATA, {})
if metadata.get("is_active", False):
adapter_class: EmbeddingAdapter = metadata[Common.ADAPTER]
adapter_id = adapter_class.get_id()
if not adapter_id or (adapter_id in adapters):
logger.warning(f"Duplicate Id : {adapter_id}")
else:
adapters[adapter_id] = {
Common.MODULE: module,
Common.METADATA: metadata,
}
except ModuleNotFoundError as exception:
logger.warning(f"Unable to import embedding adapters : {exception}")
| 2,001 | Python | .py | 41 | 38.292683 | 86 | 0.639713 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,699 | embedding_adapter.py | Zipstack_unstract-sdk/src/unstract/sdk/adapters/embedding/embedding_adapter.py | from abc import ABC, abstractmethod
from typing import Any
from llama_index.core import MockEmbedding
from llama_index.core.embeddings import BaseEmbedding
from unstract.sdk.adapters.base import Adapter
from unstract.sdk.adapters.enums import AdapterTypes
class EmbeddingAdapter(Adapter, ABC):
def __init__(self, name: str):
super().__init__(name)
self.name = name
@staticmethod
def get_id() -> str:
return ""
@staticmethod
def get_name() -> str:
return ""
@staticmethod
def get_description() -> str:
return ""
@staticmethod
@abstractmethod
def get_provider() -> str:
pass
@staticmethod
def get_icon() -> str:
return ""
@staticmethod
def get_json_schema() -> str:
return ""
@staticmethod
def get_adapter_type() -> AdapterTypes:
return AdapterTypes.EMBEDDING
def get_embedding_instance(self, embed_config: dict[str, Any]) -> BaseEmbedding:
"""Instantiate the llama index BaseEmbedding class.
Returns:
BaseEmbedding: llama index implementation of the Embedding
Raises exceptions for any error
"""
return MockEmbedding(embed_dim=1)
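# Illustrative sketch (not part of the original module): a minimal concrete
# embedding adapter; it relies on the MockEmbedding returned by the base class.
# The provider string is a placeholder.
class _MockEmbeddingAdapter(EmbeddingAdapter):
    def __init__(self) -> None:
        super().__init__("MockEmbeddingAdapter")
    @staticmethod
    def get_provider() -> str:
        return "mock"
    def test_connection(self) -> bool:
        return True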
| 1,238 | Python | .py | 39 | 25.384615 | 84 | 0.666105 | Zipstack/unstract-sdk | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |