code (stringlengths 114-1.05M) | path (stringlengths 3-312) | quality_prob (float64 0.5-0.99) | learning_prob (float64 0.2-1) | filename (stringlengths 3-168) | kind (stringclasses 1 value)
---|---|---|---|---|---|
import operator
from fractions import Fraction
from functools import partial, reduce, wraps
def flatten(lst) -> list:
"""Flattens a list of lists"""
return [item for sublist in lst for item in sublist]
def remove_nones(lst) -> list:
    """Removes 'None' values from the given list"""
    return [x for x in lst if x is not None]
def id(x):
"""Identity function"""
return x
def merge_dicts(a, b, op=operator.add):
    """Merges two dicts, combining the values of shared keys with `op`"""
    common = {k: op(a[k], b[k]) for k in set(a) & set(b)}
    return {**a, **b, **common}
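# Illustrative example of merge_dicts (added for clarity, not part of the original module):
#     merge_dicts({"a": 1, "b": 2}, {"b": 3, "c": 4})
#     -> {"a": 1, "b": 5, "c": 4}   (shared key "b" combined with operator.add)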
def rotate_left(lst, n):
"""Rotate an array `n` elements to the left"""
return lst[n:] + lst[:n]
def partial_function(f):
"""Decorator for functions to support partial application. When not given enough
arguments, a decoracted function will return a new function for the remaining
arguments"""
def wrapper(*args):
try:
return f(*args)
except TypeError as e:
return partial(f, *args)
return wrapper
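# Illustrative usage of partial_function (added for clarity, not part of the original module):
#     @partial_function
#     def add(a, b, c):
#         return a + b + c
#     add(1, 2)(3)  -> 6   (too few arguments returns a partial application instead of raising)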
def show_fraction(frac):
    if frac is None:
return "None"
if frac.denominator == 1:
return str(frac.numerator)
lookup = {
Fraction(1, 2): "½",
Fraction(1, 3): "⅓",
Fraction(2, 3): "⅔",
Fraction(1, 4): "¼",
Fraction(3, 4): "¾",
Fraction(1, 5): "⅕",
Fraction(2, 5): "⅖",
Fraction(3, 5): "⅗",
Fraction(4, 5): "⅘",
Fraction(1, 6): "⅙",
Fraction(5, 6): "⅚",
Fraction(1, 7): "⅐",
Fraction(1, 8): "⅛",
Fraction(3, 8): "⅜",
Fraction(5, 8): "⅝",
Fraction(7, 8): "⅞",
Fraction(1, 9): "⅑",
Fraction(1, 10): "⅒",
}
if frac in lookup:
result = lookup[frac]
else:
result = "(%d/%d)" % (frac.numerator, frac.denominator)
return result
def curry(f):
@wraps(f)
def _(arg):
try:
return f(arg)
except TypeError:
return curry(wraps(f)(partial(f, arg)))
return _
def uncurry(f):
@wraps(f)
def _(*args):
return reduce(lambda x, y: x(y), args, f)
return _
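# Illustrative usage of curry/uncurry (added for clarity, not part of the original module):
#     @curry
#     def add(a, b, c):
#         return a + b + c
#     add(1)(2)(3)          -> 6   (arguments supplied one at a time)
#     uncurry(add)(1, 2, 3) -> 6   (the curried function applied to several arguments at once)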
|
/sardine_system-0.4.0-py3-none-any.whl/sardine_core/sequences/tidal_parser/utils.py
| 0.69181 | 0.461623 |
utils.py
|
pypi
|
from .tree_calc import CalculateTree
from ...base import BaseParser
from lark import Lark, Tree
from lark.exceptions import LarkError, UnexpectedCharacters, UnexpectedToken
from pathlib import Path
from .chord import Chord
from ...logger import print
import traceback
__all__ = ("ListParser",)
class ParserError(Exception):
pass
class ShortParserError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
grammar_path = Path(__file__).parent
grammar = grammar_path / "sardine.lark"
class ListParser(BaseParser):
def __init__(
self,
parser_type: str = "sardine",
debug: bool = False,
):
"""
        ListParser is the main interface to the SPL pattern language, a small
        pattern language capable of handling notes, names, samples, OSC
        addresses, etc.
"""
super().__init__()
self.debug = debug
self.parser_type = parser_type
# Variables usable only in the SPL environment
self.inner_variables = {}
# Current Global Scale
self.global_scale: str = "major"
def __repr__(self) -> str:
return f"<{type(self).__name__} debug={self.debug} type={self.parser_type!r}>"
def setup(self):
parsers = {
"sardine": {
"raw": Lark.open(
grammar,
rel_to=__file__,
parser="lalr",
start="start",
cache=True,
lexer="contextual",
),
"full": Lark.open(
grammar,
rel_to=__file__,
parser="lalr",
start="start",
cache=True,
lexer="contextual",
transformer=CalculateTree(
clock=self.env.clock,
variables=self.env.variables,
inner_variables=self.inner_variables,
global_scale=self.global_scale,
),
),
},
}
try:
self._result_parser = parsers[self.parser_type]["full"]
self._printing_parser = parsers[self.parser_type]["raw"]
except KeyError:
ParserError(f"Invalid Parser grammar, {self.parser_type} is not a grammar.")
def __flatten_result(self, pat):
"""Flatten a nested list, for usage after parsing a pattern. Will flatten deeply
nested lists and return a one dimensional array.
Args:
pat (list): A potentially nested list
Returns:
list: A flat list (one-dimensional)
"""
from collections.abc import Iterable
for x in pat:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes, Chord)):
yield from self._flatten_result(x)
else:
yield x
def _flatten_result(self, pat):
result = list(self.__flatten_result(pat))
return result
def pretty_print(self, expression: str):
"""Pretty print an expression coming from the parser. Works for any
parser and will print three things on stdout if successful:
- the expression itself
- the syntax tree generated by the parser for this expression
- the result of parsing that syntax tree
Args:
expression (str): An expression to pretty print
"""
print(f"EXPR: {expression}")
print(Tree.pretty(self._printing_parser.parse(expression)))
result = self._result_parser.parse(expression)
print(f"RESULT: {result}")
print(f"USER RESULT: {self._flatten_result(result)}")
def print_tree_only(self, expression: str):
"""Print the syntax tree using Lark.Tree
Args:
expression (str): An expression to print
"""
print(Tree.pretty(self._printing_parser.parse(expression)))
def parse(self, *args):
"""Main method to parse a pattern. Parses 'pattern' and returns
a flattened list to index on to extract individual values. Note
that this function is temporary. Support for stacked values is
planned.
Args:
pattern (str): A pattern to parse
Raises:
ParserError: Raised if the pattern is invalid
Returns:
list: The parsed pattern as a list of values
"""
pattern = args[0]
final_pattern = []
try:
final_pattern = self._result_parser.parse(pattern)
        except Exception as e:
            print(f"[red][Pattern Language Error][/red] {e}")
if self.debug:
print(f"Pat: {self._flatten_result(final_pattern)}")
return self._flatten_result(final_pattern)
def _parse_debug(self, pattern: str):
"""Parses a whole pattern in debug mode. 'Debug mode' refers to
a mode where both the initial expression, the syntax tree and the
        pattern result are printed directly on stdout. This makes it possible to study
        the construction of a result by looking at the syntax tree.
        Args:
            pattern (str): A pattern to be parsed.
"""
try:
self.pretty_print(expression=pattern)
except Exception as e:
            tb_str = traceback.format_exception(
                type(e), e, e.__traceback__
            )
error_message = "".join(tb_str)
raise ParserError(f"Error parsing pattern {pattern}: {error_message}")
|
/sardine_system-0.4.0-py3-none-any.whl/sardine_core/sequences/sardine_parser/list_parser.py
| 0.805938 | 0.192065 |
list_parser.py
|
pypi
|
from itertools import count, cycle, dropwhile, islice, takewhile
from .chord import Chord
def floating_point_range(start, end, step):
"""Analog to range for floating point numbers
Args:
start (float): A minimum float
end (float): A maximum float
step (float): Step for increment
Returns:
list: A list of floats from 'start' to 'end', layed out
every 'step'.
"""
assert step != 0
sample_count = int(abs(end - start) / step)
return islice(count(start, step), sample_count)
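# Illustrative example (added for clarity, not part of the original module):
#     list(floating_point_range(0, 1, 0.25))  -> [0, 0.25, 0.5, 0.75]
#     (the end value itself is excluded, like the builtin range)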
def allow_silence_1(func):
"""Wrap a unary function to return None when called with None"""
def result_func(x):
if x is not None:
return func(x)
else:
return None
return result_func
def allow_silence_2(func):
"""Wrap a binary function to return None when called with None"""
def result_func(x, y):
if x is not None and y is not None:
return func(x, y)
else:
return None
return result_func
def map_unary_function(func, value):
"""Apply an unary function to a value or a list of values
Args:
func: The function to apply
value: The value or the list of values
"""
if isinstance(value, Chord):
return Chord(*[allow_silence_1(func)(x) for x in value])
else:
return [allow_silence_1(func)(x) for x in value]
def zip_cycle(left, right):
"""Zip two lists, cycling the shortest one"""
if len(left) < len(right):
return zip(cycle(left), right)
else:
return zip(left, cycle(right))
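# Illustrative example (added for clarity, not part of the original module):
#     list(zip_cycle([1, 2, 3], [10, 20]))  -> [(1, 10), (2, 20), (3, 10)]
#     (the shorter list is cycled until the longer one is exhausted)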
def map_binary_function(func, left, right):
"""Apply an binary function to a value or a list of values
Args:
func: The function to apply
left: The left value or list of values
right: The right value or list of values
"""
if isinstance(left, Chord) and not isinstance(right, Chord):
return [allow_silence_2(func)(left, y) for y in right]
elif isinstance(right, Chord) and not isinstance(left, Chord):
return [allow_silence_2(func)(x, right) for x in left]
else:
return [allow_silence_2(func)(x, y) for x, y in zip_cycle(left, right)]
# Taken from:
# https://stackoverflow.com/questions/26531116/is-it-a-way-to-know-index-using-itertools-cycle
class CyclicalList:
def __init__(self, initial_list):
self._initial_list = initial_list
def __getitem__(self, item):
if isinstance(item, slice):
if item.stop is None:
raise ValueError("Cannot slice without stop")
iterable = enumerate(cycle(self._initial_list))
if item.start:
iterable = dropwhile(lambda x: x[0] < item.start, iterable)
return [
element
for _, element in takewhile(lambda x: x[0] < item.stop, iterable)
]
for index, element in enumerate(cycle(self._initial_list)):
if index == item:
return element
def __iter__(self):
return cycle(self._initial_list)
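# Illustrative usage of CyclicalList (added for clarity, not part of the original module):
#     c = CyclicalList([1, 2, 3])
#     c[4]    -> 2               (indexing wraps around the initial list)
#     c[0:5]  -> [1, 2, 3, 1, 2]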
|
/sardine_system-0.4.0-py3-none-any.whl/sardine_core/sequences/sardine_parser/utils.py
| 0.892093 | 0.486392 |
utils.py
|
pypi
|
import asyncio
import concurrent.futures
import threading
from abc import ABC, abstractmethod
from typing import Optional
from .handler import BaseHandler
__all__ = ("BaseRunnerMixin", "BaseThreadedLoopMixin", "BaseRunnerHandler")
class BaseRunnerMixin(ABC):
"""Provides methods for running a background asynchronous function."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._run_task: Optional[asyncio.Task] = None
@abstractmethod
async def run(self):
"""The method that will be executed in the background.
This method must be ready to handle an `asyncio.CancelledError`.
"""
def is_running(self) -> bool:
"""Indicates if an asyncio task is currently executing `run()`."""
return self._run_task is not None and not self._run_task.done()
def start(self) -> bool:
"""Starts the `run()` method in the background.
Returns:
bool: True if the task was started, False otherwise.
"""
allowed = not self.is_running()
if allowed:
self._run_task = asyncio.create_task(self.run())
return allowed
def stop(self) -> bool:
"""Stops the background task by attempting to cancel it.
As with any asyncio task, the `run()` method can prevent
cancellation by catching `asyncio.CancelledError`.
Returns:
bool: True if the task was cancelled, False otherwise.
"""
if self.is_running():
return self._run_task.cancel()
return False
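# Illustrative subclass (a sketch added for clarity, not part of the original module):
#     class Ticker(BaseRunnerMixin):
#         async def run(self):
#             try:
#                 while True:
#                     await asyncio.sleep(1)  # do periodic work here
#             except asyncio.CancelledError:
#                 pass  # stop() cancels the task; clean up here if needed
#     # ticker.start() schedules run() as an asyncio.Task; ticker.stop() cancels it.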
class BaseThreadedLoopMixin(BaseRunnerMixin, ABC):
"""Provides methods for running a looping function in another thread.
Args:
loop_interval (float):
The amount of time to sleep between each iteration.
"""
def __init__(self, *args, loop_interval: float, **kwargs):
super().__init__(*args, **kwargs)
self.loop_interval = loop_interval
self._run_thread: Optional[threading.Thread] = None
self._completed_event: Optional[asyncio.Event] = None
@abstractmethod
def loop(self):
"""Called on every iteration of the loop."""
@abstractmethod
def before_loop(self):
"""Called before the loop is about to start."""
@abstractmethod
def after_loop(self):
"""Called after the loop has stopped."""
def _run(self):
try:
self.before_loop()
fut = asyncio.run_coroutine_threadsafe(
self._completed_event.wait(), self._loop
)
try:
while not self._completed_event.is_set():
self.loop()
try:
fut.result(timeout=self.loop_interval)
except concurrent.futures.CancelledError:
break
except concurrent.futures.TimeoutError:
pass
finally:
self.after_loop()
finally:
self._completed_event.set()
async def run(self):
self._completed_event = asyncio.Event()
self._loop = asyncio.get_running_loop()
self._run_thread = threading.Thread(target=self._run)
self._run_thread.start()
try:
await self._completed_event.wait()
finally:
self._completed_event.set()
class BaseRunnerHandler(BaseRunnerMixin, BaseHandler, ABC):
"""Adds automatic starting and stopping to a runner using the handler system.
    Subclasses that override `setup()`, `teardown()`, or `hook()` must call
the corresponding super method.
"""
TRANSPORT_EVENTS = ("start", "stop", "pause", "resume")
def setup(self):
for event in self.TRANSPORT_EVENTS:
self.register(event)
if self.env.is_running():
self.start()
def teardown(self):
self.stop()
def hook(self, event: str, *args):
if event in ("start", "resume"):
self.start()
elif event == "stop":
self.stop()
|
/sardine_system-0.4.0-py3-none-any.whl/sardine_core/base/runner.py
| 0.92891 | 0.172346 |
runner.py
|
pypi
|
import functools
import inspect
from typing import TYPE_CHECKING, Callable, ParamSpec, TypeVar, Union
from .Messages import *
if TYPE_CHECKING:
from ..base import BaseClock
P = ParamSpec("P")
T = TypeVar("T")
Number = Union[float, int]
MISSING = object()
def alias_param(name: str, alias: str):
"""
Alias a keyword parameter in a function. Throws a TypeError when a value is
given for both the original kwarg and the alias. Method taken from
github.com/thegamecracks/abattlemetrics/blob/main/abattlemetrics/client.py
(@thegamecracks).
"""
def deco(func: Callable[P, T]):
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
alias_value = kwargs.pop(alias, MISSING)
if alias_value is not MISSING:
if name in kwargs:
raise TypeError(f"Cannot pass both {name!r} and {alias!r} in call")
kwargs[name] = alias_value
return func(*args, **kwargs)
return wrapper
return deco
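# Illustrative usage of alias_param (added for clarity, not part of the original module; the
# function and parameter names below are hypothetical):
#     @alias_param(name="duration", alias="d")
#     def play(note, duration=1.0): ...
#     play("C4", d=2.0)                # forwarded as duration=2.0
#     play("C4", d=2.0, duration=3.0)  # raises TypeError because both are given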
def get_snap_deadline(clock: "BaseClock", offset_beats: Union[float, int]):
time = clock.shifted_time
next_bar = clock.get_bar_time(1, time=time)
offset = clock.get_beat_time(offset_beats, sync=False)
return time + next_bar + offset
def lerp(
x: Number,
in_min: Number,
in_max: Number,
out_min: Number,
out_max: Number,
) -> float:
"""Linearly interpolates a value v from range (x, y) to range (x', y')."""
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
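# Illustrative example (added for clarity, not part of the original module):
#     lerp(5, 0, 10, 0, 100)  -> 50.0   (5 sits halfway through 0..10, so halfway through 0..100)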
async def maybe_coro(func: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T:
if inspect.iscoroutinefunction(func):
return await func(*args, **kwargs)
return func(*args, **kwargs)
def plural(n: int, word: str, suffix: str = "s"):
return word if n == 1 else word + suffix
def join(*args):
"""Alternative to the str.join function. Better in live contexts!
Parameters:
        *args (str): strings to join with a whitespace
    Returns:
        str: the strings joined using ' '.join(args), or the first argument if not all arguments are strings
"""
if all(isinstance(e, str) for e in args):
return " ".join(args)
else:
return args[0]
|
/sardine_system-0.4.0-py3-none-any.whl/sardine_core/utils/__init__.py
| 0.811713 | 0.238772 |
__init__.py
|
pypi
|
import contextlib
import contextvars
from ..base import BaseHandler
__all__ = ("Time",)
shift = contextvars.ContextVar("shift", default=0.0)
"""
This specifies the amount of time to offset in the current context.
Usually this is updated within the context of scheduled functions
to simulate sleeping without actually blocking the function. Behavior is
undefined if time is shifted in the global context.
"""
class Time(BaseHandler):
"""Contains the origin of a FishBowl's time.
Any new clocks must continue from this origin when they are running,
and must update the origin when they are paused or stopped.
"""
def __init__(
self,
origin: float = 0.0,
):
super().__init__()
self._origin = origin
def __repr__(self) -> str:
return "{}({})".format(
type(self).__name__,
" ".join(f"{attr}={getattr(self, attr)!r}" for attr in ("origin",)),
)
@property
def origin(self) -> float:
"""The origin of the fish bowl's time.
When this property is updated, an `origin_update` event
will be dispatched with two arguments, the old and the new
origin.
"""
return self._origin
@origin.setter
def origin(self, new_origin: float):
old_origin = self._origin
self._origin = new_origin
self.env.dispatch("origin_update", old_origin, new_origin)
@property
def shift(self) -> float:
"""The time shift in the current context.
This is useful for simulating sleeps without blocking.
"""
return shift.get()
@shift.setter
def shift(self, seconds: float):
shift.set(seconds)
@contextlib.contextmanager
def scoped_shift(self, seconds: float):
"""Returns a context manager that adds `seconds` to the clock.
After the context manager is exited, the time shift is restored
to its previous value.
"""
token = shift.set(shift.get() + seconds)
try:
yield
finally:
shift.reset(token)
def reset(self):
"""Resets the time origin back to 0."""
self._origin = 0.0
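# Illustrative usage of scoped_shift (a sketch added for clarity, not part of the original module;
# `time_handler` is a hypothetical Time instance attached to a running FishBowl):
#     with time_handler.scoped_shift(0.5):
#         ...  # code here observes time shifted forward by 0.5 seconds
#     # on exit, the previous shift value is restored automatically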
|
/sardine_system-0.4.0-py3-none-any.whl/sardine_core/clock/time.py
| 0.783988 | 0.332026 |
time.py
|
pypi
|
import { Decoration, DecorationSet } from "@codemirror/view"
import { StateField, StateEffect, ChangeDesc } from "@codemirror/state"
import { EditorView } from "@codemirror/view"
import { invertedEffects } from "@codemirror/commands"
import { Extension } from "@codemirror/state"
function mapRange(range: {from: number, to: number}, change: ChangeDesc) {
let from = change.mapPos(range.from), to = change.mapPos(range.to)
return from < to ? {from, to} : undefined
}
const addHighlight = StateEffect.define<{from: number, to: number}>({
map: mapRange
})
const removeHighlight = StateEffect.define<{from: number, to: number}>({
map: mapRange
})
const highlight = Decoration.mark({
attributes: {style: `background-color: #ffad42`}
})
const highlightedRanges = StateField.define({
create() {
return Decoration.none
},
update(ranges, tr) {
ranges = ranges.map(tr.changes)
for (let e of tr.effects) {
if (e.is(addHighlight))
ranges = addRange(ranges, e.value)
else if (e.is(removeHighlight))
ranges = cutRange(ranges, e.value)
}
return ranges
},
provide: field => EditorView.decorations.from(field)
})
function cutRange(ranges: DecorationSet, r: {from: number, to: number}) {
let leftover: any[] = []
ranges.between(r.from, r.to, (from, to, deco) => {
if (from < r.from) leftover.push(deco.range(from, r.from))
if (to > r.to) leftover.push(deco.range(r.to, to))
})
return ranges.update({
filterFrom: r.from,
filterTo: r.to,
filter: () => false,
add: leftover
})
}
function addRange(ranges: DecorationSet, r: {from: number, to: number}) {
ranges.between(r.from, r.to, (from, to) => {
if (from < r.from) r = {from, to: r.to}
if (to > r.to) r = {from: r.from, to}
})
return ranges.update({
filterFrom: r.from,
filterTo: r.to,
filter: () => false,
add: [highlight.range(r.from, r.to)]
})
}
const invertHighlight = invertedEffects.of(tr => {
let found = []
for (let e of tr.effects) {
if (e.is(addHighlight)) found.push(removeHighlight.of(e.value))
else if (e.is(removeHighlight)) found.push(addHighlight.of(e.value))
}
let ranges = tr.startState.field(highlightedRanges)
tr.changes.iterChangedRanges((chFrom, chTo) => {
ranges.between(chFrom, chTo, (rFrom, rTo) => {
if (rFrom >= chFrom || rTo <= chTo) {
let from = Math.max(chFrom, rFrom), to = Math.min(chTo, rTo)
if (from < to) found.push(addHighlight.of({from, to}))
}
})
})
return found
})
export function highlightSelection(view: EditorView) {
view.dispatch({
effects: view.state.selection.ranges.filter(r => !r.empty)
.map(r => addHighlight.of(r))
})
return true
}
export function unhighlightSelection(view: EditorView) {
let highlighted = view.state.field(highlightedRanges)
let effects: any[] = []
for (let sel of view.state.selection.ranges) {
highlighted.between(sel.from, sel.to, (rFrom, rTo) => {
let from = Math.max(sel.from, rFrom), to = Math.min(sel.to, rTo)
if (from < to) effects.push(removeHighlight.of({from, to}))
})
}
view.dispatch({effects})
return true
}
export function rangeHighlighting(): Extension {
return [
highlightedRanges,
invertHighlight,
]
}
|
/sardine-web-1.1.0.tar.gz/sardine-web-1.1.0/sardine_web/client/src/highlightSelection.ts
| 0.502686 | 0.621483 |
highlightSelection.ts
|
pypi
|
<p align="center">
<img alt="sarenka-logo" src="https://raw.githubusercontent.com/pawlaczyk/sarenka/master/logo.png">
</p>
[](https://github.com/pawlaczyk/sarenka/releases/latest) [](https://github.com/pawlaczyk/sarenka/releases/latest) [](https://github.com/pawlaczyk/sarenka/releases/latest) [](https://github.com/pawlaczyk/sarenka/releases/latest)  [](https://github.com/pawlaczyk/sarenka/blob/master/LICENSE)
**♥ Free Software, requires only free accounts to third-party services ♥**
> Lack of knowledge ... that is the problem.
>
>
>[William Edwards Deming]
**SARENKA** is an Open Source Intelligence (**OSINT**) tool which helps you obtain and understand your **Attack Surface**.
The main goal is gathering information from search engines for Internet-connected devices (**https://censys.io/**, **https://www.shodan.io/**).
It scrapes data about Common Vulnerabilities and Exposures (**CVE**) and Common Weakness Enumeration (**CWE**), and also has a database where CVEs are mapped to CWEs.
It returns data about the local machine - locally installed software (from the Windows Registry) and local network information (Python libraries, popular cmd commands).
For now the application also has simple tools like a hash calculator, a Shannon entropy calculator and a very simple port scanner.
More cryptography/math tools and reconnaissance scripts are planned.
#### Look
https://www.facebook.com/ncybersec/posts/1671427243027993
# Relation between CWE and CVE - sarenka data feeder
Generating this file takes a long time, e.g. 702.5641514
#### all CWE Ids with description
https://raw.githubusercontent.com/pawlaczyk/sarenka_tools/master/cwe_all.json
#### all CVE Ids with description
In progress
#### get all CVE Ids by CWE Id
In progress
# Installation
Description in progress
# Getting started
Description in progress
Sarenka is a local web application for Windows.
#### Config
The first release gathers data from two search engines.
Example `sarenka/backend/connectors/credentials.json`:
```json
{
"censys": {
"base_url": "https://censys.io/",
"API_ID": "<my_user>",
"Secret": "<my_api_key>",
"API_URL": "https://censys.io/api/v1"
},
"shodan": {
"base_url": "https://www.shodan.io/",
"user": "<my_user>",
"api_key": "<my_api_key>"
}
}
```
# Features
- get data from **https://censys.io/** by IP
- get data from **https://www.shodan.io/** by IP
- get **DNS** data
- get **WHOIS** data
- **banner** grabbing
- find **CVEs** by **CWE**
- generate PDF reports
You can also:
- calculate **hashes** based on a user string
- calculate **Shannon entropy** based on a user string
- check whether a **port** is open or closed (always use nmap instead if you can - this one is slow)
#### Suggestions are welcome
[1.1]: http://i.imgur.com/tXSoThF.png (twitter icon with padding)
[2.1]: http://i.imgur.com/P3YfQoD.png (facebook icon with padding)
[1]: https://twitter.com/OsintSarenka
[2]: https://www.facebook.com/sarenka.osint.5
- Want some feature, another tool, or some library functionality?
- Have any idea or question? [![alt text][1.1]][1]
- Don't hesitate to contact [](https://github.com/pawlaczyk/) .
# Database
This is the tricky part, because we have 863 sqlite3 database files: default, CWE-NONE (some CVEs have no cwe_id, e.g. CVE-2013-3621) and 861 individual ones for CWEs
## Tech
Description in progress.
SARENKA uses a number of open source projects to work properly:
* [Renderforest](https://www.renderforest.com/) - logo generator
* [gawk](http://gnuwin32.sourceforge.net/packages/gawk.htm) - python manage.py migrate --database CWE_ID
* [chocolatey](https://chocolatey.org/)
* [PyCharm](https://www.jetbrains.com/pycharm/) - Community Edition
And of course SARENKA itself is open source with a [public repository][sarenka]
on GitHub.
#### Planned features
- Rewrite documentation in English (end of 2021)
- trello/ github instead of Jira
- Cover 100% code by tests
- typing backend
- document all functions and classes
- Docker
- online demo
- Jenkins
- GraphQL
- Selenium Scrapers
- More pentesting tools
- Google Dorks
- Abstract Algebra calculator
- Number Theory calculator
- Server certificate validator
- tests on Linux
- NLP
- d3js visualizations
- alternative pure command-line version
##### CI/CD Tools
- https://circleci.com/
- https://github.com/snyk-bot
#### Tests
- Tested on Windows 10
### Documentation
Until the end of March 2021 the documentation will be available only in Polish!
The documentation is available [here](https://pawlaczyk.github.io/sarenka/).
# Authors
[](https://github.com/pawlaczyk/) [](https://github.com/michalpawlaczyk) [](https://github.com/k-slonka)
## Installation
Run the following to install:
```python
pip install sarenka
```
## Usage
```python
from abstract_algebra import say_hello
# Generate "Hello, World!"
say_hello()
# Generate "Hello, Everybody!"
say_hello("Everybody")
```
# Developing sarenka
To install sarenka, along with the tools you need to develop and run tests, run the following in your virtualenv:
```bash
$ pip install -e .[dev]
```
##### Contact
[](https://github.com/pawlaczyk/)
# License
SARENKA is **licensed** under the **[MIT License]**.
[MIT License]: https://github.com/pawlaczyk/sarenka/blob/master/LICENSE
[Mirrors]: http://mirrors.jenkins-ci.org
[GitHub]: https://github.com/pawlaczyk/sarenka
[documentation]: https://pawlaczyk.github.io/sarenka/
[public repository]: https://github.com/pawlaczyk/sarenka
[//]: # (These are reference links used in the body of this note and get stripped out when the markdown processor does its job. There is no need to format nicely because it shouldn't be seen. Thanks SO - http://stackoverflow.com/questions/4823468/store-comments-in-markdown-syntax)
[sarenka]: <https://github.com/pawlaczyk/sarenka>
[git-repo-url]: <https://github.com/pawlaczyk/sarenka>
[William Edwards Deming]: <https://deming.org/deming-the-man/>
[df1]: <http://daringfireball.net/projects/markdown/>
[markdown-it]: <https://github.com/markdown-it/markdown-it>
[Ace Editor]: <http://ace.ajax.org>
[node.js]: <http://nodejs.org>
[Twitter Bootstrap]: <http://twitter.github.com/bootstrap/>
[jQuery]: <http://jquery.com>
[@tjholowaychuk]: <http://twitter.com/tjholowaychuk>
[express]: <http://expressjs.com>
[AngularJS]: <http://angularjs.org>
[Gulp]: <http://gulpjs.com>
[PlDb]: <https://github.com/joemccann/dillinger/tree/master/plugins/dropbox/README.md>
[PlGh]: <https://github.com/joemccann/dillinger/tree/master/plugins/github/README.md>
[PlGd]: <https://github.com/joemccann/dillinger/tree/master/plugins/googledrive/README.md>
[PlOd]: <https://github.com/joemccann/dillinger/tree/master/plugins/onedrive/README.md>
[PlMe]: <https://github.com/joemccann/dillinger/tree/master/plugins/medium/README.md>
[PlGa]: <https://github.com/RahulHP/dillinger/blob/master/plugins/googleanalytics/README.md>
|
/sarenka-0.0.1.tar.gz/sarenka-0.0.1/README.md
| 0.485844 | 0.95418 |
README.md
|
pypi
|
import json
from typing import Dict, Iterable, Optional
from ..base import DALHandler, QueryFilter, FilterType
class JSONDatabase(DALHandler):
"""Manages a JSON file like a database"""
def __init__(self, filename: str):
self.__filename = filename
try:
with open(filename) as data_file:
self.__data: Dict = json.loads(data_file.read())
except FileNotFoundError:
self.__data = {}
def get(self, uuid: str) -> Optional[dict]:
return self.__data.get(uuid)
def get_all(self) -> Iterable[dict]:
return self.__data.values()
def where(self, conditions: Iterable[QueryFilter]) -> Iterable[dict]:
filter_dispatch = {
FilterType.EQ: lambda row, condition: row[condition["field"]] == condition["value"],
FilterType.GT: lambda row, condition: row[condition["field"]] > condition["value"],
FilterType.LT: lambda row, condition: row[condition["field"]] < condition["value"],
FilterType.GE: lambda row, condition: row[condition["field"]] >= condition["value"],
FilterType.LE: lambda row, condition: row[condition["field"]] <= condition["value"],
FilterType.IN: lambda row, condition: condition["value"] in row[condition["field"]],
}
for row in self.__data.values():
if all(filter_dispatch[condition.name](row, condition) for condition in conditions):
yield row
def contains(self, field: str, value: str) -> Iterable[dict]:
return [
row
for row in self.get_all()
if value.lower() in row[field].lower()
]
def update(self, conditions: Iterable[QueryFilter], changes: dict):
for row in self.where(conditions):
updated_row = {**row}
for change_key, change_value in changes.items():
updated_row[change_key] = change_value
self.__data[row["uuid"]] = updated_row
def add(self, item: dict) -> dict:
if item["uuid"] in self.__data:
raise Exception("Identifier already exist")
self.__data[item["uuid"]] = item
return item
def delete(self, uuid: str):
self.__data.pop(uuid)
def commit(self):
with open(self.__filename, "wt") as data_file:
data_file.write(json.dumps(self.__data))
|
/sarf_simple_crud-0.1.0-py3-none-any.whl/simple_crud/dal/infra/json_dal.py
| 0.738858 | 0.260657 |
json_dal.py
|
pypi
|
import json
from typing import Dict, Iterable, Optional
from ..base import DALHandler, QueryFilter, FilterType
class JSONDatabase(DALHandler):
"""Manages a JSON file like a database"""
def __init__(self, filename: str):
self.__filename = filename
try:
with open(filename) as data_file:
self.__data: Dict = json.loads(data_file.read())
except FileNotFoundError:
self.__data = {}
def get(self, uuid: str) -> Optional[dict]:
return self.__data.get(uuid)
def get_all(self) -> Iterable[dict]:
return self.__data.values()
def where(self, conditions: Iterable[QueryFilter]) -> Iterable[dict]:
filter_dispatch = {
FilterType.EQ: lambda row, condition: row[condition["field"]] == condition["value"],
FilterType.GT: lambda row, condition: row[condition["field"]] > condition["value"],
FilterType.LT: lambda row, condition: row[condition["field"]] < condition["value"],
FilterType.GE: lambda row, condition: row[condition["field"]] >= condition["value"],
FilterType.LE: lambda row, condition: row[condition["field"]] <= condition["value"],
FilterType.IN: lambda row, condition: condition["value"] in row[condition["field"]],
}
for row in self.__data.values():
if all(filter_dispatch[condition.name](row, condition) for condition in conditions):
yield row
def contains(self, field: str, value: str) -> Iterable[dict]:
return [
row
for row in self.get_all()
if value.lower() in row[field].lower()
]
def update(self, conditions: Iterable[QueryFilter], changes: dict):
for row in self.where(conditions):
updated_row = {**row}
for change_key, change_value in changes.items():
updated_row[change_key] = change_value
self.__data[row["uuid"]] = updated_row
def add(self, item: dict) -> dict:
if item["uuid"] in self.__data:
raise Exception("Identifier already exist")
self.__data[item["uuid"]] = item
return item
def delete(self, uuid: str):
self.__data.pop(uuid)
def commit(self):
with open(self.__filename, "wt") as data_file:
data_file.write(json.dumps(self.__data))
|
/sarf_simple_crud-0.1.0-py3-none-any.whl/sarf_simple_crud/dal/infra/json_dal.py
| 0.738858 | 0.260657 |
json_dal.py
|
pypi
|
import json
from typing import Dict, Iterable, Optional
from ..base import DALHandler, QueryFilter, FilterType
class JSONDatabase(DALHandler):
"""Manages a JSON file like a database"""
def __init__(self, filename: str):
self.__filename = filename
try:
with open(filename) as data_file:
self.__data: Dict = json.loads(data_file.read())
except FileNotFoundError:
self.__data = {}
def get(self, uuid: str) -> Optional[dict]:
return self.__data.get(uuid)
def get_all(self) -> Iterable[dict]:
return self.__data.values()
def where(self, conditions: Iterable[QueryFilter]) -> Iterable[dict]:
filter_dispatch = {
FilterType.EQ: lambda row, condition: row[condition["field"]] == condition["value"],
FilterType.GT: lambda row, condition: row[condition["field"]] > condition["value"],
FilterType.LT: lambda row, condition: row[condition["field"]] < condition["value"],
FilterType.GE: lambda row, condition: row[condition["field"]] >= condition["value"],
FilterType.LE: lambda row, condition: row[condition["field"]] <= condition["value"],
FilterType.IN: lambda row, condition: condition["value"] in row[condition["field"]],
}
for row in self.__data.values():
if all(filter_dispatch[condition.name](row, condition) for condition in conditions):
yield row
def contains(self, field: str, value: str) -> Iterable[dict]:
return [
row
for row in self.get_all()
if value.lower() in row[field].lower()
]
def update(self, conditions: Iterable[QueryFilter], changes: dict):
for row in self.where(conditions):
updated_row = {**row}
for change_key, change_value in changes.items():
updated_row[change_key] = change_value
self.__data[row["uuid"]] = updated_row
def add(self, item: dict) -> dict:
if item["uuid"] in self.__data:
raise Exception("Identifier already exist")
self.__data[item["uuid"]] = item
return item
def delete(self, uuid: str):
self.__data.pop(uuid)
def commit(self):
with open(self.__filename, "wt") as data_file:
data_file.write(json.dumps(self.__data))
|
/sarf_simple_crud-0.1.0-py3-none-any.whl/simplecrud/dal/infra/json_dal.py
| 0.738858 | 0.260657 |
json_dal.py
|
pypi
|
import attr
@attr.s
class Run(object):
"""Describes a single run of an analysis tool, and contains the reported output of that run."""
tool = attr.ib(metadata={"schema_property_name": "tool"})
addresses = attr.ib(default=None, metadata={"schema_property_name": "addresses"})
artifacts = attr.ib(default=None, metadata={"schema_property_name": "artifacts"})
automation_details = attr.ib(default=None, metadata={"schema_property_name": "automationDetails"})
baseline_guid = attr.ib(default=None, metadata={"schema_property_name": "baselineGuid"})
column_kind = attr.ib(default=None, metadata={"schema_property_name": "columnKind"})
conversion = attr.ib(default=None, metadata={"schema_property_name": "conversion"})
default_encoding = attr.ib(default=None, metadata={"schema_property_name": "defaultEncoding"})
default_source_language = attr.ib(default=None, metadata={"schema_property_name": "defaultSourceLanguage"})
external_property_file_references = attr.ib(default=None, metadata={"schema_property_name": "externalPropertyFileReferences"})
graphs = attr.ib(default=None, metadata={"schema_property_name": "graphs"})
invocations = attr.ib(default=None, metadata={"schema_property_name": "invocations"})
language = attr.ib(default="en-US", metadata={"schema_property_name": "language"})
logical_locations = attr.ib(default=None, metadata={"schema_property_name": "logicalLocations"})
newline_sequences = attr.ib(default=attr.Factory(lambda: ['\r\n', '\n']), metadata={"schema_property_name": "newlineSequences"})
original_uri_base_ids = attr.ib(default=None, metadata={"schema_property_name": "originalUriBaseIds"})
policies = attr.ib(default=None, metadata={"schema_property_name": "policies"})
properties = attr.ib(default=None, metadata={"schema_property_name": "properties"})
redaction_tokens = attr.ib(default=None, metadata={"schema_property_name": "redactionTokens"})
results = attr.ib(default=None, metadata={"schema_property_name": "results"})
run_aggregates = attr.ib(default=None, metadata={"schema_property_name": "runAggregates"})
special_locations = attr.ib(default=None, metadata={"schema_property_name": "specialLocations"})
taxonomies = attr.ib(default=None, metadata={"schema_property_name": "taxonomies"})
thread_flow_locations = attr.ib(default=None, metadata={"schema_property_name": "threadFlowLocations"})
translations = attr.ib(default=None, metadata={"schema_property_name": "translations"})
version_control_provenance = attr.ib(default=None, metadata={"schema_property_name": "versionControlProvenance"})
web_requests = attr.ib(default=None, metadata={"schema_property_name": "webRequests"})
web_responses = attr.ib(default=None, metadata={"schema_property_name": "webResponses"})
|
/sarif_om-1.0.4-py3-none-any.whl/sarif_om/_run.py
| 0.767341 | 0.174762 |
_run.py
|
pypi
|
import attr
@attr.s
class ToolComponent(object):
"""A component, such as a plug-in or the driver, of the analysis tool that was run."""
name = attr.ib(metadata={"schema_property_name": "name"})
associated_component = attr.ib(default=None, metadata={"schema_property_name": "associatedComponent"})
contents = attr.ib(default=attr.Factory(lambda: ['localizedData', 'nonLocalizedData']), metadata={"schema_property_name": "contents"})
dotted_quad_file_version = attr.ib(default=None, metadata={"schema_property_name": "dottedQuadFileVersion"})
download_uri = attr.ib(default=None, metadata={"schema_property_name": "downloadUri"})
full_description = attr.ib(default=None, metadata={"schema_property_name": "fullDescription"})
full_name = attr.ib(default=None, metadata={"schema_property_name": "fullName"})
global_message_strings = attr.ib(default=None, metadata={"schema_property_name": "globalMessageStrings"})
guid = attr.ib(default=None, metadata={"schema_property_name": "guid"})
information_uri = attr.ib(default=None, metadata={"schema_property_name": "informationUri"})
is_comprehensive = attr.ib(default=None, metadata={"schema_property_name": "isComprehensive"})
language = attr.ib(default="en-US", metadata={"schema_property_name": "language"})
localized_data_semantic_version = attr.ib(default=None, metadata={"schema_property_name": "localizedDataSemanticVersion"})
locations = attr.ib(default=None, metadata={"schema_property_name": "locations"})
minimum_required_localized_data_semantic_version = attr.ib(default=None, metadata={"schema_property_name": "minimumRequiredLocalizedDataSemanticVersion"})
notifications = attr.ib(default=None, metadata={"schema_property_name": "notifications"})
organization = attr.ib(default=None, metadata={"schema_property_name": "organization"})
product = attr.ib(default=None, metadata={"schema_property_name": "product"})
product_suite = attr.ib(default=None, metadata={"schema_property_name": "productSuite"})
properties = attr.ib(default=None, metadata={"schema_property_name": "properties"})
release_date_utc = attr.ib(default=None, metadata={"schema_property_name": "releaseDateUtc"})
rules = attr.ib(default=None, metadata={"schema_property_name": "rules"})
semantic_version = attr.ib(default=None, metadata={"schema_property_name": "semanticVersion"})
short_description = attr.ib(default=None, metadata={"schema_property_name": "shortDescription"})
supported_taxonomies = attr.ib(default=None, metadata={"schema_property_name": "supportedTaxonomies"})
taxa = attr.ib(default=None, metadata={"schema_property_name": "taxa"})
translation_metadata = attr.ib(default=None, metadata={"schema_property_name": "translationMetadata"})
version = attr.ib(default=None, metadata={"schema_property_name": "version"})
|
/sarif_om-1.0.4-py3-none-any.whl/sarif_om/_tool_component.py
| 0.710729 | 0.159905 |
_tool_component.py
|
pypi
|
# SARIF Tools
A set of command line tools and Python library for working with SARIF files.
Read more about the SARIF format here: https://sarifweb.azurewebsites.net/
# Installation
## Prerequisites
You need Python 3.8 or later installed. Get it from [python.org](https://www.python.org/downloads/). This document assumes that the `python` command runs that version.
## Installing on Windows
Open an Admin Command Prompt (Start > Command Prompt > Run as Administrator) and type:
```
pip install sarif-tools
```
## Installing on Linux or Mac
```
sudo pip install sarif-tools
```
## Testing the installation
After installing using `pip`, you should then be able to run:
```
sarif --version
```
## Troubleshooting installation
This section has suggestions in case the `sarif` command is not available after installation.
A launcher called `sarif` or `sarif.exe` is created in the Python installation's `Scripts` directory. The `Scripts` directory needs to be in the `PATH`
environment variable for you to be able to type `sarif` at the command prompt; this is most likely the case if `pip` is run as a
super-user when installing (e.g. Administrator Command Prompt on Windows, or using `sudo` on Linux).
If the `Scripts` directory is not in the `PATH`, then you need to type `python -m sarif` instead of `sarif` to run the tool.
Confusion can arise when the `python` and `pip` commands on the `PATH` are from different installations, or the `python` installation on the super-user's `PATH` is different from the `python` command on the normal user's path. On Windows, you can use `where python` and `where pip` in normal CMD and Admin CMD to see which installations are in use; on Linux, it's `which python` and `which pip` with and without `sudo`.
# Command Line Usage
```
usage: sarif [-h] [--version] [--debug] [--check {error,warning,note}] {blame,copy,csv,diff,html,info,ls,summary,trend,usage,word} ...
Process sets of SARIF files
positional arguments:
{blame,copy,csv,diff,html,info,ls,summary,trend,usage,word}
command
optional arguments:
-h, --help show this help message and exit
--version, -v show program's version number and exit
--debug Print information useful for debugging
--check {error,warning,note}, -x {error,warning,note}
Exit with error code if there are any issues of the specified level (or for diff, an increase in issues at that level).
commands:
blame Enhance SARIF file with information from `git blame`
copy Write a new SARIF file containing optionally-filtered data from other SARIF file(s)
csv Write a CSV file listing the issues from the SARIF files(s) specified
diff Find the difference between two [sets of] SARIF files
html Write an HTML representation of SARIF file(s) for viewing in a web browser
info Print information about SARIF file(s) structure
ls List all SARIF files in the directories specified
summary Write a text summary with the counts of issues from the SARIF files(s) specified
trend Write a CSV file with time series data from SARIF files with "yyyymmddThhmmssZ" timestamps in their filenames
usage (Command optional) - print usage and exit
word Produce MS Word .docx summaries of the SARIF files specified
Run `sarif <COMMAND> --help` for command-specific help.
```
## Commands
The commands are illustrated below assuming input files in the following locations:
- `C:\temp\sarif_files` = a directory of SARIF files with arbitrary filenames.
- `C:\temp\sarif_with_date` = a directory of SARIF files with filenames including timestamps e.g. `C:\temp\sarif_with_date\myapp_devskim_output_20211001T012000Z.sarif`.
- `C:\temp\old_sarif_files` = a directory of SARIF files with arbitrary filenames from an older build.
- `C:\code\my_source_repo` = checkout directory of source code files from which SARIF results were obtained.
### blame
```
usage: sarif blame [-h] [--output PATH] [--code PATH] [file_or_dir [file_or_dir ...]]
Enhance SARIF file with information from `git blame`
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output PATH, -o PATH
Output file or directory
--code PATH, -c PATH Path to git repository; if not specified, the current working directory is used
```
Augment SARIF files with `git blame` information, and write the augmented files to a specified location.
```shell
sarif blame -o "C:\temp\sarif_files_with_blame_info" -c "C:\code\my_source_repo" "C:\temp\sarif_files"
```
If the current working directory is the git repository, the `-c` argument can be omitted.
See [Blame filtering](blame-filtering) below for the format of the blame information that gets added to the SARIF files.
### copy
```
usage: sarif copy [-h] [--output FILE] [--blame-filter FILE] [--timestamp] [file_or_dir [file_or_dir ...]]
Write a new SARIF file containing optionally-filtered data from other SARIF file(s)
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output FILE, -o FILE
Output file
--blame-filter FILE, -b FILE
Specify the blame filter file to apply. See README for format.
--timestamp, -t Append current timestamp to output filename in the "yyyymmddThhmmssZ" format used by the `sarif trend` command
```
Write a new SARIF file containing optionally-filtered data from an existing SARIF file or multiple
SARIF files. The resulting file contains each run from the original SARIF files back-to-back.
The results can be filtered (see [Blame filtering](blame-filtering) below), in which case only
those results from the original SARIF files that meet the filter are included; the output file
contains no information about the excluded records. If a run in the original file was empty,
or all its results are filtered out, the empty run is still included.
If no output filename is provided, a file called `out.sarif` in the current directory is written.
If the output file already exists and is also in the input file list, it is not included in the
inputs, to avoid duplication of results. The output file is overwritten without warning.
The `file_or_dir` specifier can include wildcards e.g. `c:\temp\**\devskim*.sarif` (i.e.
a "glob"). This works for all commands, but it is particularly useful for `copy`.
One use for this is to combine a set of SARIF files from multiple static analysis tools run during
a build process into a single file that can be more easily stored and processed as a build asset.
### csv
```
usage: sarif csv [-h] [--output PATH] [--blame-filter FILE] [--autotrim] [--trim PREFIX] [file_or_dir [file_or_dir ...]]
Write a CSV file listing the issues from the SARIF files(s) specified
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output PATH, -o PATH
Output file or directory
--blame-filter FILE, -b FILE
Specify the blame filter file to apply. See README for format.
--autotrim, -a Strip off the common prefix of paths in the CSV output
--trim PREFIX Prefix to strip from issue paths, e.g. the checkout directory on the build agent
```
Write out a simple tabular list of issues from [a set of] SARIF files. This can then be analysed, e.g. via Pivot Tables in Excel.
Use the `--trim` option to strip specific prefixes from the paths, to make the CSV less verbose. Alternatively, use `--autotrim` to strip off the longest common prefix.
Generate a CSV summary of a single SARIF file with common file path prefix suppressed:
```shell
sarif csv "C:\temp\sarif_files\devskim_myapp.sarif"
```
Generate a CSV summary of a directory of SARIF files with path prefix `C:\code\my_source_repo` suppressed:
```shell
sarif csv --trim c:\code\my_source_repo "C:\temp\sarif_files"
```
See [Blame filtering](blame-filtering) below for how to use the `--blame-filter` option.
### diff
```
usage: sarif diff [-h] [--output FILE] [--blame-filter FILE] old_file_or_dir new_file_or_dir
Find the difference between two [sets of] SARIF files
positional arguments:
old_file_or_dir An old SARIF file or a directory containing the old SARIF files
new_file_or_dir A new SARIF file or a directory containing the new SARIF files
optional arguments:
-h, --help show this help message and exit
--output FILE, -o FILE
Output file
--blame-filter FILE, -b FILE
Specify the blame filter file to apply. See README for format.
```
Print the difference between two [sets of] SARIF files.
Difference between the issues in two SARIF files:
```shell
sarif diff "C:\temp\old_sarif_files\devskim_myapp.sarif" "C:\temp\sarif_files\devskim_myapp.sarif"
```
Difference between the issues in two directories of SARIF files:
```shell
sarif diff "C:\temp\old_sarif_files" "C:\temp\sarif_files"
```
Write output to JSON file instead of printing to stdout:
```shell
sarif diff -o mydiff.json "C:\temp\old_sarif_files\devskim_myapp.sarif" "C:\temp\sarif_files\devskim_myapp.sarif"
```
See [Blame filtering](blame-filtering) below for how to use the `--blame-filter` option.
### html
```
usage: sarif html [-h] [--output PATH] [--blame-filter FILE] [--no-autotrim] [--image IMAGE] [--trim PREFIX] [file_or_dir [file_or_dir ...]]
Write an HTML representation of SARIF file(s) for viewing in a web browser
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output PATH, -o PATH
Output file or directory
--blame-filter FILE, -b FILE
Specify the blame filter file to apply. See README for format.
--no-autotrim, -n Do not strip off the common prefix of paths in the output document
--image IMAGE Image to include at top of file - SARIF logo by default
--trim PREFIX Prefix to strip from issue paths, e.g. the checkout directory on the build agent
```
Create an HTML file summarising SARIF results.
```shell
sarif html -o summary.html "C:\temp\sarif_files"
```
Use the `--trim` option to strip specific prefixes from the paths, to make the generated HTML page less verbose. The longest common prefix of the paths will be trimmed unless `--no-autotrim` is specified.
Use the `--image` option to provide a header image for the top of the HTML page. The image is embedded into the HTML, so the HTML document remains a portable standalone file.
See [Blame filtering](blame-filtering) below for how to use the `--blame-filter` option.
### info
```
usage: sarif info [-h] [--output FILE] [file_or_dir [file_or_dir ...]]
Print information about SARIF file(s) structure
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output FILE, -o FILE
Output file
```
Print information about the structure of a SARIF file or multiple files. This is about the JSON
structure rather than any meaning of the results produced by the tool. The summary includes the
full path of the file, its size and modified date, the number of runs, and for each run, the
tool that generated the run, the number of results, and the entries in the results' property bags.
```
c:\temp\sarif_files\ios_devskim_output.sarif
1256241 bytes (1.2 MiB)
modified: 2021-10-13 21:50:01.251544, accessed: 2022-01-09 18:23:00.060573, ctime: 2021-10-13 20:49:00
1 run
Tool: devskim
1323 results
All results have properties: tags, DevSkimSeverity
```
### ls
```
usage: sarif ls [-h] [--output FILE] [file_or_dir [file_or_dir ...]]
List all SARIF files in the directories specified
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output FILE, -o FILE
Output file
```
List SARIF files in one or more directories.
```shell
sarif ls "C:\temp\sarif_files" "C:\temp\sarif_with_date"
```
### summary
```
usage: sarif summary [-h] [--output PATH] [--blame-filter FILE] [file_or_dir [file_or_dir ...]]
Write a text summary with the counts of issues from the SARIF files(s) specified
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output PATH, -o PATH
Output file or directory
--blame-filter FILE, -b FILE
Specify the blame filter file to apply. See README for format.
```
Print a summary of the issues in one or more SARIF file(s), grouped by severity and then ordered by number of occurrences.
When directories are provided as input and output, a summary is written for each input file, along with another file containing the totals.
```shell
sarif summary -o summaries "C:\temp\sarif_files"
```
When no output directory or file is specified, the overall summary is printed to the standard output.
```shell
sarif summary "C:\temp\sarif_files\devskim_myapp.sarif"
```
See [Blame filtering](blame-filtering) below for how to use the `--blame-filter` option.
### trend
```
usage: sarif trend [-h] [--output FILE] [--blame-filter FILE] [--dateformat {dmy,mdy,ymd}] [file_or_dir [file_or_dir ...]]
Write a CSV file with time series data from SARIF files with "yyyymmddThhmmssZ" timestamps in their filenames
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output FILE, -o FILE
Output file
--blame-filter FILE, -b FILE
Specify the blame filter file to apply. See README for format.
--dateformat {dmy,mdy,ymd}, -f {dmy,mdy,ymd}
Date component order to use in output CSV. Default is `dmy`
```
Generate a CSV showing a timeline of issues from a set of SARIF files in a directory. The SARIF file names must contain a
timestamp in the specific format `yyyymmddThhmmssZ`, e.g. `20211012T110000Z`.
The CSV can be loaded in Microsoft Excel for graphing and trend analysis.
```shell
sarif trend -o timeline.csv "C:\temp\sarif_with_date" --dateformat dmy
```
See [Blame filtering](blame-filtering) below for how to use the `--blame-filter` option.
### usage
```
usage: sarif usage [-h] [--output FILE]
(Command optional) - print usage and exit
optional arguments:
-h, --help show this help message and exit
--output FILE, -o FILE
Output file
```
Print usage and exit.
### word
```
usage: sarif word [-h] [--output PATH] [--blame-filter FILE] [--no-autotrim] [--image IMAGE] [--trim PREFIX] [file_or_dir [file_or_dir ...]]
Produce MS Word .docx summaries of the SARIF files specified
positional arguments:
file_or_dir A SARIF file or a directory containing SARIF files
optional arguments:
-h, --help show this help message and exit
--output PATH, -o PATH
Output file or directory
--blame-filter FILE, -b FILE
Specify the blame filter file to apply. See README for format.
--no-autotrim, -n Do not strip off the common prefix of paths in the output document
--image IMAGE Image to include at top of file - SARIF logo by default
--trim PREFIX Prefix to strip from issue paths, e.g. the checkout directory on the build agent
```
Create Word documents representing a SARIF file or multiple SARIF files.
If directories are provided for the `-o` option and the input, then a Word document is produced for each individual SARIF file
and for the full set of SARIF files. Otherwise, a single Word document is created.
Create a Word document for each SARIF file and one for all of them together, in the `reports` directory (created if non-existent):
```shell
sarif word -o reports "C:\temp\sarif_files"
```
Create a Word document for a single SARIF file:
```shell
sarif word -o "reports\devskim_myapp.docx" "C:\temp\sarif_files\devskim_myapp.sarif"
```
Use the `--trim` option to strip specific prefixes from the paths, to make the generated documents less verbose. The longest common prefix of the paths will be trimmed unless `--no-autotrim` is specified.
Use the `--image` option to provide a header image for the top of the Word document.
See [Blame filtering](blame-filtering) below for how to use the `--blame-filter` option.
# Blame filtering
Use the `sarif blame` command to augment a SARIF file or multiple SARIF files with blame information.
Blame information is added to the property bag of each `result` object for which it was successfully obtained. The keys and values used are as in the [git blame porcelain format](https://git-scm.com/docs/git-blame#_the_porcelain_format). E.g.:
```json
{
"ruleId": "SM00702",
...
"properties": {
"blame": {
"author": "aperson",
"author-mail": "<[email protected]>",
"author-time": "1350899798",
"author-tz": "+0000",
"committer": "aperson",
"committer-mail": "<[email protected]>",
"committer-time": "1350899798",
"committer-tz": "+0000",
"summary": "blah blah commit comment blah",
"boundary": true,
"filename": "src/net/myproject/mypackage/MyClass.java"
}
}
}
```
Note that the bare `boundary` key is given the automatic value `true`.
This blame data can then be used for filtering and summarising via the `--blame-filter` option available for various commands. This option requires a path to a filter-list file, containing a list of patterns and substrings to match against the blame information author email. The format of a filter-list file is as follows:
```
# Lines beginning with # are interpreted as comments and ignored.
# A line beginning with "description: " is interpreted as an optional description for the filter. If no title is specified, the filter file name is used.
description: Example filter from README.md
# Lines beginning with "+: " are interpreted as inclusion substrings. E.g. the following line includes issues whose author-mail field contains "@microsoft.com".
+: @microsoft.com
# The "+: " can be omitted.
@microsoft.com
# Instead of a substring, a regular expression can be used, enclosed in "/" characters. Issues whose author-mail field includes a string matching the regular expression are included. Use ^ and $ to match the whole author-mail field.
+: /^<myname.*\.com>$/
# Again, the "+: " can be omitted for a regular expression include pattern.
/^<myname.*\.com>$/
# Lines beginning with "-: " are interpreted as exclusion substrings. E.g. the following line excludes issues whose author-mail field contains "[email protected]".
-: [email protected]
# Instead of a substring, a regular expression can be used, enclosed in "/" characters. Issues whose author-mail field includes a string matching the regular expression are excluded. Use ^ and $ to match the whole author-mail field. E.g. the following pattern excludes all issues whose author-mail field contains a GUID.
-: /[0-9A-F]{8}[-][0-9A-F]{4}[-][0-9A-F]{4}[-][0-9A-F]{4}[-][0-9A-F]{12}/
```
Here's an example of a filter-file that includes issues on lines changed by an `@microsoft.com` email address or a `myname.SOMETHING.com` email address, but not if those email addresses end in `[email protected]` or contain a GUID. It's the same as the above example, with comments stripped out.
```
description: Example filter from README.md
+: @microsoft.com
+: /^<myname.*\.com>$/
-: [email protected]
-: /[0-9A-F]{8}[-][0-9A-F]{4}[-][0-9A-F]{4}[-][0-9A-F]{4}[-][0-9A-F]{12}/
```
All matching is case insensitive, because email addresses are. Whitespace at the start and end of lines is ignored, which also means that line ending characters don't matter. The blame filter file must be UTF-8 encoded (plain 7-bit ASCII, being a subset of UTF-8, is also fine). It can have a byte order mark or not.
If there are no inclusion patterns, all issues are included except for those matching the exclusion patterns. If there are inclusion patterns, only issues matching the inclusion patterns are included. If an issue matches one or more inclusion patterns and also at least one exclusion pattern, it is excluded.
Sometimes, there may be issues in the SARIF file to which the filter cannot be applied, because blame information is not available. This can be for two reasons: either there is no blame information recorded for the file in which the issue occurred, or the issue location lacks a line number (or specifies line number 1 as a placeholder) so that blame information cannot be correlated to the issue. These issues are included by default. To identify which issues these are, create a filter file that excludes everything to which the filter can be applied:
```
description: Exclude everything filterable
-: /.*/
```
Then run a `sarif` command using this filter file as the `--blame-filter` to see the default-included issues.
# Usage as a Python library
Although not its primary purpose, you can use sarif-tools from a Python script or module to
load and summarise SARIF results.
## Basic usage pattern
After installation, use `sarif.loader` to load a SARIF file or files, and then use the operations
on the returned `SarifFile` or `SarifFileSet` objects to explore the data.
```python
from sarif import loader
sarif_data = loader.load_sarif_file(path_to_sarif_file)
issue_count_by_severity = sarif_data.get_result_count_by_severity()
error_histogram = sarif_data.get_issue_code_histogram("error")
```
## Result access API
The three classes defined in the `sarif_file` module, `SarifFileSet`, `SarifFile` and `SarifRun`,
provide similar APIs, which allows SARIF results to be handled similarly at multiple levels of
aggregation. This section briefly describes some of the key APIs at the three levels of
aggregation.
### get_distinct_tool_names()
Returns a list of distinct tool names in a `SarifFile` or for all files in a `SarifFileSet`.
A `SarifRun` has a single tool name so the equivalent method is `get_tool_name()`.
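For example, a minimal sketch assuming `sarif_data` was loaded as in the basic usage pattern above:

```python
print("Tools:", ", ".join(sarif_data.get_distinct_tool_names()))
```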
### get_results()
Return the list of SARIF results. These are objects as defined in the
[SARIF standard section 3.27](https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638).
### get_records()
Return the list of SARIF results as simplified, flattened record dicts. Each record has the
attributes defined in `sarif_file.RECORD_ATTRIBUTES`.
- `"Tool"` - the tool name for the run containing the result.
- `"Severity"` - the SARIF severity for the record. One of `error`, `warning` (the default if the
record doesn't specify) or `note`.
- `"Code"` - the issue code from the result.
- `"Location"` - the location of the issue, typically the file containing the issue. Format varies
by tool.
- `"Line"` - the line number in the file where the issue occurs. Value is a string. This defaults
to `"1"` if the tool failed to identify the line.
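For example, a minimal sketch (again assuming `sarif_data` from the basic usage pattern above) that prints each record using these fields:

```python
for record in sarif_data.get_records():
    print(f'{record["Severity"]:8} {record["Location"]}:{record["Line"]}  {record["Code"]}')
```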
### get_records_grouped_by_severity()
As per `get_records()`, but the result is a dict from SARIF severity level (`error`, `warning` and
`note`) to the list of records of that severity level.
### get_result_count(), get_result_count_by_severity()
Get the total number of SARIF results. `get_result_count_by_severity()` returns a dict from
SARIF severity level (`error`, `warning` and `note`) to the integer number of results of that
severity.
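For example:

```python
counts = sarif_data.get_result_count_by_severity()
print(
    f'{sarif_data.get_result_count()} results: '
    f'{counts["error"]} errors, {counts["warning"]} warnings, {counts["note"]} notes'
)
```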
### get_issue_code_histogram(severity)
For the given severity, get histogram in the form of a list of pairs. The first item in each pair
is the issue code, the second item is the number of matching records, and the list is sorted in
decreasing order of frequency (the same as the `sarif summary` command output).
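For example, a short sketch that prints the error-level histogram in the same spirit as the `sarif summary` output:

```python
histogram = sarif_data.get_issue_code_histogram("error")
for code, count in histogram:
    print(f"{count:6d}  {code}")
print("Total errors:", sum(count for _, count in histogram))
```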
### Disaggregation and filename access
These fields and methods allow access to the underlying information about the SARIF files.
- `SarifFileSet.subdirs` - a list of `SarifFileSet` objects corresponding to the subdirectories of
the directory from which the `SarifFileSet` was created.
- `SarifFileSet.files` - a list of `SarifFile` objects corresponding to the SARIF files contained
in the directory from which the `SarifFileSet` was created.
- `SarifFile.get_abs_file_path()` - get the absolute path to the SARIF file.
- `SarifFile.get_file_name()` - get the name of the SARIF file.
- `SarifFile.get_file_name_without_extension()` - get the name of the SARIF file without its
extension. Useful for constructing derived filenames.
- `SarifFile.get_filename_timestamp()` - extract the timestamp from the filename of a SARIF file,
and return it as a string. The timestamp must be in the format specified in the `sarif trend`
command.
- `SarifFile.runs` - a list of `SarifRun` objects contained in the SARIF file. Most SARIF files
only contain a single run, but it is possible to aggregate runs from multiple tools into a
single SARIF file.
### Path shortening API
Call `init_path_prefix_stripping(autotrim, path_prefixes)` on a `SarifFileSet`, `SarifFile` or `SarifRun` object to set up path filtering, either automatically removing the longest common prefix (`autotrim=True`) or removing specific prefixes (`autotrim=False` and a list of strings in `path_prefixes`).
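For example, a minimal sketch (the prefixes are hypothetical build-agent paths, shown only for illustration):

```python
sarif_data.init_path_prefix_stripping(
    autotrim=False,
    path_prefixes=["C:\\agent\\_work\\1\\s", "/home/agent/work"],  # hypothetical prefixes
)
records = sarif_data.get_records()  # "Location" values now have those prefixes stripped
```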
### Blame filtering API
Call `init_blame_filter(filter_description, include_substrings, include_regexes, exclude_substrings, exclude_regexes)` on a `SarifFileSet`, `SarifFile` or `SarifRun` object to set up blame filtering. `filter_description` is a string and the other parameters are lists of strings (with no `/` characters around the regular expressions). They correspond in an obvious way to the filter file contents described in [Blame filtering](blame-filtering) above.
Call `get_filter_stats()` to retrieve the filter stats after reading the results or records from sarif files. It returns `None` if there is no filter, or otherwise a `sarif_file.FilterStats` object with integer fields `filtered_in_result_count`, `filtered_out_result_count`, `missing_blame_count` and `unconvincing_line_number_count`. Call `to_string()` on the `FilterStats` object for a readable representation of these statistics, which also includes the filter file name or description (`filter_description` field).
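For example, a minimal sketch mirroring the filter file shown in [Blame filtering](blame-filtering) above (the patterns are illustrative):

```python
sarif_data.init_blame_filter(
    "Example filter from README.md",
    ["@microsoft.com"],      # include substrings
    [r"^<myname.*\.com>$"],  # include regexes, without the surrounding "/" characters
    [],                      # exclude substrings
    [r"[0-9A-F]{8}[-][0-9A-F]{4}[-][0-9A-F]{4}[-][0-9A-F]{4}[-][0-9A-F]{12}"],  # exclude regexes
)
records = sarif_data.get_records()
filter_stats = sarif_data.get_filter_stats()
if filter_stats:
    print(filter_stats.to_string())
```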
# Suggested usage in CI pipelines
Using the `--check` option in combination with the `summary` command causes sarif-tools to exit
with a nonzero exit code if there are any issues of the specified level, or higher. This can
be useful to fail a continuous integration (CI) pipeline in the case of SAST violation.
The SARIF issue levels are `error`, `warning` and `note`. These are all valid options for the
`--check` option.
E.g. to fail if there are any errors or warnings:
```
sarif --check warning summary c:\temp\sarif_files
```
The `diff` command can check for any increase in issues of the specified level or above, relative
to a previous or baseline build.
E.g. to fail if there are any new issue codes at error level:
```
sarif --check error diff c:\temp\old_sarif_files c:\temp\sarif_files
```
You can also use sarif-tools to filter and consolidate the output from multiple tools. E.g.
```
# First run your static analysis tools, configured to write SARIF output. How to do that depends
# on the tool.
# Now run the blame command to augment the output with blame information.
sarif blame -o with_blame/myapp_mytool_with_blame.sarif myapp_mytool.sarif
# Now combine all tools' output into a single file
sarif copy --timestamp -o artifacts/myapp_alltools_with_blame.sarif
```
Download the file `myapp_alltools_with_blame_TIMESTAMP.sarif` that is generated. Then later you can
filter the results using the `--blame-filter` argument, or generate graph of code quality over time
using `sarif trend`.
# Credits
sarif-tools was originally developed during the Microsoft Global Hackathon 2021 by Simon Abykov, Nick Brabbs, Anthony Hayward, Sivaji Kondapalli, Matt Parkes and Kathryn Pentland.
|
/sarif-tools-1.0.0.tar.gz/sarif-tools-1.0.0/README.md
| 0.428592 | 0.907763 |
README.md
|
pypi
|
import copy
import datetime
import os
import re
from typing import Dict, Iterator, List, Optional, Tuple
SARIF_SEVERITIES = ["error", "warning", "note"]
RECORD_ATTRIBUTES = ["Tool", "Severity", "Code", "Location", "Line"]
# Standard time format for filenames, e.g. `20211012T110000Z` (not part of the SARIF standard).
# Can obtain from bash via `date +"%Y%m%dT%H%M%SZ"``
DATETIME_FORMAT = "%Y%m%dT%H%M%SZ"
DATETIME_REGEX = r"\d{8}T\d{6}Z"
_SLASHES = ["\\", "/"]
def has_sarif_file_extension(filename):
"""
As per section 3.2 of the SARIF standard, SARIF filenames SHOULD end in ".sarif" and MAY end in
".sarif.json".
https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317421
"""
filename_upper = filename.upper().strip()
return any(filename_upper.endswith(x) for x in [".SARIF", ".SARIF.JSON"])
def _read_result_location(result) -> Tuple[str, str]:
"""
Extract the file path and line number strings from the Result.
Tools store this in different ways, so this function tries a few different JSON locations.
"""
file_path = None
line_number = None
locations = result.get("locations", [])
if locations:
location = locations[0]
physical_location = location.get("physicalLocation", {})
# SpotBugs has some errors with no line number so deal with them by just leaving it at 1
line_number = physical_location.get("region", {}).get("startLine", None)
# For file name, first try the location written by DevSkim
file_path = (
location.get("physicalLocation", {})
.get("address", {})
.get("fullyQualifiedName", None)
)
if not file_path:
# Next try the physical location written by MobSF and by SpotBugs (for some errors)
file_path = (
location.get("physicalLocation", {})
.get("artifactLocation", {})
.get("uri", None)
)
if not file_path:
logical_locations = location.get("logicalLocations", None)
if logical_locations:
# Finally, try the logical location written by SpotBugs for some errors
file_path = logical_locations[0].get("fullyQualifiedName", None)
return (file_path, line_number)
def _group_records_by_severity(records) -> Dict[str, List[Dict]]:
"""
Get the records, grouped by severity.
"""
return {
severity: [record for record in records if record["Severity"] == severity]
for severity in SARIF_SEVERITIES
}
def _count_records_by_issue_code(records, severity) -> List[Tuple]:
"""
Return a list of pairs (code, count) of the records with the specified
severities.
"""
code_to_count = {}
for record in records:
if record["Severity"] == severity:
code = record["Code"]
code_to_count[code] = code_to_count.get(code, 0) + 1
return sorted(code_to_count.items(), key=lambda x: x[1], reverse=True)
class FilterStats:
"""
    Statistics that record the outcome of a filter.
"""
def __init__(self, filter_description):
self.filter_description = filter_description
# Filter stats can also be loaded from a file created by `sarif copy`.
self.rehydrated = False
self.filter_datetime = None
self.filtered_in_result_count = 0
self.filtered_out_result_count = 0
self.missing_blame_count = 0
self.unconvincing_line_number_count = 0
def reset_counters(self):
"""
Zero all the counters.
"""
self.filter_datetime = datetime.datetime.now()
self.filtered_in_result_count = 0
self.filtered_out_result_count = 0
self.missing_blame_count = 0
self.unconvincing_line_number_count = 0
def add(self, other_filter_stats):
"""
Add another set of filter stats to my totals.
"""
if other_filter_stats:
if other_filter_stats.filter_description and (
other_filter_stats.filter_description != self.filter_description
):
self.filter_description += f", {other_filter_stats.filter_description}"
self.filtered_in_result_count += other_filter_stats.filtered_in_result_count
self.filtered_out_result_count += (
other_filter_stats.filtered_out_result_count
)
self.missing_blame_count += other_filter_stats.missing_blame_count
self.unconvincing_line_number_count += (
other_filter_stats.unconvincing_line_number_count
)
def __str__(self):
"""
Automatic to_string()
"""
return self.to_string()
def to_string(self):
"""
Generate a summary string for these filter stats.
"""
ret = f"'{self.filter_description}'"
if self.filter_datetime:
ret += " at "
ret += self.filter_datetime.strftime("%c")
ret += (
f": {self.filtered_out_result_count} filtered out, "
f"{self.filtered_in_result_count} passed the filter"
)
if self.unconvincing_line_number_count:
ret += (
f", {self.unconvincing_line_number_count} included by default "
"for lacking line number information"
)
if self.missing_blame_count:
ret += (
f", {self.missing_blame_count} included by default "
"for lacking blame data to filter"
)
return ret
def to_json_camel_case(self):
"""
Generate filter stats as JSON using camelCase naming, to fit with SARIF standard section
3.8.1 (Property Bags).
"""
return {
"filter": self.filter_description,
"in": self.filtered_in_result_count,
"out": self.filtered_out_result_count,
"default": {
"noLineNumber": self.unconvincing_line_number_count,
"noBlame": self.missing_blame_count,
},
}
def load_filter_stats_from_json_camel_case(json_data):
"""
Load filter stats from a SARIF file property bag
"""
ret = None
if json_data:
ret = FilterStats(json_data["filter"])
ret.rehydrated = True
ret.filtered_in_result_count = json_data.get("in", 0)
ret.filtered_out_result_count = json_data.get("out", 0)
ret.unconvincing_line_number_count = json_data.get("default", {}).get(
"noLineNumber", 0
)
ret.missing_blame_count = json_data.get("default", {}).get("noBlame", 0)
return ret
def _add_filter_stats(accumulator, filter_stats):
if filter_stats:
if accumulator:
accumulator.add(filter_stats)
return accumulator
return copy.copy(filter_stats)
return accumulator
class _BlameFilter:
"""
Class that implements blame filtering.
"""
def __init__(self):
self.filter_stats = None
self.include_substrings = None
self.include_regexes = None
self.apply_inclusion_filter = False
self.exclude_substrings = None
self.exclude_regexes = None
self.apply_exclusion_filter = False
def init_blame_filter(
self,
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
):
"""
Initialise the blame filter with the given filter patterns.
"""
self.filter_stats = FilterStats(filter_description)
self.include_substrings = (
[s.upper().strip() for s in include_substrings]
if include_substrings
else None
)
self.include_regexes = include_regexes[:] if include_regexes else None
self.apply_inclusion_filter = bool(
self.include_substrings or self.include_regexes
)
self.exclude_substrings = (
[s.upper().strip() for s in exclude_substrings]
if exclude_substrings
else None
)
self.exclude_regexes = exclude_regexes[:] if exclude_regexes else None
self.apply_exclusion_filter = bool(
self.exclude_substrings or self.exclude_regexes
)
def rehydrate_filter_stats(self, dehydrated_filter_stats, filter_datetime):
"""
        Restore filter stats from the SARIF file directly, where they were recorded when the filter
was previously run.
Note that if init_blame_filter is called, these rehydrated stats are discarded.
"""
self.filter_stats = load_filter_stats_from_json_camel_case(
dehydrated_filter_stats
)
self.filter_stats.filter_datetime = filter_datetime
def _zero_counts(self):
if self.filter_stats:
self.filter_stats.reset_counters()
def _check_include_result(self, author_mail):
author_mail_upper = author_mail.upper().strip()
matched_include_substrings = None
matched_include_regexes = None
if self.apply_inclusion_filter:
if self.include_substrings:
matched_include_substrings = [
s for s in self.include_substrings if s in author_mail_upper
]
if self.include_regexes:
matched_include_regexes = [
r
for r in self.include_regexes
if re.search(r, author_mail, re.IGNORECASE)
]
if (not matched_include_substrings) and (not matched_include_regexes):
return False
if self.exclude_substrings and any(
s in author_mail_upper for s in self.exclude_substrings
):
return False
if self.exclude_regexes and any(
re.search(r, author_mail, re.IGNORECASE) for r in self.exclude_regexes
):
return False
return {
"state": "included",
"matchedSubstring": [s.lower() for s in matched_include_substrings]
if matched_include_substrings
else [],
"matchedRegex": [r.lower() for r in matched_include_regexes]
if matched_include_regexes
else [],
}
def _filter_append(self, filtered_results, result, blame_info):
# Remove any existing filter log on the result
result.setdefault("properties", {}).pop("filtered", None)
if blame_info:
author_mail = blame_info.get("author-mail", None) or blame_info.get(
"committer-mail", None
)
if author_mail:
# First, check inclusion
included = self._check_include_result(author_mail)
if included:
self.filter_stats.filtered_in_result_count += 1
included["filter"] = self.filter_stats.filter_description
result["properties"]["filtered"] = included
filtered_results.append(result)
else:
(_file_path, line_number) = _read_result_location(result)
                    # startLine may be an int or a string depending on the tool, so compare via str().
                    if not line_number or str(line_number) == "1":
# Line number is not convincing. Blame information may be misattributed.
self.filter_stats.unconvincing_line_number_count += 1
result["properties"]["filtered"] = {
"filter": self.filter_stats.filter_description,
"state": "default",
"missing": "line",
}
filtered_results.append(result)
else:
self.filter_stats.filtered_out_result_count += 1
else:
self.filter_stats.missing_blame_count += 1
# Result did not contain complete blame information, so don't filter it out.
result["properties"]["filtered"] = {
"filter": self.filter_stats.filter_description,
"state": "default",
"missing": "blame",
}
filtered_results.append(result)
else:
self.filter_stats.missing_blame_count += 1
# Result did not contain blame information, so don't filter it out.
filtered_results.append(result)
def filter_results(self, results):
"""
Apply this blame filter to a list of results, return the results that pass the filter
and as a side-effect, update the filter stats.
"""
if self.apply_inclusion_filter or self.apply_exclusion_filter:
self._zero_counts()
ret = []
for result in results:
blame_info = result.get("properties", {}).get("blame", None)
self._filter_append(ret, result, blame_info)
return ret
# No inclusion or exclusion patterns
return results
def get_filter_stats(self) -> Optional[FilterStats]:
"""
Get the statistics from running this filter.
"""
return self.filter_stats
class SarifRun:
"""
Class to hold a run object from a SARIF file (an entry in the top-level "runs" list
in a SARIF file), as defined in SARIF standard section 3.14.
https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317484
"""
def __init__(self, sarif_file_object, run_index, run_data):
self.sarif_file = sarif_file_object
self.run_index = run_index
self.run_data = run_data
self._path_prefixes_upper = None
self._cached_records = None
self._filter = _BlameFilter()
self._default_line_number = None
conversion = run_data.get("conversion", None)
if conversion:
conversion_driver = conversion.get("tool", {}).get("driver", {})
if conversion_driver.get("name", None) == "sarif-tools":
# This run was written out by this tool! Can restore filter stats.
dehydrated_filter_stats = conversion_driver.get("properties", {}).get(
"filtered", None
)
if dehydrated_filter_stats:
filter_date = conversion_driver["properties"].get("processed", None)
self._filter.rehydrate_filter_stats(
dehydrated_filter_stats,
datetime.datetime.fromisoformat(filter_date)
if filter_date
else None,
)
def init_path_prefix_stripping(self, autotrim=False, path_prefixes=None):
"""
Set up path prefix stripping. When records are subsequently obtained, the start of the
path is stripped.
        If autotrim is set, the longest common prefix of the paths in this run is stripped.
        If path prefixes are specified, the specified prefixes are stripped.
"""
prefixes = []
if path_prefixes:
prefixes = [prefix.strip().upper() for prefix in path_prefixes]
if autotrim:
autotrim_prefix = None
records = self.get_records()
if len(records) == 1:
loc = records[0]["Location"].strip()
slash_pos = max(loc.rfind(slash) for slash in _SLASHES)
autotrim_prefix = loc[0:slash_pos] if slash_pos > -1 else None
elif len(records) > 1:
common_prefix = records[0]["Location"].strip()
for record in records[1:]:
for (char_pos, char) in enumerate(record["Location"].strip()):
if char_pos >= len(common_prefix):
break
if char != common_prefix[char_pos]:
common_prefix = common_prefix[0:char_pos]
break
if not common_prefix:
break
if common_prefix:
autotrim_prefix = common_prefix.upper()
if autotrim_prefix and not any(
p.startswith(autotrim_prefix.strip().upper()) for p in prefixes
):
prefixes.append(autotrim_prefix)
self._path_prefixes_upper = prefixes or None
# Clear the untrimmed records cached by get_records() above.
self._cached_records = None
def init_default_line_number_1(self):
"""
Some SARIF records lack a line number. If this method is called, the default line number
"1" is substituted in that case in the records returned by get_records(). Otherwise,
None is returned.
"""
self._default_line_number = "1"
self._cached_records = None
def init_blame_filter(
self,
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
):
"""
Set up blame filtering. This is applied to the author_mail field added to the "blame"
        property bag in each SARIF file. Results that lack blame information are included by
        default rather than causing an error.
If only inclusion criteria are provided, only issues matching the inclusion criteria
are considered.
If only exclusion criteria are provided, only issues not matching the exclusion criteria
are considered.
If both are provided, only issues matching the inclusion criteria and not matching the
exclusion criteria are considered.
include_substrings = substrings of author_mail to filter issues for inclusion.
include_regexes = regular expressions for author_mail to filter issues for inclusion.
exclude_substrings = substrings of author_mail to filter issues for exclusion.
exclude_regexes = regular expressions for author_mail to filter issues for exclusion.
"""
self._filter.init_blame_filter(
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
)
# Clear the unfiltered records cached by get_records() above.
self._cached_records = None
def get_tool_name(self) -> str:
"""
Get the tool name from this run.
"""
return self.run_data["tool"]["driver"]["name"]
def get_conversion_tool_name(self) -> Optional[str]:
"""
Get the conversion tool name from this run, if any.
"""
if "conversion" in self.run_data:
return (
self.run_data["conversion"]["tool"].get("driver", {}).get("name", None)
)
return None
def get_results(self) -> List[Dict]:
"""
Get the results from this run. These are the Result objects as defined in the SARIF
        standard section 3.27. The results are filtered if a filter has been configured.
https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638
"""
return self._filter.filter_results(self.run_data["results"])
def get_records(self) -> List[Dict]:
"""
Get simplified records derived from the results of this run. The records have the
keys defined in `RECORD_ATTRIBUTES`.
"""
if not self._cached_records:
results = self.get_results()
self._cached_records = [self.result_to_record(result) for result in results]
return self._cached_records
def get_records_grouped_by_severity(self) -> Dict[str, List[Dict]]:
"""
Get the records, grouped by severity.
"""
return _group_records_by_severity(self.get_records())
def result_to_record(self, result):
"""
Convert a SARIF result object to a simple record with fields "Tool", "Location", "Line",
"Severity" and "Code".
See definition of result object here:
https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638
"""
error_id = result["ruleId"]
tool_name = self.get_tool_name()
(file_path, line_number) = _read_result_location(result)
if not file_path:
raise ValueError(f"No location in {error_id} output from {tool_name}")
if not line_number:
line_number = "1"
if self._path_prefixes_upper:
file_path_upper = file_path.upper()
for prefix in self._path_prefixes_upper:
if file_path_upper.startswith(prefix):
prefixlen = len(prefix)
if len(file_path) > prefixlen and file_path[prefixlen] in _SLASHES:
# Strip off trailing path separator
file_path = file_path[prefixlen + 1 :]
else:
file_path = file_path[prefixlen:]
break
# Get the error severity, if included, and code
severity = result.get(
"level", "warning"
) # If an error has no specified level then by default it is a warning
message = result["message"]["text"]
# Create a dict representing this result
record = {
"Tool": tool_name,
"Location": file_path,
"Line": line_number,
"Severity": severity,
"Code": f"{error_id} {message}",
}
return record
def get_result_count(self) -> int:
"""
Return the total number of results.
"""
return len(self.get_results())
def get_result_count_by_severity(self) -> Dict[str, int]:
"""
Return a dict from SARIF severity to number of records.
"""
records = self.get_records()
return {
severity: sum(1 for record in records if severity in record["Severity"])
for severity in SARIF_SEVERITIES
}
def get_issue_code_histogram(self, severity) -> List[Tuple]:
"""
Return a list of pairs (code, count) of the records with the specified
severities.
"""
return _count_records_by_issue_code(self.get_records(), severity)
def get_filter_stats(self) -> Optional[FilterStats]:
"""
Get the number of records that were included or excluded by the filter.
"""
return self._filter.get_filter_stats()
class SarifFile:
"""
    Class to hold SARIF data parsed from a file and provide accessors to the data.
"""
def __init__(self, file_path, data):
self.abs_file_path = os.path.abspath(file_path)
self.data = data
self.runs = [
SarifRun(self, run_index, run_data)
for (run_index, run_data) in enumerate(self.data.get("runs", []))
]
def __bool__(self):
"""
True if non-empty.
"""
return bool(self.runs)
def init_path_prefix_stripping(self, autotrim=False, path_prefixes=None):
"""
Set up path prefix stripping. When records are subsequently obtained, the start of the
path is stripped.
        If autotrim is set, the longest common prefix of the paths in each run is stripped.
        If path prefixes are specified, the specified prefixes are stripped.
"""
for run in self.runs:
run.init_path_prefix_stripping(autotrim, path_prefixes)
def init_default_line_number_1(self):
"""
Some SARIF records lack a line number. If this method is called, the default line number
"1" is substituted in that case in the records returned by get_records(). Otherwise,
None is returned.
"""
for run in self.runs:
run.init_default_line_number_1()
def init_blame_filter(
self,
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
):
"""
Set up blame filtering. This is applied to the author_mail field added to the "blame"
        property bag in each SARIF file. Results that lack blame information are included by
        default rather than causing an error.
If only inclusion criteria are provided, only issues matching the inclusion criteria
are considered.
If only exclusion criteria are provided, only issues not matching the exclusion criteria
are considered.
If both are provided, only issues matching the inclusion criteria and not matching the
exclusion criteria are considered.
include_substrings = substrings of author_mail to filter issues for inclusion.
include_regexes = regular expressions for author_mail to filter issues for inclusion.
exclude_substrings = substrings of author_mail to filter issues for exclusion.
exclude_regexes = regular expressions for author_mail to filter issues for exclusion.
"""
for run in self.runs:
run.init_blame_filter(
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
)
def get_abs_file_path(self) -> str:
"""
Get the absolute file path from which this SARIF data was loaded.
"""
return self.abs_file_path
def get_file_name(self) -> str:
"""
Get the file name from which this SARIF data was loaded.
"""
return os.path.basename(self.abs_file_path)
def get_file_name_without_extension(self) -> str:
"""
Get the file name from which this SARIF data was loaded, without extension.
"""
file_name = self.get_file_name()
return file_name[0 : file_name.index(".")] if "." in file_name else file_name
def get_file_name_extension(self) -> str:
"""
Get the extension of the file name from which this SARIF data was loaded.
        Initial "." excluded.
"""
file_name = self.get_file_name()
return file_name[file_name.index(".") + 1 :] if "." in file_name else ""
def get_filename_timestamp(self) -> str:
"""
Extract the timestamp from the filename and return the date-time string extracted.
"""
parsed_date = re.findall(DATETIME_REGEX, self.get_file_name())
        return parsed_date[0] if len(parsed_date) == 1 else None
def get_distinct_tool_names(self):
"""
Return a list of tool names that feature in the runs in this file.
The list is deduplicated and sorted into alphabetical order.
"""
return sorted(list(set(run.get_tool_name() for run in self.runs)))
def get_results(self) -> List[Dict]:
"""
Get the results from all runs in this file. These are the Result objects as defined in the
SARIF standard section 3.27.
https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638
"""
ret = []
for run in self.runs:
ret += run.get_results()
return ret
def get_records(self) -> List[Dict]:
"""
Get simplified records derived from the results of all runs. The records have the
keys defined in `RECORD_ATTRIBUTES`.
"""
ret = []
for run in self.runs:
ret += run.get_records()
return ret
def get_records_grouped_by_severity(self) -> Dict[str, List[Dict]]:
"""
Get the records, grouped by severity.
"""
return _group_records_by_severity(self.get_records())
def get_result_count(self) -> int:
"""
Return the total number of results.
"""
return sum(run.get_result_count() for run in self.runs)
def get_result_count_by_severity(self) -> Dict[str, int]:
"""
Return a dict from SARIF severity to number of records.
"""
get_result_count_by_severity_per_run = [
run.get_result_count_by_severity() for run in self.runs
]
return {
severity: sum(
rc.get(severity, 0) for rc in get_result_count_by_severity_per_run
)
for severity in SARIF_SEVERITIES
}
def get_issue_code_histogram(self, severity) -> List[Tuple]:
"""
Return a list of pairs (code, count) of the records with the specified
severities.
"""
return _count_records_by_issue_code(self.get_records(), severity)
def get_filter_stats(self) -> Optional[FilterStats]:
"""
Get the number of records that were included or excluded by the filter.
"""
ret = None
for run in self.runs:
ret = _add_filter_stats(ret, run.get_filter_stats())
return ret
class SarifFileSet:
"""
Class representing a set of SARIF files.
The "composite" pattern is used to allow multiple subdirectories.
"""
def __init__(self):
self.subdirs = []
self.files = []
def __bool__(self):
"""
Return true if there are any SARIF files, regardless of whether they contain any runs.
"""
return any(bool(subdir) for subdir in self.subdirs) or bool(self.files)
def __len__(self):
"""
Return the number of SARIF files, in total.
"""
return sum(len(subdir) for subdir in self.subdirs) + sum(
1 for f in self.files if f
)
def __iter__(self) -> Iterator[SarifFile]:
"""
Iterate the SARIF files in this set.
"""
for subdir in self.subdirs:
for input_file in subdir.files:
yield input_file
for input_file in self.files:
yield input_file
def __getitem__(self, index) -> SarifFile:
i = 0
for subdir in self.subdirs:
for input_file in subdir.files:
if i == index:
return input_file
i += 1
return self.files[index - i]
def get_description(self):
"""
Get a description of the SARIF file set - the name of the single file or the number of
files.
"""
count = len(self)
if count == 1:
return self[0].get_file_name()
return f"{count} files"
def init_path_prefix_stripping(self, autotrim=False, path_prefixes=None):
"""
Set up path prefix stripping. When records are subsequently obtained, the start of the
path is stripped.
        If autotrim is set, the longest common prefix of the paths in each run is stripped.
        If path prefixes are specified, the specified prefixes are stripped.
"""
for subdir in self.subdirs:
subdir.init_path_prefix_stripping(autotrim, path_prefixes)
for input_file in self.files:
input_file.init_path_prefix_stripping(autotrim, path_prefixes)
def init_default_line_number_1(self):
"""
Some SARIF records lack a line number. If this method is called, the default line number
"1" is substituted in that case in the records returned by get_records(). Otherwise,
None is returned.
"""
for subdir in self.subdirs:
subdir.init_default_line_number_1()
for input_file in self.files:
input_file.init_default_line_number_1()
def init_blame_filter(
self,
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
):
"""
Set up blame filtering. This is applied to the author_mail field added to the "blame"
        property bag in each SARIF file. Results that lack blame information are included by
        default rather than causing an error.
If only inclusion criteria are provided, only issues matching the inclusion criteria
are considered.
If only exclusion criteria are provided, only issues not matching the exclusion criteria
are considered.
If both are provided, only issues matching the inclusion criteria and not matching the
exclusion criteria are considered.
include_substrings = substrings of author_mail to filter issues for inclusion.
include_regexes = regular expressions for author_mail to filter issues for inclusion.
exclude_substrings = substrings of author_mail to filter issues for exclusion.
exclude_regexes = regular expressions for author_mail to filter issues for exclusion.
"""
for subdir in self.subdirs:
subdir.init_blame_filter(
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
)
for input_file in self.files:
input_file.init_blame_filter(
filter_description,
include_substrings,
include_regexes,
exclude_substrings,
exclude_regexes,
)
def add_dir(self, sarif_file_set):
"""
Add a SarifFileSet as a subdirectory.
"""
self.subdirs.append(sarif_file_set)
def add_file(self, sarif_file_object: SarifFile):
"""
Add a single SARIF file to the set.
"""
self.files.append(sarif_file_object)
def get_distinct_tool_names(self) -> List[str]:
"""
Return a list of tool names that feature in the runs in these files.
The list is deduplicated and sorted into alphabetical order.
"""
all_tool_names = set()
for subdir in self.subdirs:
all_tool_names.update(subdir.get_distinct_tool_names())
for input_file in self.files:
all_tool_names.update(input_file.get_distinct_tool_names())
return sorted(list(all_tool_names))
def get_results(self) -> List[Dict]:
"""
Get the results from all runs in all files. These are the Result objects as defined in the
SARIF standard section 3.27.
https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638
"""
ret = []
for subdir in self.subdirs:
ret += subdir.get_results()
for input_file in self.files:
ret += input_file.get_results()
return ret
def get_records(self) -> List[Dict]:
"""
Get simplified records derived from the results of all runs. The records have the
keys defined in `RECORD_ATTRIBUTES`.
"""
ret = []
for subdir in self.subdirs:
ret += subdir.get_records()
for input_file in self.files:
ret += input_file.get_records()
return ret
def get_records_grouped_by_severity(self) -> Dict[str, List[Dict]]:
"""
Get the records, grouped by severity.
"""
return _group_records_by_severity(self.get_records())
def get_result_count(self) -> int:
"""
Return the total number of results.
"""
return sum(subdir.get_result_count() for subdir in self.subdirs) + sum(
input_file.get_result_count() for input_file in self.files
)
def get_result_count_by_severity(self) -> Dict[str, int]:
"""
Return a dict from SARIF severity to number of records.
"""
result_counts_by_severity = []
for subdir in self.subdirs:
result_counts_by_severity.append(subdir.get_result_count_by_severity())
for input_file in self.files:
result_counts_by_severity.append(input_file.get_result_count_by_severity())
return {
severity: sum(rc.get(severity, 0) for rc in result_counts_by_severity)
for severity in SARIF_SEVERITIES
}
def get_issue_code_histogram(self, severity) -> List[Tuple]:
"""
Return a list of pairs (code, count) of the records with the specified
severities.
"""
return _count_records_by_issue_code(self.get_records(), severity)
def get_filter_stats(self) -> Optional[FilterStats]:
"""
Get the number of records that were included or excluded by the filter.
"""
ret = None
for subdir in self.subdirs:
ret = _add_filter_stats(ret, subdir.get_filter_stats())
for input_file in self.files:
ret = _add_filter_stats(ret, input_file.get_filter_stats())
return ret
|
/sarif-tools-1.0.0.tar.gz/sarif-tools-1.0.0/sarif/sarif_file.py
| 0.77886 | 0.259817 |
sarif_file.py
|
pypi
|
from datetime import datetime
import os
import docx
from docx import oxml
from docx import shared
from docx.enum import text
from docx.oxml import ns
from sarif import charts, sarif_file
from sarif.sarif_file import SarifFileSet
def generate_word_docs_from_sarif_inputs(
input_files: SarifFileSet, image_file: str, output: str, output_multiple_files: bool
):
"""
Convert SARIF input to Word file output.
"""
if not input_files:
raise ValueError("No input files specified!")
output_file = output
output_file_name = output
if output_multiple_files:
for input_file in input_files:
output_file_name = input_file.get_file_name_without_extension() + ".docx"
print(
"Writing Word summary of",
input_file.get_file_name(),
"to",
output_file_name,
)
_generate_word_summary(
input_file,
os.path.join(output, output_file_name),
image_file,
)
output_file_name = "static_analysis_output.docx"
output_file = os.path.join(output, output_file_name)
source_description = input_files.get_description()
print("Writing Word summary of", source_description, "to", output_file_name)
_generate_word_summary(input_files, output_file, image_file)
def _generate_word_summary(sarif_data, output_file, image_file):
# Create a new document
document = docx.Document()
_add_heading_and_highlevel_info(document, sarif_data, output_file, image_file)
_dump_errors_summary_by_sev(document, sarif_data)
_dump_each_error_in_detail(document, sarif_data)
# finally, save the document.
document.save(output_file)
def _add_heading_and_highlevel_info(document, sarif_data, output_file, image_path):
tool_name = ", ".join(sarif_data.get_distinct_tool_names())
heading = f"Sarif Summary: {tool_name}"
if image_path:
document.add_picture(image_path)
last_paragraph = document.paragraphs[-1]
last_paragraph.alignment = text.WD_PARAGRAPH_ALIGNMENT.CENTER
document.add_heading(heading, 0)
document.add_paragraph(f"Document generated on: {datetime.now()}")
sevs = ", ".join(sarif_file.SARIF_SEVERITIES)
document.add_paragraph(
f"Total number of various severities ({sevs}): {sarif_data.get_result_count()}"
)
filter_stats = sarif_data.get_filter_stats()
if filter_stats:
document.add_paragraph(f"Results were filtered by {filter_stats}.")
pie_chart_image_file_path = output_file.replace(".docx", "_severity_pie_chart.png")
if charts.generate_severity_pie_chart(sarif_data, pie_chart_image_file_path):
document.add_picture(pie_chart_image_file_path)
last_paragraph = document.paragraphs[-1]
last_paragraph.alignment = text.WD_PARAGRAPH_ALIGNMENT.CENTER
document.add_page_break()
def _dump_errors_summary_by_sev(document, sarif_data):
"""
For each severity level (in priority order): create a list of the errors of
that severity, print out how many there are and then do some further analysis
of which error codes are present.
"""
severities = sarif_file.SARIF_SEVERITIES
sev_to_records = sarif_data.get_records_grouped_by_severity()
for severity in severities:
errors_of_severity = sev_to_records.get(severity, [])
document.add_heading(
f"Severity : {severity} [ {len(errors_of_severity)} ]", level=1
)
# Go through the list of errors and create a dictionary of each error code
# present to how many times that error code occurs. Sort this dict and print
# out in descending order.
dict_of_error_codes = {}
for error in errors_of_severity:
issue_code = error["Code"]
dict_of_error_codes[issue_code] = dict_of_error_codes.get(issue_code, 0) + 1
sorted_dict = sorted(
dict_of_error_codes.items(), key=lambda x: x[1], reverse=True
)
if sorted_dict:
for error in sorted_dict:
document.add_paragraph(f"{error[0]}: {error[1]}", style="List Bullet")
else:
document.add_paragraph("None", style="List Bullet")
def _dump_each_error_in_detail(document, sarif_data):
"""
Write out the errors to a table so that a human can do further analysis.
"""
document.add_page_break()
severities = sarif_file.SARIF_SEVERITIES
sev_to_records = sarif_data.get_records_grouped_by_severity()
for severity in severities:
errors_of_severity = sev_to_records.get(severity, [])
sorted_errors_by_severity = sorted(errors_of_severity, key=lambda x: x["Code"])
# Sample:
# [{'Location': 'C:\\Max\\AccessionAndroid\\scripts\\parse_coverage.py', 'Line': 119,
# 'Severity': 'error', 'Code': 'DS126186 Disabled certificate validation'},
# {'Location': 'C:\\Max\\AccessionAndroid\\scripts\\parse_code_stats.py', 'Line': 61,
# 'Severity': 'error', 'Code': 'DS126186 Disabled certificate validation'},
# ]
if errors_of_severity:
document.add_heading(f"Severity : {severity}", level=2)
table = document.add_table(rows=1 + len(errors_of_severity), cols=3)
table.style = "Table Grid" # ColorfulGrid-Accent5'
table.autofit = False
table.alignment = text.WD_TAB_ALIGNMENT.CENTER
# Cell widths
widths = [shared.Inches(2), shared.Inches(4), shared.Inches(0.5)]
# To avoid performance problems with large tables, prepare the entries first in this
# list, then iterate the table cells and copy them in.
# First populate the header row
cells_text = ["Code", "Location", "Line"]
hdr_cells = table.rows[0].cells
for i in range(3):
table.rows[0].cells[i]._tc.get_or_add_tcPr().append(
oxml.parse_xml(
r'<w:shd {} w:fill="5fe3d8"/>'.format(ns.nsdecls("w"))
)
)
run = hdr_cells[i].paragraphs[0].add_run(cells_text[i])
run.bold = True
hdr_cells[i].paragraphs[
0
].alignment = text.WD_PARAGRAPH_ALIGNMENT.CENTER
hdr_cells[i].width = widths[i]
for eachrow in sorted_errors_by_severity:
cells_text += [
eachrow["Code"],
eachrow["Location"],
str(eachrow["Line"]),
]
# Note: using private property table._cells to avoid performance issue. See
# https://stackoverflow.com/a/69105798/316578
col_index = 0
for (cell, cell_text) in zip(table._cells, cells_text):
cell.text = cell_text
cell.width = widths[col_index]
col_index = col_index + 1 if col_index < 2 else 0
else:
document.add_heading(f"Severity : {severity}", level=2)
document.add_paragraph("None", style="List Bullet")
|
/sarif-tools-1.0.0.tar.gz/sarif-tools-1.0.0/sarif/operations/word_op.py
| 0.413477 | 0.256797 |
word_op.py
|
pypi
|
<div align="center">
<img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/sarina/text_on_contour.png" width=400>
<br/>
<h1>Sarina</h1>
<br/>
<img src="https://img.shields.io/badge/Python-14354C?style=for-the-badge&logo=python&logoColor=white" alt="built with Python3" />
<img src="https://img.shields.io/badge/C%2B%2B-00599C?style=for-the-badge&logo=c%2B%2B&logoColor=white" alt="built with C++" />
</div>
----------
Sarina: an ASCII art generator command-line tool that creates word clouds from weighted words in a text file, arranged along the contours of a given image.
<table border="0">
<tr>
<td>The program is dedicated to <a href="https://en.wikipedia.org/wiki/Death_of_Sarina_Esmailzadeh">Sarina Esmailzadeh</a>, a 16-year-old teenager who lost her life during the <a href="https://en.wikipedia.org/wiki/Mahsa_Amini_protests">Mahsa Amini protests</a>, as a result of violence inflicted by the IRGC forces. Her memory serves as a reminder of the importance of justice and human rights.
</td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/sarina/assets/images/Sarina.png" alt="Sarina Esmailzadeh" width=400 /></td>
</tr>
</table>
----------
## Table of contents
* [Introduction](https://github.com/AminAlam/Sarina#overview)
* [Installation](https://github.com/AminAlam/Sarina#installation)
* [Usage](https://github.com/AminAlam/Sarina#usage)
* [How It Works](https://github.com/AminAlam/Sarina#how-it-works)
----------
## Overview
<p align="justify">
Sarina is an ASCII art generator written in Python3 and C++. It transforms an input image and a text file containing words and their weights into a unique ASCII art representation. The algorithm behind Sarina is randomized, ensuring that every output is distinct, even for identical inputs.
</p>
----------
## Installation
### PyPI
- Check [Python Packaging User Guide](https://packaging.python.org/installing/)
- Run `pip install sarina-cli` or `pip3 install sarina-cli`
### Source code
- Clone the repository or download the source code.
- Run `pip3 install -r requirements.txt` or `pip install -r requirements.txt`
## Usage
### Default image and words
```console
Amin@Maximus:Sarina $ sarina
Sarina is generating your word cloud...
100%|███████████████████████████████████████████████████████████| 132/132 [01:09<00:00, 1.89it/s]
Done!
Images are saved in ./results
```
<table border="0">
<tr>
<td> Input Image </td>
<td> Generated Output </td>
<td> Generated Output </td>
<td> Generated Output </td>
<td> Generated Output </td>
<td> Generated Output </td>
</tr>
<tr>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/sarina/assets/images/iran_map.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/iran_map/just_text.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/iran_map/just_text_reverse.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/iran_map/text_on_contour.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/iran_map/text_on_contour_reverse.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/iran_map/text_on_main_image.png" width=400 /></td>
</tr>
</table>
### Custom image and options
```console
Amin@Maximus:Sarina $ sarina -if 'assets/images/Sarina.png' -ct 100 -ft 20 -tc [255,255,255] -pc -cs
Enter the contour indices to keep (+) or to remove (-) (separated by space): +1 -2 -3 -4
Sarina is generating your word cloud...
100%|███████████████████████████████████████████████████████████| 132/132 [01:06<00:00, 1.98it/s]
Done!
Images are saved in ./results
```
<table border="0">
<tr>
<td> Input Image </td>
<td> Generated Output </td>
<td> Generated Output </td>
<td> Generated Output </td>
<td> Generated Output </td>
<td> Generated Output </td>
</tr>
<tr>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/sarina/assets/images/Sarina.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/sarina/just_text.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/sarina/just_text_reverse.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/sarina/text_on_contour.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/sarina/text_on_contour_reverse.png" width=400 /></td>
<td><img src="https://github.com/AminAlam/Sarina/blob/dev/other_files/sarina/text_on_main_image.png" width=400 /></td>
</tr>
</table>
To learn more about the options, you can use the following command:
```console
Amin@Maximus:Sarina $ sarina --help
Usage: sarina [OPTIONS]
Sarina: An ASCII Art Generator to create word clouds from text files based
on image contours
Options:
-if, --img_file PATH Path to image file
-tf, --txt_file PATH Path to text file. Each line of the text
file should be in the following format:
WORD|WEIGHT
-cs, --contour_selection Contour selection - if selected, user will
be prompted to enter the contours index. For
example, if you want to keep the contours
with index 0, 3, 4, and remove contours with
index 1, 2, you should enter +0 +3 +4 -1 -2
-ct, --contour_treshold INTEGER RANGE
Threshold value to detect the contours.
Sarina uses intensity thresholding to detect
the contours. The higher the value, the more
contours will be detected but the less
accurate the result will be [default: 100;
0<=x<=255]
--max_iter INTEGER RANGE Maximum number of iterations. Higher number
of iterations will result in more consistent
results with the given texts and weights,
but it will take more time to generate the
result [default: 1000; 100<=x<=10000]
--decay_rate FLOAT RANGE Decay rate for font scale. Higher decay rate
will result in more consistent results with
the given texts and weights, but it will
take more time to generate the result
[default: 0.9; 0.1<=x<=1.0]
-ft, --font_thickness INTEGER Font thickness. Higher values will make the
texts font thicker. Choose this value based
on the size of the image [default: 10]
--margin INTEGER RANGE Margin between texts in pixels. Higher
values will result in more space between the
texts [default: 20; 0<=x<=100]
-tc, --text_color TEXT Text color in RGB format. For example,
[255,0,0] is red. Note to use square
brackets and commas. Also, just enter the
numbers, do not use spaces [default:
[0,0,0]]
-pc, --plot_contour Plot contour on the generated images. If
selected, the generated images will be
plotted with the detected/selected contours
-op, --opacity If selected, opacity of each text will be
selected based on its weight [default:
True]
-sp, --save_path PATH Path to save the generated images. If not
selected, the generated images will be saved
in the same results folder in the directory
as the function is called.
```
|
/sarina-cli-0.0.22.tar.gz/sarina-cli-0.0.22/README.md
| 0.513668 | 0.934515 |
README.md
|
pypi
|
==========
Why Sarkas
==========
Problem
-------
The typical workflow of MD simulations in plasma physics looks something like this
#. Write your own MD code, or use a pre-existing code, in a low-level language such as C/C++ or (even better) Fortran to exploit their computational speed.
#. Run multiple simulations with different initial conditions.
#. Analyze the output of each simulation. This is usually done in an interpreted, high-level language like Python.
#. Make plots and publish
There are two main issues with the above workflow: it requires `i)` a high level of computing knowledge to write/understand and run
an MD code, and `ii)` graduate-level plasma physics knowledge for calculating physical observables and transport coefficients.
Solution
--------
Sarkas: a fast pure-Python molecular dynamics suite for non-ideal plasmas.
Sarkas aims at lowering the entry barrier for computational plasma physics by providing a comprehensive MD suite complete
with pre- and post-processing tools commonly found in plasma physics.
Sarkas is entirely written in Python, without calls to C, hence avoiding the two-language problem. It relies on the most
common Python scientific packages, *e.g.* `NumPy <https://numpy.org/>`_, `Numba <http://numba.pydata.org/>`_,
`SciPy <https://www.scipy.org/>`_, and `Pandas <https://pandas.pydata.org/>`_, which provide a solid foundation built,
optimized, and well documented by one of the largest community of developers.
Furthermore, Sarkas is developed using an object-oriented approach, allowing users to add new features
in a straightforward way.
Sarkas targets a broad user base: from experimentalists to computational physicists, from students approaching plasma
physics for the first time to seasoned researchers. Therefore Sarkas' design revolves around two primary requirements:
ease-of-use and extensibility.
Old School
==========
First and foremost we run the help command in a terminal window
.. code-block:: bash
$ sarkas_simulate -h
This will produce the following output
.. figure:: Help_output.png
:alt: Figure not found
This output prints out the different options with which you can run Sarkas.
- ``-i`` or ``--input`` is required and is the path to the YAML input file of our simulation.
- ``-c`` or ``--check_status`` which can be either ``equilibration`` or ``production`` and indicates whether we want to check the equilibration or production phase of the run.
- ``-t`` or ``--pre_run_testing`` is a boolean flag indicating whether to run a test of our input parameters and estimate the simulation times.
- ``-p`` or ``--plot_show`` is a boolean flag indicating whether to show plots to screen.
- ``-v`` or ``--verbose`` boolean for verbose output.
- ``-d`` or ``--sim_dir`` name of the directory storing all the simulations.
- ``-j`` or ``--job_id`` name of the directory of the current run.
- ``-s`` or ``--seed`` sets the random number seed.
- ``-r`` or ``--restart`` for starting the simulation from a specific point.
The ``--input`` option is the only required option as it refers to the input file.
If we wanted to run multiple simulations of the same system but with different initial conditions,
a typical bash script would look like this
.. code-block:: bash
conda activate sarkas
sarkas_simulate -i sarkas/examples/yukawa_mks_p3m.yaml -s 125125 -d run1
sarkas_simulate -i sarkas/examples/yukawa_mks_p3m.yaml -s 281756 -d run2
sarkas_simulate -i sarkas/examples/yukawa_mks_p3m.yaml -s 256158 -d run3
sarkas_simulate -i sarkas/examples/yukawa_mks_p3m.yaml -s 958762 -d run4
sarkas_simulate -i sarkas/examples/yukawa_mks_p3m.yaml -s 912856 -d run5
conda deactivate
If you are familiar with ``bash`` scripting, you could wrap the above statements in a loop to run many more simulations.
Once the simulations are done, it's time to analyze the data. This is usually done with a Python script.
This was the old way of running simulations.
Here we find the first advantage of Sarkas: removing the need to know multiple languages. Sarkas is not a Python wrapper
around an existing MD code. It is entirely written in Python to allow the user to modify the code for their specific needs.
This choice, however, does not come at the expense of speed. In fact, Sarkas makes heavy use of the ``NumPy`` and ``Numba``
packages so that the code can run as fast as, if not faster than, low-level languages like ``C/C++`` and ``Fortran``.
New School
==========
Sarkas was created with the idea of incorporating the entire simulation workflow in a single Python
script. Let's say we want to run a set of ten simulations of a Yukawa OCP for different
screening parameters and measure their diffusion coefficient. An example script looks like this
.. code-block:: python
from sarkas.processes import Simulation, PostProcess
from sarkas.tools.observables import VelocityAutoCorrelationFunction
from sarkas.tools.transport import TransportCoefficients
import numpy as np
import os
# Path to the input file
examples_folder = os.path.join('sarkas', 'examples')
input_file_name = os.path.join(examples_folder,'yukawa_mks.yaml')
# Create arrays of screening parameters
    kappas = np.linspace(1, 10, 10)
# Run 10 simulations
for i, kappa in enumerate(kappas):
# Note that we don't want to overwrite each simulation
# So we save each simulation in its own folder by passing
# a dictionary of dictionary with folder's name
args = {
"IO":
{
"job_dir": "yocp_kappa{}".format(kappa)
},
"Potential":
{"kappa": kappa}
}
# Initialize and run the simulation
sim = Simulation(input_file_name)
sim.setup(read_yaml=True, other_inputs=args)
sim.run()
# Make Temperature and Energy plots.
postproc = PostProcess(input_file_name)
postproc.setup(read_yaml = True, other_inputs = args)
postproc.run()
# Calculate the VACF
vacf = VelocityAutoCorrelationFunction()
vacf.setup(postproc.parameters)
vacf.compute()
# Calculate the diffusion coefficient
tc = TransportCoefficients(postproc.parameters)
tc.diffusion(vacf, plot=True)
Notice how both the simulation and the postprocessing can be done all in one script.
|
/sarkas-1.0.2.tar.gz/sarkas-1.0.2/docs/documentation/why_sarkas.rst
| 0.941909 | 0.810891 |
why_sarkas.rst
|
pypi
|
==========
Input File
==========
The first step in any MD simulation is the creation of an input file containing all the relevant parameters
of our simulation. Take a look at the file ``yukawa_mks_p3m.yaml`` that can be
found `here <https://raw.githubusercontent.com/murillo-group/sarkas/master/docs/documentation/Tutorial_NB/input_files/yukawa_mks_p3m.yaml>`_.
It is very important to maintain the syntax shown in the example YAML files.
This is because the content of the YAML file is returned as a dictionary of dictionaries.
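As an illustration (this is not necessarily how Sarkas parses the file internally), a YAML file of this form loads into nested Python dictionaries, e.g. with the third-party ``yaml`` package:

.. code-block:: python

    import yaml

    # Hypothetical example: parse an input file into a dictionary of dictionaries.
    with open("yukawa_mks_p3m.yaml") as f:
        params = yaml.safe_load(f)

    print(params["Potential"]["type"])    # e.g. 'Yukawa'
    print(params["Parameters"]["units"])  # e.g. 'mks'

Each top-level block (``Particles``, ``Potential``, ``Parameters``, ...) becomes a key of the outer dictionary.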
Particles
---------
The ``Particles`` block contains the attribute ``Species`` which defines the first type of particles, i.e. species,
and their physical attributes.
.. code-block:: yaml
Particles:
- Species:
name: H
number_density: 1.62e+32 # /m^3
mass: 1.673e-27 # kg, ptcl mass of ion1
num: 10000 # total number of particles of ion1
Z: 1.0 # degree of ionization
temperature_eV: 0.5
In the case of a multi-component plasma we need only add another ``Species`` attribute with the corresponding physical
parameters, see ``ybim_mks_p3m.yaml``. The attributes of ``Species`` take only numerical values, apart from ``name``,
in the choice of units defined in the ``Parameters`` block, see below.
Notice that in this section we also define the mass of the particles, ``mass``, and their charge number ``Z``.
Future developments of Sarkas aim to automatically calculate the degree of ionization given the density and
temperature of the system, but for now we need to define it. The parameters given here are not the only options;
more information on all the possible inputs can be found in the page ``input file``.
The initial velocity distribution can be set by ``initial_velocity_distribution``. It defaults to a ``boltzmann``
distribution but can also be set to ``monochromatic``, where a fixed energy is assigned to the particles with
randomly distributed directions.
Interaction
-----------
The next section of the input file defines our interaction potential's parameters
.. code-block:: yaml
Potential:
type: Yukawa
method: P3M # Particle-Particle Particle-Mesh
kappa: 0.5
rc: 2.79946255e-10 # [m]
pppm_mesh: [64, 64, 64]
pppm_aliases: [3,3,3]
pppm_cao: 6
pppm_alpha_ewald: 1.16243741e+10 # 1/[m]
The instance ``type`` defines the interaction potential. Currently Sarkas supports the following interaction potentials:
Coulomb, Yukawa, Exact-gradient corrected Yukawa, Quantum Statistical Potentials, Moliere, Lennard-Jones 6-12. More info
on each of these potentials can be found in :ref:`potentials`. Next we define the screening parameter ``kappa``.
Notice that this is a non-dimensional parameter, i.e. the real screening length will be calculated
from :math:`\lambda = a/\kappa` where :math:`a` is the Wigner-Seitz radius.
The following parameters refer to our choice of the interaction algorithm. Details on how to choose these parameters
are given later in this page, but for now we limit to describing them. First, we find the cut-off radius, ``rc``,
for the Particle-Particle part of the P3M algorithm.
The ``pppm_mesh`` instance is a list of 3 elements corresponding to the number of mesh points in each of the three
cartesian coordinates, ``pppm_aliases`` indicates the number of aliases for anti-aliasing, see <link to anti-aliasing>.
``pppm_cao`` stands for Charge Order Parameter and indicates the number of mesh points per direction
over which each particle's charge is to be distributed, and finally ``pppm_alpha_ewald`` refers to
the :math:`\alpha` parameter of the Gaussian charge cloud surrounding each particle.
To deal with diverging potentials a short-range cut-off radius, ``rs``, can be specified. If specified, the potential
:math:`U(r)` will be cut to :math:`U(rs)` for interparticle distances below ``rs``. This short-range cut-off is meant to
suppress unphysical scenarios where fast particles emerge due to the potential going to infinity. However, this feature
should be used with great care as it can also screen the short-range part of the interaction to unphysical values. That
is why the default value is zero so that the short-range cut-off is not in use.
Integrator
----------
Notice that we have not defined our integrator yet. This is done in the section ``Integrator`` of the input file
.. code-block:: yaml
Integrator:
type: Verlet # velocity integrator type
equilibration_steps: 10000 # number of timesteps for the equilibrium
production_steps: 100000 # number of timesteps after the equilibrium
eq_dump_step: 100
prod_dump_step: 100
Here ``Verlet`` refers to the common Velocity Verlet algorithm in which the particles' velocities are updated first. This must
not be confused with the Position Verlet algorithm. The two algorithms are equivalent, however, Velocity Verlet
is the more efficient and the preferred choice in most MD simulations.
Currently Sarkas also supports the magnetic Velocity Verlet, see ``ybim_mks_p3m_mag.yaml``; more details are
discussed in ... .
``equilibration_steps`` and ``production_steps`` are the number of timesteps of the equilibration and production phase,
respectively. ``eq_dump_step`` and ``prod_dump_step`` are the interval timesteps over which Sarkas will save simulations
data.
Further integration schemes are under development: these include adaptive Runge-Kutta, symplectic high-order integrators, and
multiple-timestep algorithms. The Murillo group is currently looking for students willing to explore all of the above.
Thermostat
----------
Most MD simulations require a thermalization phase in which the system evolves in time in an :math:`NVT` ensemble
so that the initial configuration relaxes to the desired thermal equilibrium. The parameters
of the thermalization phase are defined in the ``Thermostat`` section of the input file.
.. code-block:: yaml
Thermostat:
type: Berendsen # thermostat type
relaxation_timestep: 50
berendsen_tau: 1.0
The first instance defines the type of thermostat. Currently Sarkas supports only the Berendsen and Langevin types,
but other thermostats like Nose-Hoover are, you guessed it, in development.
The ``relaxation_timestep`` instance indicates the timestep number at which the Berendsen thermostat will be turned on.
The instance ``berendsen_tau`` indicates the relaxation rate of the Berendsen thermostat, see :ref:`thermostats` for more details.
The last instance defines the temperature (be careful with units!) at which the system is to be thermalized.
Notice that this takes a single value in the case of a single species, while it is a list in the case of
multicomponent plasmas. Note that these temperatures need not be the same as those defined in the ``Particles`` block as
it might be the case that you want to study temperature relaxation in plasma mixtures.
Parameters
----------
The next section defines some general parameters
.. code-block:: yaml
Parameters:
units: mks # units
dt: 2.000e-18 # sec
load_method: random_no_reject
boundary_conditions: periodic
The first instance defines the choice of units (mks or cgs) which must be consistent with all the other dimensional
parameters defined in previous sections. The second instance is the value of the timestep in seconds.
``load_method`` defines the way particles positions are to be initialized. The options are
- ``random_no_reject`` for a uniform spatial distribution
- ``random_reject`` for a uniform spatial distribution but with a minimum distance between particles
- ``halton``
Next we define the ``boundary_conditions`` of our simulation. At the moment Sarkas supports only ``periodic`` and
``absorbing`` boundary conditions.
Support for open and mixed boundary conditions is planned for future releases.
We accept pull requests :) !
By specifying ``Lx``, ``Ly`` and ``Lz`` the simulation box can be specified explicitly and expanded with respect
to the initial particle distribution. This moves the walls where boundary conditions are applied away from the
initial particle volume.
Input/Output
------------
The next section defines some IO parameters
.. code-block:: yaml
IO:
verbose: yes
simulations_dir: Simulations
job_dir: yocp_pppm # dir name to save data.
job_id: yocp
``verbose`` is a flag for printing progress to screen. This is useful in the initialization phase of an MD
simulation. The next instances are not necessary, as there are default values for them, however, they are useful for organizing your work. ``simulations_dir``
is the directory where all the simulations will be stored. The default value is ``Simulations`` and this will be
created in your current working directory. Next, ``job_dir`` is the name of the directory of this specific simulation
which we chose to call ``yocp_pppm``. This directory will contain ``pickle`` files storing all your simulations
parameters and physical constants, a log file of your simulation, the ``Equilibration`` and ``Production``
directories containing simulation dumps, and ``PreProcessing`` and ``PostProcessing`` directories. Finally ``job_id`` is an identifier appended to all the file names identifying
this specific run. This is useful when you have many runs that differ only in the choice of ``random_seed``.
Post Processing
---------------
The last two blocks are ``Observables`` and ``TransportCoefficients``. They indicate the quantities
we want to calculate and their parameters.
Observables
***********
The observables we want to calculate are
.. code-block:: yaml
Observables:
- RadialDistributionFunction:
no_bins: 500
- Thermodynamics:
phase: production
- DynamicStructureFactor:
no_slices: 1
max_ka_value: 8
- StaticStructureFactor:
max_ka_value: 8
- CurrentCorrelationFunction:
max_ka_value: 8
    - VelocityAutoCorrelationFunction:
        no_slices: 4
Note that ``Observables`` is again a list of dictionaries. This is because each observable is returned as
an object in the simulation. The lines below the observables' names are the parameters needed for the calculation.
The parameters are different depending on the observable. We will discuss them in the next pages of this tutorial.
Transport Coefficients
**********************
.. code-block:: yaml
    TransportCoefficients:
- Diffusion:
no_slices: 4
The available transport coefficients at this moment are: ``Diffusion``, ``Interdiffusion``, ``ElectricalConductivity``,
``Viscosity``.
Note that ``Interdiffusion`` is supported only in the case of binary mixtures.
Soon we will have support for any mixture.
|
/sarkas-1.0.2.tar.gz/sarkas-1.0.2/docs/documentation/input_file.rst
| 0.936851 | 0.872619 |
input_file.rst
|
pypi
|
.. _integrators:
===========
Integrators
===========
Sarkas aims to support a variety of time integrators both built-in and user defined.
Currently the available ones are:
- Velocity Verlet
- Velocity Verlet with Langevin Thermostat
- Magnetic Velocity Verlet
- Magnetic Boris
The choice of integrator is provided in the input file and the method ``sarkas.time_evolution.integrators.Integrator.setup``
links the chosen integrator to the ``sarkas.time_evolution.integrators.Integrator.update`` method which evolves
particles' positions, velocities, and accelerations in time.
The Velocity Verlet algorithm is the most common integrator used in MD plasma codes.
It is preferred to other more accurate integrators, such as RK45, inasmuch as it conserves the symmetries of the
Hamiltonian, is fast, and is easy to implement.
Phase Space Distribution
------------------------
The state of the system is defined by the set of phase space coordinates
:math:`\{ \mathbf r, \mathbf p \} = \{ \mathbf r_1, \mathbf r_2, \dots, \mathbf r_N , \mathbf p_1, \mathbf p_2, \dots, \mathbf p_N \}`
where :math:`N` represents the number of particles. The system evolves in time according to the Hamiltonian
.. math::
\mathcal H = \mathcal T + \mathcal U,
where :math:`\mathcal T` is the kinetic energy and :math:`\mathcal U` the interaction potential. The :math:`N`-particle
probability distribution :math:`f_N(\mathbf r, \mathbf p; t)` evolves in time according to the Liouville equation
.. math::
    i\mathcal L f_N(\mathbf r, \mathbf p;t) = 0,
with
.. math::
\mathcal L = \frac{\partial}{\partial t} + \dot{\mathbf r} \cdot \frac{\partial}{\partial \mathbf r} + \dot{\mathbf p}\cdot \frac{\partial}{\partial \mathbf p},
.. math::
\dot{\mathbf r} = \frac{\partial \mathcal H}{\partial \mathbf p}, \quad \dot{\mathbf p} = - \frac{\partial \mathcal H}{\partial \mathbf r}.
The solution of the Liouville equation is
.. math::
    f_N(\mathbf r, \mathbf p;t) = e^{- i \mathcal L t } f_N(\mathbf r, \mathbf p;0)
Velocity Verlet
---------------
It can be shown that the Velocity Verlet corresponds to a second order splitting of the Liouville operator :math:`\mathcal L = K + V`
.. math::
e^{i \epsilon \mathcal L} \approx e^{\frac{\Delta t}{2} K}e^{\Delta t V}e^{\frac{\Delta t}{2} K}
where :math:`\epsilon = -i \Delta t` and the operators
.. math::
    K = \mathbf v \cdot \frac{\partial}{\partial \mathbf r}, \quad
    V = \mathbf a \cdot \frac{\partial}{\partial \mathbf v}.
Any dynamical quantity :math:`W` evolves in time according to the Liouville operator :math:`\mathcal L = K + V`
.. math::
W(t) = e^{i\epsilon (K + V)} W(0).
Applying each one of these to the initial set :math:`\mathbf W = ( \mathbf r_0, \mathbf v_0)` we find
:math:`V \mathbf r_0 = K \mathbf v_0 = 0`, so that :math:`e^{i\epsilon V} \mathbf r_0 = \mathbf r_0`, :math:`e^{i\epsilon K} \mathbf v_0 = \mathbf v_0`, and
.. math::
    e^{i \epsilon K} \mathbf r_0 & = & \left ( 1 + \Delta t K + \frac{\Delta t^2 K^2}{2!} + \frac{\Delta t^3 K^3}{3!} + \dots \right ) \mathbf r_0 \nonumber \\ & = & \left [ 1 + \Delta t \mathbf v \cdot \frac{\partial}{\partial \mathbf r} + \frac{\Delta t^2}{2} \left ( \mathbf v \cdot \frac{\partial}{\partial \mathbf r} \right )^2 + \dots \right ] \mathbf r_0 \nonumber \\
    & = & \mathbf r_0 + \Delta t \mathbf v
.. math::
    e^{i \epsilon V} \mathbf v_0 & = & \left ( 1 + \Delta t V + \frac{\Delta t^2 V^2}{2!} + \frac{\Delta t^3 V^3}{3!} + \dots \right ) \mathbf v_0 \nonumber \\ & = & \left [ 1 + \Delta t \mathbf a \cdot \frac{\partial}{\partial \mathbf v} + \frac{\Delta t^2}{2} \left ( \mathbf a \cdot \frac{\partial}{\partial \mathbf v} \right )^2 + \dots \right ] \mathbf v_0 \nonumber \\
    & = & \mathbf v_0 + \Delta t \mathbf a
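Putting the drift and kick operators together gives the usual Velocity Verlet update. A minimal sketch, written here in the common kick-drift-kick form (plain NumPy, with a user-supplied ``acceleration`` callback as a placeholder; this is illustrative, not Sarkas' internal implementation):

.. code-block:: python

    import numpy as np

    def velocity_verlet_step(r, v, a, dt, acceleration):
        """One Velocity Verlet step in kick-drift-kick form."""
        v_half = v + 0.5 * dt * a           # half kick
        r_new = r + dt * v_half             # drift
        a_new = acceleration(r_new)         # single force evaluation per step
        v_new = v_half + 0.5 * dt * a_new   # second half kick
        return r_new, v_new, a_new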
Magnetic Velocity Verlet
------------------------
A generalization to include constant external magnetic fields leads to the Liouville operator
:math:`e^{i \epsilon( K + V + L_B)}` where :cite:`Chin2008`
.. math::
L_B = \omega_c \left ( \hat{\mathbf B} \times \mathbf v \right ) \cdot \frac{\partial}{\partial \mathbf v} = \omega_c \hat{\mathbf B} \cdot \left( \mathbf v \times \frac{\partial}{\partial \mathbf v} \right ) = \omega_c \hat{\mathbf B} \cdot \mathbf J_{\mathbf v}.
Application of this operator leaves the positions unchanged, :math:`e^{i \epsilon L_B}\mathbf r_0 = \mathbf r_0`, and
.. math::
    e^{ i \epsilon L_B } \mathbf v_0 & = & \left ( 1 + \Delta t L_B + \frac{\Delta t^2 L_B^2}{2!} + \frac{\Delta t^3 L_B^3}{3!} + \dots \right ) \mathbf v_0 \nonumber \\ & = & \left [ 1 + \omega_c \Delta t \hat{\mathbf B} \cdot \mathbf J_{\mathbf v} + \frac{\omega_c^2 \Delta t^2}{2} \left ( \hat{\mathbf B} \cdot \mathbf J_{\mathbf v} \right )^2 + \dots \right ] \mathbf v_0 \nonumber \\
& = & \begin{pmatrix}
\cos(\omega_c\Delta t) & - \sin(\omega_c\Delta t) & 0 \\
\sin(\omega_c\Delta t) & \cos(\omega_c\Delta t) & 0 \\
0 & 0 & 1 \\
\end{pmatrix} \mathbf v_0 \\
& = &\mathbf v_{0,\parallel} + \cos(\omega_c \Delta t) \mathbf v_{0,\perp} + \sin(\omega_c \Delta t) \hat{\mathbf B} \times \mathbf v_{0, \perp},
where in the last step we have decomposed the velocity into its components parallel and perpendicular to the
:math:`\mathbf B` field. In addition, we have
.. math::
    e^{i \epsilon (L_B + V) } \mathbf v_0 & = & e^{i \epsilon L_B} \mathbf v_0 + \Delta t \mathbf a + \frac{1 - \cos(\omega_c \Delta t)}{\omega_c} \left ( \hat{\mathbf B} \times \mathbf a \right ) \nonumber \\
    && + \Delta t \left ( 1 - \frac{\sin(\omega_c \Delta t)}{\omega_c \Delta t} \right ) \left [ \hat {\mathbf B} \times \left ( \hat{\mathbf B} \times \mathbf a \right ) \right ].
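As a sketch of the rotation step above (assuming a constant magnetic field along :math:`\hat z`, as in the matrix representation; illustrative only, not the Sarkas routine):

.. code-block:: python

    import numpy as np

    def rotate_about_b(v, omega_c, dt):
        """Apply exp(i*eps*L_B): rotate the perpendicular velocity components about B parallel to z."""
        c, s = np.cos(omega_c * dt), np.sin(omega_c * dt)
        vx, vy, vz = v
        return np.array([c * vx - s * vy, s * vx + c * vy, vz])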
Time integrators of various order can be found by exponential splitting, that is
.. math::
e^{i \epsilon \mathcal L} \approx \prod_{ j = 1}^{N} e^{i a_j \epsilon K} e^{i b_j \epsilon \left ( L_B + V \right ) }.
The Boris algorithm, widely used in Particle in Cell simulations, corresponds to :cite:`Chin2008`
.. math::
e^{i \epsilon \mathcal L} \approx e^{i \epsilon K} e^{i \epsilon V/2} e^{i \epsilon L_B} e^{i \epsilon V/2}
while a generalization of the Velocity-Verlet :cite:`Chin2008,Spreiter1999`
.. math::
e^{i \epsilon \mathcal L} \approx e^{i \epsilon (L_B + V) /2} e^{i \epsilon K} e^{i \epsilon ( L_B + V)/2}.
Notice that all the above algorithms require only one force calculation per time step.
|
/sarkas-1.0.2.tar.gz/sarkas-1.0.2/docs/theory/integrators.rst
| 0.908601 | 0.991844 |
integrators.rst
|
pypi
|
===========================================
Particle-Particle Particle-Mesh Algorithm
===========================================
Ewald Sum
=========
Long range forces are calculated using the Ewald method which consists in dividing the potential into a short
and a long range part. Physically this is equivalent to adding and subtracting a screening cloud around each charge.
This screening cloud is usually chosen to be given by a Gaussian charged density distribution, but it need not be.
The choice of a Gaussian is due to spherical symmetry.
The total charge density at point :math:`\mathbf r` is then
.. math::
\rho(\mathbf r) = \sum_{i}^N \left \{ \left ( q_i\delta( \mathbf r - \mathbf r_i) - \frac{q_i\alpha^{3/2}}{\pi} e^{-\alpha^2 \left( \mathbf r - \mathbf r_i \right )^2 } \right ) + \frac{q_i\alpha^{3/2}}{\pi} e^{-\alpha^2 \left( \mathbf r- \mathbf r_i \right )^2 } \right \},
where the first term is the charge density due to the real particles and the last two terms are a negative
and positive screening cloud. The first two terms are in parentheses to emphasize the splitting into
.. math::
\rho(\mathbf r) = \rho_{\mathcal R}(\mathbf r) + \rho_{\mathcal F}(\mathbf r)
.. math::
\rho_{\mathcal R} (\mathbf r) = \sum_{i}^N \left ( q_i\delta( \mathbf r- \mathbf r_i) - \frac{q_i\alpha^{3/2}}{\pi} e^{-\alpha^2 \left( \mathbf r- \mathbf r_i \right )^2 } \right ), \quad \rho_{\mathcal F}(\mathbf r) = \sum_{i}^N \frac{q_i\alpha^{3/2}}{\pi} e^{-\alpha^2 \left( \mathbf r- \mathbf r_i \right )^2 }
where :math:`\rho_{\mathcal R}(\mathbf r)` indicates the charge density leading to the short range part of the potential
and :math:`\rho_{\mathcal F}(\mathbf r)` leading to the long range part.
The subscripts :math:`\mathcal R, \mathcal F` stand for Real and Fourier space indicating the way the calculation
will be done.
The potential at every point :math:`\mathbf r` is calculated from Poisson's equation
.. math::
-\nabla^2 \phi( \mathbf r) = 4\pi \rho_{\mathcal R} (\mathbf r) + 4\pi \rho_{\mathcal F}( \mathbf r).
Short-range term
----------------
The short range term is calculated in the usual way
.. math::
-\nabla^2 \phi_{\mathcal R}( \mathbf r) = 4\pi \sum_{i}^N \left ( q_i\delta( \mathbf r- \mathbf r_i) - \frac{q_i\alpha^{3/2}}{\pi} e^{-\alpha^2 \left( \mathbf r- \mathbf r_i \right )^2 } \right ).
The first term :math:`\delta(\mathbf r - \mathbf r_i)` leads to the usual Coulomb potential (:math:`\sim 1/r`) while
the Gaussian leads to the error function
.. math::
\phi_{\mathcal R}( \mathbf r ) = \sum_i^N \frac{q_i}{r} - \frac{q_i}{r}\text{erf} (\alpha r) = \sum_i^N \frac{q_i}{r} \text{erfc}(\alpha r)
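As a sketch, this short-range sum can be evaluated directly with the complementary error function (the charge and distance arrays are placeholders; this is illustrative, not the Sarkas routine):

.. code-block:: python

    import numpy as np
    from scipy.special import erfc

    def phi_short_range(charges, distances, alpha):
        """Real-space (short-range) part of the Ewald potential at a point."""
        return np.sum(charges * erfc(alpha * distances) / distances)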
Long-range term
---------------
The long range term is calculated in Fourier space
.. math::
k^2 \tilde\phi_{\mathcal F}(k) = 4\pi \tilde\rho_{\mathcal F}(k)
where
.. math::
\tilde\rho_{\mathcal F}(k) = \frac{1}{V} \int d\mathbf re^{- i \mathbf k \cdot \mathbf r} \rho_{\mathcal F}( \mathbf r ) = \sum_{i}^N \frac{q_i\alpha^{3/2}}{\pi V} \int d\mathbf r e^{- i \mathbf k \cdot \mathbf r} e^{-\alpha^2 \left( \mathbf r - \mathbf r_i \right )^2 } = \sum_{i}^N \frac{q_i}{V} e^{-i \mathbf k \cdot \mathbf r_i} e^{-k^2/(4\alpha^2)}.
The potential is then
.. math::
\tilde \phi_{\mathcal F}(\mathbf k) = \frac{4\pi}{k^2} \frac{1}{V} \sum_{i}^N q_i e^{-i\mathbf k \cdot \mathbf r_i} e^{-k^2/(4\alpha^2)} = \frac{1}{V} \sum_i^N v(k)e^{-k^2/(4 \alpha^2)} q_i e^{-i \mathbf k \cdot \mathbf r_i}
and in real space
.. math::
    \phi_{\mathcal F}( \mathbf r ) = \sum_{\mathbf k \neq 0} \tilde \phi_{\mathcal F}(\mathbf k)e^{i \mathbf k \cdot \mathbf r} = \frac{1}{V} \sum_{\mathbf k\neq 0} \sum_{i}^N v(k) e^{-k^2/(4\alpha^2)}q_i e^{i \mathbf k \cdot ( \mathbf r- \mathbf r_i) },
where the :math:`\mathbf k = 0` is removed from the sum because of the overall charge neutrality.
The potential energy created by this long range part is
.. math::
U_{\mathcal F} = \frac {1}{2} \sum_i^N q_i \phi_{\mathcal F}(\mathbf r_i) = \frac{1}{2} \frac{1}{V} \sum_{i,j}^N q_i q_j \sum_{\mathbf k \neq 0 } v(k) e^{-k^2/(4\alpha^2)}e^{i \mathbf k \cdot ( \mathbf r_i - \mathbf r_j) } = \frac{1}{2} \sum_{\mathbf k \neq 0} |\rho_0(\mathbf k)|^2 v(k) e^{-k^2/(4\alpha^2)},
where we used the definition of the charge density
.. math::
\rho_0(\mathbf k) = \frac 1V \sum_i^N q_i e^{i \mathbf k \cdot \mathbf r_i}.
However, in the above sum we are including the self-energy term, i.e. :math:`\mathbf r_i = \mathbf r_j`. This term
can be easily calculated and then removed from :math:`U_{\mathcal F}`
.. math::
\frac{\mathcal Q^2}{2V} \sum_{\mathbf k} \frac{4\pi}{k^2} e^{-k^2/(4\alpha^2)} \rightarrow \frac{\mathcal Q^2}{2V} \left ( \frac{L}{2\pi} \right )^3 \int dk (4\pi)^2 e^{-k^2/(4\alpha^2) } = \mathcal Q^2 \frac{(4\pi)^2}{2V} \left ( \frac{L}{2\pi} \right )^3 \sqrt{\pi } \alpha = \mathcal Q^2 \frac{\alpha}{\sqrt{\pi} }
where :math:`\mathcal Q^2 = \sum_i^N q_i^2`, note that in the integral we have re-included :math:`\mathbf k = 0`, but
this is not a problem. Finally the long-range potential energy is
.. math::
U_{\mathcal L} = U_{\mathcal F} - \mathcal Q^2 \frac{\alpha}{\sqrt{\pi} }
|
/sarkas-1.0.2.tar.gz/sarkas-1.0.2/docs/theory/PPPM.rst
| 0.914224 | 0.991554 |
PPPM.rst
|
pypi
|
==========
Potentials
==========
Sarkas supports a variety of potentials both built-in and user defined. Currently the potential functions that are
implemented include
- :ref:`Coulomb <coulomb_pot>`
- :ref:`Yukawa <yukawa_pot>`
- :ref:`Exact Gradient-corrected Screened Yukawa <egs_pot>`
- :ref:`Quantum Statistical Potential <qsp_pot>`
- :ref:`Moliere <moliere_pot>`
- :ref:`Lennard Jones <lj_pot>`
You can read more about each of these potentials in the corresponding sections below.
All the equations will be given in cgs units, however, for easy conversion, we define the charge
.. math::
\bar{e}^2 = \frac{e^2}{4\pi \varepsilon_0},
which when substituted in gives the equivalent mks formula.
Electron parameters and thermodynamic formulas are given in :ref:`here <Electron Properties>`.
.. _coulomb_pot:
Coulomb Potential
-----------------
Two charged particle with charge numbers :math:`Z_a` and :math:`Z_b` interact with each other via the Coulomb potential
given by
.. math::
U_{ab}(r) = \frac{Z_{a}Z_b\bar{e}^2}{r}.
where :math:`r` is the distance between ions and :math:`e` is the elementary charge.
.. _yukawa_pot:
Yukawa Potential
----------------
The Yukawa potential, or screened Coulomb potential, is widely used in the plasma community to describe the interactions
of positively charged ions in a uniform background of electrons. The form of the Yukawa potential for two ions of charge
number :math:`Z_a` and :math:`Z_b` is given by
.. math::
U_{ab}(r) = \frac{Z_{a} Z_b \bar{e}^2}{r}e^{- r /\lambda_{\textrm{TF}}}, \quad \kappa = \frac{a_{\textrm{ws}}}{\lambda_{\textrm{TF}} }
where :math:`\lambda_{\textrm{TF}}` is the Thomas-Fermi wavelength and :math:`\kappa` is the screening parameter.
In Sarkas :math:`\kappa` can be given as an input or it can be calculated from the
:ref:`Thomas-Fermi Wavelength` formula.
Notice that when :math:`\kappa = 0` we recover the Coulomb Potential.
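A minimal sketch of this pair potential (illustrative only; the function and parameter names are placeholders, not the Sarkas API):

.. code-block:: python

    import numpy as np

    def yukawa_potential(r, z_a, z_b, e2_bar, lambda_tf):
        """Screened Coulomb (Yukawa) pair potential; lambda_tf -> infinity recovers Coulomb."""
        return z_a * z_b * e2_bar * np.exp(-r / lambda_tf) / r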
.. _egs_pot:
Exact Gradient-corrected Screened Yukawa Potential
--------------------------------------------------
The Yukawa potential is derived on the assumption that the electron gas behaves as an ideal Fermi gas.
Improvements in this theory can be achieved by considering density gradients and exchange-correlation effects.
Stanton and Murillo :cite:`Stanton2015`, using a DFT formalism, derived an exact-gradient corrected ion pair potential
across a wide range of densities and temperatures.
The exact-gradient screened (EGS) potential introduces new parameters that can be easily calculated from initial inputs.
Density gradient corrections to the free energy functional lead to the first parameter, :math:`\nu`,
.. math::
\nu = - \frac{3\lambda}{\pi^{3/2}} \frac{4\pi \bar{e}^2 \beta }{\Lambda_{e}} \frac{d}{d\eta} \mathcal I_{-1/2}(\eta),
where :math:`\lambda` is a correction factor; :math:`\lambda = 1/9` for the true gradient corrected Thomas-Fermi model
and :math:`\lambda = 1` for the traditional von Weizsaecker model, :math:`\mathcal I_{-1/2}[\eta_0]` is the
:ref:`Fermi Integral` of order :math:`-1/2`, and :math:`\Lambda_e` is the :ref:`de Broglie wavelength` of the electrons.
In the case :math:`\nu < 1` the EGS potential takes the form
.. math::
U_{ab}(r) = \frac{Z_a Z_b \bar{e}^2 }{2r}\left [ ( 1+ \alpha ) e^{-r/\lambda_-} + ( 1 - \alpha) e^{-r/\lambda_+} \right ],
with
.. math::
\lambda_\pm^2 = \frac{\nu \lambda_{\textrm{TF}}^2}{2b \pm 2b\sqrt{1 - \nu}}, \quad \alpha = \frac{b}{\sqrt{b - \nu}},
where the parameter :math:`b` arises from exchange-correlation contributions, see below.
On the other hand, when :math:`\nu > 1`, the pair potential has the form
.. math::
U_{ab}(r) = \frac{Z_a Z_b \bar{e}^2}{r}\left [ \cos(r/\gamma_-) + \alpha' \sin(r/\gamma_-) \right ] e^{-r/\gamma_+}
with
.. math::
\gamma_\pm^2 = \frac{\nu\lambda_{\textrm{TF}}^2}{\sqrt{\nu} \pm b}, \quad \alpha' = \frac{b}{\sqrt{\nu - b}}.
Neglecting exchange-correlation effects leads to :math:`b = 1`, otherwise
.. math::
b = 1 - \frac{2}{8} \frac{1}{k_{\textrm{F}}^2 \lambda_{\textrm{TF}}^2 } \left [ h\left ( \Theta \right ) - 2 \Theta h'(\Theta) \right ]
where :math:`k_{\textrm{F}}` is the Fermi wavenumber and :math:`\Theta = (\beta E_{\textrm{F}})^{-1}` is the electron
:ref:`Degeneracy Parameter` calculated from the :ref:`Fermi Energy`.
.. math::
h \left ( \Theta \right) = \frac{N(\Theta)}{D(\Theta)}\tanh \left( \Theta^{-1} \right ),
.. math::
N(\Theta) = 1 + 2.8343\Theta^2 - 0.2151\Theta^3 + 5.2759\Theta^4,
.. math::
D \left ( \Theta \right ) = 1 + 3.9431\Theta^2 + 7.9138\Theta^4.
.. _qsp_pot:
Quantum Statistical Potentials
------------------------------
An extensive review on Quantum Statistical Potentials is given in :cite:`Jones2007`. The following module uses that as
the main reference.
Quantum Statistical Potentials are defined by three terms
.. math::
U(r) = U_{\textrm{pauli}}(r) + U_{\textrm{coul}}(r) + U_{\textrm{diff} }(r)
where
.. math::
U_{\textrm{pauli}}(r) = - k_BT \ln \left [ 1 - \frac{1}{2} \exp \left ( - 2\pi r^2/ \Lambda^2 \right ) \right ]
is due to the Pauli exclusion principle and it accounts for spin-averaged effects,
.. math::
U_{\textrm{coul}}(r) = \frac{Z_a Z_b \bar{e}^2}{r}
is the usual Coulomb interaction between two charged particles with charge numbers :math:`Z_a,Z_b`,
and :math:`U_{\textrm{diff}}(r)` is a diffraction term. There are two possibilities for
the diffraction term. The most common is the Deutsch potential
.. math::
U_{\textrm{deutsch}}(r) = \frac{Z_a Z_b \bar{e}^2}{r} e^{ - 2\pi r/\Lambda_{ab}}.
The second most common form is the Kelbg potential
.. math::
U_{\textrm{kelbg}}(r) = - \frac{Z_a Z_b \bar{e}^2}{r} \left [ e^{- 2 \pi r^2/\Lambda_{ab}^2 }
- \sqrt{2} \pi \frac{r}{\Lambda_{ab}} \textrm{erfc} \left ( \sqrt{ 2\pi} r/ \Lambda_{ab} \right )
\right ]
In the above equations the screening length :math:`\Lambda_{ab}` is the thermal de Broglie wavelength
between the two charges defined as
.. math::
\Lambda_{ab} = \sqrt{\frac{2\pi \hbar^2}{\mu_{ab} k_BT}}, \quad \mu_{ab} = \frac{m_a m_b}{m_a + m_b}
Note that the de Broglie wavelength is defined differently in :cite:`Hansen1981` hence the factor of :math:`2\pi` in
the exponential.
The long range part of the potential is computed using the PPPM algorithm where only the
:math:`U_{\textrm{coul}}(r)` term is split into a short range and long range part.
The choice of this potential is due to its widespread use in the High Energy Density Physics community.
.. _moliere_pot:
Moliere Potential
-----------------
Moliere-type potentials have the form
.. math::
\phi(r) = \frac{Z_a Z_b \bar{e}^2}{r} \left [ \sum_{j}^{3} C_j e^{-b_j r} \right]
with the constraint
.. math::
\sum_{j}^{3} C_j = 1
More information can be found in :cite:`Wilson1977`.
.. _lj_pot:
Lennard Jones
-------------
Sarkas supports the general form of the multispecies Lennard-Jones potential
.. math::
U_{\mu\nu}(r) = k \epsilon_{\mu\nu} \left [ \left ( \frac{\sigma_{\mu\nu}}{r}\right )^m -
\left ( \frac{\sigma_{\mu\nu}}{r}\right )^n \right ],
where
.. math::
k = \frac{n}{m-n} \left ( \frac{n}{m} \right )^{\frac{m}{n-m}}.
In the case of multispecies liquids we use the `Lorentz-Berthelot <https://en.wikipedia.org/wiki/Combining_rules>`_
mixing rules
.. math::
\epsilon_{12} = \sqrt{\epsilon_{11} \epsilon_{22}}, \quad \sigma_{12} = \frac{\sigma_{11} + \sigma_{22}}{2}.
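As a sketch, the prefactor :math:`k` and the mixing rules can be computed as follows (illustrative helper functions, not the Sarkas API):

.. code-block:: python

    import numpy as np

    def lj_prefactor(m, n):
        """Prefactor k of the generalized m-n Lennard-Jones potential."""
        return (n / (m - n)) * (n / m) ** (m / (n - m))

    def lorentz_berthelot(eps_11, eps_22, sigma_11, sigma_22):
        """Lorentz-Berthelot mixing rules for the cross interaction."""
        return np.sqrt(eps_11 * eps_22), 0.5 * (sigma_11 + sigma_22)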
|
/sarkas-1.0.2.tar.gz/sarkas-1.0.2/docs/theory/potentials.rst
| 0.9314 | 0.863966 |
potentials.rst
|
pypi
|
===========
Force Error
===========
The force error is the error incurred when we cut off the potential interaction beyond a certain distance. Following the works
of :cite:`Kolafa1992,Stern2008,Dharuman2017` we define the total force error for our P3M algorithm as
.. math::
\Delta F_{\textrm{tot}} = \sqrt{ \Delta F_{\mathcal R}^2 + \Delta F_{\mathcal F}^2 }
where :math:`\Delta F_{\mathcal R}` is the error obtained in the PP part of the force calculation and
:math:`\Delta F_{\mathcal F}` is the error obtained in the PM part, the subscripts :math:`\mathcal{R, F}` stand for
real space and Fourier space respectively. :math:`\Delta F_{\mathcal R}` is calculated as follows
.. math::
\Delta F_{\mathcal R} = \sqrt{\frac{N}{V} } \left [ \int_{r_c}^{\infty} d^3r
\left | \nabla \phi_{\mathcal R}( \mathbf r) \right |^2 \right ]^{1/2},
where :math:`\phi_{\mathcal R}( \mathbf r)` is the short-range part of the chosen potential. In our example case of a
Yukawa potential we have
.. math::
\phi_{\mathcal R}(r) = \frac{Q^2}{2r}
\left [ e^{- \kappa r} \text{erfc} \left( \alpha r - \frac{\kappa}{2\alpha} \right )
+ e^{\kappa r} \text{erfc} \left( \alpha r + \frac{\kappa}{2\alpha} \right ) \right ],
where :math:`\kappa, \alpha` are the dimensionless screening parameter and Ewald parameter respectively and, for the
sake of clarity, we have a charge :math:`Q = Ze/\sqrt{4\pi \epsilon_0}` with an ionization state of :math:`Z = 1`. Integrating this potential,
and neglecting fast decaying terms, we find
.. math::
\Delta F_{\mathcal R} \simeq 2 Q^2 \sqrt{\frac{N}{V}} \frac{e^{-\alpha^2 r_c^2}}{\sqrt{r_c}} e^{-\kappa^2/4 \alpha^2}.
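This estimate is straightforward to evaluate numerically; a sketch (placeholder function, not the Sarkas implementation) is:

.. code-block:: python

    import numpy as np

    def pp_force_error(q, n_over_v, r_c, alpha, kappa):
        """Approximate real-space (PP) force error for the Yukawa P3M splitting above."""
        return (2.0 * q**2 * np.sqrt(n_over_v) / np.sqrt(r_c)
                * np.exp(-alpha**2 * r_c**2) * np.exp(-kappa**2 / (4.0 * alpha**2)))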
On the other hand :math:`\Delta F_{\mathcal F}` is calculated from the following formulas
.. math::
\Delta F_{\mathcal F} = \sqrt{\frac{N}{V}} \frac{Q^2 \chi}{\sqrt{V^{1/3}}}
.. math::
\chi^2V^{2/3} = \left ( \sum_{\mathbf k \neq 0} G_{\mathbf k}^2 |\mathbf k |^2 \right )
- \sum_{\mathbf n} \left [ \frac{\left ( \sum_{\mathbf m} \hat{U}_{\mathbf{k + m}}^2
G_{\mathbf{k+m}} \mathbf{k_n} \cdot \mathbf{k_{n + m}} \right )^2 }{ \left( \sum_{\mathbf m} \hat{U}_{\mathbf{k_{n+m}}}^2 \right )^2 |\mathbf{k_{n} }|^2 } \right ].
This is a lot to take in, so let's unpack it. The first term is the RMS of the force field in Fourier space
obtained from solving Poisson's equation :math:`-\nabla^2 \phi(\mathbf r) = \delta( \mathbf r - \mathbf r')` in Fourier
space. In a raw Ewald algorithm this term would be the PM part of the force. However, the P3M variant
solves Poisson's equation on a mesh, hence the second term, which is none other than the RMS of the force obtained on the mesh.
:math:`G_{\mathbf k}` is the optimal Green's function which for the Yukawa potential is
.. math::
G_{\mathbf k} = \frac{4\pi e^{-( \kappa^2 + \left |\mathbf k \right |^2)/(4\alpha^2)} }{\kappa^2 + |\mathbf {k}|^2}
where
.. math::
\mathbf k ( n_x, n_y, n_z) = \mathbf{k_n} = \left ( \frac{2 \pi n_x}{L_x},
\frac{2 \pi n_y}{L_y},
\frac{2 \pi n_z}{L_z} \right ).
:math:`\hat{U}_{\mathbf k}` is the Fourier transform of the B-spline of order :math:`p`
.. math::
\hat U_{\mathbf{k_n}} = \left[ \frac{\sin(\pi n_x /M_x) }{ \pi n_x/M_x} \right ]^p
\left[ \frac{\sin(\pi n_y /M_y) }{ \pi n_y/M_y} \right ]^p
\left[ \frac{\sin(\pi n_z /M_z) }{ \pi n_z/M_z} \right ]^p,
where :math:`M_{x,y,z}` is the number of mesh points along each direction. Finally the :math:`\mathbf{m}` refers to the
triplet of grid indices :math:`(m_x,m_y,m_z)` that contribute to aliasing. Note that in the above equations
as :math:`\kappa \rightarrow 0` (Coulomb limit), we recover the corresponding error estimate for the Coulomb potential.
The reason for this discussion is that by inverting the above equations we can find optimal parameters
:math:`r_c,\; \alpha` given some desired errors :math:`\Delta F_{\mathcal {R,F}}`. While
the equation for :math:`\Delta F_{\mathcal R}` can be easily inverted for :math:`r_c`, such a task seems impossible for
:math:`\Delta F_{\mathcal F}` without having to calculate a Green's function for each chosen :math:`\alpha`. As you can
see in the second part of the output, the time it takes to calculate :math:`G_{\mathbf k}` is on the order of seconds,
thus, a loop over several :math:`\alpha` values would be very time consuming. Fortunately researchers
have calculated an analytical approximation allowing for the exploration of the whole :math:`r_c,\; \alpha` parameter
space :cite:`Dharuman2017`. The equations of this approximation are
.. math::
\Delta F_{\mathcal F}^{(\textrm{approx})} \simeq Q^2 \sqrt{\frac{N}{V}} A_{\mathcal F}^{1/2},
.. math::
A_{\mathcal F} \simeq \frac{3}{2\pi^2} \sum_{m = 0}^{p -1 } C_{m}^{(p)} \left ( \frac{h}2 \right )^{2 (p + m)}
\frac{2}{1 + 2(p + m)} \beta(p,m),
.. math::
\beta(p,m) = \int_0^{\infty} dk \; G_k^2 k^{2(p + m + 2)},
where :math:`h = L_x/M_x` and the coefficients :math:`C_m^{(p)}` are listed in Table I of :cite:`Deserno1998`.
Finally, by calculating
.. math::
\Delta F_{\textrm{tot}}^{(\textrm{apprx})}( r_c, \alpha) = \sqrt{ \Delta F_{\mathcal R}^2 +
( \Delta F_{\mathcal F}^{(\textrm{approx})} ) ^2 }
we are able to investigate which parameters :math:`r_c,\; \alpha` are optimal for our simulation.
|
/sarkas-1.0.2.tar.gz/sarkas-1.0.2/docs/theory/force_error.rst
| 0.920518 | 0.979393 |
force_error.rst
|
pypi
|
# Thermostats
## Berendsen Thermostat
The Berendsen Thermostat (BT) uses a tapered velocity scaling approach. In a strict velocity scaling approach
the temperature $T_e$ is estimated, through a quantity proportional to $\langle v^2 \rangle$,
and the velocities are scaled to values consistent with the desired temperature $T_d$,
as in $v_i \mapsto \alpha v_i$. Since such an abrupt rescaling is not fully consistent with the underlying physical laws,
it is preferable to use the same simple algorithm but more gently, so that the dynamics during the thermostat period stays closer to the underlying equations of motion.
In the BT we begin with a model for the temperature as we would like to see it evolve over a slower
timescale $\tau_{B}$. One model is
$$
\frac{dT}{dt} = \frac{T_d - T}{\tau_{B}},
$$
This equation can be solved analytically to yield
$$
T(t) = T(0)e^{-t/\tau_B} + \left(1 - e^{-t/\tau_B} \right)T_d ,
$$
which can be seen to transition from the initial temperature $T(0)$ to the desired temperature $T_d$
on a time scale of $\tau_{B}$. By choosing $\tau_{B}$ to be many timesteps we can eventually equilibrate
the system while allowing it to explore configurations closer to the real (not velocity scaled) dynamics.
To implement BT we discretize the BT model across one time step to obtain
$$
T(t + \Delta t) = T(t) + \frac{\Delta t}{\tau_B}\left(T_d - T(t) \right).
$$
We want to scale the current velocities such that this new temperature $T(t+\Delta t)$ is achieved,
because that is the temperature prescribed by the BT.
Finding the ratio then of the target temperature and the current temperature, we get
$$
\frac{T(t + \Delta t)}{T(t) } = 1+ \frac{\Delta t}{\tau_{B}}\left(\frac{T_d}{T(t) } - 1 \right).
$$
Taking the square root of this yields the scaling factor for the velocities:
$$
\alpha = \sqrt{ 1+ \frac{\Delta t}{\tau_{B}}\left(\frac{T_d}{T(t) } - 1 \right) }.
$$
Below we show an example notebook that runs Sarkas for different $\tau_B$ values.
```
# Import the usual libraries
%pylab
%matplotlib inline
import os
plt.style.use('MSUstyle')
# Import sarkas
from sarkas.processes import Simulation, PostProcess, PreProcess
# Create the file path to the YAML input file
input_file_name = os.path.join('input_files', 'yocp_cgs_pp_therm.yaml' )
preproc = PreProcess(input_file_name)
preproc.setup(read_yaml =True)
preproc.run()
```
We select six different values of $\tau_B$ and for each of them run a simulation.
```
taus = np.array([ 1.0, 2.0, 5.0, 10., 50., 100. ])
for i, tau in enumerate(taus):
args = {
'Thermostat': {'relaxation_timestep': 100,
'berendsen_tau': tau}, # Change tau for each simulation
"IO": # Store all simulations' data in simulations_dir,
# but save the dumps in different subfolders (job_dir)
{
"simulations_dir": 'Berendsen_runs',
"job_dir": "tau_{}".format(int(tau) ),
"verbose": False # This is so not to print to screen for every run
},
}
# Run the simulation.
sim = Simulation(input_file_name)
sim.setup(read_yaml=True, other_inputs=args)
sim.run()
print('Tau = {} Done'.format(tau))
```
Now we plot the temperature evolution to show the effect of changing $\tau_B$.
```
fig, axt = plt.subplots(1,1, figsize = (12,9))
for i, tau in enumerate(taus):
args = {
'Thermostat': {'relaxation_timestep': 100,
'berendsen_tau': tau}, # Change tau for each simulation
"IO": # Store all simulations' data in simulations_dir,
# but save the dumps in different subfolders (job_dir)
{
"simulations_dir": 'Berendsen_runs',
"job_dir": "tau_{}".format(int(tau) ),
"verbose": False # This is so not to print to screen for every run
},
}
postproc = PostProcess(input_file_name)
postproc.setup(read_yaml=True, other_inputs = args)
postproc.therm.setup(postproc.parameters, phase = 'equilibration')
postproc.therm.parse()
postproc.therm.plot(
scaling = (postproc.therm.dt, postproc.parameters.eV2K),
y = 'Temperature',
ylabel = 'Temperature [eV]',
xlabel = 'Number of timesteps', logx = True, ax = axt)
axt.axhline(postproc.thermostat.temperatures_eV[0], ls = '--', color = 'r')
axt.legend([
r'$\tau_B = 1.0$',
r'$\tau_B = 2.0$',
r'$\tau_B = 5.0$',
r'$\tau_B = 10.0$',
r'$\tau_B = 50.0$',
r'$\tau_B = 100.0$',
r'$T_d$'])
```
As you can see, the temperature increases for the first hundred time steps. This is because we selected the option `relaxation_timestep: 100`, which delays the thermostat so that the system can convert its initial potential energy into kinetic energy.
|
/sarkas-1.0.2.tar.gz/sarkas-1.0.2/docs/theory/Berendsen_NB/Berendsen_Thermostat.ipynb
| 0.474631 | 0.985841 |
Berendsen_Thermostat.ipynb
|
pypi
|
from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
from typing import Any, List
from uuid import uuid4
from sarmat.core.actions import (
ADestinationPoint,
AGeoLocation,
AJourney,
AJourneyBunch,
ARoute,
AStation,
)
from sarmat.core.behavior import (
BhCrew,
BhDestinationPoint,
BhDirection,
BhGeo,
BhPeriod,
BhPeriodItem,
BhPermit,
BhRoad,
BhRoadName,
BhRoute,
BhRouteItem,
BhStation,
BhJourney,
BhJourneyBunch,
BhJourneyBunchItem,
BhVehicle,
NoActionBehavior,
)
from sarmat.core.containers import (
CrewStructure,
DestinationPointStructure,
DirectionStructure,
GeoStructure,
PeriodItemStructure,
PeriodStructure,
PermitStructure,
RoadStructure,
RoadNameStructure,
RouteStructure,
RouteItemStructure,
StationStructure,
JourneyBunchStructure,
JourneyBunchItemStructure,
JourneyStructure,
VehicleStructure,
)
class SarmatCreator:
"""Класс реализует паттерн "Фабричный метод", создает классы с реализованной в них логикой"""
    # tag storage
role_tags = defaultdict(list)
@classmethod
def register_class(cls, tag: str, cls_behavior: Any) -> None:
"""
        Register a behavior class
        Args:
            tag: tag
            cls_behavior: behavior class
"""
sub_tags = tag.split('.')
for sub_tag in sub_tags:
classes = cls.role_tags[sub_tag]
if classes and cls_behavior in classes:
idx = classes.index(cls_behavior)
cls.role_tags[sub_tag][idx] = cls_behavior
else:
cls.role_tags[sub_tag].append(cls_behavior)
def _get_behavior_classes(self, tag: str) -> List[Any]:
"""Получение списка поведенческих классов по тегу"""
sub_tags = tag.split('.')
roles = []
for item in sub_tags:
roles += self.role_tags.get(item) or []
return roles or [NoActionBehavior]
def create_direction(self, tag: str, **kwargs):
"""Создание объекта 'Направления'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhDirection], classes, [DirectionStructure])
Direction = type('Direction', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
return Direction(**kwargs)
def create_destination_point(self, tag: str, **kwargs):
"""Создание объекта 'Пункт назначения'"""
classes = self._get_behavior_classes(tag)
parents = chain([ADestinationPoint, BhDestinationPoint], classes, [DestinationPointStructure])
DestinationPointObject = type(
'DestinationPointObject',
tuple(parents),
{"permission_tag": tag, "controller": self},
)
if kwargs['state']:
kwargs['state'] = self.create_geo_object(tag, **kwargs['state'])
if kwargs.get('id') is None:
kwargs['id'] = 0
return DestinationPointObject(**kwargs)
def create_geo_object(self, tag: str, **kwargs):
"""Создание объекта 'Географическая точка'"""
classes = self._get_behavior_classes(tag)
parents = chain([AGeoLocation, BhGeo], classes, [GeoStructure])
GeoObject = type('GeoObject', tuple(parents), {"permission_tag": tag, "controller": self})
parent = kwargs.get('parent')
if parent:
kwargs['parent'] = self.create_geo_object(tag, **parent)
if kwargs.get('id') is None:
kwargs['id'] = 0
return GeoObject(**kwargs)
def create_road_name(self, tag: str, **kwargs):
"""Создание объекта 'Описание дороги'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhRoadName], classes, [RoadNameStructure])
RoadName = type('RoadName', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
return RoadName(**kwargs)
    # TODO: create base behavior classes for the remaining containers:
# JourneyProgressStructure, JourneyScheduleStructure, IntervalStructure
def create_route_item(self, tag: str, **kwargs):
"""Создание объекта 'Пункт маршрута'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhRouteItem], classes, [RouteItemStructure])
RouteItem = type('RouteItem', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('road'):
kwargs['road'] = self.create_road(tag, **kwargs['road'])
if kwargs.get('station'):
kwargs['station'] = self.create_station(tag, **kwargs['station'])
if kwargs.get('point'):
kwargs['point'] = self.create_destination_point(tag, **kwargs['point'])
return RouteItem(**kwargs)
def create_route(self, tag: str, **kwargs):
"""Создание объекта 'Маршрут'"""
classes = self._get_behavior_classes(tag)
parents = chain([ARoute, BhRoute], classes, [RouteStructure])
Route = type('Route', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('first_station'):
kwargs['first_station'] = self.create_station(tag, **kwargs['first_station'])
if kwargs.get('structure'):
kwargs['structure'] = [self.create_route_item(tag, **item) for item in kwargs['structure']]
if kwargs.get('direction'):
kwargs['direction'] = [self.create_direction(tag, **item) for item in kwargs['direction']]
return Route(**kwargs)
def create_station(self, tag: str, **kwargs):
"""Создание объекта 'Станция'"""
classes = self._get_behavior_classes(tag)
parents = chain([AStation, BhStation], classes, [StationStructure])
Station = type('Station', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('point'):
kwargs['point'] = self.create_destination_point(tag, **kwargs['point'])
return Station(**kwargs)
def create_journey(self, tag: str, **kwargs):
"""Создание объекта 'Рейс'"""
classes = self._get_behavior_classes(tag)
parents = chain([AJourney, BhJourney], classes, [JourneyStructure])
        Journey = type('Journey', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('first_station'):
kwargs['first_station'] = self.create_station(tag, **kwargs['first_station'])
if kwargs.get('structure'):
kwargs['structure'] = [self.create_route_item(tag, **item) for item in kwargs['structure']]
if kwargs.get('direction'):
kwargs['direction'] = [self.create_direction(tag, **item) for item in kwargs['direction']]
return Journey(**kwargs)
def create_road(self, tag: str, **kwargs):
"""Создание объекта 'Дорога'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhRoad], classes, [RoadStructure])
Road = type('Road', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('start_point'):
kwargs['start_point'] = self.create_destination_point(tag, **kwargs['start_point'])
if kwargs.get('end_point'):
kwargs['end_point'] = self.create_destination_point(tag, **kwargs['end_point'])
if kwargs.get('road'):
kwargs['road'] = self.create_road_name(tag, **kwargs['road'])
return Road(**kwargs)
def create_journey_bunch_item(self, tag: str, **kwargs):
"""Создание объекта 'Элемент связки'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhJourneyBunchItem], classes, [JourneyBunchItemStructure])
JourneyBunchItem = type('JourneyBunchItem', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('journey'):
kwargs['journey'] = self.create_journey(tag, **kwargs['journey'])
return JourneyBunchItem(**kwargs)
def create_journey_bunch(self, tag: str, **kwargs):
"""Создание объекта 'Связка рейсов'"""
classes = self._get_behavior_classes(tag)
parents = chain([AJourneyBunch, BhJourneyBunch], classes, [JourneyBunchStructure])
JourneyBunch = type('JourneyBunch', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('journeys'):
kwargs['journeys'] = [self.create_journey_bunch_item(tag, **i) for i in kwargs['journeys']]
return JourneyBunch(**kwargs)
def create_period_item(self, tag: str, **kwargs):
"""Создание объекта 'Период'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhPeriodItem], classes, [PeriodItemStructure])
PeriodItem = type('PeriodItem', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
return PeriodItem(**kwargs)
def create_period(self, tag: str, **kwargs):
classes = self._get_behavior_classes(tag)
parents = chain([BhPeriod], classes, [PeriodStructure])
Period = type('Period', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
if kwargs.get('period'):
kwargs['period'] = self.create_period_item(tag, **kwargs['period'])
if kwargs.get('periods'):
kwargs['periods'] = [self.create_period_item(tag, **item) for item in kwargs['periods']]
return Period(**kwargs)
def create_crew(self, tag: str, **kwargs):
classes = self._get_behavior_classes(tag)
parents = chain([BhCrew], classes, [CrewStructure])
Crew = type('Crew', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
return Crew(**kwargs)
def create_permit(self, tag: str, **kwargs):
"""Создание объекта 'Путевой лист'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhPermit], classes, [PermitStructure])
Permit = type('Permit', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = uuid4()
if kwargs.get('crew'):
kwargs['crew'] = [self.create_crew(tag, **item) for item in kwargs['crew']]
if kwargs.get('vehicle'):
kwargs['vehicle'] = [self.create_vehicle(tag, **item) for item in kwargs['vehicle']]
return Permit(**kwargs)
def create_vehicle(self, tag: str, **kwargs):
"""Создание объекта 'Транспортное средство'"""
classes = self._get_behavior_classes(tag)
parents = chain([BhVehicle], classes, [VehicleStructure])
Vehicle = type('Vehicle', tuple(parents), {"permission_tag": tag, "controller": self})
if kwargs.get('id') is None:
kwargs['id'] = 0
return Vehicle(**kwargs)
|
/core/factory.py
| 0.587943 | 0.260295 |
factory.py
|
pypi
|
from datetime import time, date
from typing import List
from .geo_locations import DirectionStructure, DestinationPointStructure, RoadNameStructure
from .sarmat import SarmatStructure, nested_dataclass
from ..constants import PeriodType, RoadType, StationType, JourneyType
@nested_dataclass
class StationStructure(SarmatStructure):
    """Stations (passenger boarding and drop-off points)"""
    station_type: StationType            # station type
    name: str                            # name
    point: DestinationPointStructure     # nearest settlement
    address: str = ""                    # postal address
@nested_dataclass
class RoadStructure(SarmatStructure):
    """Roads"""
    start_point: DestinationPointStructure    # start of the road
    end_point: DestinationPointStructure      # end of the road
    direct_travel_time_min: int               # travel time in the forward direction
    reverse_travel_time_min: int              # travel time in the reverse direction
    direct_len_km: float                      # distance in the forward direction
    reverse_len_km: float                     # distance in the reverse direction
    road_type: RoadType                       # road surface type
    road: RoadNameStructure = None            # road classification
@nested_dataclass
class RouteItemStructure(SarmatStructure):
    """Route composition item"""
    length_from_last_km: float                 # distance from the previous point
    travel_time_min: int                       # travel time from the previous point in minutes
    road: RoadStructure = None                 # road
    order: int = 1                             # order in the sequence
    station: StationStructure = None           # station
    point: DestinationPointStructure = None    # nearest settlement
    stop_time_min: int = None                  # stop time in minutes
@nested_dataclass
class RouteStructure(SarmatStructure):
    """Route description"""
    name: str                                   # name
    first_station: StationStructure             # departure station
    structure: List[RouteItemStructure]         # route composition
    direction: List[DirectionStructure] = None  # directions
    comments: str = None                        # comments on the route
    number: int = None                          # route number
    literal: str = ""                           # letter suffix
@nested_dataclass
class JourneyStructure(SarmatStructure):
    """Journey attributes"""
    number: float                               # journey number
    name: str                                   # name
    first_station: StationStructure             # departure point
    structure: List[RouteItemStructure]         # journey composition
    journey_type: JourneyType                   # journey type
    departure_time: time                        # departure time
    bunch: int = None                           # membership in a journey bunch
    literal: str = None                         # letter suffix
    is_chartered: bool = False                  # chartered journey flag
    need_control: bool = False                  # named-ticket sales and monitoring flag
    season_begin: date = None                   # season start
    season_end: date = None                     # season end
    direction: List[DirectionStructure] = None  # directions
    comments: str = None                        # comments on the journey
@nested_dataclass
class JourneyBunchItemStructure(SarmatStructure):
    """Attributes of an item in a journey bunch"""
    journey: JourneyStructure    # journey
    stop_interval: int           # idle time in hours
@nested_dataclass
class JourneyBunchStructure(SarmatStructure):
    """Journey bunch attributes"""
    journeys: List[JourneyBunchItemStructure]    # bunch items
    name: str = None                             # bunch name
@nested_dataclass
class PeriodItemStructure(SarmatStructure):
    """Items of a composite period"""
    period_type: PeriodType    # period type
    cypher: str                # cypher (constant)
    name: str                  # name
    value: List[int]           # list of values
    is_active: bool            # activity flag
@nested_dataclass
class PeriodStructure(SarmatStructure):
    """Period"""
    cypher: str                                  # system name
    name: str                                    # constant
    periods: List[PeriodItemStructure] = None    # composite period description
    period: PeriodItemStructure = None           # simple period description
|
/core/containers/traffic_management.py
| 0.505615 | 0.530723 |
traffic_management.py
|
pypi
|
from datetime import date, datetime
from typing import List
from uuid import UUID
from .sarmat import BaseSarmatStructure, SarmatStructure, nested_dataclass
from .traffic_management import PeriodStructure, StationStructure, JourneyStructure
from .vehicle import PermitStructure
from ..constants import JourneyClass, JourneyState
@nested_dataclass
class IntervalStructure(SarmatStructure):
    """Journey execution schedule"""
    journey: JourneyStructure    # journey
    start_date: date             # start date
    interval: PeriodStructure    # service interval
@nested_dataclass
class JourneyProgressStructure(SarmatStructure):
    """Journey progress sheet attributes"""
    id: UUID                     # sheet identifier
    depart_date: date            # journey departure date
    journey: JourneyStructure    # journey
    permit: PermitStructure      # waybill number
@nested_dataclass
class JourneyScheduleStructure(BaseSarmatStructure):
    """Journey progress through automated checkpoints"""
    journey_progress: JourneyProgressStructure    # journey progress sheet
    journey_class: JourneyClass                   # journey classification at this point
    station: StationStructure                     # station
    state: JourneyState                           # journey state
    plan_arrive: datetime = None                  # planned arrival time
    fact_arrive: datetime = None                  # actual arrival time
    plan_depart: datetime = None                  # planned departure time
    fact_depart: datetime = None                  # actual departure time
    platform: str = ""                            # platform
    comment: str = ""                             # comment for the current point
    last_items: List["JourneyScheduleStructure"] = None    # remaining active points of the journey
|
/core/containers/dispatcher.py
| 0.519034 | 0.330336 |
dispatcher.py
|
pypi
|
from dataclasses import asdict, dataclass, fields, is_dataclass
from typing import _GenericAlias
def nested_dataclass(*args, **kwargs):
"""Декоратор подменяет метод __init__ для сборки вложенных структур"""
def wrapper(cls):
check_cls = dataclass(cls, **kwargs)
orig_init = cls.__init__
def __init__(self, *args, **kwargs):
for name, value in kwargs.items():
                # Determine the field type
field_type = cls.__annotations__.get(name, None)
                # Handle nested structures
if isinstance(field_type, str) and field_type == cls.__name__:
field_type = cls
is_data_class = is_dataclass(field_type)
if isinstance(value, (list, tuple, set)):
if isinstance(field_type, list):
field_type = field_type[0]
elif isinstance(field_type, _GenericAlias):
field_type = field_type.__args__[0]
if isinstance(field_type, str) and field_type == cls.__name__:
field_type = cls
is_data_class = is_dataclass(field_type)
value = [
field_type(**item) if is_data_class and isinstance(item, dict) else item
for item in value
]
kwargs[name] = value
elif is_data_class and isinstance(value, dict):
kwargs[name] = field_type(**value)
orig_init(self, *args, **kwargs)
check_cls.__init__ = __init__
return check_cls
return wrapper(args[0]) if args else wrapper
class BaseSarmatStructure:
"""Базовая структура"""
@property
def sarmat_fields(self):
return [fld.name for fld in fields(self)]
def as_dict(self):
return asdict(self)
@dataclass
class SarmatStructure(BaseSarmatStructure):
"""Основной класс с идентификатором"""
id: int
@dataclass
class PersonStructure:
"""Реквизиты человека"""
last_name: str # фамилия
first_name: str # имя
middle_name: str # отчество
|
/core/containers/sarmat.py
| 0.736685 | 0.175644 |
sarmat.py
|
pypi
|
from collections import defaultdict
from sarmat.core.context.containers import (
JourneyBunchContainer,
RouteContainer,
RouteItemContainer,
)
from sarmat.core.exceptions import WrongValueError
from .base_verifications import VerifyOnEmptyValues
class StationVerifier(VerifyOnEmptyValues):
"""Класс верификации станций"""
attributes = ['station_type', 'name', 'point']
class RouteItemVerifier(VerifyOnEmptyValues):
"""Верификауия состава маршрута"""
attributes = ['length_from_last_km', 'travel_time_min', 'order']
def verify(self, subject: RouteItemContainer) -> None: # type: ignore[override]
super().verify(subject)
if not (subject.station or subject.point):
raise WrongValueError(
'Route item must have station or destination point',
)
class RouteVerifier(VerifyOnEmptyValues):
"""Верификауия маршрута"""
attributes = ['name', 'first_station', 'structure']
def verify(self, subject: RouteContainer) -> None: # type: ignore[override]
super().verify(subject)
route_item_verifier = RouteItemVerifier()
for idx, route_item in enumerate(subject.structure):
route_item_verifier.verify(route_item)
if route_item.order != idx+1:
raise WrongValueError(
f'Wrong item number ({route_item.order}). Expected {idx+1}',
)
class JourneyVerifier(RouteVerifier):
"""Верификация рейса"""
attributes = RouteVerifier.attributes + ['journey_type', 'departure_time']
class JourneyBunchItemVerifier(VerifyOnEmptyValues):
"""Верификация элемента связки"""
attributes = ['journey']
class JourneyBunchVerifier(VerifyOnEmptyValues):
"""Верификация связки"""
attributes = ['journeys']
def verify(self, subject: JourneyBunchContainer) -> None: # type: ignore[override]
super().verify(subject)
journey_counters: defaultdict = defaultdict(int)
journey_verifier = JourneyBunchItemVerifier()
for journey in subject.journeys:
journey_verifier.verify(journey)
if journey.id:
journey_counters[journey.id] += 1
not_unique_journeys = [j for j, c in journey_counters.items() if c > 1]
if not_unique_journeys:
            raise WrongValueError(f'Journeys {not_unique_journeys} occur more than once')
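# --- Illustrative sketch (not part of the original module) ---
# Assumption: VerifyOnEmptyValues.verify() raises WrongValueError when any
# attribute named in `attributes` is empty on the verified container. A new
# verifier would then be declared the same way as the classes above, e.g. a
# hypothetical RoadVerifier for road containers:
#
#     class RoadVerifier(VerifyOnEmptyValues):
#         """Класс верификации дорог"""
#         attributes = ['start_point', 'end_point', 'road_type']
#
#     RoadVerifier().verify(road_container)  # raises WrongValueError on empty fields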
|
/core/verification/traffic_management_verifications.py
| 0.558929 | 0.208441 |
traffic_management_verifications.py
|
pypi
|
from typing import List, Dict
from sarmat.core.exceptions import SarmatException
from sarmat.core.constants import ErrorCode, LocationType
from sarmat.core.context.containers import GeoContainer
from .base_verifications import VerifyOnEmptyValues
class GeoVerifier(VerifyOnEmptyValues):
"""Класс верификации гео объектов"""
attributes: List[str] = ['name', 'location_type']
# NOTE: Проверка на правильность построения иерархии.
# У страны не может быть родительских записей.
# Для республик, областей и районов в качестве родителя может выступать только страна.
possible_parent_types: Dict[LocationType, List[LocationType]] = {
LocationType.COUNTRY: [],
LocationType.DISTRICT: [LocationType.COUNTRY],
LocationType.REGION: [LocationType.COUNTRY],
LocationType.PROVINCE: [LocationType.COUNTRY],
LocationType.AREA: [LocationType.COUNTRY, LocationType.DISTRICT, LocationType.PROVINCE],
}
def verify(self, subject: GeoContainer) -> None: # type: ignore[override]
super().verify(subject)
if subject.parent:
parent_location_types: List[LocationType] = self.possible_parent_types[subject.location_type]
if subject.parent.location_type not in parent_location_types:
raise SarmatException(
f'Wrong parent type of location: {subject.parent.location_type}',
err_code=ErrorCode.WRONG_VALUE,
)
if subject.parent.id == subject.id:
raise SarmatException(
"Geo object can't be a parent for themself",
err_code=ErrorCode.WRONG_VALUE,
)
class DestinationPointVerifier(VerifyOnEmptyValues):
"""Класс верификации пунктов назначения"""
attributes = ['name', 'state', 'point_type']
class DirectionVerifier(VerifyOnEmptyValues):
"""Класс верификации направлений"""
attributes = ['name']
class RoadNameVerifier(VerifyOnEmptyValues):
"""Класс верификации наименований дорог"""
attributes = ['cypher']
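# --- Illustrative sketch (not part of the original module) ---
# Assumed usage of GeoVerifier and the parent-type rules above (the GeoContainer
# constructor arguments are assumed here purely for illustration):
#
#     country = GeoContainer(id=1, name='Россия', location_type=LocationType.COUNTRY)
#     region = GeoContainer(id=2, name='Область', location_type=LocationType.REGION, parent=country)
#     GeoVerifier().verify(region)   # passes: COUNTRY is an allowed parent for REGION
#
#     area = GeoContainer(id=3, name='Район', location_type=LocationType.AREA, parent=region)
#     GeoVerifier().verify(area)     # raises: REGION is not in possible_parent_types[AREA]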
|
/core/verification/geo_verifications.py
| 0.542379 | 0.397704 |
geo_verifications.py
|
pypi
|
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from sarmat.core.exceptions import IncomparableTypesError
from sarmat.core.context.models import DestinationPointModel, StationModel
@dataclass
class ScheduleItem:
    """Description of one leg of a route"""
    station: Optional[StationModel]  # departure station
    point: Optional[DestinationPointModel]  # destination point
    len_from_start: float  # distance from the starting point
    len_from_last: float  # distance from the previous point
    time_from_start: int  # travel time from the starting point, in minutes
    time_from_last: int  # travel time from the previous point, in minutes
    depart: Optional[datetime]  # departure time
    arrive: Optional[datetime]  # arrival time
    stop_time: Optional[int]  # stop time in minutes
@dataclass
class ScheduleSlice:
    """Description of a slice of a route"""
    station: str
    point: str
    len_from_start: float  # distance from the starting point
    len_from_last: float  # distance from the previous point
    time_from_start: int  # travel time from the starting point, in minutes
    time_from_last: int  # travel time from the previous point, in minutes
    depart: Optional[datetime]  # departure time
    arrive: Optional[datetime]  # arrival time
    stop_time: Optional[int]  # stop time in minutes
class BhNoAction:
    """Class that forbids any actions on an object"""
class BhCompare(BhNoAction):
    """Comparison operations for objects"""
    def __eq__(self, other):
        """Equality check"""
        self._check_type(other)
    def __ne__(self, other):
        """Inequality check"""
        self._check_type(other)
    def __lt__(self, other):
        """Less-than check"""
        self._check_type(other)
    def __gt__(self, other):
        """Greater-than check"""
        self._check_type(other)
    def __le__(self, other):
        """Less-than-or-equal check"""
        self._check_type(other)
    def __ge__(self, other):
        """Greater-than-or-equal check"""
        self._check_type(other)
    def __contains__(self, item):
        """Membership check"""
    def _compare_classes(self, other):
        return isinstance(other, self.__class__)
    def _check_type(self, other):
        """Type compatibility check. Objects of different types cannot be compared"""
        if not self._compare_classes(other):
            message = f"Objects {other.__class__} and {self.__class__} cannot be compared"
            raise IncomparableTypesError(message)
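# --- Illustrative sketch (not part of the original module) ---
# Minimal demo of the pattern followed by the concrete Bh* classes: call the
# parent method for the type check, then implement the actual comparison.
# The BhTag class below is hypothetical and exists only for this demo.
if __name__ == "__main__":
    class BhTag(BhCompare):
        def __init__(self, name: str):
            self.name = name

        def __eq__(self, other):
            super().__eq__(other)  # raises IncomparableTypesError for foreign types
            return self.name == other.name

        def __ne__(self, other):
            super().__ne__(other)
            return self.name != other.name

    assert BhTag("a") == BhTag("a")
    assert BhTag("a") != BhTag("b")
    try:
        BhTag("a") == "a"
    except IncomparableTypesError:
        pass  # comparison against a non-BhTag object is rejected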
|
/core/behavior/bases.py
| 0.691602 | 0.351701 |
bases.py
|
pypi
|
from sarmat.core.constants import LocationType, SettlementType
from sarmat.core.exceptions import IncomparableTypesError
from .bases import BhCompare
class BhDirection(BhCompare):
"""Базовое поведение объекта 'Направления'"""
compare_error_message = 'Направления сравнению не подлежат'
contains_error_message = 'Направления проверке на вхождение не подлежат'
name: str
def __eq__(self, other):
"""Сравнение на равенство направлений"""
super().__eq__(other)
return self.name == other.name
def __ne__(self, other):
"""Проверка на неравенство направлений"""
super().__ne__(other)
return self.name != other.name
def __lt__(self, item):
"""Проверка сравнение не имеет смысла для направлений"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, item):
"""Проверка сравнение не имеет смысла для направлений"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, item):
"""Проверка сравнение не имеет смысла для направлений"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, item):
"""Проверка сравнение не имеет смысла для направлений"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на вхождение не имеет смысла для направлений"""
raise IncomparableTypesError(self.contains_error_message)
def _compare_classes(self, other):
return isinstance(other, BhDirection)
class BhRoadName(BhDirection):
"""Методы сравнения объекта Дорога"""
compare_error_message = 'Дороги сравнению не подлежат'
contains_error_message = 'Дороги проверке на вхождение не подлежат'
def _compare_classes(self, other):
return isinstance(other, BhRoadName)
class BhGeo(BhCompare):
"""Методы сравнения объекта 'Географический объект'"""
name: str
location_type: LocationType
def __eq__(self, other):
"""Сравнение на равенство географических объектов.
Сравнение происходит либо по идентификаторам,
либо по полям Наименование и Тип объекта.
"""
super().__eq__(other)
return self.location_type == other.location_type and self.name == other.name
def __ne__(self, other):
"""Определение неравенства.
Сравнение происходит либо по идентификаторам,
либо по полям Наименование и Тип объекта.
"""
super().__ne__(other)
return self.location_type != other.location_type or self.name != other.name
def __lt__(self, other):
"""Проверка на <.
Анализируется тип гео. образования.
"""
super().__lt__(other)
same_types = [LocationType.DISTRICT, LocationType.REGION]
if self.location_type in same_types and other.location_type in same_types:
return False
# NOTE: крупные территориальные образования имеют меньший индекс
return self.location_type.value > other.location_type.value
def __gt__(self, other):
"""Проверка на >.
Анализируется тип гео. образования.
"""
super().__gt__(other)
same_types = [LocationType.DISTRICT, LocationType.REGION]
if self.location_type in same_types and other.location_type in same_types:
return False
# NOTE: крупные территориальные образования имеют меньший индекс
return self.location_type.value < other.location_type.value
def __le__(self, other):
"""Проверка на <=.
Анализируется тип гео. образования.
"""
super().__le__(other)
same_types = [LocationType.DISTRICT, LocationType.REGION]
if self.location_type in same_types and other.location_type in same_types:
return True
# NOTE: крупные территориальные образования имеют меньший индекс
return self.location_type.value >= other.location_type.value
def __ge__(self, other):
"""Проверка на >=.
Анализируется тип гео. образования.
"""
super().__ge__(other)
same_types = [LocationType.DISTRICT, LocationType.REGION]
if self.location_type in same_types and other.location_type in same_types:
return True
# NOTE: крупные территориальные образования имеют меньший индекс
return self.location_type.value <= other.location_type.value
def __contains__(self, item):
"""Проверка на вхождение во множество.
Для объекта гео локации происходит проверка родителя.
"""
if not item.parent:
return False
return item.parent == self
def _compare_classes(self, other):
return isinstance(other, BhGeo)
class BhDestinationPoint(BhCompare):
"""Методы сравнения объекта 'Пункт назначения'"""
compare_error_message = 'Пункты назначения сравнивать нельзя'
name: str
point_type: SettlementType
state: object
def __eq__(self, other):
"""Сравнение на равенство пунктов назначения.
Сравнение происходит либо по идентификаторам,
либо по полям Наименование и Тип поселения и Территориальное образование.
"""
super().__eq__(other)
return self.name == other.name and self.state == other.state and self.point_type == other.point_type
def __ne__(self, other):
"""Проверка на неравенство пунктов назначения.
Проверка происходит либо по идентификатору объекта, либо по наименованию, типу поселения
и территориальному образованию.
"""
super().__ne__(other)
return self.name != other.name or self.point_type != other.point_type or self.state != other.state
def __lt__(self, other):
"""Проверка на строгое неравенство не имеет смысла для пунктов назначения"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, other):
"""Проверка на строгое неравенство не имеет смысла для пунктов назначения"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Проверка на нестрогое неравенство не имеет смысла для пунктов назначения"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Проверка на нестрогое неравенство не имеет смысла для пунктов назначения"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на вхождение не имеет смысла для пунктов назначения"""
raise IncomparableTypesError('Пункты назначения проверке на вхождение не подлежат')
def _compare_classes(self, other):
return isinstance(other, BhDestinationPoint)
|
/core/behavior/geo_locations.py
| 0.476092 | 0.463323 |
geo_locations.py
|
pypi
|
from datetime import date, datetime, time, timedelta
from typing import Generator, List
from sarmat.core.constants import PeriodType, RoadType, StationType
from sarmat.core.context.models import (
DestinationPointModel,
JourneyModel,
PeriodItemModel,
RouteItemModel,
StationModel,
)
from sarmat.core.exceptions import IncomparableTypesError, WrongValueError
from .bases import BhCompare, ScheduleItem
class BhStation(BhCompare):
"""Базовое поведение объекта Станция"""
compare_error_message = 'Сравнение станций не предусмотрено'
id: int
name: str
point: DestinationPointModel
station_type: StationType
def __eq__(self, other):
"""Сравнение на равенство станций"""
super().__eq__(other)
if self.id and other.id:
return self.id == other.id
if not (self.id or other.id):
return self.station_type == other.station_type and self.point == other.point and self.name == other.name
return False
def __ne__(self, other):
"""Проверка на неравенство станций"""
super().__ne__(other)
if self.id and other.id:
return self.id != other.id
if not (self.id or other.id):
return self.station_type != other.station_type or self.point != other.point or self.name != other.name
return True
def __lt__(self, item):
"""Проверка на превосходство объекта перед станцией не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, item):
"""Проверка на превосходство станции над объектом не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Проверка на непревосходство станции над объектом не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Проверка на непревосходство объекта перед станцией не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на принадлежность станции к населенному пункту"""
raise IncomparableTypesError('Станции проверке на вхождение не подлежат')
def _compare_classes(self, other):
return isinstance(other, BhStation)
class BhRoad(BhCompare):
"""Базовое поведение объекта 'Дорога'"""
compare_error_message = 'Сравнение дорог не предусмотрено'
id: str
direct_len_km: int
direct_travel_time_min: int
reverse_len_km: int
reverse_travel_time_min: int
road_type: RoadType
def __eq__(self, other):
"""Сравнение на равенство дорог"""
super().__eq__(other)
if self.id and other.id:
return self.id == other.id
if not (self.id or other.id):
return all([
self.direct_travel_time_min == other.direct_travel_time_min,
self.reverse_travel_time_min == other.reverse_travel_time_min,
self.direct_len_km == other.direct_len_km,
self.reverse_len_km == other.reverse_len_km,
self.road_type == other.road_type,
])
return False
def __ne__(self, other):
"""Проверка на неравенство дорог"""
super().__ne__(other)
if self.id and other.id:
return self.id != other.id
if not (self.id or other.id):
return any([
self.direct_travel_time_min != other.direct_travel_time_min,
self.reverse_travel_time_min != other.reverse_travel_time_min,
self.direct_len_km != other.direct_len_km,
self.reverse_len_km != other.reverse_len_km,
self.road_type != other.road_type,
])
return True
def __lt__(self, item):
"""Проверка на превосходство объекта перед дорогой"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, item):
"""Проверка на превосходство над объектом дорога"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Проверка на непревосходство над объектом дорога"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Проверка на непревосходство объекта перед дорогой"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на принадлежность дороги"""
raise IncomparableTypesError('Проверка на вхождение для дорог не предусмотрена')
def _compare_classes(self, other):
return isinstance(other, BhRoad)
class BhRouteItem(BhCompare):
"""Базовое поведение объекта 'Пункт маршрута'"""
compare_error_message = 'Для пунктов маршрута не предусмотрены операции сравнения'
id: int
station: StationModel
point: DestinationPointModel
def __eq__(self, other):
"""Сравнение на равенство пунктов маршрута"""
super().__eq__(other)
if self.id and other.id:
return self.id == other.id
        if not (self.id or other.id):
return self.station == other.station and self.point == other.point
return False
def __ne__(self, other):
"""Проверка на неравенство пунктов маршрута"""
super().__ne__(other)
if self.id and other.id:
return self.id != other.id
        if not (self.id or other.id):
return self.station != other.station or self.point != other.point
return True
def __lt__(self, item):
"""Проверка на превосходство объекта перед пунктом маршрута"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, item):
"""Проверка на превосходство над объектом пункт маршрута"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Проверка на непревосходство над объектом пункт маршрута"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Проверка на непревосходство объекта перед пунктом маршрута"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на принадлежность объекта к пункту маршрута"""
if isinstance(item, StationModel):
return self.station == item
if isinstance(item, DestinationPointModel):
return self.point == item
raise IncomparableTypesError(f'Для объекта {item} проверка на вхождение в пункт маршрута не предусмотрена')
def _compare_classes(self, other):
return isinstance(other, BhRouteItem)
class BhPeriodItem(BhCompare):
"""Базовое поведение объекта 'Элемент периода'"""
period_type: PeriodType
value: int
def __eq__(self, other):
"""Сравнение на равенство периодов"""
super().__eq__(other)
return self.period_type == other.period_type and self.value == other.value
def __ne__(self, other):
"""Проверка на неравенство периодов"""
super().__ne__(other)
return self.period_type != other.period_type or self.value != other.value
def __lt__(self, item):
"""Проверка на превосходство объекта перед периодом"""
super().__lt__(item)
if self.period_type != item.period_type:
raise IncomparableTypesError('Периоды разных типов сравнивать нельзя')
return self.value < item.value
def __gt__(self, item):
"""Проверка на превосходство периода над объектом"""
super().__gt__(item)
if self.period_type != item.period_type:
raise IncomparableTypesError('Периоды разных типов сравнивать нельзя')
return self.value > item.value
def __le__(self, other):
"""Проверка на непревосходство над объектом периода"""
super().__le__(other)
if self.period_type != other.period_type:
raise IncomparableTypesError('Периоды разных типов сравнивать нельзя')
return self.value <= other.value
def __ge__(self, other):
"""Проверка на непревосходство объекта перед периодом"""
super().__ge__(other)
if self.period_type != other.period_type:
raise IncomparableTypesError('Периоды разных типов сравнивать нельзя')
return self.value >= other.value
def __contains__(self, item):
"""Проверка на принадлежность объекта к периоду не имеет смысла"""
raise IncomparableTypesError('Проверка на вхождение для периода не предусмотрена')
def _compare_classes(self, other):
return isinstance(other, BhPeriodItem)
class BhPeriod(BhCompare):
"""Базовое поведение объекта 'Период'"""
compare_error_message = 'Сравнение периодов не предусматривается'
period: PeriodItemModel
periods: List[PeriodItemModel]
def __eq__(self, other):
"""Сравнение на равенство периодов"""
super().__eq__(other)
if self.period and other.period:
return self.period == other.period
if self.periods and other.periods:
if len(self.periods) == len(other.periods):
pairs = zip(self.periods, other.periods)
return all([i == j for i, j in pairs])
return False
def __ne__(self, other):
"""Проверка на неравенство периодов"""
        super().__ne__(other)
if self.period and other.period:
return self.period != other.period
if self.periods and other.periods:
if len(self.periods) != len(other.periods):
return True
pairs = zip(self.periods, other.periods)
return not all([i == j for i, j in pairs])
return True
def __lt__(self, other):
"""Проверка на превосходство объекта перед периодом"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, item):
"""Проверка на превосходство над объектом периода"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Проверка на непревосходство над объектом периода"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Проверка на непревосходство объекта перед периодом"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на принадлежность объекта к периоду"""
if isinstance(item, PeriodItemModel):
return item in self.periods
raise IncomparableTypesError(f'Объект {item} неподходящего типа')
def __len__(self):
"""Длина составной части периода"""
return len(self.periods) if self.periods else 0
def __getitem__(self, item: int) -> PeriodItemModel:
"""Получение элемента из сложного периода"""
try:
int(item)
except (ValueError, TypeError):
raise WrongValueError('Индекс элемента должен быть числом')
if abs(item) >= len(self.periods):
raise WrongValueError('Индекс превышает размер составного периода')
return self.periods[item]
def _compare_classes(self, other):
return isinstance(other, BhPeriod)
class BhRoute(BhCompare):
"""Базовые методы маршрутов"""
first_station: StationModel
structure: List[RouteItemModel]
def __eq__(self, other):
"""Сравнение на равенство маршрутов"""
super().__eq__(other)
return self.first_station == other.first_station and self.structure == other.structure
def __ne__(self, other):
"""Проверка на неравенство маршрутов"""
super().__ne__(other)
return self.first_station != other.first_station or self.structure != other.structure
def __lt__(self, item):
"""Проверка на превосходство объекта перед маршрутом"""
super().__lt__(item)
return len(self.structure) < len(item.structure)
def __gt__(self, item):
"""Проверка на превосходство над объектом маршрут"""
super().__gt__(item)
return len(self.structure) > len(item.structure)
def __le__(self, other):
"""Проверка на непревосходство над объектом маршрут"""
super().__le__(other)
return len(self.structure) <= len(other.structure)
def __ge__(self, other):
"""Проверка на непревосходство объекта перед маршрутом"""
super().__ge__(other)
return len(self.structure) >= len(other.structure)
def __contains__(self, item):
"""Проверка на принадлежность объекта к маршруту"""
if isinstance(item, StationModel):
stations = [self.first_station] + [i.station for i in self.structure]
return item in stations
if isinstance(item, DestinationPointModel):
return item in [i.point for i in self.structure]
if isinstance(item, RouteItemModel):
return item in self.structure
raise IncomparableTypesError(f'Для объекта {item} проверка на принадлежность к маршруту не выполняется')
def __getitem__(self, item: int) -> ScheduleItem:
if isinstance(item, slice):
return self._get_slice(item)
try:
int(item)
except (TypeError, ValueError):
raise WrongValueError('Индекс элемента должен быть числом')
if len(self.structure) < abs(item):
raise WrongValueError(f'Длина маршрута меньше {item}')
schedule = list(self.get_schedule())
return schedule[item]
def __len__(self) -> int:
return len(self.structure) + 1
def get_schedule(self) -> Generator[ScheduleItem, None, None]:
"""
Расчет расписания по маршруту
Returns: генератор списка пунктов прохождения по маршруту
"""
length_from_start: float = 0.
travel_min_from_start: int = 0
yield ScheduleItem(
station=self.first_station,
point=self.first_station.point,
len_from_start=length_from_start,
len_from_last=0,
arrive=None,
depart=None,
time_from_start=travel_min_from_start,
time_from_last=0,
stop_time=0,
)
for item in sorted(self.structure, key=lambda x: x.order):
length_from_start += item.length_from_last_km or 0
travel_min_from_start += item.travel_time_min or 0
yield ScheduleItem(
station=item.station,
point=item.point,
len_from_start=length_from_start,
len_from_last=item.length_from_last_km,
arrive=None,
depart=None,
time_from_start=travel_min_from_start,
time_from_last=item.travel_time_min,
stop_time=item.stop_time_min or 0,
)
def _compare_classes(self, other):
return isinstance(other, BhRoute)
def _get_slice(self, item: slice) -> ScheduleItem:
route: List[ScheduleItem] = list(self.get_schedule())[item]
if not route:
raise WrongValueError('Неверно указаны пункты отправления и назначения')
if len(route) == 1:
return ScheduleItem(
station=route[0].station,
point=route[0].point,
len_from_start=route[0].len_from_start,
len_from_last=route[0].len_from_last,
time_from_start=route[0].time_from_start,
time_from_last=route[0].time_from_last,
depart=route[0].depart,
arrive=route[0].arrive,
stop_time=route[0].stop_time,
)
start_point: ScheduleItem = route[0]
end_point: ScheduleItem = route[-1]
length, travel_time, stop_time = 0., 0, 0
for i in route[1:]:
length += i.len_from_last
travel_time += i.time_from_last
stop_time += i.stop_time or 0
if stop_time:
stop_time -= end_point.stop_time or 0
return ScheduleItem(
station=start_point.station,
point=start_point.point,
len_from_start=end_point.len_from_start - start_point.len_from_start,
len_from_last=length,
arrive=None,
depart=None,
time_from_start=end_point.time_from_start - start_point.time_from_start,
time_from_last=travel_time,
stop_time=stop_time,
)
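# --- Illustrative note (not part of the original module) ---
# A worked example of the slicing semantics above, assuming a route A -> B -> C
# where B is 10 km / 15 min from A with a 5 minute stop, and C is 20 km / 30 min
# from B:
#
#     schedule = list(route.get_schedule())  # [A, B, C] as ScheduleItem objects
#     leg = route[0:3]                        # aggregated A..C leg
#     # leg.len_from_last  == 30.0  (10 + 20)
#     # leg.time_from_last == 45    (15 + 30)
#     # leg.stop_time      == 5     (stop at B; the stop at the final point is excluded)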
class BhJourney(BhRoute):
"""Базовые методы рейсов"""
departure_time: time
def __eq__(self, other):
"""Сравнение на равенство рейсов"""
return super().__eq__(other) and (self.departure_time == other.departure_time)
def __ne__(self, other):
"""Проверка на неравенство рейсов"""
return super().__ne__(other) or (self.departure_time != other.departure_time)
def get_journey_schedule(self, start_date: date) -> Generator[ScheduleItem, None, None]:
"""
Расчет расписания по рейсу
Returns: генератор списка пунктов прохождения по рейсу
"""
stop_time = 0
hour, minute = self.departure_time.hour, self.departure_time.minute
departure_date_time = datetime(*start_date.timetuple()[:3], hour, minute)
for item in list(super().get_schedule()):
yield ScheduleItem(
station=item.station,
point=item.point,
len_from_start=item.len_from_start,
len_from_last=item.len_from_last,
arrive=departure_date_time + timedelta(minutes=item.time_from_start + stop_time),
depart=departure_date_time + timedelta(
minutes=(item.time_from_start + stop_time + (item.stop_time or 0)),
),
time_from_start=item.time_from_start,
time_from_last=item.time_from_last,
stop_time=item.stop_time,
)
stop_time += (item.stop_time or 0)
def _compare_classes(self, other):
return isinstance(other, BhJourney)
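# --- Illustrative note (not part of the original module) ---
# Assumed behaviour of get_journey_schedule(): with departure_time 08:00 and the
# A -> B -> C route from the note above, the yielded items carry concrete
# datetimes for the given start date:
#
#     items = list(journey.get_journey_schedule(date(2024, 6, 1)))
#     # A: depart 08:00
#     # B: arrive 08:15, depart 08:20 (5 minute stop)
#     # C: arrive 08:50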
class BhJourneyBunchItem(BhCompare):
"""Базовое поведение объекта 'Элемент связки рейсов'"""
compare_error_message = 'Операция сравнения для элементов связок не предусмотрена'
journey: JourneyModel
stop_interval: int
def __eq__(self, other):
"""Сравнение на равенство элементов связки"""
super().__eq__(other)
return self.journey == other.journey and self.stop_interval == other.stop_interval
def __ne__(self, other):
"""Проверка на неравенство элементов связки"""
super().__ne__(other)
return self.journey != other.journey or self.stop_interval != other.stop_interval
def __lt__(self, item):
"""Проверка на превосходство объекта перед элементом связки"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, item):
"""Проверка на превосходство над объектом элемент связки"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Проверка на непревосходство над объектом элемент связки"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Проверка на непревосходство объекта перед элементом связки"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на принадлежность объекта к элементу связки"""
raise IncomparableTypesError('Проверка на вхождение для элементов связки не предусмотрена')
def _compare_classes(self, other):
return isinstance(other, BhJourneyBunchItem)
class BhJourneyBunch(BhCompare):
"""Базовое поведение объекта Связка рейсов"""
compare_message_error = 'Сравнение связок не предусматривается'
journeys: List[BhJourneyBunchItem]
def __eq__(self, other):
"""Сравнение на равенство связок рейсов"""
super().__eq__(other)
return self.journeys == other.journeys
def __ne__(self, other):
"""Проверка на неравенство связок рейсов"""
super().__ne__(other)
return self.journeys != other.journeys
def __lt__(self, item):
"""Проверка на превосходство объекта перед связкой рейса"""
raise IncomparableTypesError(self.compare_message_error)
def __gt__(self, item):
"""Проверка на превосходство над объектом связка рейсов"""
raise IncomparableTypesError(self.compare_message_error)
def __le__(self, other):
"""Проверка на непревосходство над объектом связка рейсов"""
raise IncomparableTypesError(self.compare_message_error)
def __ge__(self, other):
"""Проверка на непревосходство объекта перед связкой рейсов"""
raise IncomparableTypesError(self.compare_message_error)
def __contains__(self, item):
"""Проверка на принадлежность объекта к связке рейсов"""
if isinstance(item, BhJourneyBunchItem):
return item in self.journeys
elif isinstance(item, BhJourney):
journeys = [i.journey for i in self.journeys]
return item in journeys
raise IncomparableTypesError(
f'Для объекта {item} недопустима проверка на вхождение в связку рейсов'
)
def _compare_classes(self, other):
return isinstance(other, BhJourneyBunch)
|
/core/behavior/traffic_management.py
| 0.722233 | 0.269279 |
traffic_management.py
|
pypi
|
from abc import ABC, abstractmethod
from datetime import date, datetime, tzinfo
from typing import Dict, Optional
from uuid import uuid4
from sarmat.core.constants import JourneyClass, JourneyState
from sarmat.core.containers import (
PermitStructure,
SarmatStructure,
StationStructure,
JourneyProgressStructure,
JourneyScheduleStructure,
JourneyStructure,
)
from sarmat.core.exceptions.sarmat_exceptions import ImpossibleOperationError
from .traffic_management import BhJourney
class Journey(BhJourney):
"""Поведение объекта Рейс"""
def activate(self, start_date: date, permit: PermitStructure) -> Dict[str, SarmatStructure]:
"""
Активизация рейса (выписка рейсовой ведомости, создание расписания по рейсу)
Args:
start_date: дата начала выполнения рейса (отправление из начальной точки)
permit: путевой лист
Returns:
рейсовая ведомость
список активных пунктов маршрута
"""
# атрибуты текущего рейса и список активных станций, через которые проходит рейс
this_journey = JourneyStructure(**self.as_dict())
journey_stations = [item for item in self.get_journey_schedule(start_date) if item.station is not None]
first_station, *middleware_stations, last_station = journey_stations
journey_progress = JourneyProgressStructure( # Атрибуты рейсовой ведомости
id=uuid4(), # идентификатор ведомости
depart_date=start_date, # дата отправления в рейс
journey=this_journey, # рейс
permit=permit, # путевой лист
)
journey_schedule = [
JourneyScheduleStructure(
journey_progress=journey_progress, # рейсовая ведомость
journey_class=JourneyClass.BASE, # класс рейса (формирующийся)
station=first_station.station, # станция
state=JourneyState.READY, # состояние рейса
plan_depart=first_station.depart, # плановое время отправления
platform="", # платформа
comment="", # комментарий к текущему пункту
)
]
journey_schedule += [
JourneyScheduleStructure(
journey_progress=journey_progress, # рейсовая ведомость
journey_class=JourneyClass.TRANSIT, # класс рейса (формирующийся)
station=item.station, # станция
state=JourneyState.READY, # состояние рейса
plan_arrive=item.arrive, # плановое время прибытия
plan_depart=item.depart, # плановое время отправления
platform="", # платформа
comment="", # комментарий к текущему пункту
)
for item in middleware_stations
]
journey_schedule.append(
JourneyScheduleStructure(
journey_progress=journey_progress, # рейсовая ведомость
journey_class=JourneyClass.ARRIVING, # класс рейса (формирующийся)
station=last_station.station, # станция
state=JourneyState.READY, # состояние рейса
plan_arrive=last_station.arrive, # плановое время прибытия
platform="", # платформа
comment="", # комментарий к текущему пункту
)
)
for idx, item in enumerate(journey_schedule):
journey_schedule[idx].last_items = journey_schedule[idx+1:]
return {
'JourneyProgress': journey_progress,
'JourneyScheduleStructure': journey_schedule,
}
class JourneySchedule:
"""Поведение объекта Рейсовая ведомость в активном расписании"""
def make_departure(self, time_zone: Optional[tzinfo] = None) -> None:
"""Операция отправления на текущем пункте"""
if self.journey_class == JourneyClass.ARRIVING:
raise ImpossibleOperationError("На последнем пункте маршрута операция отправления не выполняется")
self.state = JourneyState.DEPARTED
self.fact_depart = datetime.now(time_zone)
def make_registration(self,
permit: Optional[PermitStructure] = None,
platform: Optional[str] = None,
comments: Optional[str] = None) -> None:
"""
Операция по регистрации рейса. Заполнение реквизитов
Args:
permit: путевой лист
platform: номер платформы
comments: комментарии диспетчера
"""
if permit and (permit != self.permit):
self.permit = permit
if platform and platform != self.platform:
self.platform = platform
# NOTE: комментарий дозволяется затереть
if comments != self.comment:
self.comment = comments
def make_arrival(self, time_zone: Optional[tzinfo] = None) -> None:
"""Операция прибытия на текущем пункте"""
is_last = self.journey_class == JourneyClass.ARRIVING
operational_time = datetime.now(time_zone)
new_state = JourneyState.ARRIVED if is_last else JourneyState.ON_REGISTRATION
self.state = new_state
self.fact_arrive = operational_time
def _set_state_for_items_chain(self, state: JourneyState) -> None:
"""Назначение состояния текущему элементу и всем последующим"""
self.state = state
for item in self.last_items:
item.state = state
def cancel_journey(self) -> None:
"""Отмена рейса"""
self._set_state_for_items_chain(JourneyState.CANCELLED)
def close_journey(self) -> None:
"""Закрытие рейса"""
self._set_state_for_items_chain(JourneyState.CLOSED)
def break_journey(self) -> None:
"""Отметка о срыве"""
self._set_state_for_items_chain(JourneyState.DISRUPTED)
@property
def current_station(self) -> StationStructure:
"""Текущий пункт прохождения рейса"""
return self.current_item.station
class JourneyHook(ABC):
"""Поведение объекта Рейсовая Ведомость"""
@abstractmethod
def lock_register(self):
"""Блокировка ведомости"""
@abstractmethod
def unlock_register(self):
"""Снятие блокировки с ведомости"""
@abstractmethod
def register_place(self):
"""Регистрация места на ведомости"""
@abstractmethod
def unregister_place(self):
"""Отмена регистрации места на ведомости"""
|
/core/behavior/dispatcher.py
| 0.612541 | 0.395076 |
dispatcher.py
|
pypi
|
from typing import Optional
from .bases import BhCompare
from ..exceptions import IncomparableTypesError
class BhPerson(BhCompare):
"""Методы сравнения учетных записей"""
compare_error_message = 'Учетные записи сравнению не подлежат'
contains_error_message = 'Учетные записи проверке на вхождение не подлежат'
first_name: str
last_name: str
middle_name: Optional[str]
def __eq__(self, other):
"""Сравнение учетных записей"""
super().__eq__(other)
equal_names = self.last_name == other.last_name and self.first_name == other.first_name
if equal_names:
equal_middle = (self.middle_name is not None) and (other.middle_name is not None)
return equal_middle and self.middle_name == other.middle_name
return False
    def __ne__(self, other):
        """Проверка на неравенство учетных записей"""
        super().__ne__(other)
        ne_names = self.last_name != other.last_name or self.first_name != other.first_name
        ne_middles = (
            self.middle_name is None
            or other.middle_name is None
            or self.middle_name != other.middle_name
        )
        return ne_names or ne_middles
def __lt__(self, other):
"""Сравнение учетных записей не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, other):
"""Сравнение учетных записей не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Сравнение учетных записей не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Сравнение учетных записей не имеет смысла"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на вхождение учетных записей не имеет смысла"""
raise IncomparableTypesError(self.contains_error_message)
def _compare_classes(self, other):
return isinstance(other, BhPerson)
|
/core/behavior/sarmat.py
| 0.658418 | 0.254266 |
sarmat.py
|
pypi
|
from datetime import date
from typing import List
from sarmat.core.constants import CrewType, PermitType, VehicleType
from sarmat.core.context.models import CrewModel, VehicleModel
from .bases import BhCompare
from .sarmat import BhPerson
from ..exceptions import IncomparableTypesError
class BhVehicle(BhCompare):
"""Методы сравнения объекта 'Транспортное средство'"""
vehicle_type: VehicleType
vehicle_name: str
state_number: str
seats: int
stand: int
capacity: int
def __eq__(self, other):
"""Сравнение двух транспортных средств"""
super().__eq__(other)
condition1 = self.vehicle_type == other.vehicle_type and self.vehicle_name == other.vehicle_name
condition2 = self.state_number == other.state_number
return condition1 and condition2
def __ne__(self, other):
"""Проверка на неравенство двух транспортных средств"""
super().__ne__(other)
return any(
[
self.state_number != other.state_number,
self.vehicle_type != other.vehicle_type,
self.vehicle_name != other.vehicle_name,
],
)
def __lt__(self, other):
"""Сравнение транспортных средств по вместимости.
Количество посадочных мест, количество мест стоя, вместимость багажного отделения
"""
super().__lt__(other)
return (self.seats, self.stand, self.capacity) < (other.seats, other.stand, other.capacity)
def __gt__(self, other):
"""Сравнение транспортных средств по вместимости.
Количество посадочных мест, количество мест стоя, вместимость багажного отделения
"""
super().__gt__(other)
return (self.seats, self.stand, self.capacity) > (other.seats, other.stand, other.capacity)
def __le__(self, other):
"""Сравнение транспортных средств по вместимости.
Количество посадочных мест, количество мест стоя, вместимость багажного отделения
"""
super().__le__(other)
return (self.seats, self.stand, self.capacity) <= (other.seats, other.stand, other.capacity)
def __ge__(self, other):
"""Сравнение транспортных средств по вместимости.
Количество посадочных мест, количество мест стоя, вместимость багажного отделения
"""
super().__ge__(other)
return (self.seats, self.stand, self.capacity) >= (other.seats, other.stand, other.capacity)
def __contains__(self, item):
"""Проверка на вхождение для транспортного средства не имеет смысла"""
raise IncomparableTypesError('Проверка на вхождение для транспортного средства не производится')
def _compare_classes(self, other):
return isinstance(other, BhVehicle)
class BhCrew(BhPerson):
"""Методы сравнения сведений об экипаже"""
crew_type: CrewType
def __eq__(self, other):
person_compare = super().__eq__(other)
return person_compare and self.crew_type == other.crew_type
def __ne__(self, other):
person_compare = super().__ne__(other)
return person_compare or self.crew_type != other.crew_type
def _compare_classes(self, other):
return isinstance(other, BhCrew)
class BhPermit(BhCompare):
"""Методы сравнения путевых листов"""
compare_error_message = 'Путевые листы сравнению не подлежат'
number: str
depart_date: date
permit_type: PermitType
crew: List[CrewModel]
vehicle: List[VehicleModel]
def __eq__(self, other):
"""Сравнение путевых листов"""
super().__eq__(other)
return all(
[
self.number == other.number,
self.permit_type == other.permit_type,
self.depart_date == other.depart_date,
],
)
def __ne__(self, other):
"""Проверка на неравенство путевых листов"""
super().__ne__(other)
return any(
[
self.number != other.number,
self.permit_type != other.permit_type,
self.depart_date != other.depart_date,
],
)
def __lt__(self, other):
"""Проверка сравнение не имеет смысла для путевых листов"""
raise IncomparableTypesError(self.compare_error_message)
def __gt__(self, other):
"""Проверка сравнение не имеет смысла для путевых листов"""
raise IncomparableTypesError(self.compare_error_message)
def __le__(self, other):
"""Проверка сравнение не имеет смысла для путевых листов"""
raise IncomparableTypesError(self.compare_error_message)
def __ge__(self, other):
"""Проверка сравнение не имеет смысла для путевых листов"""
raise IncomparableTypesError(self.compare_error_message)
def __contains__(self, item):
"""Проверка на вхождение транспортного средства или водителя в состав экипажа"""
if isinstance(item, BhCrew):
if self.crew is None:
return False
return item in self.crew
if isinstance(item, BhVehicle):
if self.vehicle is None:
return False
return item in self.vehicle
raise IncomparableTypesError(f'Тип {item.__class__} не предназначен для проверки на вхождение в состав экипажа')
def _compare_classes(self, other):
return isinstance(other, BhPermit)
|
/core/behavior/vehicle.py
| 0.664323 | 0.405213 |
vehicle.py
|
pypi
|
import json
from datetime import date, datetime
from typing import Any, Dict, List, Optional
import pydantic
from sarmat.core.constants import PeriodType, DATE_FORMAT, FULL_DATETIME_FORMAT
from sarmat.tools.json_encoder import json_dumps
class SarmatContainer(pydantic.BaseModel):
custom_attributes: Optional[Dict[str, Any]] = None
def sarmat_fields(self):
return list(self.model_fields.keys())
@classmethod
def _parse_date(cls, value) -> date:
return datetime.strptime(value, DATE_FORMAT).date()
@classmethod
def _parse_datetime(cls, value) -> datetime:
return datetime.strptime(value, FULL_DATETIME_FORMAT)
class ConfigDict:
json_loads = json.loads
json_dumps = json_dumps
class BaseIdSarmatContainer(SarmatContainer):
"""Базовая модель с числовым идентификатором."""
id: Optional[int] = 0
class BaseUidSarmatContainer(SarmatContainer):
"""Базовая модель с UUID идентификатором."""
uid: Optional[str] = ''
class PeriodItemContainer(BaseIdSarmatContainer):
"""Элементы сложного периода"""
period_type: PeriodType # тип периода
cypher: str # шифр (константа)
name: str # название
value: List[int] # список значений
is_active: bool # период активности
class PeriodContainer(BaseIdSarmatContainer):
"""Период"""
cypher: str # системное имя
name: str # константа
periods: Optional[List[PeriodItemContainer]] = None # описание сложного периода
period: Optional[PeriodItemContainer] = None # описание простого периода
class BasePersonSarmatContainer(SarmatContainer):
"""Базовая модель для описания человека."""
last_name: str # фамилия
first_name: str # имя
middle_name: Optional[str] = None # отчество
male: bool # пол
|
/core/context/containers/sarmat_containers.py
| 0.70416 | 0.202759 |
sarmat_containers.py
|
pypi
|
import datetime
from typing import List, Optional
from pydantic import field_serializer, field_validator, ConfigDict
from sarmat.core.constants import (
DATE_FORMAT,
JourneyType,
RoadType,
StationType,
)
from .geo_containers import (
DestinationPointContainer,
DirectionContainer,
RoadNameContainer,
)
from .sarmat_containers import BaseIdSarmatContainer
class StationContainer(BaseIdSarmatContainer):
"""Станции (пункты посадки-высадки пассажиров)"""
station_type: StationType # тип станции
name: str # наименование
point: DestinationPointContainer # ближайший населенный пункт
address: str = "" # почтовый адрес
class RoadContainer(BaseIdSarmatContainer):
"""Дороги"""
start_point: DestinationPointContainer # начало дороги
end_point: DestinationPointContainer # конец дороги
direct_travel_time_min: int # время прохождения в прямом направлении
reverse_travel_time_min: int # время прохождения в обратном направлении
direct_len_km: float # расстояние в прямом направлении
reverse_len_km: float # расстояние в обратном направлении
road_type: RoadType # тип дорожного покрытия
road_name: Optional[RoadNameContainer] = None # классификация дороги
class RouteItemContainer(BaseIdSarmatContainer):
"""Состав маршрута"""
length_from_last_km: float # расстояние от предыдущего пункта
travel_time_min: int # время движения от предыдущего пункта в минутах
road: Optional[RoadContainer] = None # дорога
order: int = 1 # порядок следования
station: Optional[StationContainer] = None # станция
point: Optional[DestinationPointContainer] = None # ближайший населенный пункт
stop_time_min: Optional[int] = None # время стоянки в минутах
class RouteContainer(BaseIdSarmatContainer):
"""Описание маршрута"""
name: str # наименование
first_station: StationContainer # станция отправления
structure: List[RouteItemContainer] # состав маршрута
direction: Optional[List[DirectionContainer]] = None # направления
comments: Optional[str] = None # комментарий к маршруту
number: Optional[int] = None # номер маршрута
literal: str = "" # литера
is_active: bool = True # признак активности маршрута
class JourneyContainer(RouteContainer):
"""Атрибуты рейса"""
journey_type: JourneyType # тип рейса
departure_time: datetime.time # время отправления
is_chartered: bool = False # признак заказного рейса
need_control: bool = False # признак именной продажи и мониторинга
season_begin: Optional[datetime.date] = None # начало сезона
season_end: Optional[datetime.date] = None # окончание сезона
@field_validator('season_begin', mode="before")
@classmethod
def parse_season_begin(cls, val):
if val and isinstance(val, str):
return cls._parse_date(val)
return val
@field_validator('season_end', mode="before")
@classmethod
def parse_season_end(cls, val):
if val and isinstance(val, str):
return cls._parse_date(val)
return val
@field_serializer('season_begin')
def serialize_season_begin(self, season_begin: Optional[datetime.date], _info):
if season_begin:
return season_begin.strftime(DATE_FORMAT)
return None
@field_serializer('season_end')
def serialize_season_end(self, season_end: Optional[datetime.date], _info):
if season_end:
return season_end.strftime(DATE_FORMAT)
return None
model_config = ConfigDict(arbitrary_types_allowed=True)
class JourneyBunchItemContainer(BaseIdSarmatContainer):
"""Атрибуты элемента из связки рейсов"""
journey: JourneyContainer # рейс
stop_interval: int # время простоя в часах
class JourneyBunchContainer(BaseIdSarmatContainer):
"""Атрибуты связки рейсов"""
journeys: List[JourneyBunchItemContainer] # элементы связки
name: Optional[str] = None # наименование связки
|
/core/context/containers/traffic_management_containers.py
| 0.632389 | 0.410697 |
traffic_management_containers.py
|
pypi
|
from datetime import date, datetime
from typing import List, Optional
from pydantic import field_serializer, field_validator
from sarmat.core.constants import DATE_FORMAT, FULL_DATETIME_FORMAT, JourneyClass, JourneyState
from .traffic_management_containers import DestinationPointContainer, JourneyContainer, StationContainer
from .sarmat_containers import BaseIdSarmatContainer, BaseUidSarmatContainer, PeriodContainer
from .vehicle_containers import PermitContainer
class IntervalContainer(BaseIdSarmatContainer):
"""График выполнения рейсов"""
journey: JourneyContainer # рейс
start_date: date # дата начала
interval: PeriodContainer # интервал движения
@field_validator('start_date', mode="before")
@classmethod
def parse_start_date(cls, val):
if val and isinstance(val, str):
return cls._parse_date(val)
return val
@field_serializer('start_date')
def serialize_start_date(self, start_date: date, _info):
return start_date.strftime(DATE_FORMAT)
class JourneyProgressContainer(BaseUidSarmatContainer):
"""Атрибуты рейсовой ведомости"""
depart_date: date # дата отправления в рейс
journey: JourneyContainer # рейс
permit: PermitContainer # номер путевого листа
@field_validator('depart_date', mode="before")
@classmethod
def parse_depart_date(cls, val):
if val and isinstance(val, str):
return cls._parse_date(val)
return val
@field_serializer('depart_date')
def serialize_depart_date(self, depart_date: date, _info):
return depart_date.strftime(DATE_FORMAT)
class JourneyScheduleContainer(BaseUidSarmatContainer):
"""Процесс прохождения рейса по автоматизированным точкам"""
journey_progress: JourneyProgressContainer # рейсовая ведомость
journey_class: JourneyClass # классификация рейса в данном пункте
station: Optional[StationContainer] = None # станция
point: Optional[DestinationPointContainer] = None # точка прохождения маршрута
state: JourneyState # состояние рейса
plan_arrive: Optional[datetime] = None # плановое время прибытия
fact_arrive: Optional[datetime] = None # фактическое время прибытия
plan_depart: Optional[datetime] = None # плановое время отправления
fact_depart: Optional[datetime] = None # фактическое время отправления
platform: str = '' # платформа
comment: str = '' # комментарий к текущему пункту
last_items: Optional[List['JourneyScheduleContainer']] = None # оставшиеся активные пункты прохождения рейса
@field_validator('plan_arrive', mode="before")
@classmethod
def parse_plan_arrive(cls, val):
if val and isinstance(val, str):
return cls._parse_datetime(val)
return val
@field_serializer('plan_arrive')
def serialize_plan_arrive(self, plan_arrive: Optional[datetime], _info):
if plan_arrive:
return plan_arrive.strftime(FULL_DATETIME_FORMAT)
return None
@field_validator('fact_arrive', mode="before")
@classmethod
def parse_fact_arrive(cls, val):
if val and isinstance(val, str):
return cls._parse_datetime(val)
return val
@field_serializer('fact_arrive')
def serialize_fact_arrive(self, fact_arrive: Optional[datetime], _info):
if fact_arrive:
return fact_arrive.strftime(FULL_DATETIME_FORMAT)
return None
@field_validator('plan_depart', mode="before")
@classmethod
def parse_plan_depart(cls, val):
if val and isinstance(val, str):
return cls._parse_datetime(val)
return val
@field_serializer('plan_depart')
def serialize_plan_depart(self, plan_depart: Optional[datetime], _info):
if plan_depart:
return plan_depart.strftime(FULL_DATETIME_FORMAT)
return None
@field_validator('fact_depart', mode="before")
@classmethod
def parse_fact_depart(cls, val):
if val and isinstance(val, str):
return cls._parse_datetime(val)
return val
@field_serializer('fact_depart')
def serialize_fact_depart(self, fact_depart: Optional[datetime], _info):
if fact_depart:
return fact_depart.strftime(FULL_DATETIME_FORMAT)
return None
|
/core/context/containers/dispatcher_containers.py
| 0.760384 | 0.214362 |
dispatcher_containers.py
|
pypi
|
from dataclasses import asdict, dataclass, fields
from typing import Any, Dict, List, Optional
from sarmat.core.constants import PeriodType
from sarmat.core.context.containers.sarmat_containers import SarmatContainer, PeriodItemContainer, PeriodContainer
@dataclass
class BaseModel:
@property
def sarmat_fields(self):
return [fld.name for fld in fields(self)]
@property
def as_dict(self):
return asdict(self)
@classmethod
def from_container(cls, container: SarmatContainer) -> 'BaseModel':
raise NotImplementedError
def raw(self) -> SarmatContainer:
raise NotImplementedError
@dataclass
class BaseIdModel:
id: Optional[int] = 0
@dataclass
class BaseUidModel:
uid: Optional[str] = ""
@dataclass
class CustomAttributesModel:
custom_attributes: Optional[Dict[str, Any]] = None
@property
def custom_fields(self) -> List[str]:
return list(self.custom_attributes.keys()) if self.custom_attributes else []
@dataclass
class PersonModel(BaseModel):
"""Данные человека"""
last_name: str # фамилия
first_name: str # имя
middle_name: str # отчество
male: bool # пол: М
@dataclass
class BasePeriodItemModel(BaseModel):
"""Элементы сложного периода"""
period_type: PeriodType # тип периода
cypher: str # шифр (константа)
name: str # название
value: List[int] # список значений
is_active: bool # период активности
@dataclass
class PeriodItemModel(BaseIdModel, CustomAttributesModel, BasePeriodItemModel):
@classmethod
def from_container(cls, container: PeriodItemContainer) -> 'PeriodItemModel': # type: ignore[override]
return cls(
id=container.id,
custom_attributes=container.custom_attributes,
period_type=container.period_type,
cypher=container.cypher,
name=container.name,
value=container.value,
is_active=container.is_active,
)
def raw(self) -> PeriodItemContainer: # type: ignore[override]
return PeriodItemContainer(
id=self.id,
custom_attributes=self.custom_attributes,
period_type=self.period_type,
cypher=self.cypher,
name=self.name,
value=self.value,
is_active=self.is_active,
)
@dataclass
class BasePeriodModel(BaseModel):
"""Период"""
cypher: str # системное имя
name: str # константа
periods: Optional[List[PeriodItemModel]] = None # описание сложного периода
period: Optional[PeriodItemModel] = None # описание простого периода
@dataclass
class PeriodModel(BaseIdModel, CustomAttributesModel, BasePeriodModel):
@classmethod
def from_container(cls, container: PeriodContainer) -> 'PeriodModel': # type: ignore[override]
period, periods = None, []
if container.periods:
periods = [PeriodItemModel.from_container(item) for item in container.periods]
if container.period:
period = PeriodItemModel.from_container(container.period)
return cls(
id=container.id,
custom_attributes=container.custom_attributes,
cypher=container.cypher,
name=container.name,
periods=periods,
period=period,
)
def raw(self) -> PeriodContainer: # type: ignore[override]
return PeriodContainer(
id=self.id,
custom_attributes=self.custom_attributes,
cypher=self.cypher,
name=self.name,
periods=[item.raw() for item in self.periods] if self.periods else None,
period=self.period.raw() if self.period else None,
)
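# --- Illustrative sketch (not part of the original module) ---
# The model/container pairs are designed for a lossless round trip; a
# hypothetical example with a simple period item (the PeriodType member name
# is assumed for illustration):
#
#     container = PeriodItemContainer(
#         id=1, period_type=PeriodType.WEEK, cypher='week_days', name='Дни недели',
#         value=[1, 2, 3, 4, 5], is_active=True,
#     )
#     model = PeriodItemModel.from_container(container)
#     assert model.raw() == container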
|
/core/context/models/sarmat_models.py
| 0.879458 | 0.248283 |
sarmat_models.py
|
pypi
|
from dataclasses import dataclass
from datetime import date, datetime
from typing import List, Optional
from sarmat.core.constants import JourneyClass, JourneyState
from .sarmat_models import BaseIdModel, BaseModel, BaseUidModel, CustomAttributesModel, PeriodModel
from .traffic_management_models import DestinationPointModel, JourneyModel, StationModel
from .vehicle_models import PermitModel
from ..containers import IntervalContainer, JourneyProgressContainer, JourneyScheduleContainer
@dataclass
class BaseIntervalModel(BaseModel):
"""График выполнения рейсов"""
journey: JourneyModel # рейс
start_date: date # дата начала
interval: PeriodModel # интервал движения
@dataclass
class IntervalModel(BaseIdModel, CustomAttributesModel, BaseIntervalModel):
@classmethod
def from_container(cls, container: IntervalContainer) -> 'IntervalModel': # type: ignore[override]
return cls(
id=container.id,
custom_attributes=container.custom_attributes,
journey=JourneyModel.from_container(container.journey),
start_date=container.start_date,
interval=PeriodModel.from_container(container.interval),
)
def raw(self) -> IntervalContainer: # type: ignore[override]
return IntervalContainer(
id=self.id,
custom_attributes=self.custom_attributes,
journey=self.journey.raw(),
start_date=self.start_date,
interval=self.interval.raw(),
)
@dataclass
class BaseJourneyProgressModel(BaseModel):
"""Атрибуты рейсовой ведомости"""
depart_date: date # дата отправления в рейс
journey: JourneyModel # рейс
permit: PermitModel # номер путевого листа
@dataclass
class JourneyProgressModel(BaseUidModel, CustomAttributesModel, BaseJourneyProgressModel):
@classmethod
def from_container(cls, container: JourneyProgressContainer) -> 'JourneyProgressModel': # type: ignore[override]
return cls(
uid=container.uid,
custom_attributes=container.custom_attributes,
depart_date=container.depart_date,
journey=JourneyModel.from_container(container.journey),
permit=PermitModel.from_container(container.permit),
)
def raw(self) -> JourneyProgressContainer: # type: ignore[override]
return JourneyProgressContainer(
uid=self.uid,
custom_attributes=self.custom_attributes,
depart_date=self.depart_date,
journey=self.journey.raw(),
permit=self.permit.raw(),
)
@dataclass
class BaseJourneyScheduleModel(BaseModel):
"""Процесс прохождения рейса по автоматизированным точкам"""
journey_progress: JourneyProgressModel # рейсовая ведомость
journey_class: JourneyClass # классификация рейса в данном пункте
station: Optional[StationModel] # станция
point: Optional[DestinationPointModel] # точка прохождения маршрута
state: JourneyState # состояние рейса
plan_arrive: Optional[datetime] = None # плановое время прибытия
fact_arrive: Optional[datetime] = None # фактическое время прибытия
plan_depart: Optional[datetime] = None # плановое время отправления
fact_depart: Optional[datetime] = None # фактическое время отправления
platform: str = '' # платформа
comment: str = '' # комментарий к текущему пункту
last_items: Optional[List['JourneyScheduleModel']] = None # оставшиеся активные пункты прохождения рейса
@dataclass
class JourneyScheduleModel(BaseUidModel, CustomAttributesModel, BaseJourneyScheduleModel):
@classmethod
def from_container(cls, container: JourneyScheduleContainer) -> 'JourneyScheduleModel': # type: ignore[override]
last_items = None
if container.last_items:
last_items = [JourneyScheduleModel.from_container(item) for item in container.last_items]
return cls(
uid=container.uid,
custom_attributes=container.custom_attributes,
journey_progress=JourneyProgressModel.from_container(container.journey_progress),
journey_class=container.journey_class,
station=StationModel.from_container(container.station) if container.station else None,
point=DestinationPointModel.from_container(container.point) if container.point else None,
state=container.state,
plan_arrive=container.plan_arrive,
fact_arrive=container.fact_arrive,
plan_depart=container.plan_depart,
fact_depart=container.fact_depart,
platform=container.platform,
comment=container.comment,
last_items=last_items,
)
def raw(self) -> JourneyScheduleContainer: # type: ignore[override]
return JourneyScheduleContainer(
uid=self.uid,
custom_attributes=self.custom_attributes,
journey_progress=self.journey_progress.raw(),
journey_class=self.journey_class,
station=self.station.raw() if self.station else None,
point=self.point.raw() if self.point else None,
state=self.state,
plan_arrive=self.plan_arrive,
fact_arrive=self.fact_arrive,
plan_depart=self.plan_depart,
fact_depart=self.fact_depart,
platform=self.platform,
comment=self.comment,
last_items=[item.raw() for item in self.last_items] if self.last_items else None,
)
|
/core/context/models/dispatcher_models.py
| 0.790126 | 0.15084 |
dispatcher_models.py
|
pypi
|
from dataclasses import dataclass
from datetime import date
from typing import List
from sarmat.core.constants import CrewType, PermitType, VehicleType
from sarmat.core.verification import CrewVerifier, PermitVerifier, VehicleVerifier
from .sarmat_models import PersonModel, BaseIdModel, BaseUidModel, BaseModel, CustomAttributesModel
from ..containers import CrewContainer, PermitContainer, VehicleContainer
@dataclass
class BaseVehicleModel(BaseModel):
"""Подвижной состав"""
vehicle_type: VehicleType # тип транспортного средства
vehicle_name: str # марка транспортного средства
state_number: str # гос. номер
seats: int # количество мест для посадки
stand: int = 0 # количество мест стоя
capacity: int = 0 # вместимость багажного отделения
@dataclass
class VehicleModel(BaseIdModel, CustomAttributesModel, BaseVehicleModel):
@classmethod
def from_container(cls, container: VehicleContainer) -> 'VehicleModel': # type: ignore[override]
VehicleVerifier().verify(container)
return cls(
id=container.id,
custom_attributes=container.custom_attributes,
vehicle_type=container.vehicle_type,
vehicle_name=container.vehicle_name,
state_number=container.state_number,
seats=container.seats,
stand=container.stand,
capacity=container.capacity,
)
def raw(self) -> VehicleContainer: # type: ignore[override]
return VehicleContainer(
id=self.id,
custom_attributes=self.custom_attributes,
vehicle_type=self.vehicle_type,
vehicle_name=self.vehicle_name,
state_number=self.state_number,
seats=self.seats,
stand=self.stand,
capacity=self.capacity,
)
@dataclass
class BaseCrewModel(BaseModel):
"""Экипаж"""
crew_type: CrewType # тип члена экипажа
is_main: bool = True # признак главного члена экипажа
@dataclass
class CrewModel(BaseIdModel, CustomAttributesModel, BaseCrewModel, PersonModel):
@classmethod
def from_container(cls, container: CrewContainer) -> 'CrewModel': # type: ignore[override]
CrewVerifier().verify(container)
return cls(
id=container.id,
custom_attributes=container.custom_attributes,
last_name=container.last_name,
first_name=container.first_name,
middle_name=container.middle_name or '',
male=container.male,
crew_type=container.crew_type,
is_main=container.is_main,
)
def raw(self) -> CrewContainer: # type: ignore[override]
return CrewContainer(
id=self.id,
custom_attributes=self.custom_attributes,
last_name=self.last_name,
first_name=self.first_name,
middle_name=self.middle_name,
male=self.male,
crew_type=self.crew_type,
is_main=self.is_main,
)
@dataclass
class BasePermitModel(BaseModel):
"""Путевой лист"""
number: str # номер путевого листа
permit_type: PermitType # тип путевого листа
depart_date: date # дата выезда
crew: List[CrewModel] # экипаж
vehicle: List[VehicleModel] # подвижной состав
@dataclass
class PermitModel(BaseUidModel, CustomAttributesModel, BasePermitModel):
@classmethod
def from_container(cls, container: PermitContainer) -> 'PermitModel': # type: ignore[override]
PermitVerifier().verify(container)
return cls(
uid=container.uid,
custom_attributes=container.custom_attributes,
number=container.number,
permit_type=container.permit_type,
depart_date=container.depart_date,
crew=[CrewModel.from_container(item) for item in container.crew],
vehicle=[VehicleModel.from_container(item) for item in container.vehicle],
)
def raw(self) -> PermitContainer: # type: ignore[override]
return PermitContainer(
uid=self.uid,
custom_attributes=self.custom_attributes,
number=self.number,
permit_type=self.permit_type,
depart_date=self.depart_date,
crew=[item.raw() for item in self.crew],
vehicle=[item.raw() for item in self.vehicle],
)
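# --- Illustrative sketch (not part of the original module) ---
# from_container() runs the matching verifier (PermitVerifier, CrewVerifier,
# VehicleVerifier) before the model is built, so invalid containers fail fast.
# `permit_container` is an assumed, pre-populated PermitContainer instance.
def _example_build_permit(permit_container: 'PermitContainer') -> 'PermitModel':
    """Build a PermitModel; PermitVerifier().verify() raises on invalid input."""
    return PermitModel.from_container(permit_container)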
|
/core/context/models/vehicle_models.py
| 0.669853 | 0.17172 |
vehicle_models.py
|
pypi
|
from copy import deepcopy
from dataclasses import dataclass
from datetime import date, datetime, time, timedelta
from typing import List, Optional
from sarmat.core.constants import JourneyType, StationType
from sarmat.core.constants.sarmat_constants import MAX_SUBURBAN_ROUTE_LENGTH
from sarmat.core.context.containers import JourneyBunchItemContainer, JourneyContainer, RouteContainer
from sarmat.core.context.models import DirectionModel, JourneyBunchItemModel, StationModel, RouteItemModel
from sarmat.core.exceptions import WrongValueError
from sarmat.tools.geo_tools import get_geo_objects_projection
from .bases import ActionMixin
@dataclass
class RouteMetrics:
point: str
station: str
arrive: datetime
stop: int
depart: Optional[datetime]
length: float
total_length: int
spent_time: int
class AStationMixin(ActionMixin):
"""Действия с пунктами назначения"""
custom_attributes: dict
name: str
station_type: StationType
point: object
address: str
def copy(self):
return self.__class__(
id=0,
custom_attributes=self.custom_attributes,
station_type=self.station_type,
name=f"Копия {self.name}",
point=self.point,
address=self.address,
)
class RouteMixin:
"""Миксин для использования при работе с маршрутами и рейсами"""
id: int
custom_attributes: dict
number: int
first_station: StationModel
structure: List[RouteItemModel]
name: str
direction: Optional[List[DirectionModel]]
comments: str
def get_real_journey_type(self) -> JourneyType:
"""Метод возвращает вычисленный тип рейса"""
route_length = 0.
region_changed, country_changed = False, False
_, base_region, base_country = get_geo_objects_projection(self.first_station.point.state)
for route_item in self.structure:
item = route_item.station.point if route_item.station else route_item.point
if not item:
raise WrongValueError(
'Route item must have station or destination point',
)
_, item_region, item_country = get_geo_objects_projection(item.state)
if base_region and item_region:
region_changed = base_region != item_region
if base_country and item_country:
country_changed = base_country != item_country
if region_changed or country_changed:
break
route_length += route_item.length_from_last_km
if country_changed:
journey_type = JourneyType.INTERNATIONAL
elif region_changed:
journey_type = JourneyType.INTER_REGIONAL
elif route_length >= MAX_SUBURBAN_ROUTE_LENGTH:
journey_type = JourneyType.LONG_DISTANCE
else:
journey_type = JourneyType.SUBURBAN
return journey_type
def get_route_metrics(self):
"""Метрика состава маршрута"""
now = self.get_base_depart_date_time()
route_length = 0
spent_time_in_minutes = 0
for item in self.structure[:-1]:
spent_time_in_minutes += item.travel_time_min
spent_time_in_minutes += (item.stop_time_min or 0)
now += timedelta(minutes=item.travel_time_min)
depart_delta = timedelta(minutes=item.stop_time_min or 0)
route_length += item.length_from_last_km
if item.station:
point_name, station_name = item.station.point.name, item.station.name
else:
point_name, station_name = item.point.name, ''
yield RouteMetrics(
point=point_name,
station=station_name,
arrive=now,
stop=item.stop_time_min,
depart=(now + depart_delta),
length=item.length_from_last_km,
total_length=route_length,
spent_time=spent_time_in_minutes,
)
now += depart_delta
last_item = self.structure[-1]
route_length += last_item.length_from_last_km
spent_time_in_minutes += last_item.travel_time_min
now += timedelta(minutes=last_item.travel_time_min)
if last_item.station:
point_name, station_name = last_item.station.point.name, last_item.station.name
else:
point_name, station_name = last_item.point.name, ''
yield RouteMetrics(
point=point_name,
station=station_name,
arrive=now,
stop=0,
depart=None,
length=last_item.length_from_last_km,
total_length=route_length,
spent_time=spent_time_in_minutes,
)
def get_base_depart_date_time(self) -> datetime:
today = date.today()
return datetime(today.year, today.month, today.day, 0, 0)
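# --- Illustrative sketch (not part of the original module) ---
# get_route_metrics() is a generator: it walks the route structure, accumulating distance
# and travel time, and yields one RouteMetrics record per point.
def _example_total_route_length(route: RouteMixin) -> float:
    """Consume the metrics generator and return the total route length in km."""
    metrics = list(route.get_route_metrics())
    return metrics[-1].total_length if metrics else 0.0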
class ARouteMixin(RouteMixin, ActionMixin):
"""Действия с маршрутом"""
def copy(self):
return self.__class__(
id=0,
custom_attributes=self.custom_attributes,
name=f'Копия {self.name}',
first_station=self.first_station,
structure=deepcopy(self.structure),
direction=deepcopy(self.direction) if self.direction else None,
comments=self.comments,
number=0,
literal='',
)
def create_journey_structure(self, departure_time: time) -> JourneyContainer:
return JourneyContainer(
id=0,
number=self.number,
name=self.name,
first_station=self.first_station.raw(),
structure=[item.raw() for item in self.structure],
journey_type=self.get_real_journey_type(),
departure_time=departure_time,
literal="",
is_chartered=False,
need_control=False,
season_begin=None,
season_end=None,
direction=[item.raw() for item in self.direction] if self.direction else None,
comments=f'Создан из маршрута {self.number} ({self.id})',
)
def get_route_info(self) -> dict:
"""Информация о маршруте"""
return {
'attributes': self.as_dict, # type: ignore
'real_journey_type': self.get_real_journey_type(),
'route_structure': [item.as_dict for item in self.structure],
'metrics': self.get_route_metrics(),
}
class AJourneyMixin(RouteMixin, ActionMixin):
"""Действия с рейсами"""
custom_attributes: dict
journey_type: JourneyType
departure_time: time
bunch: object
is_chartered: bool
need_control: bool
season_begin: Optional[date]
season_end: Optional[date]
def copy(self):
return self.__class__(
id=0,
custom_attributes=self.custom_attributes,
number=0,
name=f"Копия {self.name}",
first_station=self.first_station,
structure=self.structure,
journey_type=self.journey_type,
departure_time=self.departure_time,
literal="",
is_chartered=self.is_chartered,
need_control=self.need_control,
season_begin=self.season_begin,
season_end=self.season_end,
direction=self.direction,
comments=f'Создан из рейса {self.number} ({self.id})',
)
def get_base_depart_date_time(self) -> datetime:
today = date.today()
return datetime(today.year, today.month, today.day, self.departure_time.hour, self.departure_time.minute)
def get_journey_info(self) -> dict:
"""Информация о рейсе"""
return {
'attributes': self.as_dict, # type: ignore
'real_journey_type': self.get_real_journey_type(),
'route_structure': [item.as_dict for item in self.structure],
'metrics': self.get_route_metrics(),
}
def create_route_structure(self) -> RouteContainer:
return RouteContainer(
id=0,
name=self.name,
first_station=self.first_station.raw(),
structure=[item.raw() for item in self.structure],
direction=[item.raw() for item in self.direction] if self.direction else None,
comments=f'Создан из рейса {self.number} ({self.id})',
number=self.number,
literal='',
)
class AJourneyBunchMixin(ActionMixin):
"""Действия со связкой рейсов"""
controller: object
permission_tag: str
journeys: List[JourneyBunchItemModel]
def add_journey_bunch_item(self, bunch_item: JourneyBunchItemModel) -> None:
if bunch_item in self.journeys:
raise WrongValueError('Item already in bunch')
if bunch_item.journey in [item.journey for item in self.journeys]:
raise WrongValueError('Journey already in bunch')
self.journeys.append(bunch_item)
def add_journey_into_bunch(self, journey: JourneyContainer, stop_interval: int = 0) -> None:
new_bunch_item = self.controller.create_journey_bunch_item( # type: ignore
self.permission_tag,
JourneyBunchItemContainer(
journey=journey,
stop_interval=stop_interval,
),
)
self.add_journey_bunch_item(new_bunch_item)
def remove_journey_bunch_item(self, bunch_item_id: int) -> None:
if self.journeys:
for idx, bunch_item in enumerate(self.journeys):
if bunch_item.id == bunch_item_id:
self.journeys.remove(self.journeys[idx])
break
else:
raise WrongValueError(
                    f'Journey bunch item {bunch_item_id} is not in bunch',
)
else:
raise WrongValueError("Journey bunch is empty")
def get_finish_date_time(self, date_from: datetime) -> datetime:
for item in self.journeys:
journey = self.controller.create_journey(self.permission_tag, item.journey.raw()) # type: ignore
route_metrics = list(journey.get_journey_info()["metrics"])
journey_spent_time = route_metrics[-1].spent_time
date_from += timedelta(minutes=journey_spent_time)
if item.stop_interval:
date_from += timedelta(hours=item.stop_interval)
return date_from
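# --- Illustrative sketch (not part of the original module) ---
# Note the mixed units in get_finish_date_time(): each journey contributes its spent_time
# in minutes, while a bunch item's stop_interval is added in hours.  The helper mirrors
# the accumulation performed for a single bunch item.
def _example_single_item_finish(depart_at: datetime, spent_minutes: int, stop_hours: int) -> datetime:
    """Advance the departure time by one journey's duration plus its stop interval."""
    return depart_at + timedelta(minutes=spent_minutes) + timedelta(hours=stop_hours)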
|
/core/actions/traffic_management.py
| 0.742141 | 0.164148 |
traffic_management.py
|
pypi
|
from abc import ABC, abstractmethod
from datetime import date, datetime, tzinfo
from typing import Dict, Optional, List, Union
from uuid import uuid4
from sarmat.core.constants import JourneyClass, JourneyState
from sarmat.core.context.models import (
PermitModel,
RouteItemModel,
StationModel,
JourneyProgressModel,
JourneyScheduleModel,
JourneyModel,
)
from sarmat.core.exceptions.sarmat_exceptions import ImpossibleOperationError
from sarmat.core.behavior.traffic_management import BhJourney
class Journey(BhJourney):
"""Поведение объекта Рейс"""
def activate(
self,
start_date: date,
permit: PermitModel,
) -> Dict[str, Union[JourneyProgressModel, List[JourneyScheduleModel]]]:
"""
Активизация рейса (выписка рейсовой ведомости, создание расписания по рейсу)
Args:
start_date: дата начала выполнения рейса (отправление из начальной точки)
permit: путевой лист
Returns:
рейсовая ведомость
список активных пунктов маршрута
"""
# атрибуты текущего рейса и список активных станций, через которые проходит рейс
this_journey = JourneyModel.from_container(self.raw()) # type: ignore
journey_stations = [item for item in self.get_journey_schedule(start_date) if item.station is not None]
first_station, *middleware_stations, last_station = journey_stations
        journey_progress = JourneyProgressModel(  # journey progress sheet attributes
            uid=str(uuid4()),  # sheet identifier
            depart_date=start_date,  # departure date of the journey
            journey=this_journey,  # journey
            permit=permit,  # waybill
        )
        journey_schedule = [
            JourneyScheduleModel(
                uid=str(uuid4()),  # row identifier
                journey_progress=journey_progress,  # journey progress sheet
                journey_class=JourneyClass.BASE,  # journey class (originating)
                station=first_station.station,  # station
                point=first_station.point,  # route point
                state=JourneyState.READY,  # journey state
                plan_depart=first_station.depart,  # planned departure time
                platform='',  # platform
                comment='',  # comment for the current point
            )
        ]
        journey_schedule += [
            JourneyScheduleModel(
                uid=str(uuid4()),  # row identifier
                journey_progress=journey_progress,  # journey progress sheet
                journey_class=JourneyClass.TRANSIT,  # journey class (transit)
                station=item.station,  # station
                point=item.point,  # route point
                state=JourneyState.READY,  # journey state
                plan_arrive=item.arrive,  # planned arrival time
                plan_depart=item.depart,  # planned departure time
                platform='',  # platform
                comment='',  # comment for the current point
            )
            for item in middleware_stations
        ]
        journey_schedule.append(
            JourneyScheduleModel(
                uid=str(uuid4()),  # row identifier
                journey_progress=journey_progress,  # journey progress sheet
                journey_class=JourneyClass.ARRIVING,  # journey class (arriving)
                station=last_station.station,  # station
                point=last_station.point,  # route point
                state=JourneyState.READY,  # journey state
                plan_arrive=last_station.arrive,  # planned arrival time
                platform='',  # platform
                comment='',  # comment for the current point
            )
        )
for idx, item in enumerate(journey_schedule):
journey_schedule[idx].last_items = journey_schedule[idx+1:]
return {
'JourneyProgress': journey_progress,
'JourneyScheduleStructure': journey_schedule,
}
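# --- Illustrative sketch (not part of the original module) ---
# activate() returns the progress sheet plus one schedule row per station; each row's
# last_items holds the rows still ahead of it, so the final row ends up with an empty list.
#
#   result = journey.activate(start_date=date.today(), permit=permit_model)  # assumed instances
#   schedule = result['JourneyScheduleStructure']
#   assert schedule[-1].last_items == []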
class JourneySchedule:
"""Поведение объекта Рейсовая ведомость в активном расписании"""
journey_class: JourneyClass
state: JourneyState
fact_arrive: datetime
fact_depart: datetime
permit: Optional[PermitModel]
platform: str
comment: str
last_items: List['JourneySchedule']
current_item: RouteItemModel
def make_departure(self, time_zone: Optional[tzinfo] = None) -> None:
"""Операция отправления на текущем пункте"""
if self.journey_class == JourneyClass.ARRIVING:
raise ImpossibleOperationError("На последнем пункте маршрута операция отправления не выполняется")
self.state = JourneyState.DEPARTED
self.fact_depart = datetime.now(time_zone)
def make_registration(self,
permit: Optional[PermitModel] = None,
platform: Optional[str] = None,
comments: Optional[str] = None) -> None:
"""
Операция по регистрации рейса. Заполнение реквизитов
Args:
permit: путевой лист
platform: номер платформы
comments: комментарии диспетчера
"""
if permit and (permit != self.permit):
self.permit = permit
if platform and platform != self.platform:
self.platform = platform
        # NOTE: the comment is allowed to be overwritten
if comments != self.comment:
self.comment = comments or ''
def make_arrival(self, time_zone: Optional[tzinfo] = None) -> None:
"""Операция прибытия на текущем пункте"""
is_last = self.journey_class == JourneyClass.ARRIVING
operational_time = datetime.now(time_zone)
new_state = JourneyState.ARRIVED if is_last else JourneyState.ON_REGISTRATION
self.state = new_state
self.fact_arrive = operational_time
def cancel_journey(self) -> None:
"""Отмена рейса"""
self._set_state_for_items_chain(JourneyState.CANCELLED)
def close_journey(self) -> None:
"""Закрытие рейса"""
self._set_state_for_items_chain(JourneyState.CLOSED)
def break_journey(self) -> None:
"""Отметка о срыве"""
self._set_state_for_items_chain(JourneyState.DISRUPTED)
@property
def current_station(self) -> Optional[StationModel]:
"""Текущий пункт прохождения рейса"""
return self.current_item.station if self.current_item else None
def _set_state_for_items_chain(self, state: JourneyState) -> None:
"""Назначение состояния текущему элементу и всем последующим"""
self.state = state
for item in self.last_items:
item.state = state
class JourneyHook(ABC):
"""Поведение объекта Рейсовая Ведомость"""
@abstractmethod
def lock_register(self):
"""Блокировка ведомости"""
@abstractmethod
def unlock_register(self):
"""Снятие блокировки с ведомости"""
@abstractmethod
def register_place(self):
"""Регистрация места на ведомости"""
@abstractmethod
def unregister_place(self):
"""Отмена регистрации места на ведомости"""
|
/core/actions/dispatcher.py
| 0.624523 | 0.285086 |
dispatcher.py
|
pypi
|
from enum import Enum
from typing import Any, Dict
class SarmatAttribute(Enum):
"""Встроенные значения атрибутов в Sarmat."""
__description__: Dict[Any, str] = {} # описание
__cypher__: Dict[Any, str] = {} # текстовое представление значения (строковая константа)
@classmethod
def as_text(cls, value: Any) -> str:
"""Описание значения."""
if isinstance(value, cls):
return cls.__description__.get(value.value) or ''
return ''
@classmethod
def as_cypher(cls, value: Any) -> str:
"""Получение строковой константы."""
if isinstance(value, cls):
return cls.__cypher__.get(value.value) or ''
return ''
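# --- Illustrative sketch (not part of the original module) ---
# Subclasses keep human-readable text in __description__ and a short code in __cypher__;
# both helpers accept an enum member and return '' for anything else.
#
#   StationType.as_text(StationType.STATION)    # -> 'Автовокзал'
#   StationType.as_cypher(StationType.STATION)  # -> 'АВ'
#   StationType.as_text('not-a-member')         # -> ''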
class RoadType(SarmatAttribute):
"""Тип дорожного покрытия"""
PAVED = 1
DIRT = 2
HIGHWAY = 3
__description__ = {
PAVED: 'Дорога с твердым покрытием',
DIRT: 'Грунтовая дорога',
HIGHWAY: 'Магистраль',
}
class PeriodType(SarmatAttribute):
"""Тип периода"""
MINUTE = 1
HOUR = 2
DAY = 3
WEEK = 4
MONTH = 5
YEAR = 6
EVEN = 7
ODD = 8
DAYS = 9
DAYS_OF_WEEK = 10
__description__ = {
MINUTE: 'По минутам',
HOUR: 'По часам',
DAY: 'По дням',
WEEK: 'По неделям',
MONTH: 'По месяцам',
YEAR: 'По годам',
EVEN: 'По четным дням месяца',
ODD: 'По нечетным дням месяца',
DAYS: 'По числам месяца',
DAYS_OF_WEEK: 'По дням недели',
}
__cypher__ = {
MINUTE: 'minute',
HOUR: 'hour',
DAY: 'day',
WEEK: 'week',
MONTH: 'month',
YEAR: 'year',
EVEN: 'even',
ODD: 'odd',
DAYS: 'days',
DAYS_OF_WEEK: 'dow',
}
class LocationType(SarmatAttribute):
"""Тип территориального образования"""
COUNTRY = 1
DISTRICT = 2
REGION = 3
PROVINCE = 4
AREA = 5
__description__ = {
COUNTRY: 'Страна',
DISTRICT: 'Республика',
REGION: 'Край',
PROVINCE: 'Область',
AREA: 'Район',
}
__cypher__ = {
COUNTRY: '',
DISTRICT: 'респ.',
REGION: 'кр.',
PROVINCE: 'обл.',
AREA: 'р-н',
}
class SettlementType(SarmatAttribute):
"""Тип населенного пункта"""
CITY = 1
SETTLEMENT = 2
TOWNSHIP = 3
HAMLET = 4
STANITSA = 5
FARM = 6
VILLAGE = 7
TURN = 8
POINT = 9
__description__ = {
CITY: 'Город',
SETTLEMENT: 'Поселок',
TOWNSHIP: 'Село',
HAMLET: 'Деревня',
STANITSA: 'Станица',
FARM: 'Хутор',
VILLAGE: 'Слобода',
TURN: 'Поворот',
POINT: 'Место',
}
__cypher__ = {
CITY: 'г.',
SETTLEMENT: 'пос.',
TOWNSHIP: 'с.',
HAMLET: 'дер.',
STANITSA: 'ст.',
FARM: 'х.',
VILLAGE: 'сл.',
TURN: 'пов.',
POINT: 'м.',
}
class StationType(SarmatAttribute):
"""Типы станций"""
STATION = 1
TERMINAL = 2
TICKET_OFFICE = 3
PLATFORM = 4
__description__ = {
STATION: 'Автовокзал',
TERMINAL: 'Автостанция',
TICKET_OFFICE: 'Автокасса',
PLATFORM: 'Остановочная платформа',
}
__cypher__ = {
STATION: 'АВ',
TERMINAL: 'АС',
TICKET_OFFICE: 'АК',
PLATFORM: 'ОП',
}
class RouteType(SarmatAttribute):
"""Типы маршрутов"""
TURNOVER = 1
CIRCLE = 2
__description__ = {
TURNOVER: 'Оборотный',
CIRCLE: 'Кольцевой',
}
__cypher__ = {
TURNOVER: 'turn',
CIRCLE: 'circle',
}
class JourneyType(SarmatAttribute):
"""Типы рейсов"""
SUBURBAN = 1
LONG_DISTANCE = 2
INTER_REGIONAL = 3
INTERNATIONAL = 4
__description__ = {
SUBURBAN: 'Пригородный',
LONG_DISTANCE: 'Междугородный',
INTER_REGIONAL: 'Межрегиональный',
INTERNATIONAL: 'Международный',
}
class JourneyClass(SarmatAttribute):
"""Классификация рейсов"""
BASE = 1
TRANSIT = 2
ARRIVING = 3
__description__ = {
BASE: 'Формирующийся',
TRANSIT: 'Транзитный',
ARRIVING: 'Прибывающий',
}
class JourneyState(SarmatAttribute):
"""Состояния рейсов"""
READY = 0 # рейс активен
ARRIVED = 1 # прибыл
ON_REGISTRATION = 2 # на регистрации
DEPARTED = 3 # отправлен
CANCELLED = 4 # отменен (разовая операция)
CLOSED = 5 # закрыт (массовая отмена на продолжительное время)
DISRUPTED = 6 # сорван (по тех. неисправности и т.д.)
__description__ = {
READY: 'Активен',
ARRIVED: 'Прибыл',
ON_REGISTRATION: 'На регистрации',
DEPARTED: 'Отправлен',
CANCELLED: 'Отменен',
CLOSED: 'Закрыт',
DISRUPTED: 'Сорван',
}
class VehicleType(SarmatAttribute):
"""Тип транспортного средства"""
BUS = 1
SMALL_BUS = 2
CAR = 3
TRUCK = 4
TRAILER = 5
SPECIAL = 6
__description__ = {
BUS: 'Автобус',
SMALL_BUS: 'Автобус малой вместимости',
CAR: 'Легковой автомобиль',
TRUCK: 'Грузовой автомобиль',
TRAILER: 'Прицеп',
SPECIAL: 'Спецтехника',
}
class CrewType(SarmatAttribute):
"""Тип участника экипажа"""
DRIVER = 1
TRAINEE = 2
__description__ = {
DRIVER: 'Водитель',
TRAINEE: 'Стажер',
}
class PermitType(SarmatAttribute):
"""Тип путевого листа"""
BUS_PERMIT = 1
CAR_PERMIT = 2
TRUCK_PERMIT = 3
CUSTOM_PERMIT = 4
__description__ = {
BUS_PERMIT: 'Путевой лист автобуса',
CAR_PERMIT: 'Путевой лист легкового автомобиля',
TRUCK_PERMIT: 'Путевой лист грузового автомобиля',
CUSTOM_PERMIT: 'Заказной путевой лист',
}
class PlaceKind(SarmatAttribute):
"""Тип места"""
PASSANGERS_SEAT = 1
BAGGAGE = 2
__description__ = {
PASSANGERS_SEAT: 'Пассажирское место',
BAGGAGE: 'Багажное место',
}
__cypher__ = {
PASSANGERS_SEAT: 'P',
BAGGAGE: 'B',
}
class PlaceType(SarmatAttribute):
"""Вид пассажирского места"""
STANDING = 1
SITTING = 2
__description__ = {
STANDING: 'Место для стоящих пассажиров',
SITTING: 'Место для сидящих пассажиров',
}
class PlaceState(SarmatAttribute):
"""Состояние места"""
FREE = 1
BOOKED = 2
CLOSED = 3
SOLD = 4
LOCKED = 5
TRANSFERRED = 6
__description__ = {
FREE: 'Свободно',
BOOKED: 'Забронировано',
CLOSED: 'Закрыто',
SOLD: 'Продано',
LOCKED: 'Заблокировано',
TRANSFERRED: 'Произведена пересадка',
}
MAX_SUBURBAN_ROUTE_LENGTH = 50  # Maximum length of a suburban route (in kilometers)
|
/core/constants/sarmat_constants.py
| 0.677581 | 0.369941 |
sarmat_constants.py
|
pypi
|
from sarmat.core.actions import (
AJourneyBunchMixin,
AJourneyMixin,
ARouteMixin,
AStationMixin,
)
from sarmat.core.behavior import (
BhJourney,
BhJourneyBunch,
BhJourneyBunchItem,
BhRoad,
BhRoute,
BhRouteItem,
BhStation,
)
from sarmat.core.context.containers import (
JourneyBunchContainer,
JourneyBunchItemContainer,
JourneyContainer,
RoadContainer,
RouteItemContainer,
RouteContainer,
StationContainer,
)
from sarmat.core.context.models import (
JourneyBunchModel,
JourneyBunchItemModel,
JourneyModel,
RoadModel,
RouteItemModel,
RouteModel,
StationModel,
)
from .base_creator import (
DestinationPointCreatorMixin,
DirectionCreatorMixin,
RoadNameCreatorMixin,
SarmatCreator,
)
class SarmatTrafficManagementCreator(
DestinationPointCreatorMixin,
DirectionCreatorMixin,
RoadNameCreatorMixin,
SarmatCreator,
):
"""Фабрика для создания объектов маршрутной сети."""
def create_route_item(self, tag: str, container: RouteItemContainer) -> RouteItemModel:
"""Создание объекта 'Пункт маршрута'"""
route_item_model = RouteItemModel.from_container(container)
return self.make_route_item_from_model(tag, route_item_model)
def make_route_item_from_model(self, tag: str, model: RouteItemModel) -> RouteItemModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhRouteItem, *classes, RouteItemModel])
RouteItem = type('RouteItem', parents, {"permission_tag": tag, "controller": self}) # type: ignore
road, station, point = None, None, None
if model.road:
road = self.make_road_from_model(tag, model.road)
if model.station:
station = self.make_station_from_model(tag, model.station)
if model.point:
point = self.make_destination_point_from_model(tag, model.point)
return RouteItem(
**{
**model.as_dict,
'road': road,
'station': station,
'point': point,
},
)
def create_route(self, tag: str, container: RouteContainer) -> RouteModel:
"""Создание объекта 'Маршрут'"""
route_model = RouteModel.from_container(container)
return self.make_route_from_model(tag, route_model)
def make_route_from_model(self, tag: str, model: RouteModel) -> RouteModel:
classes = self._get_behavior_classes(tag)
parents = tuple([ARouteMixin, BhRoute, *classes, RouteModel])
Route = type('Route', parents, {"permission_tag": tag, "controller": self}) # type: ignore
directions = []
if model.direction:
directions = [self.make_direction_from_model(tag, item) for item in model.direction]
return Route(
**{
**model.as_dict,
'first_station': self.make_station_from_model(tag, model.first_station),
'structure': [self.make_route_item_from_model(tag, item) for item in model.structure],
'direction': directions,
},
)
def create_station(self, tag: str, container: StationContainer) -> StationModel:
"""Создание объекта 'Станция'"""
station_model = StationModel.from_container(container)
return self.make_station_from_model(tag, station_model)
def make_station_from_model(self, tag: str, model: StationModel) -> StationModel:
classes = self._get_behavior_classes(tag)
parents = tuple([AStationMixin, BhStation, *classes, StationModel])
Station = type('Station', parents, {"permission_tag": tag, "controller": self}) # type: ignore
return Station(
**{
**model.as_dict,
'point': self.make_destination_point_from_model(tag, model.point),
},
)
def create_journey(self, tag: str, container: JourneyContainer) -> JourneyModel:
"""Создание объекта 'Рейс'"""
journey_model = JourneyModel.from_container(container)
return self.make_journey_from_model(tag, journey_model)
def make_journey_from_model(self, tag: str, model: JourneyModel) -> JourneyModel:
classes = self._get_behavior_classes(tag)
parents = tuple([AJourneyMixin, BhJourney, *classes, JourneyModel])
Journey = type('Journey', parents, {"permission_tag": tag, "controller": self}) # type: ignore
directions = []
if model.direction:
directions = [self.make_direction_from_model(tag, item) for item in model.direction]
return Journey(
**{
**model.as_dict,
'first_station': self.make_station_from_model(tag, model.first_station),
'structure': [self.make_route_item_from_model(tag, item) for item in model.structure],
'direction': directions,
},
)
def create_road(self, tag: str, container: RoadContainer) -> RoadModel:
"""Создание объекта 'Дорога'"""
road_model = RoadModel.from_container(container)
return self.make_road_from_model(tag, road_model)
def make_road_from_model(self, tag: str, model: RoadModel) -> RoadModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhRoad, *classes, RoadModel])
Road = type('Road', parents, {"permission_tag": tag, "controller": self}) # type: ignore
road_name = None
if model.road_name:
road_name = self.make_road_name_from_model(tag, model.road_name)
return Road(
**{
**model.as_dict,
'start_point': self.make_destination_point_from_model(tag, model.start_point),
'end_point': self.make_destination_point_from_model(tag, model.end_point),
'road_name': road_name,
},
)
def create_journey_bunch_item(self, tag: str, container: JourneyBunchItemContainer) -> JourneyBunchItemModel:
"""Создание объекта 'Элемент связки'"""
bunch_item_model = JourneyBunchItemModel.from_container(container)
return self.make_journey_bunch_item_from_model(tag, bunch_item_model)
def make_journey_bunch_item_from_model(self, tag: str, model: JourneyBunchItemModel) -> JourneyBunchItemModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhJourneyBunchItem, *classes, JourneyBunchItemModel])
JourneyBunchItem = type(
'JourneyBunchItem', parents, {"permission_tag": tag, "controller": self}, # type: ignore
)
return JourneyBunchItem(
**{
**model.as_dict,
'journey': self.make_journey_from_model(tag, model.journey),
},
)
def create_journey_bunch(self, tag: str, container: JourneyBunchContainer) -> JourneyBunchModel:
"""Создание объекта 'Связка рейсов'"""
bunch_model = JourneyBunchModel.from_container(container)
return self.make_journey_bunch_from_model(tag, bunch_model)
def make_journey_bunch_from_model(self, tag: str, model: JourneyBunchModel) -> JourneyBunchModel:
classes = self._get_behavior_classes(tag)
parents = tuple([AJourneyBunchMixin, BhJourneyBunch, *classes, JourneyBunchModel])
JourneyBunch = type('JourneyBunch', parents, {"permission_tag": tag, "controller": self}) # type: ignore
return JourneyBunch(
**{
**model.as_dict,
'journeys': [self.make_journey_bunch_item_from_model(tag, item) for item in model.journeys],
},
)
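# --- Illustrative sketch (not part of the original module) ---
# Every make_*_from_model() composes a throw-away class with type(), placing the action
# and behavior mixins selected by the permission tag in front of the plain model class,
# then instantiates it from the model's dict.  Roughly:
#
#   parents = tuple([AJourneyMixin, BhJourney, *creator._get_behavior_classes(tag), JourneyModel])
#   Journey = type('Journey', parents, {"permission_tag": tag, "controller": creator})
#   journey = Journey(**model.as_dict)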
|
/core/factory/traffic_manager_creator.py
| 0.51879 | 0.261449 |
traffic_manager_creator.py
|
pypi
|
from sarmat.core.behavior import (
BhCrew,
BhPermit,
BhVehicle,
)
from sarmat.core.context.containers import (
CrewContainer,
PermitContainer,
VehicleContainer,
)
from sarmat.core.context.models import (
CrewModel,
PermitModel,
VehicleModel,
)
from .base_creator import SarmatCreator
class SarmatVehicleCreator(SarmatCreator):
"""Класс-фабрика для создания объектов путевой документации."""
def create_crew(self, tag: str, container: CrewContainer) -> CrewModel:
crew_model = CrewModel.from_container(container)
return self.make_crew_from_model(tag, crew_model)
def make_crew_from_model(self, tag: str, model: CrewModel) -> CrewModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhCrew, *classes, CrewModel])
Crew = type('Crew', parents, {"permission_tag": tag, "controller": self}) # type: ignore
return Crew(**model.as_dict)
def create_permit(self, tag: str, container: PermitContainer) -> PermitModel:
"""Создание объекта 'Путевой лист'"""
permit_model = PermitModel.from_container(container)
return self.make_permit_from_model(tag, permit_model)
def make_permit_from_model(self, tag: str, model: PermitModel) -> PermitModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhPermit, *classes, PermitModel])
Permit = type('Permit', parents, {"permission_tag": tag, "controller": self}) # type: ignore
crew = []
if model.crew:
crew = [self.make_crew_from_model(tag, item) for item in model.crew]
vehicle = []
if model.vehicle:
vehicle = [self.make_vehicle_from_model(tag, item) for item in model.vehicle]
return Permit(
**{
**model.as_dict,
'crew': crew,
'vehicle': vehicle,
},
)
def create_vehicle(self, tag: str, container: VehicleContainer) -> VehicleModel:
"""Создание объекта 'Транспортное средство'"""
vehicle_model = VehicleModel.from_container(container)
return self.make_vehicle_from_model(tag, vehicle_model)
def make_vehicle_from_model(self, tag: str, model: VehicleModel) -> VehicleModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhVehicle, *classes, VehicleModel])
Vehicle = type('Vehicle', parents, {"permission_tag": tag, "controller": self}) # type: ignore
return Vehicle(**model.as_dict)
|
/core/factory/vehicle_creator.py
| 0.549882 | 0.239216 |
vehicle_creator.py
|
pypi
|
from collections import defaultdict
from typing import Any, Dict, List
from sarmat.core.actions import (
ADestinationPointMixin,
AGeoLocationMixin,
)
from sarmat.core.behavior import (
BhDestinationPoint,
BhDirection,
BhGeo,
BhNoAction,
BhPeriod,
BhPeriodItem,
BhRoadName,
)
from sarmat.core.context.containers import (
PeriodItemContainer,
PeriodContainer,
)
from sarmat.core.context.models import (
DestinationPointModel,
DirectionModel,
GeoModel,
PeriodItemModel,
PeriodModel,
RoadNameModel,
)
class SarmatCreator:
"""Базовый класс-фабрика."""
# хранилище тегов
role_tags: Dict[str, List[BhNoAction]] = defaultdict(list)
@classmethod
def register_class(cls, tag: str, cls_behavior: Any) -> None:
"""
Регистрация поведенческого класса
Args:
tag: тэг
cls_behavior: поведенческий класс
"""
sub_tags = tag.split('.')
for sub_tag in sub_tags:
classes = cls.role_tags[sub_tag]
if classes and cls_behavior in classes:
idx = classes.index(cls_behavior)
cls.role_tags[sub_tag][idx] = cls_behavior
else:
cls.role_tags[sub_tag].append(cls_behavior)
def _get_behavior_classes(self, tag: str) -> List[BhNoAction]:
"""Получение списка поведенческих классов по тегу"""
sub_tags = tag.split('.')
roles: list = []
for item in sub_tags:
role = self.role_tags.get(item)
if role:
roles.extend(role)
return roles or [BhNoAction]
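# --- Illustrative sketch (not part of the original module) ---
# Permission tags are dot-separated; each sub-tag maps to a list of registered behavior
# classes, and lookups fall back to [BhNoAction] when nothing matches.
#
#   SarmatCreator.register_class('dispatcher', SomeBehavior)     # hypothetical behavior class
#   SarmatCreator()._get_behavior_classes('dispatcher.unknown')  # -> [SomeBehavior]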
class SarmatBaseCreator(SarmatCreator):
"""Класс-фабрика для базовых объектов"""
def create_period_item(self, tag: str, container: PeriodItemContainer) -> PeriodItemModel:
"""Создание объекта 'Период'"""
period_item_model = PeriodItemModel.from_container(container)
return self.make_period_item_from_model(tag, period_item_model)
def make_period_item_from_model(self, tag: str, model: PeriodItemModel) -> PeriodItemModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhPeriodItem, *classes, PeriodItemModel])
PeriodItem = type('PeriodItem', parents, {"permission_tag": tag, "controller": self}) # type: ignore
return PeriodItem(**model.as_dict)
def create_period(self, tag: str, container: PeriodContainer) -> PeriodModel:
period_model = PeriodModel.from_container(container)
return self.make_period_from_model(tag, period_model)
def make_period_from_model(self, tag: str, model: PeriodModel) -> PeriodModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhPeriod, *classes, PeriodModel])
Period = type('Period', parents, {"permission_tag": tag, "controller": self}) # type: ignore
period = None
if model.period:
period = self.make_period_item_from_model(tag, model.period)
periods = []
if model.periods:
periods = [self.make_period_item_from_model(tag, item) for item in model.periods]
return Period(
**{
**model.as_dict,
'period': period,
'periods': periods,
},
)
class RoadNameCreatorMixin(SarmatCreator):
def make_road_name_from_model(self, tag: str, model: RoadNameModel) -> RoadNameModel:
classes = self._get_behavior_classes(tag)
parents = tuple([BhRoadName, *classes, RoadNameModel])
RoadName = type('RoadName', parents, {"permission_tag": tag, "controller": self}) # type: ignore
return RoadName(**model.as_dict)
class DirectionCreatorMixin(SarmatCreator):
def make_direction_from_model(self, tag: str, model: DirectionModel) -> DirectionModel:
classes: List[BhNoAction] = self._get_behavior_classes(tag)
parents = tuple([BhDirection, *classes, DirectionModel])
Direction = type('Direction', parents, {"permission_tag": tag, "controller": self}) # type: ignore
return Direction(**model.as_dict)
class GeoObjectCreatorMixin(SarmatCreator):
def make_geo_object_from_model(self, tag: str, model: GeoModel) -> GeoModel:
classes = self._get_behavior_classes(tag)
parents = tuple([AGeoLocationMixin, BhGeo, *classes, GeoModel])
GeoObject = type('GeoObject', tuple(parents), {"permission_tag": tag, "controller": self}) # type: ignore
parent = None
if model.parent:
parent = self.make_geo_object_from_model(tag, model.parent)
return GeoObject(
**{
**model.as_dict,
'parent': parent,
},
)
class DestinationPointCreatorMixin(GeoObjectCreatorMixin):
def make_destination_point_from_model(self, tag: str, model: DestinationPointModel) -> DestinationPointModel:
classes: List[BhNoAction] = self._get_behavior_classes(tag)
parents = tuple([ADestinationPointMixin, BhDestinationPoint, *classes, DestinationPointModel])
DestinationPoint = type(
'DestinationPoint',
parents, # type: ignore
{"permission_tag": tag, "controller": self},
)
state = None
if model.state:
state = self.make_geo_object_from_model(tag, model.state)
return DestinationPoint(
**{
**model.as_dict,
'state': state,
},
)
|
/core/factory/base_creator.py
| 0.852629 | 0.30243 |
base_creator.py
|
pypi
|
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import os
from typing import Union
import numpy
import tkinter
from tkinter import ttk
from tkinter.filedialog import askopenfilenames, askdirectory
from tkinter.messagebox import showinfo
from tk_builder.base_elements import TypedDescriptor, IntegerDescriptor, StringDescriptor
from tk_builder.image_reader import NumpyCanvasImageReader
from tk_builder.panels.image_panel import ImagePanel
from sarpy_apps.supporting_classes.file_filters import common_use_collection
from sarpy_apps.supporting_classes.image_reader import SICDTypeCanvasImageReader
from sarpy_apps.supporting_classes.widget_with_metadata import WidgetWithMetadata
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.utils import get_physical_coordinates
from sarpy.visualization.remap import NRL
from sarpy.processing.sicd.fft_base import fft2_sicd, fftshift
class AppVariables(object):
browse_directory = StringDescriptor(
'browse_directory', default_value=os.path.expanduser('~'),
docstring='The directory for browsing for file selection.') # type: str
remap_type = StringDescriptor(
'remap_type', default_value='', docstring='') # type: str
image_reader = TypedDescriptor(
'image_reader', SICDTypeCanvasImageReader, docstring='') # type: SICDTypeCanvasImageReader
row_line_low = IntegerDescriptor(
'row_line_low',
docstring='The id of the frequency_panel of the lower row bandwidth line.') # type: Union[None, int]
row_line_high = IntegerDescriptor(
'row_line_high',
docstring='The id of the frequency_panel of the upper row bandwidth line.') # type: Union[None, int]
col_line_low = IntegerDescriptor(
'col_line_low',
docstring='The id of the frequency_panel of the lower column bandwidth line.') # type: Union[None, int]
col_line_high = IntegerDescriptor(
'col_line_high',
docstring='The id of the frequency_panel of the upper column bandwidth line.') # type: Union[None, int]
row_deltak1 = IntegerDescriptor(
'row_deltak1',
docstring='The id of the frequency_panel of the row deltak1 line.') # type: Union[None, int]
row_deltak2 = IntegerDescriptor(
'row_deltak2',
docstring='The id of the frequency_panel of the row deltak2 line.') # type: Union[None, int]
col_deltak1 = IntegerDescriptor(
'col_deltak1',
docstring='The id of the frequency_panel of the column deltak1.') # type: Union[None, int]
col_deltak2 = IntegerDescriptor(
'col_deltak2',
docstring='The id of the frequency_panel of the column deltak2.') # type: Union[None, int]
class LocalFrequencySupportTool(tkinter.PanedWindow, WidgetWithMetadata):
def __init__(self, primary, reader=None, **kwargs):
"""
Parameters
----------
primary : tkinter.Tk|tkinter.Toplevel
reader : None|str|SICDTypeReader|SICDTypeImageCanvasReader
kwargs
"""
self.root = primary
self.variables = AppVariables()
self.phase_remap = NRL()
if 'sashrelief' not in kwargs:
kwargs['sashrelief'] = tkinter.RIDGE
if 'orient' not in kwargs:
kwargs['orient'] = tkinter.HORIZONTAL
tkinter.PanedWindow.__init__(self, primary, **kwargs)
self.image_panel = ImagePanel(self, borderwidth=0) # type: ImagePanel
self.add(
self.image_panel, width=400, height=700, padx=5, pady=5, sticky=tkinter.NSEW,
stretch=tkinter.FIRST)
WidgetWithMetadata.__init__(self, primary, self.image_panel)
self.frequency_panel = ImagePanel(self, borderwidth=0) # type: ImagePanel
self.add(
self.frequency_panel, width=400, height=700, padx=5, pady=5, sticky=tkinter.NSEW)
self.pack(fill=tkinter.BOTH, expand=tkinter.YES)
self.set_title()
# define menus
self.menu_bar = tkinter.Menu()
# file menu
self.file_menu = tkinter.Menu(self.menu_bar, tearoff=0)
self.file_menu.add_command(label="Open Image", command=self.callback_select_files)
self.file_menu.add_command(label="Open Directory", command=self.callback_select_directory)
self.file_menu.add_separator()
self.file_menu.add_command(label="Exit", command=self.exit)
# menus for informational popups
self.metadata_menu = tkinter.Menu(self.menu_bar, tearoff=0)
self.metadata_menu.add_command(label="Metaicon", command=self.metaicon_popup)
self.metadata_menu.add_command(label="Metaviewer", command=self.metaviewer_popup)
self._valid_data_shown = tkinter.IntVar(self, value=0)
self.metadata_menu.add_checkbutton(
label='ValidData', variable=self._valid_data_shown, command=self.show_valid_data)
# ensure menus cascade
self.menu_bar.add_cascade(label="File", menu=self.file_menu)
self.menu_bar.add_cascade(label="Metadata", menu=self.metadata_menu)
# handle packing
self.root.config(menu=self.menu_bar)
# hide extraneous tool elements
self.image_panel.hide_tools('shape_drawing')
self.image_panel.hide_shapes()
self.frequency_panel.hide_tools(['shape_drawing', 'select'])
self.frequency_panel.hide_shapes()
self.frequency_panel.hide_select_index()
# bind canvas events for proper functionality
self.image_panel.canvas.bind('<<SelectionChanged>>', self.handle_selection_change)
self.image_panel.canvas.bind('<<SelectionFinalized>>', self.handle_selection_change)
self.image_panel.canvas.bind('<<ImageIndexChanged>>', self.handle_image_index_changed)
self.update_reader(reader)
def set_title(self):
"""
Sets the window title.
"""
file_name = None if self.variables.image_reader is None else self.variables.image_reader.file_name
if file_name is None:
the_title = "Frequency Support Tool"
elif isinstance(file_name, (list, tuple)):
the_title = "Frequency Support Tool, Multiple Files"
else:
the_title = "Frequency Support Tool for {}".format(os.path.split(file_name)[1])
self.winfo_toplevel().title(the_title)
def exit(self):
self.root.destroy()
def show_valid_data(self):
if self.variables.image_reader is None:
return
the_value = self._valid_data_shown.get()
if the_value == 1:
# we just checked on
sicd = self.variables.image_reader.get_sicd()
if sicd.ImageData.ValidData is not None:
self.image_panel.canvas.show_valid_data(sicd.ImageData.ValidData.get_array(dtype='float64'))
else:
# we checked it off
try:
valid_data_id = self.image_panel.canvas.variables.get_tool_shape_id_by_name('VALID_DATA')
self.image_panel.canvas.hide_shape(valid_data_id)
except KeyError:
pass
def set_default_selection(self):
"""
Sets the default selection on the currently selected image.
"""
if self.variables.image_reader is None:
return
# get full image size
full_rows = self.variables.image_reader.full_image_ny
full_cols = self.variables.image_reader.full_image_nx
default_size = 512
middle = (
max(0, int(0.5*(full_rows - default_size))),
max(0, int(0.5*(full_cols - default_size))),
min(full_rows, int(0.5*(full_rows + default_size))),
min(full_cols, int(0.5*(full_cols + default_size))))
self.image_panel.canvas.zoom_to_full_image_selection((0, 0, full_rows, full_cols))
# set selection rectangle
self.image_panel.canvas.current_tool = 'SELECT'
select = self.image_panel.canvas.variables.get_tool_shape_by_name('SELECT')
self.image_panel.canvas.modify_existing_shape_using_image_coords(select.uid, middle)
self.handle_selection_change(None)
# noinspection PyUnusedLocal
def handle_selection_change(self, event):
"""
Handle a change in the selection area.
Parameters
----------
event
"""
if self.variables.image_reader is None:
return
full_image_width = self.image_panel.canvas.variables.state.canvas_width
        full_image_height = self.image_panel.canvas.variables.state.canvas_height
        self.image_panel.canvas.zoom_to_canvas_selection((0, 0, full_image_width, full_image_height))
self.update_displayed_selection()
# noinspection PyUnusedLocal
def handle_image_index_changed(self, event):
"""
Handle that the image index has changed.
Parameters
----------
event
"""
self.populate_metaicon()
self.set_default_selection()
self.show_valid_data()
def update_reader(self, the_reader, update_browse=None):
"""
Update the reader.
Parameters
----------
the_reader : None|str|SICDTypeReader|SICDTypeCanvasImageReader
update_browse : None|str
"""
if the_reader is None:
return
if update_browse is not None:
self.variables.browse_directory = update_browse
elif isinstance(the_reader, str):
self.variables.browse_directory = os.path.split(the_reader)[0]
if isinstance(the_reader, str):
the_reader = SICDTypeCanvasImageReader(the_reader)
if isinstance(the_reader, SICDTypeReader):
the_reader = SICDTypeCanvasImageReader(the_reader)
if not isinstance(the_reader, SICDTypeCanvasImageReader):
raise TypeError('Got unexpected input for the reader')
# change the tool to view
self.image_panel.canvas.current_tool = 'VIEW'
self.image_panel.canvas.current_tool = 'VIEW'
# update the reader
self.variables.image_reader = the_reader
self.image_panel.set_image_reader(the_reader)
self.set_title()
# refresh appropriate GUI elements
self.set_default_selection()
self.populate_metaicon()
self.populate_metaviewer()
self.show_valid_data()
def callback_select_files(self):
fnames = askopenfilenames(initialdir=self.variables.browse_directory, filetypes=common_use_collection)
if fnames is None or fnames in ['', ()]:
return
if len(fnames) == 1:
the_reader = SICDTypeCanvasImageReader(fnames[0])
else:
the_reader = SICDTypeCanvasImageReader(fnames)
if the_reader is None:
showinfo('Opener not found',
message='File {} was not successfully opened as a SICD type '
'file.'.format(fnames))
return
self.update_reader(the_reader, update_browse=os.path.split(fnames[0])[0])
def callback_select_directory(self):
dirname = askdirectory(initialdir=self.variables.browse_directory, mustexist=True)
if dirname is None or dirname in [(), '']:
return
# update the default directory for browsing
self.variables.browse_directory = os.path.split(dirname)[0]
the_reader = SICDTypeCanvasImageReader(dirname)
self.update_reader(the_reader, update_browse=os.path.split(dirname)[0])
def _initialize_bandwidth_lines(self):
if self.variables.row_line_low is None or \
self.frequency_panel.canvas.get_vector_object(self.variables.row_line_low) is None:
self.variables.row_deltak1 = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='red')
self.variables.row_deltak2 = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='red')
self.variables.col_deltak1 = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='red')
self.variables.col_deltak2 = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='red')
self.variables.row_line_low = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='blue', regular_options={'dash': (3, )})
self.variables.row_line_high = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='blue', regular_options={'dash': (3, )})
self.variables.col_line_low = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='blue', regular_options={'dash': (3, )})
self.variables.col_line_high = self.frequency_panel.canvas.create_new_line(
(0, 0, 0, 0), make_current=False, increment_color=False, color='blue', regular_options={'dash': (3, )})
else:
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_deltak1, (0, 0, 0, 0))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_deltak2, (0, 0, 0, 0))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_deltak1, (0, 0, 0, 0))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_deltak2, (0, 0, 0, 0))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_line_low, (0, 0, 0, 0))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_line_high, (0, 0, 0, 0))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_line_low, (0, 0, 0, 0))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_line_high, (0, 0, 0, 0))
def update_displayed_selection(self):
def get_extent(coords):
return min(coords[0::2]), max(coords[0::2]), min(coords[1::2]), max(coords[1::2])
def draw_row_delta_lines():
deltak1 = (row_count - 1)*(0.5 + the_sicd.Grid.Row.SS*the_sicd.Grid.Row.DeltaK1) + 1
deltak2 = (row_count - 1)*(0.5 + the_sicd.Grid.Row.SS*the_sicd.Grid.Row.DeltaK2) + 1
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_deltak1, (deltak1, 0, deltak1, col_count))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_deltak2, (deltak2, 0, deltak2, col_count))
def draw_col_delta_lines():
deltak1 = (col_count - 1)*(0.5 + the_sicd.Grid.Col.SS*the_sicd.Grid.Col.DeltaK1) + 1
deltak2 = (col_count - 1)*(0.5 + the_sicd.Grid.Col.SS*the_sicd.Grid.Col.DeltaK2) + 1
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_deltak1, (0, deltak1, row_count, deltak1))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_deltak2, (0, deltak2, row_count, deltak2))
def draw_row_bandwidth_lines():
# noinspection PyBroadException
try:
delta_kcoa_center = the_sicd.Grid.Row.DeltaKCOAPoly(row_phys, col_phys)
except Exception:
delta_kcoa_center = 0.0
row_bw_low = (row_count - 1)*(
0.5 + the_sicd.Grid.Row.SS*(delta_kcoa_center - 0.5*the_sicd.Grid.Row.ImpRespBW)) + 1
row_bw_high = (row_count - 1)*(
0.5 + the_sicd.Grid.Row.SS*(delta_kcoa_center + 0.5*the_sicd.Grid.Row.ImpRespBW)) + 1
row_bw_low = (row_bw_low % row_count)
row_bw_high = (row_bw_high % row_count)
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_line_low, (row_bw_low, 0, row_bw_low, col_count))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.row_line_high, (row_bw_high, 0, row_bw_high, col_count))
def draw_col_bandwidth_lines():
# noinspection PyBroadException
try:
delta_kcoa_center = the_sicd.Grid.Col.DeltaKCOAPoly(row_phys, col_phys)
except Exception:
delta_kcoa_center = 0.0
col_bw_low = (col_count - 1) * (
0.5 + the_sicd.Grid.Col.SS*(delta_kcoa_center - 0.5*the_sicd.Grid.Col.ImpRespBW)) + 1
col_bw_high = (col_count - 1) * (
0.5 + the_sicd.Grid.Col.SS*(delta_kcoa_center + 0.5*the_sicd.Grid.Col.ImpRespBW)) + 1
col_bw_low = (col_bw_low % col_count)
col_bw_high = (col_bw_high % col_count)
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_line_low, (0, col_bw_low, row_count, col_bw_low))
self.frequency_panel.canvas.modify_existing_shape_using_image_coords(
self.variables.col_line_high, (0, col_bw_high, row_count, col_bw_high))
threshold = self.image_panel.canvas.variables.config.select_size_threshold
select_id = self.image_panel.canvas.variables.get_tool_shape_id_by_name('SELECT')
rect_coords = self.image_panel.canvas.get_shape_image_coords(select_id)
extent = get_extent(rect_coords) # left, right, bottom, top
row_count = extent[1] - extent[0]
col_count = extent[3] - extent[2]
the_sicd = self.variables.image_reader.get_sicd()
row_phys, col_phys = get_physical_coordinates(
the_sicd, 0.5*(extent[0]+extent[1]), 0.5*(extent[2]+extent[3]))
if row_count < threshold or col_count < threshold:
junk_data = numpy.zeros((100, 100), dtype='uint8')
self.frequency_panel.set_image_reader(NumpyCanvasImageReader(junk_data))
self._initialize_bandwidth_lines()
else:
image_data = self.variables.image_reader.base_reader[extent[0]:extent[1], extent[2]:extent[3]]
if image_data is not None:
self.frequency_panel.set_image_reader(
NumpyCanvasImageReader(self.phase_remap(fftshift(fft2_sicd(image_data, the_sicd)))))
self._initialize_bandwidth_lines()
draw_row_delta_lines()
draw_col_delta_lines()
draw_row_bandwidth_lines()
draw_col_bandwidth_lines()
else:
junk_data = numpy.zeros((100, 100), dtype='uint8')
self.frequency_panel.set_image_reader(NumpyCanvasImageReader(junk_data))
self._initialize_bandwidth_lines()
def main(reader=None):
"""
Main method for initializing the tool
Parameters
----------
reader : None|str|SICDTypeReader|SICDTypeCanvasImageReader
"""
root = tkinter.Tk()
the_style = ttk.Style()
the_style.theme_use('classic')
app = LocalFrequencySupportTool(root, reader=reader)
root.geometry("1000x1000")
root.mainloop()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description="Open the local support frequency analysis tool with optional input file.",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'input', metavar='input', default=None, nargs='?',
help='The path to the optional image file for opening.')
args = parser.parse_args()
main(reader=args.input)
|
/sarpy_apps-1.1.23.tar.gz/sarpy_apps-1.1.23/sarpy_apps/apps/local_support_tool.py
| 0.73173 | 0.160266 |
local_support_tool.py
|
pypi
|
__classification__ = "UNCLASSIFIED"
__author__ = "Valkyrie Systems Corporation"
import functools
import itertools
import pathlib
import tkinter
from tkinter import messagebox
from tkinter import ttk
import matplotlib.backends.backend_tkagg as mpl_tk
import matplotlib.figure as mpl_fig
import numpy as np
import plotly.colors
import plotly.graph_objects as go
def plot_image_area(reader):
"""
Create a plot of a CPHD's ImageArea
"""
cphd_meta = reader.cphd_meta
fig = go.Figure()
color_set = itertools.cycle(zip(plotly.colors.qualitative.Pastel2, plotly.colors.qualitative.Set2))
im_rect, im_poly = _make_image_area(cphd_meta.SceneCoordinates.ImageArea,
name='Scene', colors=next(color_set))
iacp_labels = [f'Lat: {vertex.Lat}<br>Lon: {vertex.Lon}'
for vertex in sorted(cphd_meta.SceneCoordinates.ImageAreaCornerPoints, key=lambda x: x.index)]
for label, ptx, pty, yshift in zip(iacp_labels, im_rect['x'], im_rect['y'], [-20, 20, 20, -20]):
fig.add_annotation(x=ptx, y=pty, text=label, showarrow=False, xshift=0, yshift=yshift)
fig.add_trace(im_rect)
if im_poly:
fig.add_trace(im_poly)
if cphd_meta.SceneCoordinates.ExtendedArea is not None:
ext_rect, ext_poly = _make_image_area(cphd_meta.SceneCoordinates.ExtendedArea,
name='Extended', colors=next(color_set))
fig.add_trace(ext_rect)
if ext_poly is not None:
fig.add_trace(ext_poly)
channel_colors = {}
for chan_params in cphd_meta.Channel.Parameters:
chan_id = chan_params.Identifier
channel_colors[chan_id] = next(color_set)
for chan_params in cphd_meta.Channel.Parameters:
chan_id = chan_params.Identifier
if chan_params.ImageArea is not None:
fig.add_traces([t for t in _make_image_area(chan_params.ImageArea, name=f'Channel: {chan_id}',
colors=channel_colors[chan_id]) if t])
antenna_aiming = _antenna_aiming_in_image_area(reader)
for channel, aiming in antenna_aiming.items():
for txrcv, symbol in zip(['Tx', 'Rcv'], ('triangle-down-open', 'triangle-up-open')):
boresights = aiming[txrcv]['boresights']
apcid = aiming[txrcv]['APCId']
def add_boresight_trace(points, name, color):
fig.add_trace(go.Scatter(x=points[:, 0],
y=points[:, 1],
name=name,
legendgroup=name,
mode='markers',
marker=dict(symbol=symbol, color=color)))
first_point = points[np.isfinite(points[:, 0])][0]
fig.add_trace(go.Scatter(x=[first_point[0]],
y=[first_point[1]],
name=name,
legendgroup=name,
showlegend=False,
mode='markers',
marker=dict(symbol=symbol, size=15, color=color)))
add_boresight_trace(boresights['mechanical'],
name=f"Channel: {channel} {txrcv} MB ({apcid})",
color=channel_colors[channel][0])
if 'electrical' in boresights:
add_boresight_trace(boresights['electrical'],
name=f"Channel: {channel} {txrcv} EB ({apcid})",
color=channel_colors[channel][-1])
fig.update_layout(
xaxis_title="IAX [m]", yaxis_title="IAY [m]",
title_text='Image Area',
meta='image_area')
return fig
def _make_image_area(image_area, name=None, colors=None):
x1, y1 = image_area.X1Y1
x2, y2 = image_area.X2Y2
rect = go.Scatter(x=[x1, x1, x2, x2, x1], y=[y1, y2, y2, y1, y1], fill="toself",
name=f"{name + ' ' if name is not None else ''}Rectangle")
if colors:
rect['line']['color'] = colors[0]
if image_area.Polygon is not None:
vertices = [vertex.get_array() for vertex in sorted(image_area.Polygon, key=lambda x: x.index)]
vertices = np.array(vertices + [vertices[0]])
poly = go.Scatter(x=vertices[:, 0], y=vertices[:, 1], fill="toself",
name=f"{name + ' ' if name is not None else ''}Polygon",
line={'color': rect['line']['color'], 'dash': 'dot', 'width': 1})
if colors:
poly['line']['color'] = colors[-1]
else:
poly = None
return rect, poly
def _antenna_aiming_in_image_area(reader):
cphd_meta = reader.cphd_meta
results = {}
if cphd_meta.Antenna is None:
return results
if cphd_meta.SceneCoordinates.ReferenceSurface.Planar is None:
# Only Planar is handled
return results
apcs = {}
for apc in cphd_meta.Antenna.AntPhaseCenter:
apcs[apc.Identifier] = apc
acfs = {}
for acf in cphd_meta.Antenna.AntCoordFrame:
acfs[acf.Identifier] = acf
patterns = {}
for antpat in cphd_meta.Antenna.AntPattern:
patterns[antpat.Identifier] = antpat
iarp = cphd_meta.SceneCoordinates.IARP.ECF.get_array()
iax = cphd_meta.SceneCoordinates.ReferenceSurface.Planar.uIAX.get_array()
iay = cphd_meta.SceneCoordinates.ReferenceSurface.Planar.uIAY.get_array()
iaz = np.cross(iax, iay)
def _compute_boresights(channel_id, apc_id, antpat_id, txrcv):
times = reader.read_pvp_variable(f'{txrcv}Time', channel_id)
uacx = reader.read_pvp_variable(f'{txrcv}ACX', channel_id)
uacy = reader.read_pvp_variable(f'{txrcv}ACY', channel_id)
if uacx is None or uacy is None:
acf_id = apcs[apc_id].ACFId
uacx = acfs[acf_id].XAxisPoly(times)
uacy = acfs[acf_id].YAxisPoly(times)
uacz = np.cross(uacx, uacy)
apc_positions = reader.read_pvp_variable(f'{txrcv}Pos', channel_id)
def _project_apc_into_image_area(along):
distance = -_vdot(apc_positions - iarp, iaz) / _vdot(along, iaz)
plane_points_ecf = apc_positions + distance[:, np.newaxis] * along
plane_points_x = _vdot(plane_points_ecf - iarp, iax)
plane_points_y = _vdot(plane_points_ecf - iarp, iay)
return np.stack((plane_points_x, plane_points_y)).T
boresights = {'mechanical': _project_apc_into_image_area(uacz)}
ebpvp = reader.read_pvp_variable(f'{txrcv}EB', channel_id)
if ebpvp is not None:
eb_dcx = ebpvp[:, 0]
eb_dcy = ebpvp[:, 1]
else:
eb_dcx = patterns[antpat_id].EB.DCXPoly(times)
eb_dcy = patterns[antpat_id].EB.DCYPoly(times)
if any(eb_dcx) or any(eb_dcy):
eb_dcz = np.sqrt(1 - eb_dcx**2 - eb_dcy**2)
eb = np.stack((eb_dcx, eb_dcy, eb_dcz)).T
eb_boresight = np.zeros_like(uacz)
eb_boresight += eb[:, 0, np.newaxis] * uacx
eb_boresight += eb[:, 1, np.newaxis] * uacy
eb_boresight += eb[:, 2, np.newaxis] * uacz
boresights['electrical'] = _project_apc_into_image_area(eb_boresight)
return boresights
for chan_params in cphd_meta.Channel.Parameters:
channel_id = chan_params.Identifier
if not chan_params.Antenna:
continue
results[channel_id] = {}
tx_apc_id = chan_params.Antenna.TxAPCId
results[channel_id]['Tx'] = {
'APCId': tx_apc_id,
'boresights': _compute_boresights(channel_id,
tx_apc_id,
chan_params.Antenna.TxAPATId,
'Tx')
}
rcv_apc_id = chan_params.Antenna.RcvAPCId
results[channel_id]['Rcv'] = {
'APCId': rcv_apc_id,
'boresights': _compute_boresights(channel_id,
rcv_apc_id,
chan_params.Antenna.RcvAPATId,
'Rcv')
}
return results
def _vdot(vec1, vec2, axis=-1, keepdims=False):
"""Vectorwise dot product of two bunches of vectors.
Args
----
vec1: array-like
The first bunch of vectors
vec2: array-like
The second bunch of vectors
axis: int, optional
Which axis contains the vector components
keepdims: bool, optional
Keep the full broadcasted dimensionality of the arguments
Returns
-------
array-like
"""
return (np.asarray(vec1) * np.asarray(vec2)).sum(axis=axis, keepdims=keepdims)
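# Illustrative usage (not part of the original module): dot corresponding rows
# of two (N, 3) arrays.
#   _vdot(np.array([[1., 0., 0.], [0., 1., 0.]]),
#         np.array([[2., 0., 0.], [0., 3., 0.]]))
# returns array([2., 3.]).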
class CphdVectorPower:
"""
Create a tool to visualize a CPHD vector's power
"""
def __init__(self, root, cphd_reader):
self.cphd_reader = cphd_reader
cphd_domain = cphd_reader.cphd_meta.Global.DomainType
if cphd_domain != 'FX':
root.destroy()
raise NotImplementedError(f'{cphd_domain}-domain CPHDs have not been implemented yet. '
'Only FX-domain CPHDs are supported')
ref_ch_id = cphd_reader.cphd_meta.Channel.RefChId
self.channel_datas = {x.Identifier: x for x in cphd_reader.cphd_meta.Data.Channels}
self.channel_parameters = {x.Identifier: x for x in cphd_reader.cphd_meta.Channel.Parameters}
assert ref_ch_id in self.channel_datas
self.has_signal = cphd_reader.cphd_meta.PVP.SIGNAL is not None
self.has_fxn = cphd_reader.cphd_meta.PVP.FXN1 is not None and cphd_reader.cphd_meta.PVP.FXN2 is not None
self.has_toae = cphd_reader.cphd_meta.PVP.TOAE1 is not None and cphd_reader.cphd_meta.PVP.TOAE2 is not None
self._get_noise_parameters(ref_ch_id)
# prepare figure
fig = mpl_fig.Figure(figsize=(7, 7), dpi=100)
self.ax = dict(zip(('FX', 'TOA'), fig.subplots(2, 1)))
self.ax['FX'].set_xlabel('FX Hz')
self.ax['TOA'].set_xlabel('ΔTOA [s]')
self.power_line = {}
self.power_line['FX'], = self.ax['FX'].plot(0, 0)
self.power_line['TOA'], = self.ax['TOA'].plot(0, 0)
self.span = {}
if self.has_fxn:
self.span['FXN'] = self.ax['FX'].axvspan(0, 10, label='FX Noise Bandwidth',
color='cyan', alpha=0.3, hatch='\\')
if self.has_toae:
self.span['TOAE'] = self.ax['TOA'].axvspan(0, 10, label='TOA Extended Saved',
color='cyan', alpha=0.3, hatch='\\')
self.span['FX'] = self.ax['FX'].axvspan(0, 10, label='FX Bandwidth',
color='gray', alpha=0.3, hatch='/')
self.span['TOA'] = self.ax['TOA'].axvspan(0, 10, label='TOA Saved',
color='gray', alpha=0.3, hatch='/')
self.pn_ref_marker = None
self.pn_ref_line = None
self.sn_ref_line = None
if self.sn_ref is not None:
pn_domain = self.cphd_reader.cphd_meta.Global.DomainType
sn_domain = 'TOA' if pn_domain == 'FX' else 'FX'
self.pn_ref_marker, = self.ax[pn_domain].plot(0, 0, marker='+', color='orange')
self.pn_ref_line = self.ax[pn_domain].axhline(0, color='orange')
self.sn_ref_line, = self.ax[sn_domain].plot(0, 0, color='magenta')
for ax in self.ax.values():
ax.set_ylabel("digital power")
ax.set_yscale("log")
ax.grid()
fig.subplots_adjust(bottom=0.15, hspace=0.6)
mainframe = ttk.Frame(root, padding="3 3 3 3")
mainframe.grid(column=0, row=0, sticky=tkinter.NSEW)
root.columnconfigure(index=0, weight=1)
root.rowconfigure(index=0, weight=1)
root.wm_title("CPHD - Vector Power")
self.canvas = mpl_tk.FigureCanvasTkAgg(fig, master=mainframe) # A tk.DrawingArea.
self.canvas.draw()
# pack_toolbar=False will make it easier to use a layout manager later on.
toolbar = mpl_tk.NavigationToolbar2Tk(self.canvas, mainframe, pack_toolbar=False)
toolbar.update()
self.selected_channel = tkinter.StringVar(value=ref_ch_id)
self.channel_select = ttk.Combobox(master=mainframe,
textvariable=self.selected_channel,
values=list(self.channel_datas),
width=50,
state='readonly')
self.channel_select.bind('<<ComboboxSelected>>', self._update_channel)
self.should_autoscale = tkinter.BooleanVar(value=True)
autoscale_control = tkinter.Checkbutton(master=mainframe, text="Autoscale axes?",
variable=self.should_autoscale,
command=functools.partial(self._autoscale, draw=True))
toolbar.grid(column=0, row=0, columnspan=4, sticky=tkinter.NSEW)
self.canvas.get_tk_widget().grid(column=0, row=1, columnspan=4, sticky=tkinter.NSEW)
self.channel_select.grid(column=0, row=2, columnspan=3, sticky=tkinter.NSEW)
autoscale_control.grid(column=3, row=2, sticky=tkinter.NSEW)
vectorframe = ttk.LabelFrame(mainframe, text='Vector Selection', padding="3 3 3 3")
vectorframe.grid(column=0, row=3, columnspan=4, pady=8, sticky=tkinter.NSEW)
self.selected_vector = tkinter.IntVar(value=0)
self.selected_vector.trace_add('write', self._update_plot)
self.vector_slider = tkinter.Scale(vectorframe, from_=0, to=self.channel_datas[ref_ch_id].NumVectors-1,
orient=tkinter.HORIZONTAL,
variable=self.selected_vector,
length=256,
showvalue=False)
self.vector_entry = ttk.Spinbox(vectorframe, textvariable=self.selected_vector,
from_=0, to=self.channel_datas[ref_ch_id].NumVectors-1)
self.vector_slider.grid(column=0, row=0, columnspan=3, sticky=tkinter.NSEW)
self.vector_entry.grid(column=3, row=0, padx=3, sticky=tkinter.EW)
avgframe = ttk.Labelframe(mainframe, text='Averaging', padding="3 3 3 3")
avgframe.grid(column=0, row=4, columnspan=4, sticky=tkinter.NSEW)
ttk.Label(avgframe, text="# pre/post vectors").grid(column=0, row=0, columnspan=1)
def show_vector_avg_warning():
messagebox.showwarning(parent=root, message=(
'Vector averaging assumes that the domain coordinate value PVPs (SC0, SCSS) are constant across the '
'span of selected vectors. Take care or disable vector averaging when these vary.'
))
self.vector_avg_warn_button = ttk.Button(avgframe, text="?",
command=show_vector_avg_warning)
self.vector_avg_warn_button.grid(column=1, row=0, sticky=(tkinter.S, tkinter.W))
self.num_adjacent_vec_slider = tkinter.Scale(avgframe, from_=0, to=32,
orient=tkinter.HORIZONTAL,
showvalue=True,
command=self._update_plot)
self.num_adjacent_vec_slider.grid(column=0, row=1, columnspan=2, padx=3, sticky=tkinter.EW)
ttk.Label(avgframe, text="# samples").grid(column=2, row=0, columnspan=2)
self.num_avg_samples_slider = tkinter.Scale(avgframe, from_=1, to=self.channel_datas[ref_ch_id].NumSamples//16,
orient=tkinter.HORIZONTAL,
showvalue=True,
command=self._update_plot)
self.num_avg_samples_slider.grid(column=2, row=1, columnspan=2, padx=3, sticky=tkinter.EW)
for col in range(4):
mainframe.columnconfigure(col, weight=1)
vectorframe.columnconfigure(col, weight=1)
avgframe.columnconfigure(col, weight=1)
mainframe.rowconfigure(1, weight=10)
self._update_channel()
def _update_legend(self, ax):
ax.legend(bbox_to_anchor=(0.5, -0.4), loc='lower center', borderaxespad=0, ncol=10)
def _get_noise_parameters(self, channel_id):
these_channel_parameters = self.channel_parameters[channel_id]
self.pn_ref = None
self.bn_ref = None
self.sn_ref = None
if these_channel_parameters.NoiseLevel is not None:
self.pn_ref = these_channel_parameters.NoiseLevel.PNRef
self.bn_ref = these_channel_parameters.NoiseLevel.BNRef
if self.pn_ref is not None and self.bn_ref is not None:
self.sn_ref = self.pn_ref / self.bn_ref
def _update_channel(self, *args, **kwargs):
channel_id = self.selected_channel.get()
self._get_noise_parameters(channel_id)
if self.sn_ref is not None:
self.pn_ref_line.set_ydata([self.pn_ref] * 2)
self.pn_ref_marker.set_label(f'PNRef={self.pn_ref}')
self.sn_ref_line.set_label(f'SNRef={self.sn_ref}')
pvp_fields = ['FX1', 'FX2', 'SC0', 'SCSS', 'TOA1', 'TOA2']
if self.has_signal:
pvp_fields.append('SIGNAL')
if self.has_fxn:
pvp_fields.extend(['FXN1', 'FXN2'])
if self.has_toae:
pvp_fields.extend(['TOAE1', 'TOAE2'])
self.pvps = {k: self.cphd_reader.read_pvp_variable(k, index=channel_id) for k in pvp_fields}
self.selected_vector.set(0)
self._update_slider(0)
self.channel_select.selection_clear()
def _update_slider(self, vector_index):
this_channel_data = self.channel_datas[self.selected_channel.get()]
self.vector_slider.configure(to=this_channel_data.NumVectors - 1)
self.vector_slider.set(vector_index)
self.num_avg_samples_slider.configure(to=this_channel_data.NumSamples//16)
self.num_avg_samples_slider.set(1)
self.num_adjacent_vec_slider.set(0)
def _autoscale(self, draw=False):
if self.should_autoscale.get():
for ax in self.ax.values():
ax.autoscale()
ax.relim()
ax.autoscale_view(True, True, True)
if draw:
self.canvas.draw()
def _update_plot(self, *args):
vector_index = self.selected_vector.get()
channel_id = self.selected_channel.get()
num_avg_vectors = int(self.num_adjacent_vec_slider.get())
num_avg_samples = int(self.num_avg_samples_slider.get())
num_vectors = self.channel_datas[channel_id].NumVectors
num_samples = self.channel_datas[channel_id].NumSamples
signal_chunk = self.cphd_reader.read(slice(np.maximum(vector_index - num_avg_vectors, 0),
np.minimum(vector_index + num_avg_vectors + 1, num_vectors)),
slice(None, num_samples//num_avg_samples * num_avg_samples),
index=channel_id, squeeze=False)
domain = self.cphd_reader.cphd_meta.Global.DomainType
spectral_domain = 'TOA' if domain == 'FX' else 'FX'
scss = self.pvps['SCSS'][vector_index]
domain_samples = self.pvps['SC0'][vector_index] + np.arange(signal_chunk.shape[-1]) * scss
cphd_power = (signal_chunk * np.conj(signal_chunk)).real
cphd_power_averaged = cphd_power.reshape(cphd_power.shape[0], -1, num_avg_samples).mean(axis=(2, 0))
domain_averaged = domain_samples.reshape(-1, num_avg_samples).mean(-1)
self.power_line[domain].set_data(domain_averaged, cphd_power_averaged)
sc1, sc2 = [self.pvps[f'{domain}{n}'][vector_index] for n in (1, 2)]
is_in_span = (sc1 <= domain_samples) & (domain_samples <= sc2)
in_span_chunk = signal_chunk[:, is_in_span]
in_span_chunk = in_span_chunk[:, :in_span_chunk.shape[-1]//num_avg_samples * num_avg_samples]
if self.cphd_reader.cphd_meta.Global.SGN == -1:
in_span_chunk = np.conj(in_span_chunk)
spectral_chunk = np.fft.fftshift(np.fft.fft(in_span_chunk, axis=-1), axes=-1)
spectral_chunk /= np.sqrt(spectral_chunk.shape[-1])
spectral_power = (spectral_chunk * np.conj(spectral_chunk)).real
spectral_power_averaged = spectral_power.reshape(spectral_power.shape[0], -1, num_avg_samples).mean(axis=(2, 0))
spectral_domain_samples = np.linspace(-1/(2*scss), 1/(2*scss), num=in_span_chunk.shape[-1], endpoint=False)
spectral_domain_averaged = spectral_domain_samples.reshape(-1, num_avg_samples).mean(-1)
self.power_line[spectral_domain].set_data(spectral_domain_averaged, spectral_power_averaged)
if self.sn_ref is not None:
self.pn_ref_marker.set_data((sc1 + sc2) / 2, self.pn_ref)
sn_ref_bw = self.bn_ref / scss * np.array([-1/2, 1/2])
self.sn_ref_line.set_data(sn_ref_bw, self.sn_ref * np.ones_like(sn_ref_bw))
for domain, span in self.span.items():
vertices = span.get_xy()
b1 = self.pvps[f'{domain}1'][vector_index]
b2 = self.pvps[f'{domain}2'][vector_index]
vertices[:, 0] = [b1, b1, b2, b2, b1]
span.set_xy(vertices)
self._autoscale()
# update titles
title_parts = [pathlib.Path(self.cphd_reader.file_name).name]
if self.has_signal:
title_parts.append(f"SIGNAL[{vector_index}]={self.pvps['SIGNAL'][vector_index]}")
self.ax['FX'].set_title('\n'.join(title_parts))
if self.sn_ref is not None:
self.ax['TOA'].set_title(f'BNRef={self.bn_ref}')
for ax in self.ax.values():
self._update_legend(ax)
# required to update canvas and attached toolbar!
self.canvas.draw_idle()
|
/sarpy_apps-1.1.23.tar.gz/sarpy_apps-1.1.23/sarpy_apps/supporting_classes/cphd_plotting.py
| 0.648689 | 0.226709 |
cphd_plotting.py
|
pypi
|
__classification__ = "UNCLASSIFIED"
__author__ = ("Jason Casey", "Thomas McCullough")
import tkinter
from tk_builder.widgets import basic_widgets
from sarpy.io.general.base import BaseReader
from sarpy.io.general.nitf import NITFDetails, NITFReader
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.phase_history.base import CPHDTypeReader
from sarpy.io.received.base import CRSDTypeReader
from sarpy.io.product.base import SIDDTypeReader
def _primitive_list(the_list):
primitive = True
for entry in the_list:
primitive &= isinstance(entry, (float, int, str, list))
return primitive
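# Illustrative behaviour (not part of the original module):
#   _primitive_list([1, 'a', [2, 3]])  -> True
#   _primitive_list([{'key': 1}])      -> False (a dict entry is not primitive here)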
class Metaviewer(basic_widgets.Treeview):
"""
For viewing a rendering of a json compatible object.
"""
def __init__(self, master):
"""
Parameters
----------
master : tkinter.Tk|tkinter.Toplevel
The GUI element which is the parent
"""
basic_widgets.Treeview.__init__(self, master)
self.master.geometry("800x600")
self.pack(expand=tkinter.YES, fill=tkinter.BOTH)
def empty_entries(self):
"""
Empty all entries - for the purpose of reinitializing.
Returns
-------
None
"""
self.delete(*self.get_children())
def add_node(self, the_parent, the_key, the_value):
"""
For the given key and value, this creates the node for the given value,
and recursively adds children, as appropriate.
Parameters
----------
the_parent : str
The parent key for this entry.
the_key : str
The key for this entry - should be unique amongst children of this parent.
the_value : dict|list|str|int|float
The value for this entry.
Returns
-------
None
"""
real_key = '{}_{}'.format(the_parent, the_key)
if isinstance(the_value, list):
if _primitive_list(the_value):
self.insert(the_parent, "end", real_key, text="{}: {}".format(the_key, the_value))
else:
# add a parent node for this list
self.insert(the_parent, "end", real_key, text=the_key)
for i, value in enumerate(the_value):
# add a node for each list element
element_key = '{}[{}]'.format(the_key, i)
self.add_node(real_key, element_key, value)
elif isinstance(the_value, dict):
self.insert(the_parent, "end", real_key, text=the_key)
for key in the_value:
val = the_value[key]
self.add_node(real_key, key, val)
elif isinstance(the_value, float):
self.insert(the_parent, "end", real_key, text="{0:s}: {1:0.16G}".format(the_key, the_value))
else:
self.insert(the_parent, "end", real_key, text="{}: {}".format(the_key, the_value))
def populate_from_reader(self, reader: BaseReader) -> None:
"""
Populate the entries from a reader implementation.
Parameters
----------
reader : BaseReader
"""
# empty any present entries
self.empty_entries()
if isinstance(reader, SICDTypeReader):
sicds = reader.get_sicds_as_tuple()
if sicds is None:
return
elif len(sicds) == 1:
self.add_node("", "SICD", sicds[0].to_dict())
else:
for i, entry in enumerate(sicds):
self.add_node("", "SICD_{}".format(i), entry.to_dict())
elif isinstance(reader, SIDDTypeReader):
sidds = reader.get_sidds_as_tuple()
if sidds is None:
pass
elif len(sidds) == 1:
self.add_node("", "SIDD", sidds[0].to_dict())
else:
for i, entry in enumerate(sidds):
self.add_node("", "SIDD_{}".format(i), entry.to_dict())
sicds = reader.sicd_meta
if sicds is not None:
for i, entry in enumerate(sicds):
self.add_node("", "SICD_{}".format(i), entry.to_dict())
elif isinstance(reader, CPHDTypeReader):
cphd = reader.cphd_meta
if cphd is None:
pass
else:
self.add_node("", "CPHD", cphd.to_dict())
elif isinstance(reader, CRSDTypeReader):
crsd = reader.crsd_meta
if crsd is None:
pass
else:
self.add_node("", "CRSD", crsd.to_dict())
# TODO: image segment details?
if isinstance(reader, NITFReader):
nitf_details = reader.nitf_details # type: NITFDetails
self.add_node("", "NITF", nitf_details.get_headers_json())
|
/sarpy_apps-1.1.23.tar.gz/sarpy_apps-1.1.23/sarpy_apps/supporting_classes/metaviewer.py
| 0.583441 | 0.304076 |
metaviewer.py
|
pypi
|
__classification__ = "UNCLASSIFIED"
__author__ = ("Jason Casey", "Thomas McCullough")
import logging
from datetime import datetime
import numpy
from scipy.constants import foot
from sarpy.geometry import latlon
from sarpy.geometry.geocoords import ecf_to_geodetic, geodetic_to_ecf
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
from sarpy.io.phase_history.cphd1_elements.CPHD import CPHDType as CPHDType1
from sarpy.io.phase_history.cphd0_3_elements.CPHD import CPHDType as CPHDType0_3
from sarpy.io.received.crsd1_elements.CRSD import CRSDType # version 1.0
from sarpy.io.complex.sicd_elements.SCPCOA import GeometryCalculator
from sarpy.io.product.sidd2_elements.ExploitationFeatures import ExploitationCalculator
ANGLE_DECIMALS = {'azimuth': 1, 'graze': 1, 'layover': 0, 'shadow': 0, 'multipath': 0}
class MetaIconDataContainer(object):
"""
Container object for rendering the metaicon element.
"""
def __init__(self,
lat=None,
lon=None,
collect_start=None,
collect_duration=None,
collector_name=None,
core_name=None,
azimuth=None,
north=None,
graze=None,
layover=None,
layover_display=None,
shadow=None,
shadow_display=None,
multipath=None,
multipath_display=None,
side_of_track=None,
col_impulse_response_width=None,
row_impulse_response_width=None,
grid_column_sample_spacing=None,
grid_row_sample_spacing=None,
image_plane=None,
tx_rf_bandwidth=None,
rniirs=None,
polarization=None):
"""
Parameters
----------
lat : None|float
lon : None|float
collect_start : None|numpy.datetime64
collect_duration : None|float
collector_name : None|str
core_name : None|str
azimuth : None|float
This should be clockwise relative to True North.
north : None|float
Clockwise relative to decreasing row direction (i.e. "up").
graze : None|float
            The graze angle, in degrees.
layover : None|float
Clockwise relative to decreasing row direction (i.e. "up").
layover_display : None|float
The angle value for display. The meaning of this varies between different
structure types.
shadow : None|float
Clockwise relative to decreasing row direction (i.e. "up").
shadow_display : None|float
The angle value for display. The meaning of this varies between different
structure types.
multipath : None|float
Clockwise relative to decreasing row direction (i.e. "up").
multipath_display : None|float
The angle value for display. The meaning of this varies between different
structure types.
side_of_track : None|str
One of `('L', 'R')`.
col_impulse_response_width : None|float
In meters.
row_impulse_response_width : None|float
In meters.
grid_column_sample_spacing : None|float
Assumed to be in meters, but the units are not important provided
that they are the same for row and column.
grid_row_sample_spacing : None|float
Assumed to be in meters, but the units are not important provided
that they are the same for row and column.
image_plane : None|str
The image plane value.
tx_rf_bandwidth : None|float
In MHz.
rniirs : None|str
RNIIRS value.
polarization : None|str
The polarization string.
"""
self.lat = lat
self.lon = lon
self.collect_start = collect_start
self.collect_duration = collect_duration
self.collector_name = collector_name
self.core_name = core_name
self.azimuth = azimuth
self.north = north
self.graze = graze
self.layover = layover
self.layover_display = layover_display
self.shadow = shadow
self.shadow_display = shadow_display
self.multipath = multipath
self.multipath_display = multipath_display
self.side_of_track = side_of_track
self.col_impulse_response_width = col_impulse_response_width
self.row_impulse_response_width = row_impulse_response_width
self.grid_column_sample_spacing = grid_column_sample_spacing
self.grid_row_sample_spacing = grid_row_sample_spacing
self.image_plane = image_plane
self.tx_rf_bandwidth = tx_rf_bandwidth
self.rniirs = rniirs
self.polarization = polarization
@property
def is_grid(self):
"""
bool: This is a grid collection
"""
return self.grid_row_sample_spacing is not None
@property
def cdp_line(self):
"""
str: The collection duration/polarization line value.
"""
cdp_line = 'CDP: No data'
if self.collect_duration is not None:
cdp_line = "CDP: {0:0.1f} s".format(self.collect_duration)
if self.polarization is not None:
cdp_line += ' / POL: {0:s}'.format(self.polarization)
return cdp_line
@property
def geo_line(self):
"""
str: The geographic location line data.
"""
lat, lon = self.lat, self.lon
if lat is not None:
return 'Geo: {0:s}/{1:s}'.format(
latlon.string(lat, "lat", include_symbols=False),
latlon.string(lon, "lon", include_symbols=False))
return 'Geo: No data'
@property
def res_line(self):
"""
str: The impulse response data line.
"""
if self.col_impulse_response_width is not None:
az_ipr = self.col_impulse_response_width/foot
rg_ipr = self.row_impulse_response_width/foot
if az_ipr/rg_ipr - 1 < 0.2:
res_line = 'IPR: {0:0.1f} ft'.format(0.5*(az_ipr + rg_ipr))
else:
res_line = 'IPR: {0:0.1f}/{1:0.1f} ft(A/R)'.format(az_ipr, rg_ipr)
elif self.tx_rf_bandwidth is not None:
res_line = 'IPR: {0:0.0f} MHz'.format(self.tx_rf_bandwidth)
else:
res_line = 'IPR: No data'
if self.rniirs:
res_line += " RNIIRS: " + self.rniirs
return res_line
@property
def iid_line(self):
"""
        str: The date/time data.
"""
if self.collector_name is not None:
if self.collect_start is not None:
dt_in_seconds = self.collect_start.astype('datetime64[s]')
dt = dt_in_seconds.astype(datetime)
date_str_1, date_str_2 = dt.strftime("%d%b%y").upper(), dt.strftime("%H%MZ")
else:
date_str_1, date_str_2 = "DDMMMYY", "HMZ"
return '{} {} / {}'.format(date_str_1, self.collector_name[:4], date_str_2)
elif self.core_name is not None:
return self.core_name[:16]
return "No iid"
def get_angle_line(self, angle_type, symbol='\xB0'):
"""
Extracts proper angle line formatting.
Parameters
----------
angle_type : str
The name of the angle type.
symbol : str
The degree symbol string, if any.
Returns
-------
str
"""
value = getattr(self, angle_type+'_display', None)
if value is None:
value = getattr(self, angle_type, None)
if value is None:
return "{}: No data".format(angle_type.capitalize())
decimals = ANGLE_DECIMALS.get(angle_type, 0)
frm_str = '{0:s}:{1:0.'+str(decimals)+'f}{2:s}'
return frm_str.format(angle_type.capitalize(), value, symbol)
@classmethod
def from_sicd(cls, sicd):
"""
Create an instance from a SICD object.
Parameters
----------
sicd : SICDType
Returns
-------
MetaIconDataContainer
"""
if not isinstance(sicd, SICDType):
raise TypeError(
'sicd is expected to be an instance of SICDType, got type {}'.format(type(sicd)))
def extract_scp():
try:
llh = sicd.GeoData.SCP.LLH.get_array()
variables['lat'] = float(llh[0])
variables['lon'] = float(llh[1])
except AttributeError:
pass
def extract_timeline():
try:
variables['collect_start'] = sicd.Timeline.CollectStart
except AttributeError:
pass
try:
variables['collect_duration'] = sicd.Timeline.CollectDuration
except AttributeError:
pass
def extract_collectioninfo():
try:
variables['collector_name'] = sicd.CollectionInfo.CollectorName
variables['core_name'] = sicd.CollectionInfo.CoreName
except AttributeError:
pass
def extract_scpcoa():
if sicd.SCPCOA is None:
return
variables['side_of_track'] = sicd.SCPCOA.SideOfTrack
azimuth = sicd.SCPCOA.AzimAng
if azimuth is None:
return
north = ((360 - azimuth) % 360)
variables['azimuth'] = azimuth
variables['north'] = north
variables['graze'] = sicd.SCPCOA.GrazeAng
layover = sicd.SCPCOA.LayoverAng
if layover is not None:
variables['layover'] = ((layover-azimuth + 360) % 360)
variables['layover_display'] = layover
shadow = sicd.SCPCOA.Shadow
if shadow is None:
shadow = 180.0
variables['shadow'] = shadow
variables['shadow_display'] = ((shadow + azimuth + 360) % 360.0)
multipath = sicd.SCPCOA.Multipath
if multipath is not None:
variables['multipath'] = ((multipath - azimuth + 360) % 360)
variables['multipath_display'] = multipath
def extract_imp_resp():
if sicd.Grid is not None:
try:
variables['image_plane'] = sicd.Grid.ImagePlane
except AttributeError:
pass
try:
variables['row_impulse_response_width'] = sicd.Grid.Row.ImpRespWid
variables['col_impulse_response_width'] = sicd.Grid.Col.ImpRespWid
except AttributeError:
pass
try:
variables['grid_row_sample_spacing'] = sicd.Grid.Row.SS
variables['grid_column_sample_spacing'] = sicd.Grid.Col.SS
except AttributeError:
pass
try:
variables['tx_rf_bandwidth'] = sicd.RadarCollection.Waveform[0].TxRFBandwidth*1e-6
except AttributeError:
pass
def extract_rniirs():
try:
variables['rniirs'] = sicd.CollectionInfo.Parameters.get('PREDICTED_RNIIRS', None)
except AttributeError:
pass
def extract_polarization():
try:
proc_pol = sicd.ImageFormation.TxRcvPolarizationProc
if proc_pol is not None:
variables['polarization'] = proc_pol
return
except AttributeError:
pass
try:
variables['polarization'] = sicd.RadarCollection.TxPolarization
except AttributeError:
logging.error('No polarization found.')
variables = {}
extract_scp()
extract_timeline()
extract_collectioninfo()
extract_scpcoa()
extract_imp_resp()
extract_rniirs()
extract_polarization()
return cls(**variables)
@classmethod
def from_cphd(cls, cphd, index):
"""
Create an instance from a CPHD object.
Parameters
----------
cphd : CPHDType1|CPHDType0_3
index
The index for the data channel.
Returns
-------
MetaIconDataContainer
"""
if isinstance(cphd, CPHDType1):
return cls._from_cphd1_0(cphd, index)
elif isinstance(cphd, CPHDType0_3):
return cls._from_cphd0_3(cphd, index)
else:
raise TypeError('Expected a CPHD type, and got type {}'.format(type(cphd)))
@classmethod
def _from_cphd1_0(cls, cphd, index):
"""
Create an instance from a CPHD version 1.0 object.
Parameters
----------
cphd : CPHDType1
index
The index of the data channel.
Returns
-------
MetaIconDataContainer
"""
if not isinstance(cphd, CPHDType1):
raise TypeError(
'cphd is expected to be an instance of CPHDType, got type {}'.format(type(cphd)))
def extract_collection_id():
if cphd.CollectionID is None:
return
try:
variables['collector_name'] = cphd.CollectionID.CollectorName
except AttributeError:
pass
try:
variables['core_name'] = cphd.CollectionID.CoreName
except AttributeError:
pass
def extract_coords():
try:
coords = cphd.SceneCoordinates.IARP.LLH.get_array()
variables['lat'] = coords[0]
variables['lon'] = coords[1]
except AttributeError:
pass
def extract_global():
if cphd.Global is None:
return
try:
variables['collect_start'] = cphd.Global.Timeline.CollectionStart
except AttributeError:
pass
try:
variables['collect_duration'] = (cphd.Global.Timeline.TxTime2 - cphd.Global.Timeline.TxTime1)
except AttributeError:
pass
def extract_reference_geometry():
if cphd.ReferenceGeometry is None:
return
if cphd.ReferenceGeometry.Monostatic is not None:
mono = cphd.ReferenceGeometry.Monostatic
variables['azimuth'] = mono.AzimuthAngle
variables['graze'] = mono.GrazeAngle
variables['layover'] = mono.LayoverAngle
variables['shadow'] = mono.Shadow
variables['multipath'] = mono.Multipath
variables['side_of_track'] = mono.SideOfTrack
elif cphd.ReferenceGeometry.Bistatic is not None:
bi = cphd.ReferenceGeometry.Bistatic
variables['azimuth'] = bi.AzimuthAngle
variables['graze'] = bi.GrazeAngle
variables['layover'] = bi.LayoverAngle
def extract_channel():
if cphd.TxRcv is None:
return
try:
tx = cphd.TxRcv.TxWFParameters[index]
rcv = cphd.TxRcv.RcvParameters[index]
variables['tx_rf_bandwidth'] = tx.RFBandwidth*1e-6
variables['polarization'] = tx.Polarization + ":" + rcv.Polarization
except AttributeError:
pass
variables = {}
extract_collection_id()
extract_coords()
extract_global()
extract_reference_geometry()
extract_channel()
return cls(**variables)
@classmethod
def _from_cphd0_3(cls, cphd, index):
"""
Create an instance from a CPHD version 0.3 object.
Parameters
----------
cphd : CPHDType0_3
index
The index of the data channel.
Returns
-------
MetaIconDataContainer
"""
if not isinstance(cphd, CPHDType0_3):
raise TypeError(
'cphd is expected to be an instance of CPHDType version 0.3, got type {}'.format(type(cphd)))
def extract_collection_info():
if cphd.CollectionInfo is None:
return
try:
variables['collector_name'] = cphd.CollectionInfo.CollectorName
except AttributeError:
pass
try:
variables['core_name'] = cphd.CollectionInfo.CoreName
except AttributeError:
pass
def extract_global():
if cphd.Global is None:
return
try:
variables['collect_start'] = cphd.Global.CollectStart
except AttributeError:
pass
try:
variables['collect_duration'] = cphd.Global.CollectDuration
except AttributeError:
pass
try:
llh_coords = cphd.Global.ImageArea.Corner.get_array(dtype=numpy.dtype('float64'))
ecf_coords = geodetic_to_ecf(llh_coords)
coords = ecf_to_geodetic(numpy.mean(ecf_coords, axis=0))
variables['lat'] = coords[0]
variables['lon'] = coords[1]
except AttributeError:
pass
def extract_channel():
if cphd.RadarCollection is not None:
rc = cphd.RadarCollection
try:
variables['tx_rf_bandwidth'] = (rc.TxFrequency.Max - rc.TxFrequency.Min)*1e-6
variables['polarization'] = rc.RcvChannels[index].TxRcvPolarization
except AttributeError:
pass
elif cphd.Channel is not None:
try:
variables['tx_rf_bandwidth'] = cphd.Channel.Parameters[index].BWSavedNom*1e-6
except AttributeError:
pass
variables = {}
extract_collection_info()
extract_global()
extract_channel()
return cls(**variables)
@classmethod
def from_sidd(cls, sidd):
"""
Create an instance from a SIDD object.
Parameters
----------
sidd : SIDDType2|SIDDType1
Returns
-------
MetaIconDataContainer
"""
if not isinstance(sidd, (SIDDType2, SIDDType1)):
raise TypeError(
'sidd is expected to be an instance of SIDD type, got type {}'.format(type(sidd)))
def extract_location():
ll_coords = None
if isinstance(sidd, SIDDType2):
try:
ll_coords = sidd.GeoData.ImageCorners.get_array(dtype=numpy.dtype('float64'))
except AttributeError:
pass
elif isinstance(sidd, SIDDType1):
try:
ll_coords = sidd.GeographicAndTarget.GeographicCoverage.Footprint.get_array(
dtype=numpy.dtype('float64'))
except AttributeError:
pass
if ll_coords is not None:
llh_coords = numpy.zeros((ll_coords.shape[0], 3), dtype=numpy.float64)
llh_coords[:, :2] = ll_coords
ecf_coords = geodetic_to_ecf(llh_coords)
coords = ecf_to_geodetic(numpy.mean(ecf_coords, axis=0))
variables['lat'] = coords[0]
variables['lon'] = coords[1]
def extract_exploitation_features():
if sidd.ExploitationFeatures is None:
return
try:
exp_info = sidd.ExploitationFeatures.Collections[0].Information
variables['collect_start'] = exp_info.CollectionDateTime
variables['collect_duration'] = exp_info.CollectionDuration
variables['collector_name'] = exp_info.SensorName
if len(exp_info.Polarizations) == 1:
variables['polarization'] = exp_info.Polarizations[0].TxPolarization + ':' + \
exp_info.Polarizations[0].RcvPolarization
except AttributeError:
pass
try:
exp_geom = sidd.ExploitationFeatures.Collections[0].Geometry
variables['azimuth'] = exp_geom.Azimuth
variables['graze'] = exp_geom.Graze
except AttributeError:
pass
if isinstance(sidd, SIDDType1):
north = sidd.ExploitationFeatures.Product.North
elif isinstance(sidd, SIDDType2):
north = sidd.ExploitationFeatures.Products[0].North
else:
raise TypeError('Unhandled sidd type `{}`'.format(sidd.__class__))
if north is None:
if sidd.Measurement.PlaneProjection is None:
return
ref_point = sidd.Measurement.PlaneProjection.ReferencePoint
ref_time = sidd.Measurement.PlaneProjection.TimeCOAPoly(ref_point.Point.Row, ref_point.Point.Col)
plane = sidd.Measurement.PlaneProjection.ProductPlane
geom_calculator = GeometryCalculator(
ref_point.ECEF.get_array(dtype='float64'),
sidd.Measurement.ARPPoly(ref_time),
sidd.Measurement.ARPPoly.derivative_eval(ref_time, der_order=1))
calculator = ExploitationCalculator(
geom_calculator,
plane.RowUnitVector.get_array(dtype='float64'),
plane.ColUnitVector.get_array(dtype='float64'))
north = calculator.North
variables['north'] = ((180.0 - north) % 360)
try:
exp_phen = sidd.ExploitationFeatures.Collections[0].Phenomenology
variables['layover'] = ((exp_phen.Layover.Angle + 180) % 360)
variables['layover_display'] = exp_phen.Layover.Angle
variables['shadow'] = ((exp_phen.Shadow.Angle + 180) % 360)
variables['shadow_display'] = exp_phen.Shadow.Angle
variables['multipath'] = exp_phen.MultiPath
variables['multipath_display'] = ((exp_phen.MultiPath + 180) % 360)
except AttributeError:
pass
def extract_spacing():
if sidd.Measurement is None:
return
meas = sidd.Measurement
if meas.PlaneProjection is not None:
variables['grid_row_sample_spacing'] = meas.PlaneProjection.SampleSpacing.Row
variables['grid_column_sample_spacing'] = meas.PlaneProjection.SampleSpacing.Col
elif meas.CylindricalProjection is not None:
variables['grid_row_sample_spacing'] = meas.CylindricalProjection.SampleSpacing.Row
variables['grid_column_sample_spacing'] = meas.CylindricalProjection.SampleSpacing.Col
elif meas.GeographicProjection is not None:
variables['grid_row_sample_spacing'] = meas.GeographicProjection.SampleSpacing.Row
variables['grid_column_sample_spacing'] = meas.GeographicProjection.SampleSpacing.Col
variables = {'image_plane': 'GROUND'}
extract_location()
extract_exploitation_features()
extract_spacing()
return cls(**variables)
@classmethod
def from_crsd(cls, crsd):
"""
Create an instance from a CRSD version 1.0 object.
Parameters
----------
crsd : CRSDType
Returns
-------
MetaIconDataContainer
"""
if not isinstance(crsd, CRSDType):
raise TypeError(
'Got unhandled crsd type `{}`'.format(type(crsd)))
def extract_collection_id():
if crsd.CollectionID is None:
return
try:
variables['collector_name'] = crsd.CollectionID.CollectorName
except AttributeError:
pass
try:
variables['core_name'] = crsd.CollectionID.CoreName
except AttributeError:
pass
def extract_coords():
try:
coords = crsd.SceneCoordinates.IARP.LLH.get_array()
variables['lat'] = coords[0]
variables['lon'] = coords[1]
except AttributeError:
pass
def extract_global():
if crsd.Global is None:
return
try:
variables['collect_start'] = crsd.Global.Timeline.CollectionRefTime
except AttributeError:
pass
try:
variables['collect_duration'] = (crsd.Global.Timeline.RcvTime2 - crsd.Global.Timeline.RcvTime1)
except AttributeError:
pass
def extract_reference_geometry():
if crsd.ReferenceGeometry is None or \
crsd.ReferenceGeometry.RcvParameters is None:
return
rcv = crsd.ReferenceGeometry.RcvParameters
variables['azimuth'] = rcv.AzimuthAngle
variables['graze'] = rcv.GrazeAngle
variables['side_of_track'] = rcv.SideOfTrack
variables = {}
extract_collection_id()
extract_coords()
extract_global()
extract_reference_geometry()
return cls(**variables)
|
/sarpy_apps-1.1.23.tar.gz/sarpy_apps-1.1.23/sarpy_apps/supporting_classes/metaicon/metaicon_data_container.py
| 0.759582 | 0.393618 |
metaicon_data_container.py
|
pypi
|
SarPy
=====
SarPy is a basic Python library to read, write, and do simple processing
of complex SAR data using the NGA SICD format *(standards linked below)*.
It has been released by NGA to encourage the use of SAR data standards
throughout the international SAR community. SarPy complements the
[SIX](https://github.com/ngageoint/six-library) library (C++) and the
[MATLAB SAR Toolbox](https://github.com/ngageoint/MATLAB_SAR), which are
implemented in other languages but have similar goals.
Some sample SICD files can be found
[here](https://github.com/ngageoint/six-library/wiki/Sample-SICDs).
Relevant Standards Documents
----------------------------
A variety of SAR format standards are mentioned throughout this README; the
associated references are listed here.
*Sensor Independent Complex Data (SICD)* - latest version (1.3.0; 2021-11-30)
1. [Volume 1, Design & Implementation Description Document](https://nsgreg.nga.mil/doc/view?i=5381)
2. [Volume 2, File Format Description Document](https://nsgreg.nga.mil/doc/view?i=5382)
3. [Volume 3, Image Projections Description Document](https://nsgreg.nga.mil/doc/view?i=5383)
4. [Schema](https://nsgreg.nga.mil/doc/view?i=5418)
*Sensor Independent Derived Data (SIDD)* - latest version (3.0; 2021-11-30)
1. [Volume 1, Design and Implementation Description Document](https://nsgreg.nga.mil/doc/view?i=5440)
2. [Volume 2, NITF File Format Description Document]( https://nsgreg.nga.mil/doc/view?i=5441)
3. [Volume 3, GeoTIFF File Format Description Document](https://nsgreg.nga.mil/doc/view?i=5442)
4. [Schema](https://nsgreg.nga.mil/doc/view?i=5231)
*Compensated Phase History Data (CPHD)* - latest version (1.1.0; 2021-11-30)
1. [Design & Implementation Description](https://nsgreg.nga.mil/doc/view?i=5388)
2. [Design & Implementation Schema](https://nsgreg.nga.mil/doc/view?i=5421)
Both SICD and SIDD files are NITF files following specific guidelines.
*Basic Image Interchange Format (BIFF)* - latest edition (2021.2; 2021-04-20)
1. [National Imagery Transmission Format](https://nsgreg.nga.mil/doc/view?i=5262)
For other NGA standards inquiries, the standards registry can be searched
[here](https://nsgreg.nga.mil/registries/search/index.jsp?registryType=doc).
Basic Capability
----------------
The basic capabilities provided in SarPy are generally SAR specific, and largely
geared towards reading and manipulating data provided in NGA SAR file formats.
Full support for reading and writing SICD, SIDD, CPHD, and CRSD (standard pending)
and associated metadata structures is currently provided, and this is the main
focus of this project.
There is additionally support for reading data from complex data formats analogous
to SICD format, *usually called Single Look Complex (SLC) or Level 1*, from a
variety of commercial or other sources including
- Capella (**partial support**)
- COSMO-SkyMed (1st and 2nd generation)
- GFF (Sandia format)
- ICEYE
- NISAR
- PALSAR2
- RadarSat-2
- Radar Constellation Mission (RCM)
- Sentinel-1
- TerraSAR-X.
This SLC format data is read directly as though it were coming from a SICD
file. *This ability to read does not generally apply to data products other
than the SLC or Level 1 product, and there is typically no direct NGA standard
analog for these products.*
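As a rough illustration of this read-as-SICD behavior, the sketch below opens a
product and inspects the resulting SICD metadata. It is a minimal sketch: the
`open_complex` convenience function and array-style slicing of the reader are
assumptions about the reader interface, `get_sicds_as_tuple` is the metadata
accessor used elsewhere in the project, and the file name is hypothetical.
```python
from sarpy.io.complex.converter import open_complex

# Open a SICD file or a supported SLC/Level 1 product; either way the result
# behaves like a SICD-type reader with populated SICD metadata.
reader = open_complex('example_slc_product.h5')  # hypothetical file name

sicd = reader.get_sicds_as_tuple()[0]
print(sicd.CollectionInfo.CollectorName)

# Read a chip of complex data (rows 0-999, columns 0-999); array-style
# slicing of the reader is assumed here.
chip = reader[0:1000, 0:1000]
print(chip.shape, chip.dtype)
```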
Some general TIFF and NITF reading support is provided, but this is not the main
goal of the SarPy library.
Documentation
-------------
Documentation for the project is available at
[readthedocs](https://sarpy.readthedocs.io/en/latest/).
If this documentation is inaccessible, it can be built locally after checking out
this repository using sphinx via the command `python setup.py build_sphinx`.
This depends on python package `sphinx`.
Origins
-------
SarPy was developed at the National Geospatial-Intelligence Agency (NGA). The
software use, modification, and distribution rights are stipulated within the
MIT license.
Dependencies
------------
The core library functionality depends only on `numpy >= 1.11.0` and `scipy`.
Optional Dependencies and Behavior
----------------------------------
There is a small collection of dependencies representing functionality which is
not a core requirement for most sarpy tasks. There is an obvious tension between
requiring the smallest possible list of dependencies for core functionality and
avoiding surprise, unstated dependencies which cause unexpected failures, and
there are viable arguments for making any or all of these formally stated
dependencies. The choices made here are guided by practical realities rather
than by what is generally considered best practice.
For each package on this list, the import is attempted (where relevant), and any
import error for these optional dependencies is caught and handled. In other words,
a missing optional dependency **will not** surface at import time. Excepting
the functionality requiring `h5py`, this import error handling is typically silent.
Every module in sarpy can be successfully imported, provided that numpy and scipy
are in the environment. Attempting to use functionality which depends on a missing
optional dependency will generate an error **at run time**, with a message
indicating the missing optional dependency (see the sketch after the dependency
list below).
- Support for reading single look complex data from certain sources which provide
data in hdf5 format require the `h5py` package, this includes Cosmo-Skymed, ICEYE,
and NISAR data.
- Reading an image segment in a NITF file using jpeg or jpeg 2000 compression
and/or writing a kmz image overlay requires the `pillow` package.
- CPHD consistency checks, presented in the `sarpy.consistency` module, depend on
`lxml>=4.1.1`, `networkx>=2.5`, `shapely>=1.6.4`, and `pytest>=3.3.2`. Note that these
are the versions tested for compliance.
- Some less commonly used (in the sarpy realm) NITF functionality requires the use
and interpretation of UTM coordinates, and this requires the `pyproj` package.
- Building sphinx documentation (mentioned below) requires packages `sphinx`,
and `sphinx_gallery`.
- Optional portions of running unit tests (unlikely to be of relevance to anyone
not performing development on the core sarpy package itself) require the `lxml`
  package.
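The behavior described above follows a common guarded-import pattern; the
following is an illustrative sketch of that pattern, not sarpy's exact code:
```python
# Illustrative pattern only: guard an optional dependency so that importing
# the module always succeeds, and fail with a clear message at run time.
try:
    import h5py
except ImportError:
    h5py = None


def open_hdf5_product(file_name):
    """Open an hdf5-based product, requiring the optional h5py package."""
    if h5py is None:
        raise ImportError(
            'Reading this format requires the optional dependency h5py; '
            'install h5py to enable this functionality.')
    return h5py.File(file_name, 'r')
```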
Installation
------------
From PyPI, install using pip (may require escalated privileges e.g. sudo):
```bash
pip install sarpy
```
Note that here `pip` represents the pip utility for the desired Python environment.
For verbose instructions on installing from source, see
[here](https://docs.python.org/3/install/index.html). Even when installing from
source, it is still recommended to build the package locally and install it using
pip, which allows a proper package update mechanism, while
`python setup.py install` **does not**.
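For example, installing from a local checkout might look like the following
sketch (the repository location is assumed here; adjust to your environment):
```bash
git clone https://github.com/ngageoint/sarpy.git
cd sarpy
pip install .
```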
Issues and Bugs
---------------
Support for Python 2 has been dropped. The core sarpy functionality has been
tested for Python 3.6, 3.7, 3.8, 3.9, 3.10, and 3.11.
Changes to sarpy for the sole purpose of supporting a Python version beyond
end-of-life are unlikely to be considered.
Information regarding any discovered bugs would be greatly appreciated, so please
feel free to create a GitHub issue. If more appropriate, contact [email protected].
Pull Requests
-------------
Efforts at direct contribution to the project are certainly welcome, and please
feel free to make a pull request. Note that any and all contributions to this
project will be released under the MIT license.
Software source code previously released under an open source license and then
modified by NGA staff is considered a "joint work" (see 17 USC 101); it is partially
copyrighted, partially public domain, and as a whole is protected by the copyrights
of the non-government authors and must be released according to the terms of the
original open source license.
Associated GUI Capabilities
---------------------------
Some associated SAR specific graphical user interface tools are maintained in the
[sarpy_apps project](https://github.com/ngageoint/sarpy_apps).
|
/sarpy-1.3.58.tar.gz/sarpy-1.3.58/README.md
| 0.587115 | 0.945751 |
README.md
|
pypi
|
import sys
import os
import argparse
from json import load
from math import log10
from os.path import exists, join
from contextlib import contextmanager
from dark.fasta import FastaReads
from dark.aa import compareAaReads, matchToString as aaMatchToString
from dark.dna import compareDNAReads, matchToString as dnaMatchToString
from dark.reads import Read, Reads
from sars2seq.features import Features
from sars2seq.genome import SARS2Genome, addAlignerOption
from sars2seq.translate import TranslationError
from sars2seq.variants import VARIANTS
@contextmanager
def genomeFilePointer(read, args, suffix):
"""
Open a file whose name is derived from the reference read, the feature
    being examined and whether nucleotides or amino acids are involved.
@param read: The C{dark.reads.Read} that was read from the input FASTA file
(this is the overall genome from which the feature was obtained).
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
@param suffix: The C{str} suffix for the filename.
@return: This is a context manager decorator, so it yields a file pointer
and then closes it.
"""
if args.outDir:
prefix = read.id.split()[0].replace('/', '_')
filename = join(args.outDir, f"{prefix}{suffix}")
with open(filename, 'w') as fp:
yield fp
else:
yield sys.stdout
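# Illustrative usage (mirrors the call in main() below):
#   with genomeFilePointer(read, args, '-variant-summary.txt') as fp:
#       print(read.id, file=fp)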
@contextmanager
def featureFilePointers(read, feature, args=None):
"""
Return a dictionary of file pointers for output streams on a per-read
(i.e., per input genome) basis.
@param read: The C{dark.reads.Read} that was read from the input FASTA file
(this is the overall genome from which the feature was obtained).
@param feature: The C{str} name of the feature.
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
"""
def fp(suffix, nt):
"""
Get a file pointer.
@param suffix: A C{str} file name suffix.
        @param nt: If C{True} the sequences are nucleotide, else protein.
        @return: A file pointer open for writing.
"""
if args.outDir:
prefix = read.id.split()[0].replace('/', '_')
filename = join(
args.outDir,
f"{prefix}-{feature}{('-nt' if nt else '-aa')}{suffix}")
return open(filename, 'w')
else:
return sys.stdout
fps = {}
try:
if args.printNtMatch:
fps['nt-match'] = fp('-match.txt', True)
if args.printAaMatch:
fps['aa-match'] = fp('-match.txt', False)
if args.printNtSequence:
fps['nt-sequence'] = fp('-sequence.fasta', True)
if args.printAaSequence:
fps['aa-sequence'] = fp('-sequence.fasta', False)
if args.printNtAlignment:
fps['nt-align'] = fp('-align.fasta', True)
if args.printAaAlignment:
fps['aa-align'] = fp('-align.fasta', False)
yield fps
finally:
if args.outDir:
for fp in fps.values():
fp.close()
def printDiffs(read1, read2, nt, referenceOffset, fp, indent=''):
"""
Print differences between sequences.
@param read1: A C{dark.reads.Read} instance.
@param read2: A C{dark.reads.Read} instance.
    @param nt: If C{True} the sequences are nucleotide, else protein.
    @param referenceOffset: The C{int} 0-based offset of the feature in the
        reference.
    @param fp: An open file pointer to write to.
    @param indent: A C{str} prefix for each output line.
"""
len1, len2 = len(read1), len(read2)
width = int(log10(max(len1, len2))) + 1
headerPrinted = False
multiplier = 1 if nt else 3
what = 'nt' if nt else 'aa'
header = '%sDifferences: site, %s1, %s2, ref nt %s' % (
indent, what, what, 'site' if nt else 'codon start')
for site, (a, b) in enumerate(zip(read1.sequence, read2.sequence)):
if a != b:
if not headerPrinted:
print(header, file=fp)
headerPrinted = True
print('%s %*d %s %s %5d' % (
indent, width, site + 1, a, b,
referenceOffset + (multiplier * site) + 1), file=fp)
def printVariantSummary(genome, fp, args):
"""
Print a summary of whether the genome fulfils the various variant
properties.
@param genome: A C{SARS2Genome} instance.
@param fp: An open file pointer to write to.
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
"""
print('Variant summary:', file=fp)
for variant in args.checkVariant:
testCount, errorCount, tests = genome.checkVariant(
variant, args.onError, sys.stderr)
successCount = testCount - errorCount
print(f' {VARIANTS[variant]["description"]}:', file=fp)
print(f' {testCount} checks, {successCount} passed.',
file=fp)
for feature in tests:
for type_ in tests[feature]:
passed = set()
failed = set()
for change, (_, _, genOK, _) in tests[feature][type_].items():
if genOK:
passed.add(change)
else:
failed.add(change)
print(f' {feature} {type_}:', file=fp, end='')
if passed:
print(' PASS:', ', '.join(sorted(passed)), file=fp,
end='')
if failed:
print(' FAIL:', ', '.join(sorted(failed)), file=fp,
end='')
print(file=fp)
def processFeature(featureName, genome, fps, featureNumber, args):
"""
Process a feature from a genome.
@param featureName: A C{str} feature name.
@param genome: A C{SARS2Genome} instance.
@param fps: A C{dict} of file pointers for the various output streams.
@param featureNumber: The C{int} 0-based count of the features requested.
This will be zero for the first feature, 1 for the second, etc.
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
"""
referenceNt, genomeNt = genome.ntSequences(featureName)
feature = genome.features[featureName]
if args.printAaMatch or args.printAaSequence or args.printAaAlignment:
try:
referenceAa, genomeAa = genome.aaSequences(featureName)
except TranslationError as e:
if args.onError == 'raise':
raise
elif args.onError == 'print':
print(f'Could not translate feature {featureName} in genome '
f'{genome.genome.id}: {e}', file=sys.stderr)
referenceAa = genomeAa = None
newlineNeeded = False
if args.printNtMatch:
fp = fps['nt-match']
if featureNumber:
print(file=fp)
print(f'Feature: {featureName} nucleotide match', file=fp)
print(f' Reference nt location {feature["start"] + 1}', file=fp)
match = compareDNAReads(referenceNt, genomeNt)
print(dnaMatchToString(match, referenceNt, genomeNt,
matchAmbiguous=False, indent=' '), file=fp)
printDiffs(referenceNt, genomeNt, True, feature['start'], fp,
indent=' ')
newlineNeeded = True
if args.printAaMatch and genomeAa:
fp = fps['aa-match']
if newlineNeeded or featureNumber:
print(file=fp)
print(f'Feature: {featureName} amino acid match', file=fp)
match = compareAaReads(referenceAa, genomeAa)
print(aaMatchToString(match, referenceAa, genomeAa, indent=' '),
file=fp)
printDiffs(referenceAa, genomeAa, False, feature['start'], fp,
indent=' ')
if args.printNtSequence:
noGaps = Read(genomeNt.id, genomeNt.sequence.replace('-', ''))
Reads([noGaps]).save(fps['nt-sequence'])
if args.printAaSequence and genomeAa:
noGaps = Read(genomeAa.id, genomeAa.sequence.replace('-', ''))
Reads([noGaps]).save(fps['aa-sequence'])
if args.printNtAlignment:
Reads([genomeNt, referenceNt]).save(fps['nt-align'])
if args.printAaAlignment and genomeAa:
Reads([genomeAa, referenceAa]).save(fps['aa-align'])
def main(args):
"""
Describe a SARS-CoV-2 genome.
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
@return: An C{int} exit status.
"""
outDir = args.outDir
if outDir:
if not exists(outDir):
os.makedirs(outDir)
features = Features(args.gbFile)
if args.feature:
if args.canonicalNames:
wantedFeatures = map(features.canonicalName, args.feature)
else:
wantedFeatures = args.feature
else:
if args.noFeatures:
wantedFeatures = []
else:
wantedFeatures = sorted(features)
if not (args.checkVariant or wantedFeatures):
print('No action specified - I have nothing to do!', file=sys.stderr)
return 1
if args.variantFile:
try:
VARIANTS.update(
load(open(args.variantFile, encoding='utf-8')))
except Exception as e:
print(f'Could not parse variant JSON in {args.variantFile!r}: {e}',
file=sys.stderr)
sys.exit(1)
count = ignoredDueToCoverageCount = 0
for count, read in enumerate(FastaReads(args.genome), start=1):
if args.minReferenceCoverage is not None:
coverage = ((len(read) - read.sequence.upper().count('N')) /
len(features.reference))
if coverage < args.minReferenceCoverage:
ignoredDueToCoverageCount += 1
print(f'Genome {read.id!r} ignored due to low '
f'({coverage * 100.0:.2f}%) coverage of the reference.',
file=sys.stderr)
continue
genome = SARS2Genome(read, features, aligner=args.aligner)
if args.checkVariant:
with genomeFilePointer(read, args, '-variant-summary.txt') as fp:
print(read.id, file=fp)
printVariantSummary(genome, fp, args)
for i, featureName in enumerate(wantedFeatures):
with featureFilePointers(read, featureName, args) as fps:
processFeature(featureName, genome, fps, i, args)
print(f'Examined {count} genome{"" if count == 1 else "s"}.')
if args.minReferenceCoverage is not None:
print(f'Ignored {ignoredDueToCoverageCount} genomes due to low '
f'coverage.')
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Describe a SARS-CoV-2 genome (or genomes).')
parser.add_argument(
'--genome', metavar='file.fasta', type=argparse.FileType('r'),
default=sys.stdin,
help='The FASTA file containing the SARS-CoV-2 genome(s) to examine.')
parser.add_argument(
'--feature', action='append', metavar='FEATURE',
help='The feature to describe (e.g., nsp2). May be repeated.')
parser.add_argument(
'--outDir', metavar='DIR',
help=('The directory to write alignments and sequences to. If not '
'specified, standard output is used.'))
parser.add_argument(
'--checkVariant', action='append',
help=(f'Check whether the genome matches the changes in a known '
f'variant. The checked variant(s) must either be found in the '
f'known variants (currently {", ".join(sorted(VARIANTS))}) '
f'or else be given in a JSON file using --variantFile. '
f'In case of conflict, names in any given --variantFile have '
f'precedence over the predefined names. May be repeated.'))
parser.add_argument(
'--variantFile', metavar='VARIANT-FILE.json',
help=('A JSON file of variant information. See sars2seq/variants.py '
'for the required format.'))
parser.add_argument(
'--printNtSequence', '--printNTSequence', action='store_true',
help='Print the nucleotide sequence.')
parser.add_argument(
'--printAaSequence', '--printAASequence', action='store_true',
help='Print the amino acid sequence.')
parser.add_argument(
'--printNtMatch', '--printNTMatch', action='store_true',
help='Print details of the nucleotide match with the reference.')
parser.add_argument(
'--printAaMatch', '--printAAMatch', action='store_true',
help='Print details of the amino acid match with the reference.')
parser.add_argument(
'--printNtAlignment', '--printNTAlignment', action='store_true',
help='Print the nucleotide alignment with the reference.')
parser.add_argument(
'--printAaAlignment', '--printAAAlignment', action='store_true',
help='Print the amino acid alignment with the reference.')
parser.add_argument(
'--canonicalNames', action='store_true',
        help=('Use canonical feature names for output files, as opposed to '
'aliases that might be given on the command line. This can be '
'used to ensure that output files have predictable names.'))
parser.add_argument(
'--noFeatures', action='store_true',
help='Do not look up any features by default.')
parser.add_argument(
'--gbFile', metavar='file.gb', default=Features.REF_GB,
help='The GenBank file to read for SARS-CoV-2 features.')
parser.add_argument(
'--minReferenceCoverage', metavar='coverage', type=float,
help=('The fraction of non-N bases required in the genome(s) in order '
'for them to be processed. Genomes with lower coverage will be '
'ignored, with a message printed to standard error. Note that '
'the denominator used to compute the coverage fraction is the '
'length of the reference. I.e., coverage is computed as number '
'of non-N bases in the genome divided by the length of the '
'reference.'))
parser.add_argument(
'--onError', choices=('ignore', 'print', 'raise'), default='print',
        help=('What to do if an error occurs (e.g., due to a translation error '
              'or an index out of range).'))
addAlignerOption(parser)
args = parser.parse_args()
sys.exit(main(args))
|
/sars2seq-0.1.0.tar.gz/sars2seq-0.1.0/bin/describe-genome.py
| 0.492188 | 0.248238 |
describe-genome.py
|
pypi
|
import sys
import os
from json import dumps
import argparse
from dark.fasta import FastaReads
import sars2seq
from sars2seq.features import Features
from sars2seq.genome import SARS2Genome, addAlignerOption
def report(genome, args, includeGenome=True):
"""
Report what's found at a site for a given genome (or report insufficient
coverage to standard error).
@param genome: A C{SARS2Genome} instance.
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
@param includeGenome: If C{True}, include information about the genome
(not just the reference).
"""
try:
offsetInfo = genome.offsetInfo(
args.site - 1, relativeToFeature=args.relativeToFeature,
aa=args.aa, featureName=args.feature,
includeUntranslated=args.includeUntranslated,
minReferenceCoverage=args.minReferenceCoverage)
except sars2seq.Sars2SeqError as e:
print(e, file=sys.stderr)
sys.exit(1)
if offsetInfo is None:
        what = args.feature or 'genome'
print(f'Insufficient {what} coverage', file=sys.stderr)
return
if args.genomeAaOnly:
print(offsetInfo['genome']['aa'])
else:
if not includeGenome:
del offsetInfo['genome']
if args.includeFeature:
featureName = offsetInfo['featureName']
if featureName:
assert 'feature' not in offsetInfo
offsetInfo['feature'] = genome.features[featureName]
# TODO: what should we print if the user doesn't want JSON? Some kind
# of textual summary, I guess. When that's implemented, remove the
# "or True" below.
if args.json or True:
# Make the featureNames into a sorted list (it is by default a
# set), so it can be printed as JSON.
offsetInfo['featureNames'] = sorted(offsetInfo['featureNames'])
print(dumps(offsetInfo, indent=4, sort_keys=True))
else:
print(offsetInfo)
def main(args):
"""
Describe a site in a SARS-CoV-2 genome or genomes.
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
@return: An C{int} exit status.
"""
features = Features(args.gbFile)
count = 0
if args.genome is None and os.isatty(0):
genome = SARS2Genome(features.reference, features,
aligner=args.aligner)
report(genome, args, False)
else:
fp = open(args.genome) if args.genome else sys.stdin
for count, read in enumerate(FastaReads(fp), start=1):
genome = SARS2Genome(read, features, aligner=args.aligner)
report(genome, args)
if args.verbose:
print(f'Examined {count} genomes.', file=sys.stderr)
if args.genome:
fp.close()
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Describe a site in SARS-CoV-2 genome(s).')
parser.add_argument(
'--genome', metavar='file.fasta',
help='The FASTA file containing the SARS-CoV-2 genome(s) to examine.')
parser.add_argument(
'--site', metavar='N', type=int, required=True,
help='The (1-based) site to find information for.')
parser.add_argument(
'--feature', metavar='FEATURE',
help=('The feature to examine (e.g., nsp2). This is required if you '
              'use --aa or --relativeToFeature.'))
parser.add_argument(
'--includeUntranslated', action='store_true',
help=('Include untranslated features (if no feature name is given and '
'it is necessary to identify the intended feature just based on '
'offset).'))
parser.add_argument(
'--aa', action='store_true',
help=('The given site is an amino acid count (the default is '
'nucleotides).'))
parser.add_argument(
'--relativeToFeature', action='store_true',
help='The given site is relative to the start of the feature.')
parser.add_argument(
'--json', action='store_true',
help='Print the result as JSON.')
parser.add_argument(
'--genomeAaOnly', action='store_true',
help='Only print the amino acid from the genome.')
parser.add_argument(
'--verbose', action='store_true',
        help='Print information about processing to standard error.')
parser.add_argument(
'--includeFeature', action='store_true',
help='Also print information about the feature at the site.')
parser.add_argument(
'--minReferenceCoverage', metavar='coverage', type=float,
help=('The fraction of non-N bases required in the genome(s) in order '
'for them to be processed. Genomes with lower coverage will be '
'ignored, with a message printed to standard error. Note that '
'the denominator used to compute the coverage fraction is the '
'length of the reference. I.e., coverage is computed as number '
'of non-N bases in the genome divided by the length of the '
'reference.'))
parser.add_argument(
'--gbFile', metavar='file.gb', default=Features.REF_GB,
help='The GenBank file to read for SARS-CoV-2 features.')
addAlignerOption(parser)
args = parser.parse_args()
sys.exit(main(args))
|
/sars2seq-0.1.0.tar.gz/sars2seq-0.1.0/bin/describe-site.py
| 0.42322 | 0.276445 |
describe-site.py
|
pypi
|
import argparse
from collections import defaultdict
from sars2seq.features import Features, ALIASES
def printNames(features):
"""
Print feature names, each with all known aliases (if any).
@param features: A C{Features} instance.
"""
def key(name):
"""
Make a sort key for feature names.
@param name: A C{str} feature name.
@return: A C{str} C{int} 2-tuple for sorting feature names.
"""
if name.startswith('nsp'):
return 'nsp', int(name[3:])
elif name.startswith('ORF'):
return 'orf', int(name[3:].split()[0].rstrip('ab'))
else:
return name.lower(), 0
featureNames = sorted(features, key=key)
aka = defaultdict(set)
for alias, name in ALIASES.items():
aka[name].add(alias)
for featureName in featureNames:
        if aka[featureName]:
            akas = ' ' + ', '.join(sorted(aka[featureName]))
        else:
            akas = ''
print(f'{featureName}:{akas}')
def main(args):
"""
Describe SARS-CoV-2 annotations.
@param args: A C{Namespace} instance as returned by argparse with
values for command-line options.
"""
features = Features(args.gbFile)
if args.names:
printNames(features)
return
if args.name:
wantedName = features.canonicalName(args.name)
else:
wantedName = None
print(f'Features for {features.reference.id}:')
for featureName, feature in sorted(features.items()):
if wantedName and featureName != wantedName:
continue
print(f'{featureName}:')
print(' start:', feature['start'])
print(' stop:', feature['stop'])
print(' length:', feature['stop'] - feature['start'])
try:
print(' product:', feature['product'])
except KeyError:
pass
try:
print(' function:', feature['function'])
except KeyError:
pass
sequence = feature['sequence']
print(f' sequence (len {len(sequence):5d} nt):',
(sequence[:args.maxLen] + '...') if len(sequence) > args.maxLen
else sequence)
try:
translation = feature['translation']
except KeyError:
# Some features (e.g., UTR, stem loops) do not have a translation.
pass
else:
print(f' translation (len {len(translation):5d} aa):',
(translation[:args.maxLen] + '...')
if len(translation) > args.maxLen else translation)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Describe a SARS-CoV-2 sequence.')
parser.add_argument(
'--gbFile', metavar='file.gb', default=Features.REF_GB,
help='The GenBank file to examine.')
parser.add_argument(
'--name', metavar='NAME',
help=('The feature to print information for (all features are '
'printed if not specified).'))
parser.add_argument(
'--maxLen', type=int, default=80,
help=('The maximum sequence length to print. Longer sequences will '
'be truncated.'))
parser.add_argument(
'--names', action='store_true',
help='Only print feature names and aliases (if any).')
args = parser.parse_args()
main(args)
|
/sars2seq-0.1.0.tar.gz/sars2seq-0.1.0/bin/describe-feature.py
| 0.577138 | 0.224523 |
describe-feature.py
|
pypi
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="KapInformationSources",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(
db_index=True,
help_text="This is the stored value, required",
max_length=250,
unique=True,
verbose_name="Stored value",
),
),
(
"display_name",
models.CharField(
db_index=True,
help_text="(suggest 40 characters max.)",
max_length=250,
unique=True,
verbose_name="Name",
),
),
(
"display_index",
models.IntegerField(
db_index=True,
default=0,
help_text="Index to control display order if not alphabetical, not required",
verbose_name="display index",
),
),
(
"field_name",
models.CharField(
blank=True,
editable=False,
help_text="Not required",
max_length=25,
null=True,
),
),
(
"version",
models.CharField(default="1.0", editable=False, max_length=35),
),
],
options={
"verbose_name": "KAP Information Sources",
"verbose_name_plural": "KAP Information Sources",
"ordering": ["display_index", "display_name"],
"abstract": False,
},
),
migrations.AddIndex(
model_name="kapinformationsources",
index=models.Index(
fields=["id", "display_name", "display_index"],
name="sarscov2_ka_id_742474_idx",
),
),
]
|
/migrations/0001_initial.py
| 0.590425 | 0.173078 |
0001_initial.py
|
pypi
|
import _socket
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django_audit_fields.fields.hostname_modification_field
import django_audit_fields.fields.userfield
import django_audit_fields.fields.uuid_auto_field
import django_audit_fields.models.audit_model_mixin
import django_revision.revision_field
import edc_model.models.fields.blood_pressure
import edc_model.models.fields.height
import edc_model.models.fields.other_charfield
import edc_model.models.fields.weight
import edc_model.validators.date
import edc_protocol.validators
import edc_sites.models
import edc_utils.date
import sarscov2.models.coronavirus_kap
import simple_history.models
import uuid
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("sarscov2", "0002_auto_20200416_1937"),
]
operations = [
migrations.AlterModelOptions(
name="coronakapinformationsources",
options={
"default_permissions": (
"add",
"change",
"delete",
"view",
"export",
"import",
),
"ordering": ["display_index", "display_name"],
"verbose_name": "KAP Information Sources",
"verbose_name_plural": "KAP Information Sources",
},
),
migrations.CreateModel(
name="HistoricalCoronavirusKap",
fields=[
(
"revision",
django_revision.revision_field.RevisionField(
blank=True,
editable=False,
help_text="System field. Git repository tag:branch:commit.",
max_length=75,
null=True,
verbose_name="Revision",
),
),
(
"created",
models.DateTimeField(
blank=True,
default=django_audit_fields.models.audit_model_mixin.utcnow,
),
),
(
"modified",
models.DateTimeField(
blank=True,
default=django_audit_fields.models.audit_model_mixin.utcnow,
),
),
(
"user_created",
django_audit_fields.fields.userfield.UserField(
blank=True,
help_text="Updated by admin.save_model",
max_length=50,
verbose_name="user created",
),
),
(
"user_modified",
django_audit_fields.fields.userfield.UserField(
blank=True,
help_text="Updated by admin.save_model",
max_length=50,
verbose_name="user modified",
),
),
(
"hostname_created",
models.CharField(
blank=True,
default=_socket.gethostname,
help_text="System field. (modified on create only)",
max_length=60,
),
),
(
"hostname_modified",
django_audit_fields.fields.hostname_modification_field.HostnameModificationField(
blank=True,
help_text="System field. (modified on every save)",
max_length=50,
),
),
("device_created", models.CharField(blank=True, max_length=10)),
("device_modified", models.CharField(blank=True, max_length=10)),
(
"id",
django_audit_fields.fields.uuid_auto_field.UUIDAutoField(
blank=True,
db_index=True,
editable=False,
help_text="System auto field. UUID primary key.",
),
),
(
"crf_status",
models.CharField(
choices=[
("INCOMPLETE", "Incomplete (some data pending)"),
("COMPLETE", "Complete"),
],
default="COMPLETE",
help_text="If some data is still pending, flag this CRF as incomplete",
max_length=25,
verbose_name="CRF status",
),
),
(
"crf_status_comments",
models.TextField(
blank=True,
help_text="for example, why some data is still pending",
null=True,
verbose_name="Any comments related to status of this CRF",
),
),
(
"hiv_pos",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No"), ("unknown", "Unknown")],
max_length=25,
verbose_name="Does the patient have <u>HIV</u> infection?",
),
),
(
"hiv_pos_year",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[
django.core.validators.MinValueValidator(1950),
django.core.validators.MaxValueValidator(2020),
],
verbose_name="If 'Yes', what year did you first test positive?",
),
),
(
"hiv_year_started_art",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[django.core.validators.MinValueValidator(0)],
verbose_name="If 'Yes', what year did you <u>start antiretroviral therapy</u>?",
),
),
(
"hiv_missed_doses",
models.IntegerField(
blank=True,
null=True,
verbose_name="If 'Yes', in the last month <u>how many days</u> did you miss taking your <u>ART</u> medications?",
),
),
(
"diabetic",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No"), ("unknown", "Unknown")],
max_length=25,
verbose_name="Have you been diagnosed with <u>diabetes</u>?",
),
),
(
"diabetic_dx_year",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[
django.core.validators.MinValueValidator(1950),
django.core.validators.MaxValueValidator(2020),
],
verbose_name="If 'Yes', what year did you first learn you had <u>diabetes</u>?",
),
),
(
"diabetic_on_meds",
models.CharField(
choices=[
("Yes", "Yes"),
("No", "No"),
("N/A", "Not applicable"),
],
default="N/A",
max_length=25,
verbose_name="If 'Yes', are you taking medications to control your <u>diabetes</u>?",
),
),
(
"diabetic_missed_doses",
models.IntegerField(
blank=True,
null=True,
verbose_name="If 'Yes', in the last month <u>how many days</u> did you miss taking your <u>diabetes</u> medications?",
),
),
(
"hypertensive",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No"), ("unknown", "Unknown")],
max_length=25,
verbose_name="Have you been diagnosed with <u>hypertension</u>?",
),
),
(
"hypertensive_dx_year",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[
django.core.validators.MinValueValidator(1950),
django.core.validators.MaxValueValidator(2020),
],
verbose_name="If 'Yes', what year did you first learn you had <u>hypertension</u>?",
),
),
(
"hypertensive_on_meds",
models.CharField(
choices=[
("Yes", "Yes"),
("No", "No"),
("N/A", "Not applicable"),
],
default="N/A",
max_length=25,
verbose_name="If 'Yes', are you taking medications to control your <u>hypertension</u>?",
),
),
(
"hypertensive_missed_doses",
models.IntegerField(
blank=True,
null=True,
verbose_name="If 'Yes', in the last month <u>how many days</u> did you miss taking your <u>hypertension</u> medications?",
),
),
(
"weight",
edc_model.models.fields.weight.WeightField(blank=True, null=True),
),
(
"height",
edc_model.models.fields.height.HeightField(blank=True, null=True),
),
(
"sys_blood_pressure",
edc_model.models.fields.blood_pressure.SystolicPressureField(
blank=True, null=True
),
),
(
"dia_blood_pressure",
edc_model.models.fields.blood_pressure.DiastolicPressureField(
blank=True, null=True
),
),
(
"married",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No")],
max_length=25,
verbose_name="Are you currently married?",
),
),
(
"employment_status",
models.CharField(
choices=[
("working_for_pay", "Working for pay / Employed"),
("not_working_for_pay", "Not working for pay / Unemployed"),
],
max_length=25,
verbose_name="Are you employed / working?",
),
),
(
"employment",
models.CharField(
choices=[
("professional", "Professional (e.g. office"),
("labourer", "Labourer"),
("self_employed", "Small business, self-employed"),
("N/A", "Not applicable"),
("OTHER", "Other, specify below"),
],
default="N/A",
max_length=25,
verbose_name="What type of <u>paid</u> work / employment are you involved in?",
),
),
(
"employment_other",
edc_model.models.fields.other_charfield.OtherCharField(
blank=True,
max_length=35,
null=True,
verbose_name="If other, please specify ...",
),
),
(
"unpaid_work",
models.CharField(
choices=[
("volunteer", "Volunteer"),
("unpaid_intern", "Unpaid Intern"),
("housewife", "Housewife"),
("retired", "Retired"),
("N/A", "Not applicable"),
("OTHER", "Other, specify below"),
],
default="N/A",
max_length=25,
verbose_name="What type of <u>unpaid</u> work are you involved in?",
),
),
(
"unpaid_work_other",
edc_model.models.fields.other_charfield.OtherCharField(
blank=True,
max_length=35,
null=True,
verbose_name="If other, please specify ...",
),
),
(
"household_size",
models.IntegerField(
help_text="Family / people who spend more than 14 nights per month in your home.",
verbose_name="How many people live together in your home / dwelling?",
),
),
(
"nights_away",
models.IntegerField(
help_text="e.g. travelling for work, staying with family",
verbose_name="In the last one month, how many nights did you spend away from your home / dwelling?",
),
),
(
"education",
models.CharField(
choices=[
("primary", "Up to primary"),
("secondary", "Up to secondary"),
("tertiary", "Tertiary (University, college)"),
("no_education", "No education"),
],
max_length=25,
verbose_name="What is your highest completed education level?",
),
),
(
"health_insurance",
models.CharField(
choices=[
("work_scheme", "Work scheme health insurance"),
("private", "Private health insurance"),
("no_insurance", "No insurance, I pay"),
("OTHER", "Other, please specify below"),
],
max_length=25,
verbose_name="How are you covered for your health care expenses?",
),
),
(
"health_insurance_other",
edc_model.models.fields.other_charfield.OtherCharField(
blank=True,
max_length=35,
null=True,
verbose_name="If other, please specify ...",
),
),
(
"personal_health_opinion",
models.CharField(
choices=[
("excellent", "Excellent"),
("good", "Good"),
("fair", "Fair"),
("poor", "Poor"),
],
max_length=25,
verbose_name="In your opinion, what is your health like?",
),
),
(
"perceived_threat",
models.IntegerField(
help_text="On a scale from 1-10",
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(10),
],
verbose_name="On a scale from 1-10, how serious of a public health threat is coronavirus?",
),
),
(
"corona_concern",
models.CharField(
choices=[
("very", "Very worried"),
("somewhat", "Somewhat worried"),
("a_little", "A little worried"),
("not_at_all", "Not worried at all"),
],
max_length=25,
verbose_name="How worried are you about getting coronavirus?",
),
),
(
"personal_infection_likelihood",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="How likely do you think it is that <u>you</u> will get sick from coronavirus?",
),
),
(
"family_infection_likelihood",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="How likely do you think it is that <u>someone in your family</u> will get sick from coronavirus?",
),
),
(
"perc_die",
models.IntegerField(
help_text="On a scale from 0-100",
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
verbose_name="Out of every 100 people who get infected with coronavirus, how many do you think <u>will die</u>?",
),
),
(
"perc_mild_symptom",
models.IntegerField(
help_text="On a scale from 0-100",
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
verbose_name="Out of every 100 people who get infected with coronavirus, how many do you think <u>will have only mild symptoms</u>?",
),
),
(
"spread_droplets",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus spreads by droplets from cough and sneezes from people infected with coronavirus",
),
),
(
"spread_touch",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus can spread by people touching each other",
),
),
(
"spread_sick",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="People can transmit coronavirus when they are sick ",
),
),
(
"spread_asymptomatic",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="People can transmit coronavirus even when they do not appear to be sick",
),
),
(
"severity_age",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus is more severe in older people than children",
),
),
(
"severity_hiv",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus is more severe in people with <u>HIV infection</u>",
),
),
(
"severity_diabetes_hypertension",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus is more severe in people with <u>diabetes</u> and/or <u>hypertension</u>",
),
),
(
"hot_climate",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus does not survive in the hot climate",
),
),
(
"lives_on_materials",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus can live on clothes, plastics, cardboard for a day or more",
),
),
(
"spread_touch2",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="You can catch coronavirus if you touch an infected area and then touch your face or eyes",
),
),
(
"symptoms_fever",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Fever",
),
),
(
"symptoms_headache",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Headache",
),
),
(
"symptoms_dry_cough",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Dry persistant cough",
),
),
(
"symptoms_body_aches",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Body aches",
),
),
(
"symptoms_smell",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Loss of taste and smell",
),
),
(
"symptoms_breathing",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Fast or difficult breathing",
),
),
(
"know_other_symptoms",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No")],
max_length=25,
verbose_name="Do you know of any other symptoms of coronavirus?",
),
),
(
"symptoms_other",
models.TextField(
blank=True,
max_length=250,
null=True,
verbose_name="Please list any other symptoms of coronavirus that you are aware of:",
),
),
(
"hot_drinks",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Drink warm water or hot drinks like tea or coffee",
),
),
(
"alcohol",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Drink alcohol, spirits, etc",
),
),
(
"wash_hands",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Wash hands with soap and warm water",
),
),
(
"hand_sanitizer",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Use hand sanitisers with alcohol",
),
),
(
"take_herbs_prevention",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Taking herbs",
),
),
(
"avoid_crowds",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Avoid crowded places such as markets and public transport",
),
),
(
"face_masks",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Wear a face mask",
),
),
(
"stay_indoors",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Stay indoors",
),
),
(
"social_distance",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Keep at least a 2 metre distance from people",
),
),
(
"other_actions_prevention",
models.TextField(
blank=True,
max_length=250,
null=True,
verbose_name="Any <u>other</u> things you would do to <u>protect</u> yourself from Coronavirus?",
),
),
(
"stay_home",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Stay at home and avoid people",
),
),
(
"visit_clinic",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Go to the nearest health facility",
),
),
(
"call_nurse",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Call your nurse and tell them you are sick",
),
),
(
"take_meds",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Take medicines like chloroquine",
),
),
(
"take_herbs_symptoms",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Take herbs",
),
),
(
"stop_chronic_meds",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
help_text="For example, medicines for diabetes, hypertension and/or HIV",
max_length=25,
verbose_name="Stop taking your chronic disease medicines",
),
),
(
"visit_religious",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Go to a religious leader instead of a doctor",
),
),
(
"visit_traditional",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Go to a traditional healer instead of a doctor",
),
),
(
"other_actions_symptoms",
models.TextField(
blank=True,
max_length=250,
null=True,
verbose_name="Any <u>other</u> things you would do if you had <u>symptoms</u> of Coronavirus?",
),
),
(
"subject_identifier",
models.CharField(
db_index=True,
max_length=50,
null=True,
verbose_name="Subject identifier",
),
),
(
"screening_identifier",
models.CharField(
db_index=True,
max_length=50,
null=True,
verbose_name="Screening identifier",
),
),
(
"report_datetime",
models.DateTimeField(
default=edc_utils.date.get_utcnow,
help_text="If reporting today, use today's date/time, otherwise use the date/time this information was reported.",
validators=[
edc_protocol.validators.datetime_not_before_study_start,
edc_model.validators.date.datetime_not_future,
],
verbose_name="Report Date",
),
),
("protocol", models.CharField(default="meta_edc", max_length=50)),
(
"history_id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"site",
models.ForeignKey(
blank=True,
db_constraint=False,
editable=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="sites.Site",
),
),
],
options={
"verbose_name": "historical Coronavirus KAP",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name="CoronavirusKap",
fields=[
(
"revision",
django_revision.revision_field.RevisionField(
blank=True,
editable=False,
help_text="System field. Git repository tag:branch:commit.",
max_length=75,
null=True,
verbose_name="Revision",
),
),
(
"created",
models.DateTimeField(
blank=True,
default=django_audit_fields.models.audit_model_mixin.utcnow,
),
),
(
"modified",
models.DateTimeField(
blank=True,
default=django_audit_fields.models.audit_model_mixin.utcnow,
),
),
(
"user_created",
django_audit_fields.fields.userfield.UserField(
blank=True,
help_text="Updated by admin.save_model",
max_length=50,
verbose_name="user created",
),
),
(
"user_modified",
django_audit_fields.fields.userfield.UserField(
blank=True,
help_text="Updated by admin.save_model",
max_length=50,
verbose_name="user modified",
),
),
(
"hostname_created",
models.CharField(
blank=True,
default=_socket.gethostname,
help_text="System field. (modified on create only)",
max_length=60,
),
),
(
"hostname_modified",
django_audit_fields.fields.hostname_modification_field.HostnameModificationField(
blank=True,
help_text="System field. (modified on every save)",
max_length=50,
),
),
("device_created", models.CharField(blank=True, max_length=10)),
("device_modified", models.CharField(blank=True, max_length=10)),
(
"id",
django_audit_fields.fields.uuid_auto_field.UUIDAutoField(
blank=True,
editable=False,
help_text="System auto field. UUID primary key.",
primary_key=True,
serialize=False,
),
),
(
"crf_status",
models.CharField(
choices=[
("INCOMPLETE", "Incomplete (some data pending)"),
("COMPLETE", "Complete"),
],
default="COMPLETE",
help_text="If some data is still pending, flag this CRF as incomplete",
max_length=25,
verbose_name="CRF status",
),
),
(
"crf_status_comments",
models.TextField(
blank=True,
help_text="for example, why some data is still pending",
null=True,
verbose_name="Any comments related to status of this CRF",
),
),
(
"hiv_pos",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No"), ("unknown", "Unknown")],
max_length=25,
verbose_name="Does the patient have <u>HIV</u> infection?",
),
),
(
"hiv_pos_year",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[
django.core.validators.MinValueValidator(1950),
django.core.validators.MaxValueValidator(2020),
],
verbose_name="If 'Yes', what year did you first test positive?",
),
),
(
"hiv_year_started_art",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[django.core.validators.MinValueValidator(0)],
verbose_name="If 'Yes', what year did you <u>start antiretroviral therapy</u>?",
),
),
(
"hiv_missed_doses",
models.IntegerField(
blank=True,
null=True,
verbose_name="If 'Yes', in the last month <u>how many days</u> did you miss taking your <u>ART</u> medications?",
),
),
(
"diabetic",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No"), ("unknown", "Unknown")],
max_length=25,
verbose_name="Have you been diagnosed with <u>diabetes</u>?",
),
),
(
"diabetic_dx_year",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[
django.core.validators.MinValueValidator(1950),
django.core.validators.MaxValueValidator(2020),
],
verbose_name="If 'Yes', what year did you first learn you had <u>diabetes</u>?",
),
),
(
"diabetic_on_meds",
models.CharField(
choices=[
("Yes", "Yes"),
("No", "No"),
("N/A", "Not applicable"),
],
default="N/A",
max_length=25,
verbose_name="If 'Yes', are you taking medications to control your <u>diabetes</u>?",
),
),
(
"diabetic_missed_doses",
models.IntegerField(
blank=True,
null=True,
verbose_name="If 'Yes', in the last month <u>how many days</u> did you miss taking your <u>diabetes</u> medications?",
),
),
(
"hypertensive",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No"), ("unknown", "Unknown")],
max_length=25,
verbose_name="Have you been diagnosed with <u>hypertension</u>?",
),
),
(
"hypertensive_dx_year",
models.IntegerField(
blank=True,
help_text="format YYYY",
null=True,
validators=[
django.core.validators.MinValueValidator(1950),
django.core.validators.MaxValueValidator(2020),
],
verbose_name="If 'Yes', what year did you first learn you had <u>hypertension</u>?",
),
),
(
"hypertensive_on_meds",
models.CharField(
choices=[
("Yes", "Yes"),
("No", "No"),
("N/A", "Not applicable"),
],
default="N/A",
max_length=25,
verbose_name="If 'Yes', are you taking medications to control your <u>hypertension</u>?",
),
),
(
"hypertensive_missed_doses",
models.IntegerField(
blank=True,
null=True,
verbose_name="If 'Yes', in the last month <u>how many days</u> did you miss taking your <u>hypertension</u> medications?",
),
),
(
"weight",
edc_model.models.fields.weight.WeightField(blank=True, null=True),
),
(
"height",
edc_model.models.fields.height.HeightField(blank=True, null=True),
),
(
"sys_blood_pressure",
edc_model.models.fields.blood_pressure.SystolicPressureField(
blank=True, null=True
),
),
(
"dia_blood_pressure",
edc_model.models.fields.blood_pressure.DiastolicPressureField(
blank=True, null=True
),
),
(
"married",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No")],
max_length=25,
verbose_name="Are you currently married?",
),
),
(
"employment_status",
models.CharField(
choices=[
("working_for_pay", "Working for pay / Employed"),
("not_working_for_pay", "Not working for pay / Unemployed"),
],
max_length=25,
verbose_name="Are you employed / working?",
),
),
(
"employment",
models.CharField(
choices=[
("professional", "Professional (e.g. office"),
("labourer", "Labourer"),
("self_employed", "Small business, self-employed"),
("N/A", "Not applicable"),
("OTHER", "Other, specify below"),
],
default="N/A",
max_length=25,
verbose_name="What type of <u>paid</u> work / employment are you involved in?",
),
),
(
"employment_other",
edc_model.models.fields.other_charfield.OtherCharField(
blank=True,
max_length=35,
null=True,
verbose_name="If other, please specify ...",
),
),
(
"unpaid_work",
models.CharField(
choices=[
("volunteer", "Volunteer"),
("unpaid_intern", "Unpaid Intern"),
("housewife", "Housewife"),
("retired", "Retired"),
("N/A", "Not applicable"),
("OTHER", "Other, specify below"),
],
default="N/A",
max_length=25,
verbose_name="What type of <u>unpaid</u> work are you involved in?",
),
),
(
"unpaid_work_other",
edc_model.models.fields.other_charfield.OtherCharField(
blank=True,
max_length=35,
null=True,
verbose_name="If other, please specify ...",
),
),
(
"household_size",
models.IntegerField(
help_text="Family / people who spend more than 14 nights per month in your home.",
verbose_name="How many people live together in your home / dwelling?",
),
),
(
"nights_away",
models.IntegerField(
help_text="e.g. travelling for work, staying with family",
verbose_name="In the last one month, how many nights did you spend away from your home / dwelling?",
),
),
(
"education",
models.CharField(
choices=[
("primary", "Up to primary"),
("secondary", "Up to secondary"),
("tertiary", "Tertiary (University, college)"),
("no_education", "No education"),
],
max_length=25,
verbose_name="What is your highest completed education level?",
),
),
(
"health_insurance",
models.CharField(
choices=[
("work_scheme", "Work scheme health insurance"),
("private", "Private health insurance"),
("no_insurance", "No insurance, I pay"),
("OTHER", "Other, please specify below"),
],
max_length=25,
verbose_name="How are you covered for your health care expenses?",
),
),
(
"health_insurance_other",
edc_model.models.fields.other_charfield.OtherCharField(
blank=True,
max_length=35,
null=True,
verbose_name="If other, please specify ...",
),
),
(
"personal_health_opinion",
models.CharField(
choices=[
("excellent", "Excellent"),
("good", "Good"),
("fair", "Fair"),
("poor", "Poor"),
],
max_length=25,
verbose_name="In your opinion, what is your health like?",
),
),
(
"perceived_threat",
models.IntegerField(
help_text="On a scale from 1-10",
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(10),
],
verbose_name="On a scale from 1-10, how serious of a public health threat is coronavirus?",
),
),
(
"corona_concern",
models.CharField(
choices=[
("very", "Very worried"),
("somewhat", "Somewhat worried"),
("a_little", "A little worried"),
("not_at_all", "Not worried at all"),
],
max_length=25,
verbose_name="How worried are you about getting coronavirus?",
),
),
(
"personal_infection_likelihood",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="How likely do you think it is that <u>you</u> will get sick from coronavirus?",
),
),
(
"family_infection_likelihood",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="How likely do you think it is that <u>someone in your family</u> will get sick from coronavirus?",
),
),
(
"perc_die",
models.IntegerField(
help_text="On a scale from 0-100",
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
verbose_name="Out of every 100 people who get infected with coronavirus, how many do you think <u>will die</u>?",
),
),
(
"perc_mild_symptom",
models.IntegerField(
help_text="On a scale from 0-100",
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
verbose_name="Out of every 100 people who get infected with coronavirus, how many do you think <u>will have only mild symptoms</u>?",
),
),
(
"spread_droplets",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus spreads by droplets from cough and sneezes from people infected with coronavirus",
),
),
(
"spread_touch",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus can spread by people touching each other",
),
),
(
"spread_sick",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="People can transmit coronavirus when they are sick ",
),
),
(
"spread_asymptomatic",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="People can transmit coronavirus even when they do not appear to be sick",
),
),
(
"severity_age",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus is more severe in older people than children",
),
),
(
"severity_hiv",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus is more severe in people with <u>HIV infection</u>",
),
),
(
"severity_diabetes_hypertension",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus is more severe in people with <u>diabetes</u> and/or <u>hypertension</u>",
),
),
(
"hot_climate",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus does not survive in the hot climate",
),
),
(
"lives_on_materials",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Coronavirus can live on clothes, plastics, cardboard for a day or more",
),
),
(
"spread_touch2",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="You can catch coronavirus if you touch an infected area and then touch your face or eyes",
),
),
(
"symptoms_fever",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Fever",
),
),
(
"symptoms_headache",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Headache",
),
),
(
"symptoms_dry_cough",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Dry persistant cough",
),
),
(
"symptoms_body_aches",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Body aches",
),
),
(
"symptoms_smell",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Loss of taste and smell",
),
),
(
"symptoms_breathing",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Fast or difficult breathing",
),
),
(
"know_other_symptoms",
models.CharField(
choices=[("Yes", "Yes"), ("No", "No")],
max_length=25,
verbose_name="Do you know of any other symptoms of coronavirus?",
),
),
(
"symptoms_other",
models.TextField(
blank=True,
max_length=250,
null=True,
verbose_name="Please list any other symptoms of coronavirus that you are aware of:",
),
),
(
"hot_drinks",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Drink warm water or hot drinks like tea or coffee",
),
),
(
"alcohol",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Drink alcohol, spirits, etc",
),
),
(
"wash_hands",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Wash hands with soap and warm water",
),
),
(
"hand_sanitizer",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Use hand sanitisers with alcohol",
),
),
(
"take_herbs_prevention",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Taking herbs",
),
),
(
"avoid_crowds",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Avoid crowded places such as markets and public transport",
),
),
(
"face_masks",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Wear a face mask",
),
),
(
"stay_indoors",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Stay indoors",
),
),
(
"social_distance",
models.CharField(
choices=[
("true", "True"),
("false", "False"),
("dont_know", "Don't know"),
],
max_length=25,
verbose_name="Keep at least a 2 metre distance from people",
),
),
(
"other_actions_prevention",
models.TextField(
blank=True,
max_length=250,
null=True,
verbose_name="Any <u>other</u> things you would do to <u>protect</u> yourself from Coronavirus?",
),
),
(
"stay_home",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Stay at home and avoid people",
),
),
(
"visit_clinic",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Go to the nearest health facility",
),
),
(
"call_nurse",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Call your nurse and tell them you are sick",
),
),
(
"take_meds",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Take medicines like chloroquine",
),
),
(
"take_herbs_symptoms",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Take herbs",
),
),
(
"stop_chronic_meds",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
help_text="For example, medicines for diabetes, hypertension and/or HIV",
max_length=25,
verbose_name="Stop taking your chronic disease medicines",
),
),
(
"visit_religious",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Go to a religious leader instead of a doctor",
),
),
(
"visit_traditional",
models.CharField(
choices=[
("very", "Very likely"),
("somewhat", "Somewhat likely"),
("unlikely", "Not very likely, unlikely"),
("not_at_all", "Not at all"),
],
max_length=25,
verbose_name="Go to a traditional healer instead of a doctor",
),
),
(
"other_actions_symptoms",
models.TextField(
blank=True,
max_length=250,
null=True,
verbose_name="Any <u>other</u> things you would do if you had <u>symptoms</u> of Coronavirus?",
),
),
(
"subject_identifier",
models.CharField(
max_length=50,
null=True,
unique=True,
verbose_name="Subject identifier",
),
),
(
"screening_identifier",
models.CharField(
max_length=50,
null=True,
unique=True,
verbose_name="Screening identifier",
),
),
(
"report_datetime",
models.DateTimeField(
default=edc_utils.date.get_utcnow,
help_text="If reporting today, use today's date/time, otherwise use the date/time this information was reported.",
validators=[
edc_protocol.validators.datetime_not_before_study_start,
edc_model.validators.date.datetime_not_future,
],
verbose_name="Report Date",
),
),
("protocol", models.CharField(default="meta_edc", max_length=50)),
(
"site",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="sites.Site",
),
),
],
options={
"verbose_name": "Coronavirus KAP",
"verbose_name_plural": "Coronavirus KAP",
},
managers=[
("on_site", edc_sites.models.CurrentSiteManager()),
("objects", sarscov2.models.coronavirus_kap.CoronaKapManager()),
],
),
]
|
/migrations/0003_auto_20200515_0231.py
| 0.407333 | 0.159872 |
0003_auto_20200515_0231.py
|
pypi
|
**sarscov2vec** is an application of continuous vector space representation to genomes of novel coronavirus species as the genome feature extraction step, used to distinguish the 5 most common SARS-CoV-2 variants (Alpha, Beta, Delta, Gamma, Omicron) with a supervised machine learning model.
In this research we used **367,004 unique and complete genome sequence** records from the official virus repositories. The datasets prepared for this research had balanced classes. A subset of 25,000 sequences from the final dataset was randomly selected and used to train the Natural Language Processing (NLP) algorithm. A further 36,365 samples, unseen during embedding training, were processed by the machine learning pipeline. Each SARS-CoV-2 variant was represented by 12,000 samples from different parts of the world. Separating the data used for the embedding and the classifier was crucial to prevent data leakage, a common problem in NLP.
Our results show that the final hyper-tuned machine learning model achieved **99% accuracy on the test set**. Furthermore, this study demonstrated that the continuous vector space representation of SARS-CoV-2 genomes can be decomposed into a 2D vector space and visualized as a way of explaining the machine learning model's decisions.
The proposed methodology, wrapped in _sarscov2vec_, provides a new alignment-free, AI-aided bioinformatics tool that distinguishes SARS-CoV-2 variants based solely on their genome sequences. Importantly, the obtained results serve as a proof of concept that the presented approach can also be applied to understanding the genomic diversity of other pathogens.
[](https://pypi.python.org/pypi/sarscov2vec/)
[](https://github.com/psf/black)
## Table of Contents
[Modules](https://github.com/ptynecki/sarscov2vec#modules) |
[Installation](https://github.com/ptynecki/sarscov2vec#installation-and-usage) |
[Contributions](https://github.com/ptynecki/sarscov2vec#contributions) |
[Have a question?](https://github.com/ptynecki/sarscov2vec#have-a-question) |
[Found a bug?](https://github.com/ptynecki/sarscov2vec#found-a-bug) |
[Team](https://github.com/ptynecki/sarscov2vec#team) |
[Change log](https://github.com/ptynecki/sarscov2vec#change-log) |
[License](https://github.com/ptynecki/sarscov2vec#license) |
[Cite](https://github.com/ptynecki/sarscov2vec#cite)
## Modules
### fastText NLP model
| Filename with SHA256 checksum | Variants | Description |
|--------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------|------------------------------------------------------------------------------------------------------|
| ffasttext_unsupervised_kmer7_25k_samples.28.02.2022.bin<br/>_44f789dcb156201dac9217f8645d86ac585ec24c6eba68901695dc254a14adc3_ | Alpha, Beta, Delta, Gamma, Omicron (BA.1) | fastText unsupervised model trained on 7-mers tokens extracted from 25 000 unique SARS-CoV-2 samples |
### Machine Learning model and label encoder
| Filename with SHA256 checksum | Variants | Description |
|---------------------------------------------------------------------------------------------------------------------|-------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|
| svm_supervised_36k_samples.28.02.2022.joblib<br/>_70abd23b0181786d4ab8e06ea23bd14641f509c13db58c7f2fa2baea17aa42af_ | Alpha, Beta, Delta, Gamma, Omicron (BA.1, BA.2) | SVM supervised model trained and tested using 36,365 unique SARS-CoV-2 samples. Each genome sample was transformed by fastText model at 28.02.2022. |
| label_encoder_36k_samples.28.02.2022.joblib<br/>_7cb654924f69de6efbf6f409efd91af05874e1392220d22b9883d36c17b366c9_ | Alpha, Beta, Delta, Gamma, Omicron (BA.1, BA.2) | Label extracted from 36,365 unique SARS-CoV-2 samples at 28.02.2022. |
## Installation and usage
#### sarscov2vec package
_sarscov2vec_ requires Python 3.8.0+ to run and can be installed by running:
```
pip install sarscov2vec
```
If you can't wait for the latest hotness from the develop branch, then install it directly from the repository:
```
pip install git+git://github.com/ptynecki/sarscov2vec.git@develop
```
Package examples are available in the `notebooks` directory.
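Below is a minimal, illustrative sketch of the end-to-end pipeline (FASTA file → k-mer tokens → fastText embedding → SVM prediction). The import path, file paths and loading calls are assumptions based on the bundled `tools` module and the Modules tables above, not a verified API; the notebooks remain the authoritative examples.
```python
# Illustrative sketch only; adjust the import path and model file paths to your setup.
import fasttext
import joblib
import pandas as pd

# FastaReader and KMersTransformer are defined in the package's tools module;
# the import below assumes they are exposed at the package level.
from sarscov2vec import FastaReader, KMersTransformer

# 1. Read the genome and tokenize it into 7-mers.
sequence = FastaReader("genome.fasta").get_sequence()
kmers_df = pd.DataFrame({"sequence": [sequence]})
kmers = KMersTransformer(size=7).transform(kmers_df).iloc[0]

# 2. Embed the k-mer "sentence" with the unsupervised fastText model.
ft_model = fasttext.load_model("path/to/fasttext_unsupervised_kmer7.bin")
embedding = ft_model.get_sentence_vector(kmers)

# 3. Classify the embedding and decode the predicted variant name.
clf = joblib.load("path/to/svm_supervised.joblib")
label_encoder = joblib.load("path/to/label_encoder.joblib")
variant = label_encoder.inverse_transform(clf.predict([embedding]))[0]
print(variant)
```
Keeping the fastText embedding and the SVM classifier as separate artefacts mirrors the data separation between embedding training and classification described above.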
## Contributions
Development on the latest stable version of Python 3+ is preferred. As of this writing it's 3.8. You can use any operating system.
If you're fixing a bug or adding a new feature, add a test with *[pytest](https://github.com/pytest-dev/pytest)* and check the code with *[Black](https://github.com/psf/black/)* and *[mypy](https://github.com/python/mypy)*. Before adding any large feature, first open an issue for us to discuss the idea with the core devs and community.
## Have a question?
If you have a private question or want to cooperate with us, you can always reach out to us directly by mail.
## Found a bug?
Feel free to add a new issue with a respective title and description on the [the sarscov2vec repository](https://github.com/ptynecki/sarscov2vec/issues). If you already found a solution to your problem, we would be happy to review your pull request.
## Team
Researchers contributing to sarscov2vec:
* **Piotr Tynecki** (Faculty of Computer Science, Bialystok University of Technology, Bialystok, Poland)
* **Marcin Lubocki** (Laboratory of Virus Molecular Biology, Intercollegiate Faculty of Biotechnology, University of Gdansk, Medical University of Gdańsk, Gdansk, Poland)
## Change log
The change log has become rather long, so it has moved to its own file.
See [CHANGELOG.md](https://github.com/ptynecki/sarscov2vec/blob/master/CHANGELOG.md).
## License
The sarscov2vec package is released under the terms of [the MIT License](https://github.com/ptynecki/sarscov2vec/blob/master/LICENSE).
## Cite
> **Application of continuous embedding of viral genome sequences and machine learning in the prediction of SARS-CoV-2 variants**
>
> Tynecki, P.; Lubocki, M.;
>
> Computer Information Systems and Industrial Management. CISIM 2022. Lecture Notes in Computer Science, Springer
>
|
/sarscov2vec-1.0.0.tar.gz/sarscov2vec-1.0.0/README.md
| 0.490724 | 0.981489 |
README.md
|
pypi
|
import os
from typing import Set
import pandas as pd
from pandas.core.series import Series
from Bio.SeqIO.FastaIO import FastaIterator
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
class FastaReader:
"""
Parse FASTA file with nucleotide sequences
"""
def __init__(self, fasta_file_path: str):
self.fasta_file_path = fasta_file_path
self.fasta_name = os.path.basename(self.fasta_file_path)
self.sequence = None
self.entities = 0
@staticmethod
def _fasta_reader(filename: str) -> SeqRecord:
with open(filename) as handle:
for record in FastaIterator(handle):
yield record
@staticmethod
def _normalise_sequence(entry: SeqRecord) -> str:
"""
        Normalise the sequence to upper case and strip surrounding blank characters
"""
return str(entry.seq).upper().strip()
def get_sequence(self) -> str:
"""
Return all entities as one long string
"""
sequences = []
for entry in self._fasta_reader(self.fasta_file_path):
sequences.append(self._normalise_sequence(entry))
self.entities += 1
self.sequence = " ".join(sequences)
return self.sequence
class KMersTransformer:
"""
    The k-mer transformer is responsible for extracting a set of words -
    using a configurable sliding window - that are subsequences of a given
    length (6 by default) contained within a biological sequence.
    Each word is called a k-mer and is composed of nucleotides
    (i.e. A, C, T and G). Any word that includes other characters
    is removed from the output.
"""
def __init__(self, size: int = 6, sliding_window: int = 1):
self.accepted_chars: Set[str] = {"A", "C", "T", "G"}
self.size: int = size
self.sliding_window: int = sliding_window
def _normalise_sequence(self, sequence: str) -> str:
"""
Return normalised upper-case sequence without blank chars
"""
return sequence.strip().upper()
def _extract_kmers_from_sequence(self, sequence: str) -> str:
"""
        K-mer extraction with the sliding window method,
        where each k-mer has a size of 6 (by default).
        A sliding window is used to scan the entire sequence;
        if a k-mer contains an unsupported character then the
        whole k-mer is ignored (not included in the final string).
        The method returns a string with k-mers separated by spaces,
        which is the expected input for the embedding.
"""
# Genome normalization
sequence = self._normalise_sequence(sequence)
seq_length = len(sequence)
kmers = " ".join(
[
sequence[x : x + self.size]
for x in range(0, seq_length - self.size + 1, self.sliding_window)
if not set(sequence[x : x + self.size]) - self.accepted_chars
]
)
# If sequence length is not div by sliding window value
# then the last k-mer need to be added
if self.sliding_window > 1 and (seq_length - self.size) % self.sliding_window != 0:
# Last k-mer
kmers += f" {sequence[-self.size:]}"
return kmers
def transform(self, df: pd.DataFrame) -> Series:
"""
        Execute the k-mer transformer on each DNA sequence
        and return a Series of k-mer strings
"""
# sequence column is expected
assert list(df.columns) == ["sequence"]
return df.sequence.apply(self._extract_kmers_from_sequence)
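if __name__ == "__main__":
    # Illustrative sketch only (not part of the published API): a quick demonstration
    # of how KMersTransformer tokenizes a toy sequence. The sequence and parameters
    # below are made up for the example.
    demo_df = pd.DataFrame({"sequence": ["ATGCGTAN"]})
    transformer = KMersTransformer(size=3, sliding_window=1)
    # K-mers containing the ambiguous base "N" are dropped, so this prints:
    # ATG TGC GCG CGT GTA
    print(transformer.transform(demo_df).iloc[0])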
|
/sarscov2vec-1.0.0.tar.gz/sarscov2vec-1.0.0/tools/sarscov2vec.py
| 0.900822 | 0.390185 |
sarscov2vec.py
|
pypi
|
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from edc_constants.choices import (
TRUE_FALSE_DONT_KNOW,
YES_NO,
YES_NO_NA,
YES_NO_UNKNOWN,
)
from edc_constants.constants import NOT_APPLICABLE
from edc_model import models as edc_models
from ..choices import (
EDUCATION_LEVELS,
EMPLOYMENT_STATUS,
HEALTH_INSURANCE,
HEALTH_OPINION,
LIKELIHOOD_SCALE,
EMPLOYMENT,
UNPAID_WORK,
WORRY_SCALE,
)
class CoronaKapDiseaseModelMixin(models.Model):
# Disease burden
hiv_pos = models.CharField(
verbose_name="Does the patient have HIV infection?",
max_length=25,
choices=YES_NO_UNKNOWN,
)
hiv_pos_year = models.IntegerField(
verbose_name="If 'Yes', what year did you first test positive?",
validators=[MinValueValidator(1950), MaxValueValidator(2020)],
null=True,
blank=True,
help_text="format YYYY",
)
hiv_year_started_art = models.IntegerField(
verbose_name="If 'Yes', what year did you start antiretroviral therapy?",
validators=[MinValueValidator(0)],
null=True,
blank=True,
help_text="format YYYY",
)
hiv_missed_doses = models.IntegerField(
verbose_name=(
"If 'Yes', in the last month how many days did you miss "
"taking your ART medications?"
),
null=True,
blank=True,
)
diabetic = models.CharField(
verbose_name="Have you been diagnosed with diabetes?",
max_length=25,
choices=YES_NO_UNKNOWN,
)
diabetic_dx_year = models.IntegerField(
verbose_name=(
"If 'Yes', what year did you first learn you had diabetes?"
),
validators=[MinValueValidator(1950), MaxValueValidator(2020)],
null=True,
blank=True,
help_text="format YYYY",
)
diabetic_on_meds = models.CharField(
verbose_name=(
"If 'Yes', are you taking medications to control your diabetes?"
),
max_length=25,
choices=YES_NO_NA,
default=NOT_APPLICABLE,
)
diabetic_missed_doses = models.IntegerField(
verbose_name=(
"If 'Yes', in the last month how many days did you miss "
"taking your diabetes medications?"
),
null=True,
blank=True,
)
hypertensive = models.CharField(
verbose_name="Have you been diagnosed with hypertension?",
max_length=25,
choices=YES_NO_UNKNOWN,
)
hypertensive_dx_year = models.IntegerField(
verbose_name=(
"If 'Yes', what year did you first learn you had hypertension?"
),
validators=[MinValueValidator(1950), MaxValueValidator(2020)],
null=True,
blank=True,
help_text="format YYYY",
)
hypertensive_on_meds = models.CharField(
verbose_name=(
"If 'Yes', are you taking medications to control your hypertension?"
),
max_length=25,
choices=YES_NO_NA,
default=NOT_APPLICABLE,
)
hypertensive_missed_doses = models.IntegerField(
verbose_name=(
"If 'Yes', in the last month how many days did you miss "
"taking your hypertension medications?"
),
null=True,
blank=True,
)
weight = edc_models.WeightField(null=True, blank=True)
height = edc_models.HeightField(null=True, blank=True)
sys_blood_pressure = edc_models.SystolicPressureField(null=True, blank=True)
dia_blood_pressure = edc_models.DiastolicPressureField(null=True, blank=True)
class Meta:
abstract = True
class CoronaKapModelMixin(models.Model):
# PART2
married = models.CharField(
verbose_name="Are you currently married?", max_length=25, choices=YES_NO,
)
employment_status = models.CharField(
verbose_name="Are you employed / working?",
max_length=25,
choices=EMPLOYMENT_STATUS,
)
employment = models.CharField(
verbose_name="What type of paid work / employment are you involved in?",
max_length=25,
choices=EMPLOYMENT,
default=NOT_APPLICABLE,
)
employment_other = edc_models.OtherCharField(null=True, blank=True)
unpaid_work = models.CharField(
verbose_name="What type of unpaid work are you involved in?",
max_length=25,
choices=UNPAID_WORK,
default=NOT_APPLICABLE,
)
unpaid_work_other = edc_models.OtherCharField(null=True, blank=True)
household_size = models.IntegerField(
verbose_name="How many people live together in your home / dwelling?",
help_text=(
"Family / people who spend more than 14 nights per month in your home."
),
)
nights_away = models.IntegerField(
verbose_name=(
"In the last one month, how many nights did you spend "
"away from your home / dwelling?"
),
help_text="e.g. travelling for work, staying with family",
)
education = models.CharField(
verbose_name="What is your highest completed education level?",
max_length=25,
choices=EDUCATION_LEVELS,
)
health_insurance = models.CharField(
verbose_name="How are you covered for your health care expenses?",
max_length=25,
choices=HEALTH_INSURANCE,
)
health_insurance_other = edc_models.OtherCharField(null=True, blank=True)
personal_health_opinion = models.CharField(
verbose_name="In your opinion, what is your health like?",
max_length=25,
choices=HEALTH_OPINION,
)
# PART3
perceived_threat = models.IntegerField(
verbose_name=(
"On a scale from 1-10, how serious of a public "
"health threat is coronavirus?"
),
validators=[MinValueValidator(1), MaxValueValidator(10)],
help_text="On a scale from 1-10",
)
corona_concern = models.CharField(
verbose_name="How worried are you about getting coronavirus?",
max_length=25,
choices=WORRY_SCALE,
)
personal_infection_likelihood = models.CharField(
verbose_name=(
"How likely do you think it is that you "
"will get sick from coronavirus?"
),
max_length=25,
choices=LIKELIHOOD_SCALE,
)
family_infection_likelihood = models.CharField(
verbose_name=(
"How likely do you think it is that someone in your family "
"will get sick from coronavirus?"
),
max_length=25,
choices=LIKELIHOOD_SCALE,
)
perc_die = models.IntegerField(
verbose_name=(
"Out of every 100 people who get infected with "
"coronavirus, how many do you think will die?"
),
validators=[MinValueValidator(0), MaxValueValidator(100)],
help_text="On a scale from 0-100",
)
perc_mild_symptom = models.IntegerField(
verbose_name=(
"Out of every 100 people who get infected with coronavirus, "
"how many do you think will have only mild symptoms?"
),
validators=[MinValueValidator(0), MaxValueValidator(100)],
help_text="On a scale from 0-100",
)
# PART 4
spread_droplets = models.CharField(
verbose_name=(
"Coronavirus spreads by droplets from cough and sneezes "
"from people infected with coronavirus"
),
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
spread_touch = models.CharField(
verbose_name="Coronavirus can spread by people touching each other",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
spread_sick = models.CharField(
verbose_name="People can transmit coronavirus when they are sick ",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
spread_asymptomatic = models.CharField(
verbose_name=(
"People can transmit coronavirus even when they do not appear to be sick"
),
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
severity_age = models.CharField(
verbose_name="Coronavirus is more severe in older people than children",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
severity_hiv = models.CharField(
verbose_name="Coronavirus is more severe in people with HIV infection",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
severity_diabetes_hypertension = models.CharField(
verbose_name=(
"Coronavirus is more severe "
"in people with diabetes and/or hypertension"
),
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
hot_climate = models.CharField(
verbose_name="Coronavirus does not survive in the hot climate",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
lives_on_materials = models.CharField(
verbose_name="Coronavirus can live on clothes, plastics, cardboard for a day or more",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
spread_touch2 = models.CharField(
verbose_name=(
"You can catch coronavirus if you touch an infected "
"area and then touch your face or eyes"
),
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
# PART 5
symptoms_fever = models.CharField(
verbose_name="Fever", max_length=25, choices=TRUE_FALSE_DONT_KNOW,
)
symptoms_headache = models.CharField(
verbose_name="Headache", max_length=25, choices=TRUE_FALSE_DONT_KNOW,
)
symptoms_dry_cough = models.CharField(
verbose_name="Dry persistant cough",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
symptoms_body_aches = models.CharField(
verbose_name="Body aches", max_length=25, choices=TRUE_FALSE_DONT_KNOW,
)
symptoms_smell = models.CharField(
verbose_name="Loss of taste and smell",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
symptoms_breathing = models.CharField(
verbose_name="Fast or difficult breathing",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
know_other_symptoms = models.CharField(
verbose_name="Do you know of any other symptoms of coronavirus?",
max_length=25,
choices=YES_NO,
)
symptoms_other = models.TextField(
verbose_name="Please list any other symptoms of coronavirus that you are aware of:",
max_length=250,
null=True,
blank=True,
)
# PART 6
hot_drinks = models.CharField(
verbose_name="Drink warm water or hot drinks like tea or coffee",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
alcohol = models.CharField(
verbose_name="Drink alcohol, spirits, etc",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
wash_hands = models.CharField(
verbose_name="Wash hands with soap and warm water",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
hand_sanitizer = models.CharField(
verbose_name="Use hand sanitisers with alcohol",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
take_herbs_prevention = models.CharField(
verbose_name="Taking herbs", max_length=25, choices=TRUE_FALSE_DONT_KNOW,
)
avoid_crowds = models.CharField(
verbose_name="Avoid crowded places such as markets and public transport",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
face_masks = models.CharField(
verbose_name="Wear a face mask", max_length=25, choices=TRUE_FALSE_DONT_KNOW,
)
stay_indoors = models.CharField(
verbose_name="Stay indoors", max_length=25, choices=TRUE_FALSE_DONT_KNOW,
)
social_distance = models.CharField(
verbose_name="Keep at least a 2 metre distance from people",
max_length=25,
choices=TRUE_FALSE_DONT_KNOW,
)
other_actions_prevention = models.TextField(
verbose_name="Any other things you would do to protect yourself from Coronavirus?",
max_length=250,
null=True,
blank=True,
)
# PART 7
stay_home = models.CharField(
verbose_name="Stay at home and avoid people",
max_length=25,
choices=LIKELIHOOD_SCALE,
)
visit_clinic = models.CharField(
verbose_name="Go to the nearest health facility",
max_length=25,
choices=LIKELIHOOD_SCALE,
)
call_nurse = models.CharField(
verbose_name="Call your nurse and tell them you are sick",
max_length=25,
choices=LIKELIHOOD_SCALE,
)
take_meds = models.CharField(
verbose_name="Take medicines like chloroquine",
max_length=25,
choices=LIKELIHOOD_SCALE,
)
take_herbs_symptoms = models.CharField(
verbose_name="Take herbs", max_length=25, choices=LIKELIHOOD_SCALE,
)
stop_chronic_meds = models.CharField(
verbose_name="Stop taking your chronic disease medicines",
max_length=25,
choices=LIKELIHOOD_SCALE,
help_text="For example, medicines for diabetes, hypertension and/or HIV",
)
visit_religious = models.CharField(
verbose_name="Go to a religious leader instead of a doctor",
max_length=25,
choices=LIKELIHOOD_SCALE,
)
visit_traditional = models.CharField(
verbose_name="Go to a traditional healer instead of a doctor",
max_length=25,
choices=LIKELIHOOD_SCALE,
)
other_actions_symptoms = models.TextField(
verbose_name="Any other things you would do if you had symptoms of Coronavirus?",
max_length=250,
null=True,
blank=True,
)
class Meta:
abstract = True
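# Illustrative sketch only (hypothetical, not part of this module): a concrete
# questionnaire model would combine the two abstract mixins above and add its
# own identifier and report fields, e.g.:
#
#     class CoronaKap(CoronaKapDiseaseModelMixin, CoronaKapModelMixin, models.Model):
#         screening_identifier = models.CharField(max_length=50, unique=True)
#         report_datetime = models.DateTimeField()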
|
/sarscov2x-1.0.0-py3-none-any.whl/coronavirus/model_mixins/coronavirus_kap_model_mixin.py
| 0.461745 | 0.273023 |
coronavirus_kap_model_mixin.py
|
pypi
|
from django.contrib import admin
from django.utils.safestring import mark_safe
from django_audit_fields import audit_fieldset_tuple
fieldsets = [
(
"Coronavirus Knowledge, Attitudes, and Practices",
{"fields": ("screening_identifier", "report_datetime")},
),
(
"Disease Burden: HIV",
{
"fields": (
"hiv_pos",
"hiv_pos_year",
"hiv_year_started_art",
"hiv_missed_doses",
)
},
),
(
"Disease Burden: Diabetes",
{
"fields": (
"diabetic",
"diabetic_dx_year",
"diabetic_on_meds",
"diabetic_missed_doses",
)
},
),
(
"Disease Burden: Hypertension",
{
"fields": (
"hypertensive",
"hypertensive_dx_year",
"hypertensive_on_meds",
"hypertensive_missed_doses",
)
},
),
(
"Indicators",
{"fields": ("height", "weight", "sys_blood_pressure", "dia_blood_pressure",)},
),
(
"Economics",
{
"fields": (
"married",
"employment_status",
"employment",
"employment_other",
"unpaid_work",
"unpaid_work_other",
"education",
"household_size",
"nights_away",
"health_insurance",
"health_insurance_other",
"personal_health_opinion",
)
},
),
(
"Awareness and Concerns",
{
"fields": (
"perceived_threat",
"corona_concern",
"personal_infection_likelihood",
"family_infection_likelihood",
"perc_die",
"perc_mild_symptom",
)
},
),
(
"Knowledge of Coronavirus",
{
"description": mark_safe(
"<h5><font color='orange'>[Interviewer]:</font> For the "
"questions in this section ask the patient the following:"
"</h5><h5><BR><B>What do you know about coronavirus "
"Answer True, False or you don't know</B></h5>"
),
"fields": (
"spread_droplets",
"spread_touch",
"spread_sick",
"spread_asymptomatic",
"severity_age",
"hot_climate",
"lives_on_materials",
"spread_touch2",
),
},
),
(
"Symptoms of Coronavirus",
{
"description": mark_safe(
"<h5><font color='orange'>[Interviewer]:</font> For the "
"questions in this section ask the patient the following:"
"</h5><h5><BR><B>Do you think any of the following symptoms are "
"linked with coronavirus infection? Answer True, False or you "
"don't know</B></h5>"
),
"fields": (
"symptoms_fever",
"symptoms_headache",
"symptoms_dry_cough",
"symptoms_body_aches",
"symptoms_smell",
"symptoms_breathing",
"know_other_symptoms",
"symptoms_other",
),
},
),
(
"Protecting yourself",
{
"description": mark_safe(
"<h5><font color='orange'>[Interviewer]:</font> For the questions "
"in this section ask the patient the following:</h5><h5><BR><B>"
"Do you think the following can protect <u>you</u> "
"against the coronavirus? True, False or you don't know.</B></h5>"
),
"fields": (
"hot_drinks",
"alcohol",
"wash_hands",
"hand_sanitizer",
"take_herbs_prevention",
"avoid_crowds",
"face_masks",
"stay_indoors",
"social_distance",
"other_actions_prevention",
),
},
),
(
"Your response to symptoms",
{
"description": mark_safe(
"<h5><font color='orange'>[Interviewer]:</font> For the questions "
"in this section ask the patient the following:</h5><h5><BR><B>If "
"you had symptoms of coronavirus, how likely are you to do any of "
"the following?</B></h5>"
),
"fields": (
"stay_home",
"visit_clinic",
"call_nurse",
"take_meds",
"take_herbs_symptoms",
"stop_chronic_meds",
"visit_religious",
"visit_traditional",
"other_actions_symptoms",
),
},
),
audit_fieldset_tuple,
]
class CoronaKapModelAdminMixin:
fieldsets = fieldsets
    filter_horizontal = ("information_sources",)
radio_fields = {
"alcohol": admin.VERTICAL,
"avoid_crowds": admin.VERTICAL,
"call_nurse": admin.VERTICAL,
"corona_concern": admin.VERTICAL,
"hiv_pos": admin.VERTICAL,
"diabetic": admin.VERTICAL,
"diabetic_on_meds": admin.VERTICAL,
"hypertensive": admin.VERTICAL,
"hypertensive_on_meds": admin.VERTICAL,
"education": admin.VERTICAL,
"employment_status": admin.VERTICAL,
"face_masks": admin.VERTICAL,
"family_infection_likelihood": admin.VERTICAL,
"hand_sanitizer": admin.VERTICAL,
"health_insurance": admin.VERTICAL,
"hot_climate": admin.VERTICAL,
"hot_drinks": admin.VERTICAL,
"know_other_symptoms": admin.VERTICAL,
"lives_on_materials": admin.VERTICAL,
"married": admin.VERTICAL,
"personal_health_opinion": admin.VERTICAL,
"personal_infection_likelihood": admin.VERTICAL,
"employment": admin.VERTICAL,
"severity_age": admin.VERTICAL,
"social_distance": admin.VERTICAL,
"spread_asymptomatic": admin.VERTICAL,
"spread_droplets": admin.VERTICAL,
"spread_sick": admin.VERTICAL,
"spread_touch": admin.VERTICAL,
"spread_touch2": admin.VERTICAL,
"stay_home": admin.VERTICAL,
"stay_indoors": admin.VERTICAL,
"stop_chronic_meds": admin.VERTICAL,
"symptoms_body_aches": admin.VERTICAL,
"symptoms_breathing": admin.VERTICAL,
"symptoms_dry_cough": admin.VERTICAL,
"symptoms_fever": admin.VERTICAL,
"symptoms_headache": admin.VERTICAL,
"symptoms_smell": admin.VERTICAL,
"take_herbs_prevention": admin.VERTICAL,
"take_herbs_symptoms": admin.VERTICAL,
"take_meds": admin.VERTICAL,
"unpaid_work": admin.VERTICAL,
"visit_clinic": admin.VERTICAL,
"visit_religious": admin.VERTICAL,
"visit_traditional": admin.VERTICAL,
"wash_hands": admin.VERTICAL,
}
list_display = (
"human_screening_identifier",
"report_datetime",
"protocol",
"user_created",
"created",
)
search_fields = [
"screening_identifier",
"subject_identifier",
]
def human_screening_identifier(self, obj):
return f"{obj.screening_identifier[0:4]}-{obj.screening_identifier[4:]}"
human_screening_identifier.short_description = "screening identifier"
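# Illustrative sketch only (hypothetical, not part of this module): the mixin
# above is meant to be combined with a concrete ModelAdmin and registered for
# an assumed "CoronaKap" model, e.g.:
#
#     @admin.register(CoronaKap)
#     class CoronaKapAdmin(CoronaKapModelAdminMixin, admin.ModelAdmin):
#         pass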
|
/sarscov2x-1.0.0-py3-none-any.whl/coronavirus/admin/modeladmin_mixin.py
| 0.475118 | 0.360573 |
modeladmin_mixin.py
|
pypi
|
# Sarsen
Algorithms and utilities for Synthetic Aperture Radar (SAR) sensors.
Enables cloud-native SAR processing via [*Xarray*](https://xarray.pydata.org)
and [*Dask*](https://dask.org).
This Open Source project is sponsored by B-Open - https://www.bopen.eu.
## Features and limitations
*Sarsen* is a Python library and command line tool with the following functionalities:
- provides algorithms to terrain-correct satellite SAR data
- geometric terrain correction (geocoding)
- *fast mode*: to terrain-correct images
- *accurate mode*: for interferometric processing
- radiometric terrain correction (gamma flattening)
- accesses SAR data via [*xarray-sentinel*](https://github.com/bopen/xarray-sentinel):
- supports most Sentinel-1 data products as [distributed by ESA](https://scihub.copernicus.eu/dhus/#/home):
- Sentinel-1 Single Look Complex (SLC) SM/IW/EW
- Sentinel-1 Ground Range Detected (GRD) SM/IW/EW
- reads uncompressed and compressed SAFE data products on the local computer or
on a network via [*fsspec*](https://filesystem-spec.readthedocs.io) - *depends on rasterio>=1.3*
- accesses DEM data via [*rioxarray*](https://corteva.github.io/rioxarray):
- reads local and remote data in virtually any raster format via
[*rasterio*](https://rasterio.readthedocs.io) / [*GDAL*](https://gdal.org)
- supports larger-than-memory and distributed data access and processing via *Dask*
- efficient geometric terrain-correction for a full GRD
- efficient radiometric terrain-correction for a full GRD.
Overall, the software is in the **beta** phase and the usual caveats apply.
Current limitations:
- documentation needs improvement. See #6.
Non-objectives / Caveat emptor items:
- No attempt is made to support UTC leap seconds. Observations that include a leap second may
crash the code or silently return wrong results.
## SAR terrain-correction
The typical side-looking SAR system acquires data with uniform sampling in azimuth and slant range,
where azimuth and range represent the time at which a given target is acquired and the absolute
sensor-to-target distance, respectively.
Because of this, the near range appears compressed with respect to the far range. Furthermore,
any deviation of the target elevation from a smooth geoid results in additional local geometric and radiometric
distortions known as foreshortening, layover and shadow.
- Radar foreshortening: Terrain surfaces sloping towards the radar appear shortened relative to those sloping away from the radar.
These regions are much brighter than other places on the SAR image.
- Radar layover: It's an extreme case of foreshortening occurring when the terrain slope is greater than the angle of the incident signal.
- Radar shadows: They occur when ground points at the same azimuth but different slant ranges are aligned in the direction of the line-of-sight.
This is usually due to a back slope with an angle steeper than the viewing angle.
When this happens, the radar signal never reaches the farthest points, and thus there is no measurement, meaning that this lack of information is unrecoverable.
The geometric terrain correction (GTC) corrects the distortions due to the target elevation.
The radiometric terrain correction (RTC) also compensates for the backscatter modulation generated
by the topography of the scene.
## Install
The easiest way to install *sarsen* is in a *conda* environment.
The following commands create a new environment, activate it, install the package and its dependencies:
```shell
conda create -n SARSEN
conda activate SARSEN
conda install -c conda-forge dask proj-data sarsen
```
Note that the `proj-data` package is rather large (500+ MB) and is only needed to handle input DEMs whose
vertical coordinate is not on a known ellipsoid, for example the *SRTM DEM* with heights over the *EGM96 geoid*.
## Command line usage
The `sarsen` command line tool corrects SAR data based on a selected DEM and may produce
geometrically terrain-corrected images (GTC) or radiometrically terrain-corrected images (RTC).
Terrain-corrected images will have the same pixel grid as the input DEM, which should be resampled
to the target projection and spacing in advance, for example using
[`gdalwarp`](https://gdal.org/programs/gdalwarp.html).
The following command performs a geometric terrain correction:
```shell
sarsen gtc S1B_IW_GRDH_1SDV_20211217T141304_20211217T141329_030066_039705_9048.SAFE IW/VV South-of-Redmond-10m_UTM.tif
```
Performing geometric and radiometric terrain correction requires significantly more resources.
Currently, it is possible to produce 50 km x 50 km RTC images at 10 m resolution on a 32 GB machine:
```shell
sarsen rtc S1B_IW_GRDH_1SDV_20211217T141304_20211217T141329_030066_039705_9048.SAFE IW/VV South-of-Redmond-10m_UTM.tif
```
## Python API usage
The Python API exposes entry points to the same commands and also gives access to several lower-level
algorithms, but internal APIs should not be considered stable.
The following code applies the geometric terrain correction to the VV polarization of the
"S1B_IW_GRDH_1SDV_20211217T141304_20211217T141329_030066_039705_9048.SAFE" product:
```python
>>> import sarsen
>>> gtc = sarsen.terrain_correction(
... "tests/data/S1B_IW_GRDH_1SDV_20211223T051122_20211223T051147_030148_039993_5371.SAFE",
... measurement_group="IW/VV",
... dem_urlpath="tests/data/Rome-30m-DEM.tif",
... )
```
The radiometric terrain correction can be activated via the `correct_radiometry` keyword argument:
```python
>>> rtc = sarsen.terrain_correction(
... "tests/data/S1B_IW_GRDH_1SDV_20211223T051122_20211223T051147_030148_039993_5371.SAFE",
... measurement_group="IW/VV",
... dem_urlpath="tests/data/Rome-30m-DEM.tif",
... correct_radiometry="gamma_nearest"
... )
```
## Reference documentation
This is the list of the reference documents:
- the geometric terrain-correction algorithms are based on:
["Guide to Sentinel-1 Geocoding" UZH-S1-GC-AD 1.10 26.03.2019](https://sentinel.esa.int/documents/247904/0/Guide-to-Sentinel-1-Geocoding.pdf/e0450150-b4e9-4b2d-9b32-dadf989d3bd3)
- the radiometric terrain-correction algorithms are based on:
[D. Small, "Flattening Gamma: Radiometric Terrain Correction for SAR Imagery," in IEEE Transactions on Geoscience and Remote Sensing, vol. 49, no. 8, pp. 3081-3093, Aug. 2011, doi: 10.1109/TGRS.2011.2120616](https://www.geo.uzh.ch/microsite/rsl-documents/research/publications/peer-reviewed-articles/201108-TGRS-Small-tcGamma-3809999360/201108-TGRS-Small-tcGamma.pdf)
## Project resources
[](https://github.com/bopen/sarsen/actions/workflows/on-push.yml)
[](https://codecov.io/gh/bopen/sarsen)
## Contributing
The main repository is hosted on GitHub.
Testing, bug reports and contributions are highly welcomed and appreciated:
https://github.com/bopen/sarsen
Lead developer:
- [Alessandro Amici](https://github.com/alexamici) - [B-Open](https://bopen.eu)
Main contributors:
- [Aureliana Barghini](https://github.com/aurghs) - [B-Open](https://bopen.eu)
See also the list of [contributors](https://github.com/bopen/sarsen/contributors) who participated in this project.
## Sponsoring
[B-Open](https://bopen.eu) commits to maintain the project long term and we are happy to accept sponsorships to develop new features.
We wish to express our gratitude to the project sponsors:
- [Microsoft](https://microsoft.com) has sponsored the support for *GRD* products and the *gamma flattening* algorithm.
## License
```
Copyright 2016-2022 B-Open Solutions srl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
|
/sarsen-0.9.2.tar.gz/sarsen-0.9.2/README.md
| 0.542621 | 0.984985 |
README.md
|
pypi
|
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (10, 7)
plt.rcParams["font.size"] = 12
import inspect
import numpy as np
import xarray as xr
import xarray_sentinel
from sarsen import apps, geocoding, orbit, scene
# uncomment to check that the code below is in sync with the implementation
# print(inspect.getsource(apps.terrain_correction))
```
# define input and load data
```
product_urlpath = (
"data/S1B_S6_SLC__1SDV_20211216T115438_20211216T115501_030050_03968A_4DCB.SAFE/"
)
measurement_group = "S6/VV"
dem_urlpath = "data/Chicago-4m-DEM.tif"
orbit_group = None
calibration_group = None
output_urlpath = "Chicago-4m-GTC-SLC.tif"
interp_method = "nearest"
multilook = None
grouping_area_factor = (1.0, 1.0)
open_dem_raster_kwargs = {"chunks": {}}
kwargs = {"chunks": 2048}
!ls -d {product_urlpath}
!ls -d {dem_urlpath}
orbit_group = orbit_group or f"{measurement_group}/orbit"
calibration_group = calibration_group or f"{measurement_group}/calibration"
measurement_ds = xr.open_dataset(product_urlpath, engine="sentinel-1", group=measurement_group, **kwargs) # type: ignore
measurement = measurement_ds.measurement
dem_raster = scene.open_dem_raster(dem_urlpath, **open_dem_raster_kwargs)
orbit_ecef = xr.open_dataset(product_urlpath, engine="sentinel-1", group=orbit_group, **kwargs) # type: ignore
position_ecef = orbit_ecef.position
calibration = xr.open_dataset(product_urlpath, engine="sentinel-1", group=calibration_group, **kwargs) # type: ignore
beta_nought_lut = calibration.betaNought
```
# scene
```
dem_raster
_ = dem_raster.plot()
%%time
dem_ecef = scene.convert_to_dem_ecef(dem_raster)
dem_ecef
```
# acquisition
```
measurement
%%time
acquisition = apps.simulate_acquisition(position_ecef, dem_ecef)
acquisition
%%time
beta_nought = xarray_sentinel.calibrate_intensity(measurement, beta_nought_lut)
beta_nought
%%time
coordinate_conversion = None
if measurement_ds.attrs["sar:product_type"] == "GRD":
coordinate_conversion = xr.open_dataset(
product_urlpath,
engine="sentinel-1",
group=f"{measurement_group}/coordinate_conversion",
**kwargs,
) # type: ignore
ground_range = xarray_sentinel.slant_range_time_to_ground_range(
acquisition.azimuth_time,
acquisition.slant_range_time,
coordinate_conversion,
)
interp_kwargs = {"ground_range": ground_range}
elif measurement_ds.attrs["sar:product_type"] == "SLC":
interp_kwargs = {"slant_range_time": acquisition.slant_range_time}
if measurement_ds.attrs["sar:instrument_mode"] == "IW":
beta_nought = xarray_sentinel.mosaic_slc_iw(beta_nought)
else:
raise ValueError(
f"unsupported sar:product_type {measurement_ds.attrs['sar:product_type']}"
)
%%time
geocoded = apps.interpolate_measurement(
beta_nought,
multilook=multilook,
azimuth_time=acquisition.azimuth_time,
interp_method=interp_method,
**interp_kwargs,
)
geocoded
geocoded.rio.set_crs(dem_raster.rio.crs)
geocoded.rio.to_raster(
output_urlpath,
dtype=np.float32,
tiled=True,
blockxsize=512,
blockysize=512,
compress="ZSTD",
num_threads="ALL_CPUS",
)
_ = geocoded.plot(vmax=1.0)
```
|
/sarsen-0.9.2.tar.gz/sarsen-0.9.2/notebooks/S1-SLC-SM-backward-geocode.ipynb
| 0.430387 | 0.754418 |
S1-SLC-SM-backward-geocode.ipynb
|
pypi
|
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (10, 7)
plt.rcParams["font.size"] = 12
import inspect
import numpy as np
import xarray as xr
import xarray_sentinel
from sarsen import apps, geocoding, orbit, scene
# uncomment to check that the code below is in sync with the implementation
# print(inspect.getsource(apps.terrain_correction))
```
# define input and load data
```
product_urlpath = (
"data/S1B_S6_GRDH_1SDV_20211216T115438_20211216T115501_030050_03968A_0F8A.SAFE/"
)
measurement_group = "S6/VV"
dem_urlpath = "data/Chicago-10m-DEM.tif"
orbit_group = None
calibration_group = None
output_urlpath = "Chicago-10m-GTC-GRD.tif"
interp_method = "nearest"
multilook = None
grouping_area_factor = (1.0, 1.0)
open_dem_raster_kwargs = {"chunks": {}}
kwargs = {"chunks": 2048}
!ls -d {product_urlpath}
!ls -d {dem_urlpath}
orbit_group = orbit_group or f"{measurement_group}/orbit"
calibration_group = calibration_group or f"{measurement_group}/calibration"
measurement_ds = xr.open_dataset(product_urlpath, engine="sentinel-1", group=measurement_group, **kwargs) # type: ignore
measurement = measurement_ds.measurement
dem_raster = scene.open_dem_raster(dem_urlpath, **open_dem_raster_kwargs)
orbit_ecef = xr.open_dataset(product_urlpath, engine="sentinel-1", group=orbit_group, **kwargs) # type: ignore
position_ecef = orbit_ecef.position
calibration = xr.open_dataset(product_urlpath, engine="sentinel-1", group=calibration_group, **kwargs) # type: ignore
beta_nought_lut = calibration.betaNought
```
# scene
```
dem_raster
_ = dem_raster.plot()
%%time
dem_ecef = scene.convert_to_dem_ecef(dem_raster)
dem_ecef
```
# acquisition
```
measurement
%%time
acquisition = apps.simulate_acquisition(position_ecef, dem_ecef)
acquisition
%%time
beta_nought = xarray_sentinel.calibrate_intensity(measurement, beta_nought_lut)
beta_nought
%%time
coordinate_conversion = None
if measurement_ds.attrs["sar:product_type"] == "GRD":
coordinate_conversion = xr.open_dataset(
product_urlpath,
engine="sentinel-1",
group=f"{measurement_group}/coordinate_conversion",
**kwargs,
) # type: ignore
ground_range = xarray_sentinel.slant_range_time_to_ground_range(
acquisition.azimuth_time,
acquisition.slant_range_time,
coordinate_conversion,
)
interp_kwargs = {"ground_range": ground_range}
elif measurement_ds.attrs["sar:product_type"] == "SLC":
interp_kwargs = {"slant_range_time": acquisition.slant_range_time}
if measurement_ds.attrs["sar:instrument_mode"] == "IW":
beta_nought = xarray_sentinel.mosaic_slc_iw(beta_nought)
else:
raise ValueError(
f"unsupported sar:product_type {measurement_ds.attrs['sar:product_type']}"
)
%%time
geocoded = apps.interpolate_measurement(
beta_nought,
multilook=multilook,
azimuth_time=acquisition.azimuth_time,
interp_method=interp_method,
**interp_kwargs,
)
geocoded
geocoded.rio.set_crs(dem_raster.rio.crs)
geocoded.rio.to_raster(
output_urlpath,
dtype=np.float32,
tiled=True,
blockxsize=512,
blockysize=512,
compress="ZSTD",
num_threads="ALL_CPUS",
)
_ = geocoded.plot(vmax=1.0)
```
|
/sarsen-0.9.2.tar.gz/sarsen-0.9.2/notebooks/S1-GRD-SM-backward-geocode.ipynb
| 0.426322 | 0.750987 |
S1-GRD-SM-backward-geocode.ipynb
|
pypi
|
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12, 8)
plt.rcParams["font.size"] = 12
import numpy as np
import rioxarray
import xarray as xr
import xarray_sentinel
from sarsen import apps, geocoding, radiometry, orbit, scene
# product definition
product_urlpath = (
"data/S1A_IW_SLC__1SDV_20211223T170557_20211223T170624_041139_04E360_B8E2.SAFE"
)
dem_urlpath = "data/Gran-Sasso-3m-DEM-small.tif"
measurement_group = "IW3/VV"
output_urlpath = "Gran-Sasso-10m-RTC-SLC.tif"
output_gtc_urlpath = output_urlpath.replace("RTC", "GTC")
orbit_group = None
calibration_group = None
multilook = None
interp_method = "nearest"
grouping_area_factor = (1.0, 1.0)
open_dem_raster_kwargs = {}
kwargs = {}
%%time
apps.terrain_correction(
product_urlpath,
measurement_group,
dem_urlpath,
output_urlpath=output_gtc_urlpath,
)
geocoded_beta0 = rioxarray.open_rasterio(output_gtc_urlpath)
geocoded_beta0.plot(vmin=0, vmax=3)
apps.terrain_correction(
product_urlpath,
measurement_group,
dem_urlpath,
correct_radiometry="gamma_bilinear",
output_urlpath=output_urlpath,
grouping_area_factor=(1, 5),
)
geocoded_beta0_c = rioxarray.open_rasterio(output_urlpath)
geocoded_beta0_c.plot(vmin=0, vmax=3)
```
## CHECK INTERNAL FUNCTIONS
### READ ORBIT AND INTERPOLATE
```
orbit_group = orbit_group or f"{measurement_group}/orbit"
calibration_group = calibration_group or f"{measurement_group}/calibration"
measurement_ds = xr.open_dataset(product_urlpath, engine="sentinel-1", group=measurement_group, **kwargs) # type: ignore
measurement = measurement_ds.measurement
dem_raster = scene.open_dem_raster(dem_urlpath, **open_dem_raster_kwargs)
orbit_ecef = xr.open_dataset(product_urlpath, engine="sentinel-1", group=orbit_group, **kwargs) # type: ignore
position_ecef = orbit_ecef.position
calibration = xr.open_dataset(product_urlpath, engine="sentinel-1", group=calibration_group, **kwargs) # type: ignore
beta_nought_lut = calibration.betaNought
dem_ecef = scene.convert_to_dem_ecef(dem_raster)
```
### BACKWARD GEOCODING DEM and DEM_CENTERS
```
acquisition = apps.simulate_acquisition(position_ecef, dem_ecef)
```
### COMPUTE GAMMA WEIGHTS
```
beta_nought = xarray_sentinel.calibrate_intensity(measurement, beta_nought_lut)
beta_nought
coordinate_conversion = None
if measurement_ds.attrs["sar:product_type"] == "GRD":
coordinate_conversion = xr.open_dataset(
product_urlpath,
engine="sentinel-1",
group=f"{measurement_group}/coordinate_conversion",
**kwargs,
) # type: ignore
ground_range = xarray_sentinel.slant_range_time_to_ground_range(
acquisition.azimuth_time,
acquisition.slant_range_time,
coordinate_conversion,
)
interp_kwargs = {"ground_range": ground_range}
elif measurement_ds.attrs["sar:product_type"] == "SLC":
interp_kwargs = {"slant_range_time": acquisition.slant_range_time}
if measurement_ds.attrs["sar:instrument_mode"] == "IW":
beta_nought = xarray_sentinel.mosaic_slc_iw(beta_nought)
else:
raise ValueError(
f"unsupported sar:product_type {measurement_ds.attrs['sar:product_type']}"
)
geocoded = apps.interpolate_measurement(
beta_nought,
multilook=multilook,
azimuth_time=acquisition.azimuth_time,
interp_method=interp_method,
**interp_kwargs,
)
geocoded
grid_parameters = radiometry.azimuth_slant_range_grid(
measurement_ds, coordinate_conversion, grouping_area_factor
)
grid_parameters
%%time
weights = radiometry.gamma_weights(
dem_ecef,
acquisition,
**grid_parameters,
)
f, axes = plt.subplots(nrows=1, ncols=3, figsize=(30, 15))
_ = geocoded.plot(ax=axes[0], vmax=3)
axes[0].grid(c="black")
_ = (geocoded / weights).plot(ax=axes[1], vmax=3)
axes[1].grid(c="black")
_ = weights.plot(ax=axes[2], vmax=3, x="x")
axes[2].grid(c="black")
```
|
/sarsen-0.9.2.tar.gz/sarsen-0.9.2/notebooks/S1-SLC-IW-gamma-flattening.ipynb
| 0.550124 | 0.611005 |
S1-SLC-IW-gamma-flattening.ipynb
|
pypi
|
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (10, 7)
plt.rcParams["font.size"] = 12
import inspect
import numpy as np
import xarray as xr
import xarray_sentinel
from sarsen import apps, geocoding, orbit, scene
# uncomment to check that the code below is in sync with the implementation
# print(inspect.getsource(apps.terrain_correction))
```
# define input and load data
```
product_urlpath = (
"data/S1B_IW_GRDH_1SDV_20211223T051122_20211223T051147_030148_039993_5371.SAFE/"
)
measurement_group = "IW/VV"
dem_urlpath = "data/Rome-10m-DEM.tif"
orbit_group = None
calibration_group = None
output_urlpath = "Rome-10m-GTC-GRD.tif"
interp_method = "nearest"
multilook = None
grouping_area_factor = (1.0, 1.0)
open_dem_raster_kwargs = {"chunks": {}}
kwargs = {"chunks": 2048}
!ls -d {product_urlpath}
!ls -d {dem_urlpath}
orbit_group = orbit_group or f"{measurement_group}/orbit"
calibration_group = calibration_group or f"{measurement_group}/calibration"
measurement_ds = xr.open_dataset(product_urlpath, engine="sentinel-1", group=measurement_group, **kwargs) # type: ignore
measurement = measurement_ds.measurement
dem_raster = scene.open_dem_raster(dem_urlpath, **open_dem_raster_kwargs)
orbit_ecef = xr.open_dataset(product_urlpath, engine="sentinel-1", group=orbit_group, **kwargs) # type: ignore
position_ecef = orbit_ecef.position
calibration = xr.open_dataset(product_urlpath, engine="sentinel-1", group=calibration_group, **kwargs) # type: ignore
beta_nought_lut = calibration.betaNought
```
# scene
```
dem_raster
_ = dem_raster.plot()
%%time
dem_ecef = scene.convert_to_dem_ecef(dem_raster)
dem_ecef
```
# acquisition
```
measurement
%%time
acquisition = apps.simulate_acquisition(position_ecef, dem_ecef)
acquisition
%%time
beta_nought = xarray_sentinel.calibrate_intensity(measurement, beta_nought_lut)
beta_nought
%%time
coordinate_conversion = None
if measurement_ds.attrs["sar:product_type"] == "GRD":
coordinate_conversion = xr.open_dataset(
product_urlpath,
engine="sentinel-1",
group=f"{measurement_group}/coordinate_conversion",
**kwargs,
) # type: ignore
ground_range = xarray_sentinel.slant_range_time_to_ground_range(
acquisition.azimuth_time,
acquisition.slant_range_time,
coordinate_conversion,
)
interp_kwargs = {"ground_range": ground_range}
elif measurement_ds.attrs["sar:product_type"] == "SLC":
interp_kwargs = {"slant_range_time": acquisition.slant_range_time}
if measurement_ds.attrs["sar:instrument_mode"] == "IW":
beta_nought = xarray_sentinel.mosaic_slc_iw(beta_nought)
else:
raise ValueError(
f"unsupported sar:product_type {measurement_ds.attrs['sar:product_type']}"
)
%%time
geocoded = apps.interpolate_measurement(
beta_nought,
multilook=multilook,
azimuth_time=acquisition.azimuth_time,
interp_method=interp_method,
**interp_kwargs,
)
geocoded
geocoded.rio.set_crs(dem_raster.rio.crs)
geocoded.rio.to_raster(
output_urlpath,
dtype=np.float32,
tiled=True,
blockxsize=512,
blockysize=512,
compress="ZSTD",
num_threads="ALL_CPUS",
)
_ = geocoded.plot(vmax=1.0)
```
|
/sarsen-0.9.2.tar.gz/sarsen-0.9.2/notebooks/S1-GRD-IW-backward-geocode.ipynb
| 0.424173 | 0.75274 |
S1-GRD-IW-backward-geocode.ipynb
|
pypi
|
# Profiling of terrain corrections
<hr style="border:2px solid blue"> </hr>
### Install Dependencies and Import
Additional dependencies: `sarsen`, `snakeviz`
```
!pip install -q sarsen snakeviz
%load_ext snakeviz
import os
import tempfile
import adlfs
import planetary_computer
import pystac_client
# enable the `.rio` accessor
import rioxarray # noqa: F401
import stackstac
from sarsen.apps import terrain_correction
```
### Processing definitions
```
# create a temporary directory where to store downloaded data
tmp_dir = tempfile.gettempdir()
# DEM path
dem_path = os.path.join(tmp_dir, "South-of-Redmond-10m.tif")
# path to Sentinel-1 input product in the Planetary Computer
product_folder = "GRD/2021/12/17/IW/DV/S1B_IW_GRDH_1SDV_20211217T141304_20211217T141329_030066_039705_9048" # noqa: E501
# band to be processed
measurement_group = "IW/VV"
tmp_dir
```
#### Area of interest definition: South-of-Redmond (Seattle, US)
```
lon, lat = [-121.95, 47.04]
buffer = 0.2
bbox = [lon - buffer, lat - buffer, lon + buffer, lat + buffer]
```
#### DEMs discovery
```
catalog = pystac_client.Client.open(
"https://planetarycomputer.microsoft.com/api/stac/v1"
)
search = catalog.search(collections="3dep-seamless", bbox=bbox)
items = list(search.get_items())
# select DEMs with resolution 10 meters
items_high_res = [
planetary_computer.sign(item).to_dict()
for item in items
if item.properties["gsd"] == 10
]
dem_raster_all = stackstac.stack(items_high_res, bounds=bbox).squeeze()
dem_raster_all
```
#### Average the DEMs along the time dimension
```
dem_raster_geo = dem_raster_all.compute()
if "time" in dem_raster_geo.dims:
dem_raster_geo = dem_raster_geo.mean("time")
_ = dem_raster_geo.rio.set_crs(dem_raster_all.rio.crs)
```
#### Convert the DEM to UTM coordinates
```
# find the UTM zone and project in UTM
t_srs = dem_raster_geo.rio.estimate_utm_crs()
dem_raster = dem_raster_geo.rio.reproject(t_srs, resolution=(10, 10))
# crop DEM to our area of interest and save it
dem_corners = dict(x=slice(565000, 594000), y=slice(5220000, 5190000))
dem_raster = dem_raster.sel(**dem_corners)
dem_raster.rio.to_raster(dem_path)
dem_raster
```
### Define GRD parameters
```
grd_account_name = "sentinel1euwest"
grd_storage_container = "s1-grd"
grd_product_folder = f"{grd_storage_container}/{product_folder}"
grd_local_path = os.path.join(tmp_dir, product_folder)
```
### Retrieve Sentinel-1 GRD
```
grd_token = planetary_computer.sas.get_token(
grd_account_name, grd_storage_container
).token
grd_fs = adlfs.AzureBlobFileSystem(grd_account_name, credential=grd_token)
grd_fs.ls(f"{grd_product_folder}/manifest.safe")
grd_fs.get(grd_product_folder, grd_local_path, recursive=True)
!ls -d {grd_local_path}
```
### Profiling
`%%snakeviz` uses `cProfile` to generate and plot the statistics for profiling the `terrain_correction` functions. If the plots are too large and are not automatically embedded in the notebook, it is possible to visualize the statistics as follows:
* Open a terminal
* Run `snakeviz path/to/profile/stats` (the path to the statistics is displayed in the cell output)
* Use the browser tab opened by `snakeviz` to explore the statistics
#### GTC
```
%%snakeviz
terrain_correction(
product_urlpath=grd_local_path,
measurement_group=measurement_group,
dem_urlpath=dem_path,
output_urlpath=os.path.join(
tmp_dir, os.path.basename(product_folder) + ".10m.GTC.tif"
),
)
```
#### RTC
##### Nearest neighbour
```
%%snakeviz
terrain_correction(
grd_local_path,
measurement_group=measurement_group,
dem_urlpath=dem_path,
correct_radiometry="gamma_nearest",
output_urlpath=os.path.join(
tmp_dir, os.path.basename(product_folder) + ".10m.RTC.tif"
),
grouping_area_factor=(3, 3),
)
```
##### Bilinear
```
%%snakeviz
terrain_correction(
grd_local_path,
measurement_group=measurement_group,
dem_urlpath=dem_path,
correct_radiometry="gamma_bilinear",
output_urlpath=os.path.join(
tmp_dir, os.path.basename(product_folder) + ".10m.RTC.tif"
),
grouping_area_factor=(3, 3),
)
```
|
/sarsen-0.9.2.tar.gz/sarsen-0.9.2/notebooks/extended_profiling_of_terrain_corrections.ipynb
| 0.429908 | 0.807195 |
extended_profiling_of_terrain_corrections.ipynb
|
pypi
|
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (10, 7)
plt.rcParams["font.size"] = 12
import inspect
import numpy as np
import xarray as xr
import xarray_sentinel
from sarsen import apps, geocoding, orbit, scene
# uncomment to check that the code below is in sync with the implementation
# print(inspect.getsource(apps.terrain_correction))
```
# define input and load data
```
product_urlpath = (
"data/S1B_IW_SLC__1SDV_20211223T051121_20211223T051148_030148_039993_BA4B.SAFE/"
)
measurement_group = "IW3/VV"
dem_urlpath = "data/Rome-10m-DEM.tif"
orbit_group = None
calibration_group = None
output_urlpath = "Rome-10m-GTC-SLC.tif"
correct_radiometry = False
interp_method = "nearest"
multilook = None
grouping_area_factor = (1.0, 1.0)
open_dem_raster_kwargs = {"chunks": {}}
kwargs = {"chunks": {"pixel": 2048}}
!ls -d {product_urlpath}
!ls -d {dem_urlpath}
orbit_group = orbit_group or f"{measurement_group}/orbit"
calibration_group = calibration_group or f"{measurement_group}/calibration"
measurement_ds = xr.open_dataset(product_urlpath, engine="sentinel-1", group=measurement_group, **kwargs) # type: ignore
measurement = measurement_ds.measurement
dem_raster = scene.open_dem_raster(dem_urlpath, **open_dem_raster_kwargs)
orbit_ecef = xr.open_dataset(product_urlpath, engine="sentinel-1", group=orbit_group, **kwargs) # type: ignore
position_ecef = orbit_ecef.position
calibration = xr.open_dataset(product_urlpath, engine="sentinel-1", group=calibration_group, **kwargs) # type: ignore
beta_nought_lut = calibration.betaNought
```
# scene
```
dem_raster
_ = dem_raster.plot()
%%time
dem_ecef = scene.convert_to_dem_ecef(dem_raster)
dem_ecef
```
# acquisition
```
measurement
%%time
acquisition = apps.simulate_acquisition(position_ecef, dem_ecef)
acquisition
%%time
beta_nought = xarray_sentinel.calibrate_intensity(measurement, beta_nought_lut)
beta_nought
%%time
coordinate_conversion = None
if measurement_ds.attrs["sar:product_type"] == "GRD":
coordinate_conversion = xr.open_dataset(
product_urlpath,
engine="sentinel-1",
group=f"{measurement_group}/coordinate_conversion",
**kwargs,
) # type: ignore
ground_range = xarray_sentinel.slant_range_time_to_ground_range(
acquisition.azimuth_time,
acquisition.slant_range_time,
coordinate_conversion,
)
interp_kwargs = {"ground_range": ground_range}
elif measurement_ds.attrs["sar:product_type"] == "SLC":
interp_kwargs = {"slant_range_time": acquisition.slant_range_time}
if measurement_ds.attrs["sar:instrument_mode"] == "IW":
beta_nought = xarray_sentinel.mosaic_slc_iw(beta_nought)
else:
raise ValueError(
f"unsupported sar:product_type {measurement_ds.attrs['sar:product_type']}"
)
%%time
geocoded = apps.interpolate_measurement(
beta_nought,
multilook=multilook,
azimuth_time=acquisition.azimuth_time,
interp_method=interp_method,
**interp_kwargs,
)
geocoded
geocoded.rio.set_crs(dem_raster.rio.crs)
geocoded.rio.to_raster(
output_urlpath,
dtype=np.float32,
tiled=True,
blockxsize=512,
blockysize=512,
compress="ZSTD",
num_threads="ALL_CPUS",
)
_ = geocoded.plot(vmax=1.0)
```
|
/sarsen-0.9.2.tar.gz/sarsen-0.9.2/notebooks/S1-SLC-IW-backward-geocode.ipynb
| 0.42656 | 0.751352 |
S1-SLC-IW-backward-geocode.ipynb
|
pypi
|
from __future__ import annotations
from abc import abstractmethod
from enum import Enum
import datetime as dt
import logging
import typing as t
import warnings
import numpy as np
import pandas as pd
import pyarrow as pa
from sarus_data_spec.protobuf.typing import Protobuf, ProtobufWithUUID
import sarus_data_spec.manager.typing as manager_typing
import sarus_data_spec.protobuf as sp
import sarus_data_spec.storage.typing as storage_typing
logger = logging.getLogger(__name__)
try:
import tensorflow as tf
except ModuleNotFoundError:
logger.warning('tensorflow not found, tensorflow datasets not available')
try:
import sklearn # noqa: F401
except ModuleNotFoundError:
logger.warning('sklearn not found, sklearn models not available')
try:
from sarus_differential_privacy.query import (
PrivateQuery as RealPrivateQuery,
)
PrivateQuery = RealPrivateQuery
except ImportError:
PrivateQuery = t.Any # type: ignore
warnings.warn(
"`sarus_differential_privacy` not installed. "
"DP primitives not available."
)
try:
from sarus_query_builder.core.typing import Task as RealTask
Task = RealTask
except ImportError:
Task = t.Any # type: ignore
warnings.warn(
"`sarus_query_builder` not installed. DP methods not available."
)
if t.TYPE_CHECKING:
from sklearn import svm
DataSpecValue = t.Union[pd.DataFrame, np.ndarray, svm.SVC]
else:
DataSpecValue = t.Any
# List of types a Dataset can be converted to
DatasetCastable = t.Union[
pd.DataFrame,
pd.Series,
t.Iterator[pa.RecordBatch],
pd.core.groupby.DataFrameGroupBy,
pd.core.groupby.SeriesGroupBy,
]
P = t.TypeVar('P', bound=Protobuf, covariant=True)
@t.runtime_checkable
class HasProtobuf(t.Protocol[P]):
"""An object backed by a protocol buffer message."""
def protobuf(self) -> P:
"""Returns the underlying protobuf object."""
...
def prototype(self) -> t.Type[P]:
"""Returns the type of protobuf."""
...
def type_name(self) -> str:
"""Returns the name of the type."""
...
def __getitem__(self, key: str) -> str:
"""Returns the property referred by key"""
...
def properties(self) -> t.Mapping[str, str]:
"""Returns the properties"""
...
@t.runtime_checkable
class Value(t.Protocol):
"""An object with value semantics."""
def __bytes__(self) -> bytes:
...
def __repr__(self) -> str:
...
def __str__(self) -> str:
...
def __eq__(self, value: object) -> bool:
...
def __hash__(self) -> int:
...
@t.runtime_checkable
class Frozen(t.Protocol):
"""An immutable object."""
def _freeze(self) -> None:
"""Freeze the state of the object"""
...
def _frozen(self) -> bool:
"""Check if the frozen object was left unchanged"""
...
PU = t.TypeVar('PU', bound=ProtobufWithUUID, covariant=True)
@t.runtime_checkable
class Referrable(HasProtobuf[PU], Frozen, t.Protocol[PU]):
"""Can be referred to by uuid."""
def uuid(self) -> str:
"""Reference to use to refer to this object."""
...
def referring(
self, type_name: t.Optional[str] = None
) -> t.Collection[Referring[ProtobufWithUUID]]:
"""Referring objects pointing to this one."""
...
def storage(self) -> storage_typing.Storage:
...
def manager(self) -> manager_typing.Manager:
...
@t.runtime_checkable
class Referring(Referrable[PU], Frozen, t.Protocol[PU]):
"""Is referring to other Referrables"""
_referred: t.MutableSet[str] = set()
def referred(self) -> t.Collection[Referrable[ProtobufWithUUID]]:
"""Referred by this object."""
...
def referred_uuid(self) -> t.Collection[str]:
"""Uuid of object referred by this object"""
...
FM = t.TypeVar('FM', bound=Protobuf)
@t.runtime_checkable
class Factory(t.Protocol):
"""Can produce objects from protobuf messages"""
def register(self, name: str, type: t.Type[HasProtobuf[Protobuf]]) -> None:
"""Registers a class"""
...
def create(self, message: FM, store: bool) -> HasProtobuf[FM]:
"""Returns a wrapped protobuf"""
...
class VariantConstraint(Referrable[sp.VariantConstraint]):
def constraint_kind(self) -> ConstraintKind:
...
def required_context(self) -> t.List[str]:
...
def privacy_limit(self) -> t.Optional[PrivacyLimit]:
...
def accept(self, visitor: TransformVisitor) -> None:
...
# Type alias
DS = t.TypeVar('DS', bound=t.Union[sp.Scalar, sp.Dataset])
class Attribute(Referring[sp.Attribute]):
def prototype(self) -> t.Type[sp.Attribute]:
...
def name(self) -> str:
...
@t.runtime_checkable
class DataSpec(Referring[DS], t.Protocol):
def parents(
self,
) -> t.Tuple[
t.List[t.Union[DataSpec[DS], Transform]],
t.Dict[str, t.Union[DataSpec[DS], Transform]],
]:
...
def variant(
self,
kind: ConstraintKind,
public_context: t.Collection[str] = (),
privacy_limit: t.Optional[PrivacyLimit] = None,
salt: t.Optional[int] = None,
) -> t.Optional[DataSpec[DS]]:
...
def variants(self) -> t.Collection[DataSpec]:
...
def private_queries(self) -> t.List[PrivateQuery]:
"""Return the list of PrivateQueries used in a Dataspec's transform.
It represents the privacy loss associated with the current computation.
It can be used by Sarus when a user (Access object) reads a DP dataspec
to update its accountant. Note that Private Query objects are generated
with a random uuid so that even if they are submitted multiple times to
an account, they are only accounted once (ask @cgastaud for more on
accounting)."""
...
def name(self) -> str:
...
def doc(self) -> str:
...
def is_pep(self) -> bool:
...
def pep_token(self) -> t.Optional[str]:
"""Returns a PEP token if the dataset is PEP and None otherwise.
The PEP token is stored in the properties of the VariantConstraint. It
is a hash initialized with a value when the Dataset is protected.
        If a transform does not preserve the PEID, then the token is set to
        None. If a transform preserves the PEID assignment but changes the
        rows (e.g. sample, shuffle, filter, ...), then the token's value is
        changed. If a transform does not change the rows (e.g. selecting a
        column, adding a scalar, ...), then the token is passed on unchanged.
A Dataspec is PEP if its PEP token is not None. Two PEP Dataspecs are
aligned (i.e. they have the same number of rows and all their rows have
the same PEID) if their tokens are equal.
"""
...
def is_public(self) -> bool:
...
def is_synthetic(self) -> bool:
"""Is the dataspec synthetic."""
...
def is_dp(self) -> bool:
"""Is the dataspec the result of a DP transform."""
...
def is_transformed(self) -> bool:
"""Is the dataspec transformed."""
...
def is_source(self) -> bool:
"""Is the dataspec a source dataspec."""
...
def is_remote(self) -> bool:
"""Is the dataspec a remotely defined dataset."""
...
def sources(self, type_name: t.Optional[str]) -> t.Set[DataSpec]:
...
def transform(self) -> Transform:
...
def status(
self, task_names: t.Optional[t.List[str]]
) -> t.Optional[Status]:
...
def accept(self, visitor: Visitor) -> None:
...
def attribute(self, name: str) -> t.Optional[Attribute]:
"""Return the attribute with the given name or None if not found."""
...
def attributes(self, name: str) -> t.List[Attribute]:
"""Return all the attributes with the given name."""
...
class Dataset(DataSpec[sp.Dataset], t.Protocol):
def prototype(self) -> t.Type[sp.Dataset]:
...
def is_synthetic(self) -> bool:
...
def has_admin_columns(self) -> bool:
...
def is_protected(self) -> bool:
...
def is_file(self) -> bool:
...
def schema(self) -> Schema:
...
async def async_schema(self) -> Schema:
...
def size(self) -> t.Optional[Size]:
...
def multiplicity(self) -> t.Optional[Multiplicity]:
...
def bounds(self) -> t.Optional[Bounds]:
...
def marginals(self) -> t.Optional[Marginals]:
...
def to_arrow(self, batch_size: int = 10000) -> t.Iterator[pa.RecordBatch]:
...
async def async_to_arrow(
self, batch_size: int = 10000
) -> t.AsyncIterator[pa.RecordBatch]:
...
def to_sql(self) -> None:
...
def spec(self) -> str:
...
def __iter__(self) -> t.Iterator[pa.RecordBatch]:
...
def to_pandas(self) -> pd.DataFrame:
...
async def async_to_pandas(self) -> pd.DataFrame:
...
def to_tensorflow(self) -> tf.data.Dataset:
...
async def async_to_tensorflow(self) -> tf.data.Dataset:
...
async def async_to(
self, kind: t.Type, drop_admin: bool = True
) -> DatasetCastable:
"""Casts a Dataset to a Python type passed as argument."""
...
def to(self, kind: t.Type, drop_admin: bool = True) -> DatasetCastable:
...
def dot(self) -> str:
"""return a graphviz representation of the dataset"""
...
def sql(
self,
query: t.Union[str, t.Dict[str, t.Any]],
dialect: t.Optional[SQLDialect] = None,
batch_size: int = 10000,
) -> t.Iterator[pa.RecordBatch]:
"""Executes the sql method on the dataset"""
...
def foreign_keys(self) -> t.Dict[Path, Path]:
"""returns foreign keys of the dataset"""
...
def primary_keys(self) -> t.List[Path]:
"""Returns a list of the paths to all primary keys"""
...
def links(self) -> Links:
"""Returns the foreign keys
distributions of the dataset computed
with dp"""
...
class Scalar(DataSpec[sp.Scalar], t.Protocol):
def prototype(self) -> t.Type[sp.Scalar]:
"""Return the type of the underlying protobuf."""
...
def is_privacy_params(self) -> bool:
"""Is the scalar privacy parameters."""
def is_random_seed(self) -> bool:
"""Is the scalar a random seed."""
def is_synthetic_model(self) -> bool:
"""is the scalar a synthetic model"""
def value(self) -> DataSpecValue:
...
async def async_value(self) -> DataSpecValue:
...
def spec(self) -> str:
...
class Visitor(t.Protocol):
"""A visitor class for Dataset"""
def all(self, visited: DataSpec) -> None:
...
def transformed(
self,
visited: DataSpec,
transform: Transform,
*arguments: DataSpec,
**named_arguments: DataSpec,
) -> None:
...
def other(self, visited: DataSpec) -> None:
...
class Bounds(Referring[sp.Bounds], t.Protocol):
"""A python abstract class to describe bounds"""
def prototype(self) -> t.Type[sp.Bounds]:
"""Return the type of the underlying protobuf."""
...
def dataset(self) -> Dataset:
...
def statistics(self) -> Statistics:
...
class Marginals(Referring[sp.Marginals], t.Protocol):
"""A python abstract class to describe marginals"""
def prototype(self) -> t.Type[sp.Marginals]:
"""Return the type of the underlying protobuf."""
...
def dataset(self) -> Dataset:
...
def statistics(self) -> Statistics:
...
class Size(Referring[sp.Size], t.Protocol):
"""A python abstract class to describe size"""
def prototype(self) -> t.Type[sp.Size]:
"""Return the type of the underlying protobuf."""
...
def dataset(self) -> Dataset:
...
def statistics(self) -> Statistics:
...
class Multiplicity(Referring[sp.Multiplicity], t.Protocol):
"""A python abstract class to describe size"""
def prototype(self) -> t.Type[sp.Multiplicity]:
"""Return the type of the underlying protobuf."""
...
def dataset(self) -> Dataset:
...
def statistics(self) -> Statistics:
...
class Schema(Referring[sp.Schema], t.Protocol):
"""A python abstract class to describe schemas"""
def prototype(self) -> t.Type[sp.Schema]:
"""Return the type of the underlying protobuf."""
...
def name(self) -> str:
...
def dataset(self) -> Dataset:
...
def to_arrow(self) -> pa.Schema:
...
def type(self) -> Type:
...
def has_admin_columns(self) -> bool:
...
def is_protected(self) -> bool:
...
def tables(self) -> t.List[Path]:
...
def protected_path(self) -> Path:
...
def data_type(self) -> Type:
...
def private_tables(self) -> t.List[Path]:
...
def public_tables(self) -> t.List[Path]:
...
class Status(Referring[sp.Status], t.Protocol):
"""A python abstract class to describe status"""
def prototype(self) -> t.Type[sp.Status]:
"""Return the type of the underlying protobuf."""
...
def dataspec(self) -> DataSpec:
...
def datetime(self) -> dt.datetime:
...
def update(
self,
task_stages: t.Optional[t.Mapping[str, Stage]],
properties: t.Optional[t.Mapping[str, str]],
) -> t.Tuple[Status, bool]:
...
def task(self, task: str) -> t.Optional[Stage]:
...
def pending(self) -> bool:
...
def processing(self) -> bool:
...
def ready(self) -> bool:
...
def error(self) -> bool:
...
def owner(
self,
) -> (
manager_typing.Manager
): # TODO: Maybe find a better name, but this was shadowing the actual manager of this object. # noqa: E501
...
def clear_task(self, task: str) -> t.Tuple[Status, bool]:
"""Creates a new status removing the task specified.
If the task does not exist, nothing is created"""
class Stage(HasProtobuf[sp.Status.Stage], t.Protocol):
def accept(self, visitor: StageVisitor) -> None:
...
def stage(self) -> str:
...
def ready(self) -> bool:
...
def processing(self) -> bool:
...
def pending(self) -> bool:
...
def error(self) -> bool:
...
class StageVisitor(t.Protocol):
"""A visitor class for Status/Stage"""
def pending(self) -> None:
...
def processing(self) -> None:
...
def ready(self) -> None:
...
def error(self) -> None:
...
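# Illustrative sketch, not part of the library: a minimal object satisfying
# the StageVisitor protocol above. It simply records which stage callback was
# invoked; the class name and the `seen` attribute are hypothetical and only
# serve as a usage example.
class _RecordingStageVisitor:
    def __init__(self) -> None:
        self.seen: t.List[str] = []
    def pending(self) -> None:
        self.seen.append('pending')
    def processing(self) -> None:
        self.seen.append('processing')
    def ready(self) -> None:
        self.seen.append('ready')
    def error(self) -> None:
        self.seen.append('error')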
@t.runtime_checkable
class Transform(Referrable[sp.Transform], t.Protocol):
"""A python abstract class to describe transforms"""
def prototype(self) -> t.Type[sp.Transform]:
"""Return the type of the underlying protobuf."""
...
def name(self) -> str:
...
def doc(self) -> str:
...
def spec(self) -> str:
...
def is_composed(self) -> bool:
"""Is the transform composed."""
...
def is_variable(self) -> bool:
"""Is the transform a variable."""
...
def is_external(self) -> bool:
"""Is the transform an external operation."""
...
def infer_output_type(
self,
*arguments: t.Union[DataSpec, Transform],
**named_arguments: t.Union[DataSpec, Transform],
) -> t.Tuple[str, t.Callable[[DataSpec], None]]:
"""Guess if the external transform output is a Dataset or a Scalar.
Registers schema if it is a Dataset and returns the value type.
"""
...
def transforms(self) -> t.Set[Transform]:
"""return all transforms (and avoid infinite recursions/loops)"""
...
def variables(self) -> t.Set[Transform]:
"""Return all the variables from a composed transform"""
...
def compose(
self,
*compose_arguments: Transform,
**compose_named_arguments: Transform,
) -> Transform:
...
def apply(
self,
*apply_arguments: DataSpec,
**apply_named_arguments: DataSpec,
) -> DataSpec:
...
def abstract(
self,
*arguments: str,
**named_arguments: str,
) -> Transform:
...
def __call__(
self,
*arguments: t.Union[Transform, DataSpec, int, str],
**named_arguments: t.Union[Transform, DataSpec, int, str],
) -> t.Union[Transform, DataSpec]:
"""Applies the transform to another element"""
...
def __mul__(self, argument: Transform) -> Transform:
...
def accept(self, visitor: TransformVisitor) -> None:
...
def transform_to_apply(self) -> Transform:
"""Return the transform of a composed transform."""
def composed_parents(
self,
) -> t.Tuple[t.List[Transform], t.Dict[str, Transform]]:
"""Return the parents of a composed transform."""
def composed_callable(self) -> t.Callable[..., t.Any]:
"""Return the composed transform's equivalent callable.
The function takes an undefined number of named arguments.
"""
class TransformVisitor(t.Protocol):
"""A visitor class for Transform"""
def all(self, visited: Transform) -> None:
...
def composed(
self,
visited: Transform,
transform: Transform,
*arguments: Transform,
**named_arguments: Transform,
) -> None:
...
def variable(
self,
visited: Transform,
name: str,
position: int,
) -> None:
...
def other(self, visited: Transform) -> None:
...
class Path(HasProtobuf[sp.Path], Frozen, Value, t.Protocol):
"""A python class to describe Paths"""
def prototype(self) -> t.Type[sp.Path]:
"""Return the type of the underlying protobuf."""
...
def to_strings_list(self) -> t.List[t.List[str]]:
...
def to_dict(self) -> t.Dict[str, str]:
...
def label(self) -> str:
...
def sub_paths(self) -> t.List[Path]:
...
def select(self, select_path: Path) -> t.List[Path]:
...
class Type(HasProtobuf[sp.Type], Frozen, Value, t.Protocol):
def prototype(self) -> t.Type[sp.Type]:
"""Return the type of the underlying protobuf."""
...
def name(self) -> str:
"""Returns the name of the underlying protobuf."""
...
def data_type(self) -> Type:
"""Returns the first type level containing the data,
hence skips the protected_entity struct if there is one"""
def has_admin_columns(self) -> bool:
"""Return True if the Type has administrative columns."""
def has_protection(self) -> bool:
"""Return True if the Type has protection information."""
def latex(self: Type, parenthesized: bool = False) -> str:
"""return a latex representation of the type"""
...
def compact(self: Type, parenthesized: bool = False) -> str:
"""return a compact representation of the type"""
...
def structs(self: Type) -> t.Optional[t.List[Path]]:
"""Returns the path to the first level structs encountered in the
type.
        For example, Union[Struct1, Union[Struct2[Struct3]]] will return only
        the paths leading to Struct1 and Struct2.
"""
...
def get(self, item: Path) -> Type:
"""Return a subtype of the considered type defined by the path."""
...
def leaves(self) -> t.List[Type]:
"""Returns the leaves contained in the type tree structure"""
...
def children(self) -> t.Dict[str, Type]:
"""Returns the children contained in the type tree structure"""
...
# A Visitor acceptor
def accept(self, visitor: TypeVisitor) -> None:
...
def sub_types(self: Type, item: Path) -> t.List[Type]:
"""Returns the terminal nodes contained in the path"""
...
def default(self: Type) -> pa.Array:
"""Returns an example of arrow array matching the type.
For an optional type, it sets the default missing value.
"""
def numpy_default(self: Type) -> np.ndarray:
"""Returns an example of numpy array matching the type.
For an optional type, it sets the default missing value
"""
def tensorflow_default(self, is_optional: bool = False) -> t.Any:
"""This methods returns a dictionary with tensors as leaves
that match the type. For an optional type, we consider
the case where the field is missing, and set the default value
for each missing type.
"""
def example(self: Type) -> pa.Array:
"""Returns an example of arrow array matching the type.
For an optional type, it returns a non missing
value of the type.
"""
def numpy_example(self: Type) -> np.ndarray:
"""Returns an example of numpy array matching the type.
For an optional type, it returns a non
missing value of the type.
"""
def tensorflow_example(self: Type) -> t.Any:
"""Returns an example of a dictionary with tensors as leaves
        that match the type.
For an optional type, it returns a non missing value of the type.
"""
def path_leaves(self) -> t.Sequence[Path]:
"""Returns the list of each path to a leaf in the type. If the type
is a leaf, it returns an empty list"""
class IdBase(Enum):
INT64 = sp.Type.Id.INT64
INT32 = sp.Type.Id.INT32
INT16 = sp.Type.Id.INT16
INT8 = sp.Type.Id.INT8
STRING = sp.Type.Id.STRING
BYTES = sp.Type.Id.BYTES
class DatetimeBase(Enum):
INT64_NS = sp.Type.Datetime.INT64_NS
INT64_MS = sp.Type.Datetime.INT64_MS
STRING = sp.Type.Datetime.STRING
class DateBase(Enum):
INT32 = sp.Type.Date.INT32
STRING = sp.Type.Date.STRING
class TimeBase(Enum):
INT64_NS = sp.Type.Time.INT64_NS
INT64_US = sp.Type.Time.INT64_US
INT32_MS = sp.Type.Time.INT32_MS
STRING = sp.Type.Time.STRING
class IntegerBase(Enum):
INT64 = sp.Type.Integer.INT64
INT32 = sp.Type.Integer.INT32
INT16 = sp.Type.Integer.INT16
INT8 = sp.Type.Integer.INT8
UINT64 = sp.Type.Integer.UINT64
UINT32 = sp.Type.Integer.UINT32
UINT16 = sp.Type.Integer.UINT16
UINT8 = sp.Type.Integer.UINT8
class FloatBase(Enum):
FLOAT64 = sp.Type.Float.FLOAT64
FLOAT32 = sp.Type.Float.FLOAT32
FLOAT16 = sp.Type.Float.FLOAT16
class ConstraintKind(Enum):
SYNTHETIC = sp.ConstraintKind.SYNTHETIC
PEP = sp.ConstraintKind.PEP
DP = sp.ConstraintKind.DP
PUBLIC = sp.ConstraintKind.PUBLIC
MOCK = sp.ConstraintKind.MOCK
class SQLDialect(Enum):
"""SQL Dialects"""
POSTGRES = 1
SQL_SERVER = 2
MY_SQL = 3
SQLLITE = 4
ORACLE = 5
BIG_QUERY = 6
REDSHIFT = 7
HIVE = 8
DATABRICKS = 9
class InferredDistributionName(Enum):
UNIFORM = "Uniform"
NORMAL = "Normal"
EXPONENTIAL = "Exponential"
GAMMA = "Gamma"
BETA = "Beta"
PARETO = "Pareto"
class TypeVisitor(t.Protocol):
"""A visitor class for Type"""
@abstractmethod
def Null(self, properties: t.Optional[t.Mapping[str, str]] = None) -> None:
...
@abstractmethod
def Unit(self, properties: t.Optional[t.Mapping[str, str]] = None) -> None:
...
@abstractmethod
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
...
@abstractmethod
def Id(
self,
unique: bool,
reference: t.Optional[Path] = None,
base: t.Optional[IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Integer(
self,
min: int,
max: int,
base: IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Float(
self,
min: float,
max: float,
base: FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
...
@abstractmethod
def Struct(
self,
fields: t.Mapping[str, Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Union(
self,
fields: t.Mapping[str, Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Optional(
self,
type: Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def List(
self,
type: Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Array(
self,
type: Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Datetime(
self,
format: str,
min: str,
max: str,
base: DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Time(
self,
format: str,
min: str,
max: str,
base: TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Date(
self,
format: str,
min: str,
max: str,
base: DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Constrained(
self,
type: Type,
constraint: Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Hypothesis(
self,
*types: t.Tuple[Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
class Predicate(HasProtobuf[sp.Predicate], Frozen, Value, t.Protocol):
"""A python class to describe types"""
def prototype(self) -> t.Type[sp.Predicate]:
"""Return the type of the underlying protobuf."""
# A bunch of operators
def __or__(self, predicate: Predicate) -> Predicate:
"""Union operator"""
def __and__(self, predicate: Predicate) -> Predicate:
"""Inter operator"""
def __invert__(self) -> Predicate:
"""Complement"""
class Statistics(HasProtobuf[sp.Statistics], Frozen, Value, t.Protocol):
"""A python class to describe statistics"""
def prototype(self) -> t.Type[sp.Statistics]:
"""Return the type of the underlying protobuf."""
...
def name(self) -> str:
...
def distribution(self) -> Distribution:
...
def size(self) -> int:
...
def multiplicity(self) -> float:
...
def accept(self, visitor: StatisticsVisitor) -> None:
...
def nodes_statistics(self, path: Path) -> t.List[Statistics]:
"""Returns the List of each statistics corresponding at the leaves
of path"""
...
def children(self) -> t.Dict[str, Statistics]:
"""Returns the children contained in the type tree structure"""
...
class Distribution(HasProtobuf[sp.Distribution], Frozen, Value, t.Protocol):
"""A python class to describe distributions"""
def prototype(self) -> t.Type[sp.Distribution]:
"""Return the type of the underlying protobuf."""
...
def values(self) -> t.Union[t.List[float], t.List[int]]:
...
def probabilities(self) -> t.List[float]:
...
def names(self) -> t.Union[t.List[bool], t.List[str]]:
...
def min_value(self) -> t.Union[int, float]:
...
def max_value(self) -> t.Union[int, float]:
...
class StatisticsVisitor(t.Protocol):
"""A visitor class for Statistics"""
@abstractmethod
def Null(self, size: int, multiplicity: float) -> None:
...
@abstractmethod
def Unit(self, size: int, multiplicity: float) -> None:
...
def Boolean(
self,
size: int,
multiplicity: float,
probabilities: t.Optional[t.List[float]] = None,
names: t.Optional[t.List[bool]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Id(self, size: int, multiplicity: float) -> None:
...
@abstractmethod
def Integer(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Enum(
self,
size: int,
multiplicity: float,
probabilities: t.Optional[t.List[float]] = None,
names: t.Optional[t.List[str]] = None,
values: t.Optional[t.List[float]] = None,
name: str = 'Enum',
) -> None:
...
@abstractmethod
def Float(
self,
size: int,
multiplicity: float,
min_value: float,
max_value: float,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[float]] = None,
) -> None:
...
@abstractmethod
def Text(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
example: str = '',
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Bytes(self, size: int, multiplicity: float) -> None:
...
@abstractmethod
def Struct(
self,
fields: t.Mapping[str, Statistics],
size: int,
multiplicity: float,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Union(
self,
fields: t.Mapping[str, Statistics],
size: int,
multiplicity: float,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
...
@abstractmethod
def Optional(
self, statistics: Statistics, size: int, multiplicity: float
) -> None:
...
@abstractmethod
def List(
self,
statistics: Statistics,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
name: str = 'List',
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Array(
self,
statistics: Statistics,
size: int,
multiplicity: float,
min_values: t.Optional[t.List[float]] = None,
max_values: t.Optional[t.List[float]] = None,
name: str = 'Array',
probabilities: t.Optional[t.List[t.List[float]]] = None,
values: t.Optional[t.List[t.List[float]]] = None,
) -> None:
...
@abstractmethod
def Datetime(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Date(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Time(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Duration(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
...
@abstractmethod
def Constrained(
self, statistics: Statistics, size: int, multiplicity: float
) -> None:
...
class InferredDistribution(t.Protocol):
"""A python class to to infer user input distribution
Attributes:
nparams: number of parameters
"""
nparams: int
def estimate_params(self, x: np.ndarray) -> None:
"""estimate distribution parameters (non-DP) from data column"""
...
def log_likelihood(self, x: np.ndarray) -> float:
"""compute log-likelihood of the distribution on data column"""
...
def preprocess(self, x: np.ndarray) -> np.ndarray:
"""Shift/scale data to be able to estimate distribution parameters"""
...
def params(self) -> t.Mapping[str, float]:
"""return distribution parameters"""
...
class InferredAlphabet(t.Protocol):
"""A python class to to infer user input charset
Attributes:
charset (t.List[int]): list with int representation of unique chars
complexity (int): charset intervals used to generate the alphabet
            e.g. the ascii alphabet [1, 2, 3, ..., 126] has complexity=1,
            while the alphabet [1,2,3] U [10] = [1,2,3,10] has complexity=2
"""
charset: t.List[int]
complexity: int
T = t.TypeVar('T', covariant=True)
class Links(Referring[sp.Links], t.Protocol):
"""A python abstract class to describe all
the links statistics in a dataset"""
def prototype(self) -> t.Type[sp.Links]:
"""Return the type of the underlying protobuf."""
...
def dataset(self) -> Dataset:
...
def links_statistics(self) -> t.List[LinkStatistics]:
...
class LinkStatistics(HasProtobuf[sp.Links.LinkStat], t.Protocol):
"""A python class to describe the statistics of a link
for a foreign key"""
def prototype(self) -> t.Type[sp.Links.LinkStat]:
"""Return the type of the underlying protobuf."""
...
def pointing(self) -> Path:
"""returns the path of the foreign key column"""
...
def pointed(self) -> Path:
"""returns the path of the column pointed by a foreign_key"""
...
def distribution(self) -> Distribution:
"""Returns the distribution of counts
for the given pointing/pointed"""
...
class PrivacyLimit(t.Protocol):
"""An abstract Privacy Limit class."""
def delta_epsilon_dict(self) -> t.Dict[float, float]:
"""Returns the limit as a dictionnary {delta: epsilon}"""
pass
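# Illustrative sketch, not part of the library: a minimal in-memory
# PrivacyLimit built from a single (delta, epsilon) pair. The class name and
# constructor are hypothetical; only delta_epsilon_dict is required by the
# protocol above.
class _SimplePrivacyLimit:
    def __init__(self, delta: float, epsilon: float) -> None:
        self._delta = delta
        self._epsilon = epsilon
    def delta_epsilon_dict(self) -> t.Dict[float, float]:
        # A single-point privacy limit: spend `epsilon` at the given `delta`.
        return {self._delta: self._epsilon}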
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/typing.py
| 0.887345 | 0.246239 |
typing.py
|
pypi
|
from __future__ import annotations
from os.path import basename
from typing import (
TYPE_CHECKING,
AsyncIterator,
Collection,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
Union,
cast,
)
from urllib.parse import urlparse
import json
import typing as t
import warnings
import pandas as pd
import pyarrow as pa
try:
import tensorflow as tf
except ModuleNotFoundError:
pass # Warning is displayed by typing.py
try:
from sqlalchemy.engine import make_url
except ModuleNotFoundError:
warnings.warn('SqlAlchemy not found, sql operations not available')
from sarus_data_spec.base import Referring
from sarus_data_spec.constants import DATASET_SLUGNAME
from sarus_data_spec.protobuf.utilities import to_base64
from sarus_data_spec.scalar import Scalar
from sarus_data_spec.transform import Transform
import sarus_data_spec.protobuf as sp
import sarus_data_spec.transform as sdtr
import sarus_data_spec.typing as st
if TYPE_CHECKING:
from sarus_data_spec.bounds import Bounds
from sarus_data_spec.links import Links
from sarus_data_spec.marginals import Marginals
from sarus_data_spec.multiplicity import Multiplicity
from sarus_data_spec.size import Size
class Dataset(Referring[sp.Dataset]):
"""A python class to describe datasets"""
def __init__(self, protobuf: sp.Dataset, store: bool = True) -> None:
if protobuf.spec.HasField("transformed"):
transformed = protobuf.spec.transformed
self._referred = {
transformed.transform,
*transformed.arguments,
*list(transformed.named_arguments.values()),
}
super().__init__(protobuf=protobuf, store=store)
def prototype(self) -> Type[sp.Dataset]:
"""Return the type of the underlying protobuf."""
return sp.Dataset
def name(self) -> str:
return self._protobuf.name
def doc(self) -> str:
return self._protobuf.doc
def is_transformed(self) -> bool:
"""Is the dataset composed."""
return self._protobuf.spec.HasField('transformed')
def is_file(self) -> bool:
"""Is the dataset composed."""
return self._protobuf.spec.HasField('file')
def is_synthetic(self) -> bool:
"""Is the dataset synthetic."""
return self.manager().dataspec_validator().is_synthetic(self)
def has_admin_columns(self) -> bool:
return self.schema().has_admin_columns()
def is_protected(self) -> bool:
return self.schema().is_protected()
def is_pep(self) -> bool:
"""Is the dataset PEP."""
return self.pep_token() is not None
def pep_token(self) -> Optional[str]:
"""Returns the dataset PEP token."""
return self.manager().dataspec_validator().pep_token(self)
def is_dp(self) -> bool:
"""Is the dataspec the result of a DP transform"""
return self.manager().dataspec_validator().is_dp(self)
def is_public(self) -> bool:
"""Is the dataset public."""
return self.manager().dataspec_validator().is_public(self)
def is_remote(self) -> bool:
"""Is the dataspec a remotely defined dataset."""
return self.manager().is_remote(self)
def is_source(self) -> bool:
"""Is the dataset not composed."""
return not self.is_transformed()
def sql(
self,
query: t.Union[str, t.Dict[str, t.Any]],
dialect: Optional[st.SQLDialect] = None,
batch_size: int = 10000,
) -> Iterator[pa.RecordBatch]:
"""Executes the sql method on the dataset"""
return self.manager().sql(self, query, dialect, batch_size)
def sources(
self, type_name: t.Optional[str] = sp.type_name(sp.Dataset)
) -> Set[st.DataSpec]:
"""Returns the set of non-transformed datasets that are parents
of the current dataset"""
sources = self.storage().sources(self, type_name=type_name)
return sources
def status(
self, task_names: t.Optional[t.List[str]] = None
) -> t.Optional[st.Status]:
"""This method return a status that contains all the
last updates for the task_names required. It returns None if
all the tasks are missing."""
if task_names is None:
task_names = []
if type(task_names) not in [list, set, tuple]:
raise TypeError(
f"Invalid task_names passed to dataset.status {task_names}"
)
last_status = self.manager().status(self)
if last_status is None:
return last_status
if all([last_status.task(task) is None for task in task_names]):
return None
return last_status
def schema(self) -> st.Schema:
return self.manager().schema(self)
async def async_schema(self) -> st.Schema:
return await self.manager().async_schema(self)
def size(self) -> t.Optional[Size]:
return cast('Size', self.manager().size(self))
def multiplicity(self) -> t.Optional[Multiplicity]:
return cast('Multiplicity', self.manager().multiplicity(self))
def bounds(self) -> t.Optional[Bounds]:
return cast('Bounds', self.manager().bounds(self))
def marginals(self) -> t.Optional[Marginals]:
return cast('Marginals', self.manager().marginals(self))
def links(self) -> st.Links:
return cast('Links', self.manager().links(self))
def transform(self) -> st.Transform:
return cast(
st.Transform,
self.storage().referrable(
self.protobuf().spec.transformed.transform
),
)
def to_arrow(self, batch_size: int = 10000) -> t.Iterator[pa.RecordBatch]:
return self.manager().to_arrow(self, batch_size)
async def async_to_arrow(
self, batch_size: int = 10000
) -> AsyncIterator[pa.RecordBatch]:
return await self.manager().async_to_arrow(self, batch_size)
def to_sql(self) -> None:
return self.manager().to_sql(self)
def parents(
self,
) -> Tuple[
List[Union[st.DataSpec, st.Transform]],
Dict[str, Union[st.DataSpec, st.Transform]],
]:
if not self.is_transformed():
return list(), dict()
args_id = self._protobuf.spec.transformed.arguments
kwargs_id = self._protobuf.spec.transformed.named_arguments
args_parents = [
cast(
Union[st.DataSpec, st.Transform],
self.storage().referrable(uuid),
)
for uuid in args_id
]
kwargs_parents = {
name: cast(
Union[st.DataSpec, st.Transform],
self.storage().referrable(uuid),
)
for name, uuid in kwargs_id.items()
}
return args_parents, kwargs_parents
def variant(
self,
kind: st.ConstraintKind,
public_context: Collection[str] = (),
privacy_limit: Optional[st.PrivacyLimit] = None,
salt: Optional[int] = None,
) -> Optional[st.DataSpec]:
return (
self.manager()
.dataspec_rewriter()
.variant(self, kind, public_context, privacy_limit, salt)
)
def variants(self) -> Collection[st.DataSpec]:
return self.manager().dataspec_rewriter().variants(self)
def private_queries(self) -> List[st.PrivateQuery]:
"""Return the list of PrivateQueries used in a Dataspec's transform.
It represents the privacy loss associated with the current computation.
It can be used by Sarus when a user (Access object) reads a DP dataspec
to update its accountant. Note that Private Query objects are generated
with a random uuid so that even if they are submitted multiple times to
an account, they are only accounted once (ask @cgastaud for more on
accounting).
"""
return self.manager().dataspec_validator().private_queries(self)
def spec(self) -> str:
return cast(str, self._protobuf.spec.WhichOneof('spec'))
def __iter__(self) -> Iterator[pa.RecordBatch]:
return self.to_arrow(batch_size=1)
def to_pandas(self) -> pd.DataFrame:
return self.manager().to_pandas(self)
async def async_to_pandas(self) -> pd.DataFrame:
return await self.manager().async_to_pandas(self)
async def async_to(
self, kind: t.Type, drop_admin: bool = True
) -> st.DatasetCastable:
"""Convert a Dataset's to a Python type."""
return await self.manager().async_to(self, kind, drop_admin)
def to(self, kind: t.Type, drop_admin: bool = True) -> st.DatasetCastable:
return self.manager().to(self, kind, drop_admin)
def to_tensorflow(self) -> tf.data.Dataset:
return self.manager().to_tensorflow(self)
async def async_to_tensorflow(self) -> tf.data.Dataset:
return await self.manager().async_to_tensorflow(self)
# A Visitor acceptor
def accept(self, visitor: st.Visitor) -> None:
visitor.all(self)
if self.is_transformed():
visitor.transformed(
self,
cast(
Transform,
self.storage().referrable(
self._protobuf.spec.transformed.transform
),
),
*(
cast(Dataset, self.storage().referrable(arg))
for arg in self._protobuf.spec.transformed.arguments
),
**{
name: cast(Dataset, self.storage().referrable(arg))
for name, arg in self._protobuf.spec.transformed.named_arguments.items() # noqa: E501
},
)
else:
visitor.other(self)
def foreign_keys(self) -> Dict[st.Path, st.Path]:
"""returns foreign keys of the dataset"""
return self.manager().foreign_keys(self)
def dot(self) -> str:
"""return a graphviz representation of the dataset"""
class Dot(st.Visitor):
visited: Set[st.DataSpec] = set()
nodes: Dict[str, Tuple[str, str]] = {}
edges: Dict[Tuple[str, str], str] = {}
def transformed(
self,
visited: st.DataSpec,
transform: st.Transform,
*arguments: st.DataSpec,
**named_arguments: st.DataSpec,
) -> None:
if visited not in self.visited:
if visited.prototype() == sp.Dataset:
self.nodes[visited.uuid()] = (
visited.name(),
"Dataset",
)
else:
self.nodes[visited.uuid()] = (visited.name(), "Scalar")
if not visited.is_remote():
for argument in arguments:
self.edges[
(argument.uuid(), visited.uuid())
] = transform.name()
argument.accept(self)
for _, argument in named_arguments.items():
self.edges[
(argument.uuid(), visited.uuid())
] = transform.name()
argument.accept(self)
self.visited.add(visited)
def other(self, visited: st.DataSpec) -> None:
if visited.prototype() == sp.Dataset:
self.nodes[visited.uuid()] = (
visited.name(),
"Dataset",
)
else:
self.nodes[visited.uuid()] = (visited.name(), "Scalar")
visitor = Dot()
self.accept(visitor)
result = 'digraph {'
for uuid, (label, node_type) in visitor.nodes.items():
shape = "polygon" if node_type == "Scalar" else "ellipse"
result += (
f'\n"{uuid}" [label="{label} ({uuid[:2]})", shape={shape}];'
)
for (u1, u2), label in visitor.edges.items():
result += f'\n"{u1}" -> "{u2}" [label="{label} ({uuid[:2]})"];'
result += '}'
return result
def primary_keys(self) -> List[st.Path]:
return self.manager().primary_keys(self)
def attribute(self, name: str) -> t.Optional[st.Attribute]:
return self.manager().attribute(name=name, dataspec=self)
def attributes(self, name: str) -> t.List[st.Attribute]:
return self.manager().attributes(name=name, dataspec=self)
# Builders
def transformed(
transform: st.Transform,
*arguments: t.Union[st.DataSpec, st.Transform],
dataspec_type: Optional[str] = None,
dataspec_name: Optional[str] = None,
**named_arguments: t.Union[st.DataSpec, st.Transform],
) -> st.DataSpec:
if dataspec_type is None:
dataspec_type, attach_info_callback = transform.infer_output_type(
*arguments, **named_arguments
)
else:
def attach_info_callback(ds: st.DataSpec) -> None:
return
if dataspec_name is None:
dataspec_name = "Transformed"
if dataspec_type == sp.type_name(sp.Scalar):
output_dataspec: st.DataSpec = Scalar(
sp.Scalar(
name=dataspec_name,
spec=sp.Scalar.Spec(
transformed=sp.Scalar.Transformed(
transform=transform.uuid(),
arguments=(a.uuid() for a in arguments),
named_arguments={
n: a.uuid() for n, a in named_arguments.items()
},
)
),
)
)
else:
properties = {}
ds_args = [
element
for element in arguments
if element.type_name() == sp.type_name(sp.Dataset)
]
for element in named_arguments.values():
if element.type_name() == sp.type_name(sp.Dataset):
ds_args.append(element)
if len(ds_args) == 1 and DATASET_SLUGNAME in ds_args[0].properties():
            properties[DATASET_SLUGNAME] = ds_args[0].properties()[
                DATASET_SLUGNAME
            ]
output_dataspec = Dataset(
sp.Dataset(
name=dataspec_name,
spec=sp.Dataset.Spec(
transformed=sp.Dataset.Transformed(
transform=transform.uuid(),
arguments=(a.uuid() for a in arguments),
named_arguments={
n: a.uuid() for n, a in named_arguments.items()
},
)
),
properties=properties,
)
)
# Add additional information to the newly created Dataspec
# (e.g. a mock variant)
attach_info_callback(output_dataspec)
return output_dataspec
def file(
format: str,
uri: str,
doc: str = 'A file dataset',
properties: Optional[Mapping[str, str]] = None,
) -> Dataset:
return Dataset(
sp.Dataset(
name=basename(urlparse(uri).path),
doc=doc,
spec=sp.Dataset.Spec(file=sp.Dataset.File(format=format, uri=uri)),
properties=properties,
)
)
def csv_file(
uri: str,
doc: str = 'A csv file dataset',
properties: Optional[Mapping[str, str]] = None,
) -> Dataset:
return Dataset(
sp.Dataset(
name=basename(urlparse(uri).path),
doc=doc,
spec=sp.Dataset.Spec(file=sp.Dataset.File(format='csv', uri=uri)),
properties=properties,
)
)
def files(
name: str,
format: str,
uri_pattern: str,
doc: str = 'Dataset split into files',
properties: Optional[Mapping[str, str]] = None,
) -> Dataset:
return Dataset(
sp.Dataset(
name=name,
doc=doc,
spec=sp.Dataset.Spec(
files=sp.Dataset.Files(format=format, uri_pattern=uri_pattern)
),
properties=properties,
)
)
def csv_files(
name: str,
uri_pattern: str,
doc: str = 'A csv file dataset',
properties: Optional[Mapping[str, str]] = None,
) -> Dataset:
return Dataset(
sp.Dataset(
name=name,
doc=doc,
spec=sp.Dataset.Spec(
files=sp.Dataset.Files(format='csv', uri_pattern=uri_pattern)
),
properties=properties,
)
)
def sql(
uri: str,
tables: Optional[
Collection[Tuple[str, str]]
] = None, # pairs schema/table_name
properties: Optional[Mapping[str, str]] = None,
) -> Dataset:
parsed_uri = make_url(uri)
if parsed_uri.database is None:
name = f'{parsed_uri.drivername}_db_dataset'
else:
name = parsed_uri.database
if tables is None:
tables = []
return Dataset(
sp.Dataset(
name=name,
doc=f'Data from {uri}',
spec=sp.Dataset.Spec(
sql=sp.Dataset.Sql(
uri=uri,
tables=[
sp.Dataset.Sql.Table(
schema=element[0], table=element[1]
)
for element in tables
],
)
),
properties=properties,
)
)
def mapped_sql(
uri: str,
mapping_sql: Mapping[st.Path, st.Path],
schemas: Optional[Collection[str]] = None,
) -> Dataset:
parsed_uri = make_url(uri)
if parsed_uri.database is None:
name = f'{parsed_uri.drivername}_db_dataset'
else:
name = parsed_uri.database
serialized_mapping = json.dumps(
{
to_base64(original_table.protobuf()): to_base64(
synthetic_table.protobuf()
)
for original_table, synthetic_table in mapping_sql.items()
}
)
properties = {'sql_mapping': serialized_mapping}
return Dataset(
sp.Dataset(
name=name,
doc=f'Data from {uri}',
spec=sp.Dataset.Spec(
sql=sp.Dataset.Sql(
uri=uri,
)
),
properties=properties,
)
)
if t.TYPE_CHECKING:
test_sql: st.Dataset = sql(uri='sqlite:///:memory:')
test_file: st.Dataset = file(format='', uri='')
test_csv_file: st.Dataset = csv_file(uri='')
test_files: st.Dataset = files(name='', uri_pattern='', format='')
test_csv_files: st.Dataset = csv_files(name='', uri_pattern='')
test_transformed: st.DataSpec = transformed(
sdtr.protect(), sql(uri='sqlite:///:memory:')
)
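# Illustrative usage sketch, not part of the library: building a source
# dataset and deriving a protected variant with the builders defined above.
# The URI is hypothetical; wrapping the calls in a function keeps the sketch
# import-safe (nothing is executed at module load).
def _example_protected_csv(uri: str = 'file:///tmp/data.csv') -> st.DataSpec:
    # Declare a CSV file source, then apply the protect() transform to it.
    source = csv_file(uri=uri)
    return transformed(sdtr.protect(), source)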
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/dataset.py
| 0.870101 | 0.304352 |
dataset.py
|
pypi
|
from __future__ import annotations
import datetime
import json
import typing as t
import numpy as np
import pandas as pd
import pyarrow as pa
from sarus_data_spec.base import Base
from sarus_data_spec.constants import (
ARRAY_VALUES,
DATA,
LIST_VALUES,
OPTIONAL_VALUE,
PUBLIC,
TEXT_CHARSET,
TEXT_MAX_LENGTH,
USER_COLUMN,
WEIGHTS,
)
from sarus_data_spec.path import Path
from sarus_data_spec.path import path as path_builder
from sarus_data_spec.predicate import Predicate
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
try:
import tensorflow as tf
except ModuleNotFoundError:
pass
DURATION_UNITS_TO_RANGE = {
'us': (
int(np.iinfo(np.int64).min / 1e3),
int(np.iinfo(np.int64).max / 1e3),
),
'ms': (
int(np.iinfo(np.int64).min / 1e6),
int(np.iinfo(np.int64).max / 1e6),
),
's': (
int(np.iinfo(np.int64).min / 1e9),
int(np.iinfo(np.int64).max / 1e9),
),
}
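# Illustrative check, not part of the library: each range above is the int64
# nanosecond range rescaled to the target unit, so a duration expressed in
# that unit still fits in int64 nanoseconds once converted back. The helper
# name is hypothetical.
def _duration_range(unit: str) -> t.Tuple[int, int]:
    scale = {'us': 1e3, 'ms': 1e6, 's': 1e9}[unit]
    return (
        int(np.iinfo(np.int64).min / scale),
        int(np.iinfo(np.int64).max / scale),
    )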
class Type(Base[sp.Type]):
"""A python class to describe types"""
def prototype(self) -> t.Type[sp.Type]:
"""Return the type of the underlying protobuf."""
return sp.Type
def name(self) -> str:
"""Returns the name of the underlying protobuf."""
return self.protobuf().name
def has_protection(self) -> bool:
"""Return True if the Type has protection information."""
protection_fields = {
PUBLIC,
USER_COLUMN,
WEIGHTS,
}
if self.has_admin_columns():
type = self.protobuf()
field_names = {element.name for element in type.struct.fields}
# there may be additional administrative columns
return protection_fields.issubset(field_names)
else:
return False
def has_admin_columns(self) -> bool:
"""Return True if the Type has administrative columns."""
type = self.protobuf()
if type.HasField('struct'):
field_names = {element.name for element in type.struct.fields}
# there may be additional administrative columns
return DATA in field_names
else:
return False
def data_type(self) -> Type:
"""Returns the first type level containing the data,
hence skips the protected_entity struct if there is one"""
if self.has_admin_columns():
data_type = next(
iter(
[
field.type
for field in self.protobuf().struct.fields
if field.name == DATA
]
),
None,
)
assert data_type
return Type(data_type)
else:
return self
# A Visitor acceptor
def accept(self, visitor: st.TypeVisitor) -> None:
dispatch: t.Callable[[], None] = {
'null': lambda: visitor.Null(properties=self._protobuf.properties),
'unit': lambda: visitor.Unit(properties=self._protobuf.properties),
'boolean': lambda: visitor.Boolean(
properties=self._protobuf.properties
),
'integer': lambda: visitor.Integer(
min=self._protobuf.integer.min,
max=self._protobuf.integer.max,
base=st.IntegerBase(self._protobuf.integer.base),
possible_values=self._protobuf.integer.possible_values,
properties=self._protobuf.properties,
),
'id': lambda: visitor.Id(
base=st.IdBase(self._protobuf.id.base),
unique=self._protobuf.id.unique,
reference=Path(self._protobuf.id.reference)
if self._protobuf.id.reference != sp.Path()
else None,
properties=self._protobuf.properties,
),
'enum': lambda: visitor.Enum(
self._protobuf.name,
[
(name_value.name, name_value.value)
for name_value in self._protobuf.enum.name_values
],
self._protobuf.enum.ordered,
properties=self._protobuf.properties,
),
'float': lambda: visitor.Float(
min=self._protobuf.float.min,
max=self._protobuf.float.max,
base=st.FloatBase(self._protobuf.float.base),
possible_values=self._protobuf.float.possible_values,
properties=self._protobuf.properties,
),
'text': lambda: visitor.Text(
self._protobuf.text.encoding,
possible_values=self._protobuf.text.possible_values,
properties=self._protobuf.properties,
),
'bytes': lambda: visitor.Bytes(
properties=self._protobuf.properties
),
'struct': lambda: visitor.Struct(
{
field.name: Type(field.type)
for field in self._protobuf.struct.fields
},
name=None
if self._protobuf.name == ''
else self._protobuf.name,
properties=self._protobuf.properties,
),
'union': lambda: visitor.Union(
{
field.name: Type(field.type)
for field in self._protobuf.union.fields
},
name=None
if self._protobuf.name == ''
else self._protobuf.name,
properties=self._protobuf.properties,
),
'optional': lambda: visitor.Optional(
Type(self._protobuf.optional.type),
None if self._protobuf.name == '' else self._protobuf.name,
properties=self._protobuf.properties,
),
'list': lambda: visitor.List(
Type(self._protobuf.list.type),
max_size=self._protobuf.list.max_size,
name=None
if self._protobuf.name == ''
else self._protobuf.name,
properties=self._protobuf.properties,
),
'array': lambda: visitor.Array(
Type(self._protobuf.array.type),
tuple(self._protobuf.array.shape),
None if self._protobuf.name == '' else self._protobuf.name,
properties=self._protobuf.properties,
),
'datetime': lambda: visitor.Datetime(
self._protobuf.datetime.format,
self._protobuf.datetime.min,
self._protobuf.datetime.max,
st.DatetimeBase(self._protobuf.datetime.base),
possible_values=self._protobuf.datetime.possible_values,
properties=self._protobuf.properties,
),
'date': lambda: visitor.Date(
self._protobuf.date.format,
self._protobuf.date.min,
self._protobuf.date.max,
st.DateBase(self._protobuf.date.base),
possible_values=self._protobuf.date.possible_values,
properties=self._protobuf.properties,
),
'time': lambda: visitor.Time(
self._protobuf.time.format,
self._protobuf.time.min,
self._protobuf.time.max,
st.TimeBase(self._protobuf.time.base),
possible_values=self._protobuf.time.possible_values,
properties=self._protobuf.properties,
),
'duration': lambda: visitor.Duration(
self._protobuf.duration.unit,
self._protobuf.duration.min,
self._protobuf.duration.max,
possible_values=self._protobuf.duration.possible_values,
properties=self._protobuf.properties,
),
'constrained': lambda: visitor.Constrained(
Type(self._protobuf.constrained.type),
Predicate(self._protobuf.constrained.constraint),
None if self._protobuf.name == '' else self._protobuf.name,
properties=self._protobuf.properties,
),
'hypothesis': lambda: visitor.Hypothesis(
*[
(Type(scored.type), scored.score)
for scored in self._protobuf.hypothesis.types
],
name=None
if self._protobuf.name == ''
else self._protobuf.name,
properties=self._protobuf.properties,
),
None: lambda: None,
}[self._protobuf.WhichOneof('type')]
dispatch()
def latex(self: st.Type, parenthesized: bool = False) -> str:
"""return a latex representation of the type"""
class Latex(st.TypeVisitor):
result: str = ''
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = r'\emptyset'
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = r'\mathbb{1}'
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
                self.result = r'\left\{0,1\right\}'
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if (
min <= np.iinfo(np.int32).min
or max >= np.iinfo(np.int32).max
):
self.result = r'\mathbb{N}'
else:
self.result = (
r'\left[' + str(min) + r'..' + str(max) + r'\right]'
)
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(name_values) > 3:
self.result = r'\left\{'
for name, _ in name_values[:2]:
self.result += r'\text{' + name + r'}, '
                    self.result += r'\ldots, '
for name, _ in name_values[-1:]:
self.result += r'\text{' + name + r'}, '
self.result = self.result[:-2] + r'\right\}'
elif len(name_values) > 0:
self.result = r'\left\{'
for name, _ in name_values:
self.result += r'\text{' + name + r'}, '
self.result = self.result[:-2] + r'\right\}'
else:
self.Unit()
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if (
min <= np.finfo(np.float32).min
or max >= np.finfo(np.float32).max
):
self.result = r'\mathbb{R}'
else:
self.result = (
r'\left[' + str(min) + r', ' + str(max) + r'\right]'
)
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'\text{Text}'
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = r'\text{Bytes}'
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(fields) > 0:
if name is None:
self.result = r'\left\{'
else:
self.result = r'\text{' + name + r'}: \left\{'
for type_name, type in fields.items():
self.result = (
self.result
+ r'\text{'
+ type_name
+ r'}:'
+ Type.latex(type, parenthesized=True)
+ r', '
)
self.result = self.result[:-2] + r'\right\}'
else:
self.Unit()
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(fields) > 0:
for type in fields.values():
self.result = (
self.result
+ Type.latex(type, parenthesized=True)
+ r' | '
)
self.result = self.result[:-2]
if parenthesized:
self.result = r'\left(' + self.result + r'\right)'
else:
self.Null()
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = Type.latex(type, parenthesized=True) + r'?'
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if max_size < 100:
self.result = (
Type.latex(type, parenthesized=True)
+ r'^{'
+ str(max_size)
+ r'}'
)
else:
self.result = Type.latex(type, parenthesized=True) + r'^*'
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = (
Type.latex(type)
+ r'^{'
+ r'\times '.join([str(i) for i in shape])
+ r'}'
)
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'\text{Datetime}'
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'\text{Date}'
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'\text{Time}'
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'\text{Duration}'
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(types) > 0:
for type, score in types:
self.result = (
self.result
+ Type.latex(type, parenthesized=True)
+ f',{score}|'
)
self.result = self.result[:-2]
self.result = r'\langle' + self.result + r'\rangle'
else:
self.Null()
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r''
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r''
visitor = Latex()
self.accept(visitor)
return visitor.result
def compact(self: st.Type, parenthesized: bool = False) -> str:
"""return a compact representation of the type"""
class Compact(st.TypeVisitor):
result: str = ''
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
                self.result = r'∅'
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = r'𝟙'
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = r'𝔹'
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if (
min <= np.iinfo(np.int32).min
or max >= np.iinfo(np.int32).max
):
self.result = r'ℕ'
else:
self.result = r'[' + str(min) + r'..' + str(max) + r']'
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(name_values) > 3:
self.result = r'{'
                    for name, _ in name_values[:2]:
                        self.result += name + r', '
                    self.result += r'..., '
for name, _ in name_values[-1:]:
self.result += name + r', '
self.result = self.result[:-2] + r'}'
elif len(name_values) > 0:
self.result = r'{'
for name, _ in name_values:
self.result += name + r', '
self.result = self.result[:-2] + r'}'
else:
self.Unit()
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if (
min <= np.finfo(np.float32).min
or max >= np.finfo(np.float32).max
):
self.result = r'ℝ'
else:
self.result = r'[' + str(min) + r', ' + str(max) + r']'
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'𝒯'
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = r'ℬ'
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(fields) > 0:
if name is None:
self.result = '{'
else:
self.result = name + r': {'
for type_name, type in fields.items():
self.result = (
self.result
+ type_name
+ r': '
+ Type.compact(type, parenthesized=True)
+ r', '
)
self.result = self.result[:-2] + r'}'
else:
self.Unit()
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(fields) > 0:
for type in fields.values():
self.result = (
self.result
+ Type.compact(type, parenthesized=True)
+ r' | '
)
self.result = self.result[:-2]
if parenthesized:
self.result = r'(' + self.result + r')'
else:
self.Null()
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = Type.compact(type, parenthesized=True) + r'?'
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = Type.compact(type, parenthesized=True) + r'*'
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = (
Type.compact(type)
+ r'**('
+ r'x'.join([str(i) for i in shape])
+ r')'
)
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'𝒟𝓉'
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'𝒟𝒶'
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'𝒯𝓂'
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r'𝒟𝓊'
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(types) > 0:
self.result = r'<'
for type, score in types:
self.result = (
self.result
+ Type.compact(type, parenthesized=False)
+ f',{score}|'
)
self.result = self.result[:-1] + r'>'
else:
self.Null()
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r''
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = r''
visitor = Compact()
self.accept(visitor)
return visitor.result
def get(self: Type, item: st.Path) -> st.Type:
"""Return a projection of the considered type defined by the path.
        The projection contains all the parent types of the leaves of
the path. If the path stops at a Union, Struct or Optional,
it also returns that type with everything it contains."""
class Select(st.TypeVisitor):
result = Type(sp.Type())
def __init__(
self, properties: t.Optional[t.Mapping[str, str]] = None
):
self.properties = properties
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Null()
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Id(base=base, unique=unique, reference=reference)
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Unit(properties=self.properties)
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Boolean()
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Integer(
min=min,
max=max,
possible_values=possible_values,
properties=self.properties,
)
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Enum(
name=name,
name_values=name_values,
ordered=ordered,
properties=self.properties,
)
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Float(
min=min,
max=max,
possible_values=possible_values,
properties=self.properties,
)
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Text(
encoding=encoding,
possible_values=possible_values,
properties=self.properties,
)
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Bytes()
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
new_fields = {}
for path in proto.paths:
                    # for a struct, each path must have a label
new_fields[path.label] = fields[path.label].get(Path(path))
self.result = Struct(
fields=new_fields if len(new_fields) > 0 else fields,
name=name if name is not None else 'Struct',
properties=self.properties,
)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
new_fields = {}
for path in proto.paths:
new_fields[path.label] = fields[path.label].get(Path(path))
self.result = Union(
fields=new_fields if len(new_fields) > 0 else fields,
name=name if name is not None else 'Union',
properties=self.properties,
)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) <= 1
self.result = Optional(
type.get(Path(proto.paths[0]))
if len(proto.paths) > 0
else type,
name=t.cast(str, name),
properties=self.properties,
)
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) <= 1
self.result = List(
type.get(Path(proto.paths[0]))
if len(proto.paths) > 0
else type,
name=t.cast(str, name),
max_size=max_size,
properties=self.properties,
)
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) <= 1
self.result = Array(
type.get(Path(proto.paths[0]))
if len(proto.paths) > 0
else type,
name=t.cast(str, name),
shape=shape,
properties=self.properties,
)
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Datetime(
format=format,
min=min,
max=max,
properties=self.properties,
base=base,
possible_values=possible_values,
)
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Date(
format=format,
min=min,
max=max,
properties=self.properties,
base=base,
possible_values=possible_values,
)
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Time(
format=format,
min=min,
max=max,
properties=self.properties,
base=base,
possible_values=possible_values,
)
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
proto = item.protobuf()
assert len(proto.paths) == 0
self.result = Duration(
unit=unit,
min=min,
max=max,
properties=self.properties,
possible_values=possible_values,
)
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# TODO
pass
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = Select(properties=self.properties())
self.accept(visitor)
return visitor.result
def sub_types(self: Type, item: st.Path) -> t.List[st.Type]:
"""Returns a list of the subtypes corresponding to the
leaves of the input path"""
class Select(st.TypeVisitor):
def __init__(self, type_item: st.Type):
self.result = [type_item]
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
for sub_path in item.sub_paths():
result.extend(fields[sub_path.label()].sub_types(sub_path))
if len(result) > 0:
self.result = result
# otherwise struct is empty and it is a terminal node
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
for sub_path in item.sub_paths():
result.extend(fields[sub_path.label()].sub_types(sub_path))
if len(result) > 0:
self.result = result
# otherwise union is empty and it is a terminal node
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
if len(item.sub_paths()) == 1:
result.extend(type.sub_types(item.sub_paths()[0]))
self.result = result
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
if len(item.sub_paths()) == 1:
result.extend(type.sub_types(item.sub_paths()[0]))
self.result = result
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
if len(item.sub_paths()) == 1:
result.extend(type.sub_types(item.sub_paths()[0]))
self.result = result
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
visitor = Select(type_item=self)
self.accept(visitor)
return visitor.result
def structs(self: Type) -> t.Optional[t.List[st.Path]]:
"""Returns the path to the first level structs encountered in the type.
For example, Union[Struct1,Union[Struct2[Struct3]] will return only a
path that brings to Struct1 and Struct2.
"""
class AddPath(st.TypeVisitor):
result: t.Optional[t.List[st.Path]] = None
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = []
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
paths = []
for type_name, curr_type in fields.items():
if curr_type.protobuf().WhichOneof('type') == 'struct':
paths.append(Path(sp.Path(label=type_name)))
else:
sub_paths = curr_type.structs()
if sub_paths is not None:
paths.extend(
[
Path(
sp.Path(
label=type_name,
paths=[subpath.protobuf()],
)
)
for subpath in sub_paths
]
)
if len(paths) > 0:
self.result = t.cast(t.List[st.Path], paths)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if type.protobuf().WhichOneof('type') == 'struct':
self.result = [Path(sp.Path(label=OPTIONAL_VALUE))]
else:
sub_paths = type.structs()
if sub_paths is not None:
self.result = [
Path(
sp.Path(
label=OPTIONAL_VALUE,
paths=[
subpath.protobuf()
for subpath in sub_paths
],
)
)
]
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if type.protobuf().WhichOneof('type') == 'struct':
self.result = [Path(sp.Path(label=LIST_VALUES))]
else:
sub_paths = type.structs()
if sub_paths is not None:
self.result = [
Path(
sp.Path(
label=LIST_VALUES,
paths=[
subpath.protobuf()
for subpath in sub_paths
],
)
)
]
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if type.protobuf().WhichOneof('type') == 'struct':
self.result = [Path(sp.Path(label=ARRAY_VALUES))]
else:
sub_paths = type.structs()
if sub_paths is not None:
self.result = [
Path(
sp.Path(
label=ARRAY_VALUES,
paths=[
subpath.protobuf()
for subpath in sub_paths
],
)
)
]
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
visitor = AddPath()
self.accept(visitor)
return visitor.result
def leaves(self: st.Type) -> t.List[st.Type]:
"""Returns a list of the sub-types corresponding to
the leaves of the type tree structure"""
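# Illustrative sketch (hypothetical, not executed):
#   Struct(fields={"a": Integer(), "b": Optional(Text())}).leaves()
# would return the Integer and Text sub-types, while Integer().leaves()
# returns a list containing the Integer type itself.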
class AddLeaves(st.TypeVisitor):
result: t.List[st.Type] = []
def __init__(self, type_item: st.Type):
self.result = [type_item]
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
for item_name in fields.keys():
result.extend(fields[item_name].leaves())
if len(result) > 0:
self.result = result
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
for item_name in fields.keys():
result.extend(fields[item_name].leaves())
if len(result) > 0:
self.result = result
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
result.extend(type.leaves())
if len(result) > 0:
self.result = result
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
result.extend(type.leaves())
if len(result) > 0:
self.result = result
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
result = []
result.extend(type.leaves())
if len(result) > 0:
self.result = result
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
visitor = AddLeaves(type_item=self)
self.accept(visitor)
return visitor.result
def children(self: st.Type) -> t.Dict[str, st.Type]:
"""Returns the children contained in the type tree structure"""
class GetChildren(st.TypeVisitor):
result: t.Dict[str, st.Type] = {}
def __init__(
self, properties: t.Optional[t.Mapping[str, str]] = None
):
self.properties = properties
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = t.cast(t.Dict[str, st.Type], fields)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = t.cast(t.Dict[str, st.Type], fields)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {OPTIONAL_VALUE: type}
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {LIST_VALUES: type}
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {ARRAY_VALUES: type}
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
visitor = GetChildren(properties=self.properties())
self.accept(visitor)
return visitor.result
def example(self) -> pa.Array:
"""This methods returns a pyarrow scalar that matches the type.
For an optional type, we consider the case where, the field
is not missing.
"""
class ToArrow(st.TypeVisitor):
result = pa.nulls(0)
def __init__(
self, properties: t.Optional[t.Mapping[str, str]] = None
):
self.properties = properties if properties is not None else {}
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = pa.nulls(1)
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = pa.array([True], type=pa.bool_())
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# TODO: we should clarify for Ids, user_input
# and so on, to be consistent
if base == st.IdBase.STRING:
self.Text(
encoding="",
possible_values=['1'],
properties={
TEXT_CHARSET: "[\"1\"]",
TEXT_MAX_LENGTH: "1",
},
)
elif base in (
st.IdBase.INT8,
st.IdBase.INT16,
st.IdBase.INT32,
st.IdBase.INT64,
):
int_base = {
st.IdBase.INT8: st.IntegerBase.INT8,
st.IdBase.INT16: st.IntegerBase.INT16,
st.IdBase.INT32: st.IntegerBase.INT32,
st.IdBase.INT64: st.IntegerBase.INT64,
}[base]
self.Integer(
min=1, max=1, base=int_base, possible_values=[1]
)
else:
raise NotImplementedError
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pa_type: pa.DataType = {
st.IntegerBase.INT8: pa.int8(),
st.IntegerBase.INT16: pa.int16(),
st.IntegerBase.INT32: pa.int32(),
st.IntegerBase.INT64: pa.int64(),
}[base]
self.result = pa.array([int((min + max) / 2)], type=pa_type)
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.array([name_values[0][0]], pa.string())
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pa_type: pa.DataType = {
st.FloatBase.FLOAT16: pa.float16(),
st.FloatBase.FLOAT32: pa.float32(),
st.FloatBase.FLOAT64: pa.float64(),
}[base]
x: t.Union[float, np.float16] = (min + max) / 2
if base == st.FloatBase.FLOAT16:
x = np.float16(x)
self.result = pa.array([x], type=pa_type)
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
try:
char_set = json.loads(self.properties[TEXT_CHARSET])
except json.JSONDecodeError:
self.result = pa.array([""], pa.string())
else:
max_length = int(self.properties[TEXT_MAX_LENGTH])
ex = ''.join(char_set)
if len(ex) > max_length:
ex = ex[:max_length]
self.result = pa.array([ex], pa.string())
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = pa.array(
[bytes('1', 'utf-8')], pa.binary(length=1)
)
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.StructArray.from_arrays(
arrays=[
field_type.example() for field_type in fields.values()
],
names=list(fields.keys()),
)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
n_fields = len(fields)
arrays = []
for j, field_type in enumerate(fields.values()):
middle_arr = field_type.example()
early_arr = pa.nulls(j, type=middle_arr.type)
late_arr = pa.nulls(n_fields - j - 1, type=middle_arr.type)
arrays.append(
pa.concat_arrays([early_arr, middle_arr, late_arr])
)
names = list(fields.keys())
arrays.append(pa.array(names, pa.string()))
names.append('field_selected')
self.result = pa.StructArray.from_arrays(
arrays=arrays,
names=names,
)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = type.example()
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
sub_type = type.example()
# build ListArray with one single repeated value
self.result = pa.ListArray.from_arrays(
offsets=[0, 1], values=pa.concat_arrays([sub_type])
)
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.array(
pd.to_datetime([max], format=format), pa.timestamp('ns')
)
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = ToArrow(properties=self.properties())
self.accept(visitor)
return visitor.result
def numpy_example(self) -> np.ndarray:
"""Returns an example of numpy array matching the type.
For an optional type, it returns a non-missing value of the type.
"""
return self.example().to_numpy(zero_copy_only=False) # type:ignore
def tensorflow_example(self) -> t.Any:
"""This methods returns a dictionary where the leaves are
tf tensors. For optional types, we consider the case
where the field is not missing.
"""
class TensorflowExample(st.TypeVisitor):
result = {}
def __init__(
self, properties: t.Optional[t.Mapping[str, str]] = None
):
self.properties = properties if properties is not None else {}
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = tf.constant([np.NaN], dtype=tf.float64)
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = tf.constant([1], dtype=tf.int64)
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# TODO: we should clarify for Ids, user_input
# and so on, to be consistent
if base == st.IdBase.STRING:
self.result = tf.constant(['1'], tf.string)
elif base == st.IdBase.INT64:
self.result = tf.constant([1], tf.int64)
else:
raise NotImplementedError
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = tf.constant([int((min + max) / 2)], tf.int64)
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = tf.constant([name_values[0][0]], tf.string)
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = tf.constant([(min + max) / 2], dtype=tf.float64)
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
try:
char_set = json.loads(self.properties[TEXT_CHARSET])
except json.JSONDecodeError:
self.result = tf.constant([""], tf.string)
else:
max_length = int(self.properties[TEXT_MAX_LENGTH])
ex = ''.join(char_set)
if len(ex) > max_length:
ex = ex[:max_length]
self.result = tf.constant([ex], tf.string)
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = tf.constant(['1'], tf.string)
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {
field_name: field_type.tensorflow_example()
for field_name, field_type in fields.items()
}
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {
field_name: field_type.tensorflow_example()
for field_name, field_type in fields.items()
}
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {
'input_mask': tf.constant([1], dtype=tf.int64),
'values': type.tensorflow_example(),
}
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = tf.constant([min], dtype=tf.string)
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = TensorflowExample(properties=self.properties())
self.accept(visitor)
return visitor.result
def default(self) -> pa.Array:
"""This methods returns a pyarrow scalar that matches the type.
For an optional type, we consider the case where, the field
is missing.
"""
class Default(st.TypeVisitor):
result = pa.nulls(0)
def __init__(
self, properties: t.Optional[t.Mapping[str, str]] = None
):
self.properties = properties if properties is not None else {}
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = pa.nulls(0)
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = pa.array([True], type=pa.bool_())
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# TODO: we should clarify for Ids, user_input
# and so on, to be consistent
if base == st.IdBase.STRING:
self.result = pa.array(['1'], pa.string())
elif base == st.IdBase.INT64:
self.result = pa.array([1], pa.int64())
else:
raise NotImplementedError
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.array([int((min + max) / 2)], type=pa.int64())
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.array([name_values[0][0]], pa.string())
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.array([(min + max) / 2], type=pa.float64())
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
try:
char_set = json.loads(self.properties[TEXT_CHARSET])
except json.JSONDecodeError:
self.result = pa.array([""], pa.string())
else:
max_length = int(self.properties[TEXT_MAX_LENGTH])
ex = ''.join(char_set)
if len(ex) > max_length:
ex = ex[:max_length]
self.result = pa.array([ex], pa.string())
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = pa.array(
[bytes('1', 'utf-8')], pa.binary(length=1)
)
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.StructArray.from_arrays(
arrays=[
field_type.default() for field_type in fields.values()
],
names=list(fields.keys()),
)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
n_fields = len(fields)
arrays = []
for j, field_type in enumerate(fields.values()):
middle_arr = field_type.default()
early_arr = pa.nulls(j, type=middle_arr.type)
late_arr = pa.nulls(n_fields - j - 1, type=middle_arr.type)
arrays.append(
pa.concat_arrays([early_arr, middle_arr, late_arr])
)
names = list(fields.keys())
arrays.append(pa.array(names, pa.string()))
names.append('field_selected')
self.result = pa.StructArray.from_arrays(
arrays=arrays,
names=names,
)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.array([None], type=type.default().type)
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = pa.array(
pd.to_datetime([max], format=format), pa.timestamp('ns')
)
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = Default(properties=self.properties())
self.accept(visitor)
return visitor.result
def numpy_default(self) -> np.ndarray:
"""Returns an example of numpy array matching the type.
For an optional type, it sets the default missing value.
"""
return self.default().to_numpy(zero_copy_only=False) # type:ignore
def tensorflow_default(self, is_optional: bool = False) -> t.Any:
"""This methods returns a dictionary with tensors as leaves
that match the type.
For an optional type, we consider the case where the field
is missing, and set the default value for each missing type.
"""
class ToTensorflow(st.TypeVisitor):
result = {}
def __init__(
self, properties: t.Optional[t.Mapping[str, str]] = None
):
self.properties = properties if properties is not None else {}
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.result = tf.constant([np.NaN], dtype=tf.float64)
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
if is_optional:
self.result = tf.constant(
[np.iinfo(np.int64).max], dtype=tf.int64
)
else:
self.result = tf.constant([1], dtype=tf.int64)
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# TODO: we should clarify for Ids, user_input
# and so on, to be consistent
if is_optional:
if base == st.IdBase.STRING:
self.result = tf.constant([''], dtype=tf.string)
elif base == st.IdBase.INT64:
self.result = tf.constant(
[np.iinfo(np.int64).max], dtype=tf.int64
)
else:
raise NotImplementedError
else:
if base == st.IdBase.STRING:
self.result = tf.constant(['1'], tf.string)
elif base == st.IdBase.INT64:
self.result = tf.constant([1], tf.int64)
else:
raise NotImplementedError
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if is_optional:
self.result = tf.constant(
[np.iinfo(np.int64).min], dtype=tf.int64
)
else:
self.result = tf.constant(
[int((min + max) / 2)], dtype=tf.int64
)
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if is_optional:
self.result = tf.constant([''], dtype=tf.string)
else:
self.result = tf.constant([name_values[0][0]], tf.string)
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if is_optional:
self.result = tf.constant([np.NaN], dtype=tf.float64)
else:
self.result = tf.constant(
[(min + max) / 2], dtype=tf.float64
)
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if is_optional:
self.result = tf.constant([''], dtype=tf.string)
else:
try:
char_set = json.loads(self.properties[TEXT_CHARSET])
except json.JSONDecodeError:
self.result = tf.constant([""], tf.string)
else:
max_length = int(self.properties[TEXT_MAX_LENGTH])
ex = ''.join(char_set)
if len(ex) > max_length:
ex = ex[:max_length]
self.result = tf.constant([ex], tf.string)
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
if is_optional:
self.result = tf.constant([''], dtype=tf.string)
else:
self.result = tf.constant(['1'], tf.string)
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {
field_name: field_type.tensorflow_default(
is_optional=is_optional
)
for field_name, field_type in fields.items()
}
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {
field_name: field_type.tensorflow_default(
is_optional=is_optional
)
for field_name, field_type in fields.items()
}
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.result = {
'input_mask': tf.constant([0], dtype=tf.int64),
'values': type.tensorflow_default(is_optional=True),
}
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if is_optional:
self.result = tf.constant([''], dtype=tf.string)
else:
self.result = tf.constant([min], dtype=tf.string)
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = ToTensorflow(properties=self.properties())
self.accept(visitor)
return visitor.result
def path_leaves(self) -> t.Sequence[st.Path]:
"""Returns the list of each path to a leaf in the type. If the type
is a leaf, it returns an empty list"""
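# Illustrative sketch (hypothetical, not executed):
#   Struct(fields={"a": Integer(), "b": Struct(fields={"c": Text()})}).path_leaves()
#   # -> [path to "a", path to "b"/"c"]
# while Integer().path_leaves() returns [].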
class PathLeaves(st.TypeVisitor):
result = []
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
for field_name, field_type in fields.items():
sub_paths = field_type.path_leaves()
if len(sub_paths) > 0:
self.result.extend(
[
path_builder(label=field_name, paths=[el])
for el in sub_paths
]
)
else:
self.result.extend(
[path_builder(label=field_name, paths=[])]
)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
for field_name, field_type in fields.items():
sub_paths = field_type.path_leaves()
if len(sub_paths) > 0:
self.result.extend(
[
path_builder(label=field_name, paths=[el])
for el in sub_paths
]
)
else:
self.result.extend(
[path_builder(label=field_name, paths=[])]
)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
sub_paths = type.path_leaves()
if len(sub_paths) > 0:
self.result.extend(
[
path_builder(label=OPTIONAL_VALUE, paths=[el])
for el in sub_paths
]
)
else:
self.result.extend(
[path_builder(label=OPTIONAL_VALUE, paths=[])]
)
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = PathLeaves()
self.accept(visitor)
return visitor.result
# A few builders
def Null(
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(name='Null', null=sp.Type.Null(), properties=properties)
)
def Unit(properties: t.Optional[t.Mapping[str, str]] = None) -> Type:
return Type(
sp.Type(name='Unit', unit=sp.Type.Unit(), properties=properties)
)
def Id(
unique: bool,
base: t.Optional[st.IdBase] = None,
reference: t.Optional[st.Path] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
if base is None:
base = st.IdBase.STRING
if reference is None:
return Type(
sp.Type(
name='Id',
id=sp.Type.Id(
base=base.value,
unique=unique,
),
properties=properties,
)
)
return Type(
sp.Type(
name='Id',
id=sp.Type.Id(
base=base.value,
unique=unique,
reference=reference.protobuf(),
),
properties=properties,
)
)
def Boolean(
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name='Boolean', boolean=sp.Type.Boolean(), properties=properties
)
)
def Integer(
min: t.Optional[int] = None,
max: t.Optional[int] = None,
base: t.Optional[st.IntegerBase] = None,
possible_values: t.Optional[t.Iterable[int]] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
if base is None:
base = st.IntegerBase.INT64
if min is None:
if base == st.IntegerBase.INT64:
min = np.iinfo(np.int64).min
elif base == st.IntegerBase.INT32:
min = np.iinfo(np.int32).min
elif base == st.IntegerBase.INT16:
min = np.iinfo(np.int16).min
else:
min = np.iinfo(np.int8).min
if max is None:
if base == st.IntegerBase.INT64:
max = np.iinfo(np.int64).max
elif base == st.IntegerBase.INT32:
max = np.iinfo(np.int32).max
elif base == st.IntegerBase.INT16:
max = np.iinfo(np.int16).max
else:
max = np.iinfo(np.int8).max
return Type(
sp.Type(
name='Integer',
integer=sp.Type.Integer(
base=base.value,
min=min,
max=max,
possible_values=possible_values,
),
properties=properties,
)
)
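# Illustrative usage of the builders (hypothetical, not executed):
#   Integer()                                       # int64 with full int64 bounds
#   Integer(min=0, max=255, base=st.IntegerBase.INT16)
#   Optional(Text())                                # nullable UTF-8 text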
def Enum(
name: str,
name_values: t.Union[
t.Sequence[str], t.Sequence[int], t.Sequence[t.Tuple[str, int]]
],
ordered: bool = False,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
enum_name_values: t.List[sp.Type.Enum.NameValue]
if len(name_values) == 0:
raise ValueError("No enum values")
if isinstance(name_values[0], str):
name_values = t.cast(t.Sequence[str], name_values)
enum_name_values = [
sp.Type.Enum.NameValue(name=n, value=v)
for v, n in enumerate(sorted(name_values))
]
elif isinstance(name_values[0], int):
name_values = t.cast(t.Sequence[int], name_values)
enum_name_values = [
sp.Type.Enum.NameValue(name=str(v), value=v)
for v in sorted(name_values)
]
elif isinstance(name_values[0], tuple):
name_values = t.cast(t.Sequence[t.Tuple[str, int]], name_values)
enum_name_values = [
sp.Type.Enum.NameValue(name=n, value=v)
for n, v in sorted(name_values)
]
return Type(
sp.Type(
name=name,
enum=sp.Type.Enum(
base=sp.Type.Enum.Base.INT64,
ordered=ordered,
name_values=enum_name_values,
),
properties=properties,
)
)
def Float(
min: t.Optional[float] = np.finfo(np.float64).min, # type:ignore
max: t.Optional[float] = np.finfo(np.float64).max, # type:ignore
base: t.Optional[st.FloatBase] = None,
possible_values: t.Optional[t.Iterable[float]] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
if base is None:
base = st.FloatBase.FLOAT64
if min is None:
if base == st.FloatBase.FLOAT64:
min = np.finfo(np.float64).min # type:ignore
elif base == st.FloatBase.FLOAT32:
min = np.finfo(np.float32).min # type:ignore
else:
min = np.finfo(np.float16).min # type:ignore
if max is None:
if base == st.FloatBase.FLOAT64:
max = np.finfo(np.float64).max # type:ignore
elif base == st.FloatBase.FLOAT32:
max = np.finfo(np.float32).max # type:ignore
else:
max = np.finfo(np.float16).max # type:ignore
return Type(
sp.Type(
name='Float64',
float=sp.Type.Float(
base=base.value,
min=min, # type:ignore
max=max, # type:ignore
possible_values=possible_values,
),
properties=properties,
)
)
def Text(
encoding: str = 'UTF-8',
possible_values: t.Optional[t.Iterable[str]] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name=f'Text {encoding}',
text=sp.Type.Text(
encoding='UTF-8', possible_values=possible_values
),
properties=properties,
)
)
def Bytes() -> Type:
return Type(sp.Type(name='Bytes', bytes=sp.Type.Bytes()))
def Struct(
fields: t.Mapping[str, st.Type],
name: str = 'Struct',
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name=name,
struct=sp.Type.Struct(
fields=[
sp.Type.Struct.Field(name=name, type=type.protobuf())
for name, type in fields.items()
]
),
properties=properties,
)
)
def Union(
fields: t.Mapping[str, st.Type],
name: str = 'Union',
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name=name,
union=sp.Type.Union(
fields=[
sp.Type.Union.Field(
name=field_name, type=field_type.protobuf()
)
for field_name, field_type in fields.items()
]
),
properties=properties,
)
)
def Optional(
type: st.Type,
name: str = 'Optional',
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name=name,
optional=sp.Type.Optional(type=type.protobuf()),
properties=properties,
)
)
def List(
type: st.Type,
max_size: int = np.iinfo(np.int64).max,
name: str = 'List',
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name=name,
list=sp.Type.List(type=type.protobuf(), max_size=max_size),
properties=properties,
)
)
def Array(
type: st.Type,
shape: t.Sequence[int],
name: str = 'Array',
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name=name,
array=sp.Type.Array(type=type.protobuf(), shape=shape),
properties=properties,
)
)
def Datetime(
format: t.Optional[str] = None,
min: t.Optional[str] = None,
max: t.Optional[str] = None,
base: t.Optional[st.DatetimeBase] = None,
possible_values: t.Optional[t.Iterable[str]] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
if format is None:
format = '%Y-%m-%dT%H:%M:%S'
if base is None:
base = st.DatetimeBase.INT64_NS
assert base == st.DatetimeBase.INT64_NS
bounds = []
iint64 = np.iinfo(np.int64)
for i, bound in enumerate((min, max)):
if bound is None: # the bound is assumed to be sound otherwise
# This starts with the true bounds for the type datetime64[ns]
# However, storing dates as string implies an aliasing:
# datetime.datetime cannot be more precise than µs.
# So this would truncate the nanoseconds:
# ```
# min = (min + np.timedelta64(1, "us")).astype("datetime64[us]")
# max = max.astype("datetime64[us]")
# ```
# More generally, the date format can only represent values coarser
# than the true bound, which is fine for the max but not for the
# min, as truncation drops some time units.
if i == 0:
int_bound = iint64.min + 1 # iint64.min maps to 'NaT'
aliasing = np.timedelta64(0, 'ns')
# This looks for the lowest offset for the format:
# see:
# https://numpy.org/doc/stable/reference/arrays.datetime.html#datetime-and-timedelta-arithmetic
for unit, np_unit in [
("%Y", "Y"),
("%m", "M"),
("%d", "D"),
("%H", "h"),
("%M", "m"),
("%S", "s"),
("%f", "us"),
]:
if unit not in format:
break
elif unit in ["%m", "%Y"]:
# months and years have variable length
as_unit = np.datetime64(int_bound, np_unit)
aliasing = np.timedelta64(1, np_unit)
aliasing = as_unit + aliasing - as_unit
aliasing = aliasing.astype("timedelta64[ns]")
else:
aliasing = np.timedelta64(1, np_unit)
elif i == 1:
int_bound = iint64.max
aliasing = np.timedelta64(0, 'ns')
bound = str(
(np.datetime64(int_bound, "ns") + aliasing).astype(
"datetime64[us]"
)
)
bound = datetime.datetime.strptime(
bound, '%Y-%m-%dT%H:%M:%S.%f'
).strftime(format)
bounds.append(bound)
return Type(
sp.Type(
name='Datetime',
datetime=sp.Type.Datetime(
format=format,
min=bounds[0],
max=bounds[1],
base=base.value,
possible_values=possible_values,
),
properties=properties,
)
)
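# Illustrative note (hypothetical, not executed): Datetime() uses the format
# '%Y-%m-%dT%H:%M:%S' and derives its default min/max from the int64
# datetime64[ns] bounds, corrected for the aliasing described above, e.g.
#   Datetime(format='%Y-%m-%d %H:%M:%S', min='2000-01-01 00:00:00')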
def Date(
format: t.Optional[str] = None,
min: t.Optional[str] = None,
max: t.Optional[str] = None,
possible_values: t.Optional[t.Iterable[str]] = None,
base: t.Optional[st.DateBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
"""Inspired by pyarrow.date32() type. This is compatible with
pyarrow-pandas integration:
https://arrow.apache.org/docs/python/pandas.html#pandas-arrow-conversion
pyarrow.date32() and not pyarrow.date64() because the latter is defined as:
"milliseconds since UNIX epoch 1970-01-01"
which is a bit bizarre since there are multiple integers representing
a single date.
Default ranges are defined by datetime.date lowest and highest year:
0001-01-01 and 9999-12-31. Note that if the SQL database has a date outside
of this range, table reflection will fail, since SQLAlchemy also uses
datetime.date as the underlying Python type.
"""
if format is None:
format = '%Y-%m-%d'
if base is None:
base = st.DateBase.INT32
if min is None:
min = datetime.datetime.strptime(
str(datetime.date(datetime.MINYEAR, 1, 1)), '%Y-%m-%d'
).strftime(format)
if max is None:
max = datetime.datetime.strptime(
str(datetime.date(datetime.MAXYEAR, 12, 31)), '%Y-%m-%d'
).strftime(format)
return Type(
sp.Type(
name='Date',
date=sp.Type.Date(
format=format,
min=min,
max=max,
base=base.value,
possible_values=possible_values,
),
properties=properties,
)
)
def Time(
format: t.Optional[str] = None,
min: t.Optional[str] = None,
max: t.Optional[str] = None,
possible_values: t.Optional[t.Iterable[str]] = None,
base: t.Optional[st.TimeBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
"""Very similar to Datetime. The difference here is that the
range is simpler.
"""
if format is None:
format = '%H:%M:%S.%f'
if base is None:
base = st.TimeBase.INT64_US
if base == st.TimeBase.INT64_NS:
raise NotImplementedError(
'time with nanoseconds resolution not supported'
)
if min is None:
min = datetime.time.min.strftime(
format.replace("%f", "{__subseconds__}").format(
__subseconds__=(
"000000" if base == st.TimeBase.INT64_US else "000"
)
)
)
if max is None:
max = datetime.time.max.strftime(
format.replace("%f", "{__subseconds__}").format(
__subseconds__=(
"999999" if base == st.TimeBase.INT64_US else "999"
)
)
)
return Type(
sp.Type(
name='Time',
time=sp.Type.Time(
format=format,
min=min,
max=max,
base=base.value,
possible_values=possible_values,
),
properties=properties,
)
)
def Duration(
unit: t.Optional[str] = None,
min: t.Optional[int] = None,
max: t.Optional[int] = None,
possible_values: t.Optional[t.Iterable[int]] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
"""Inspired by pythons datetime.timedelta,
It stores duration as int64 with microseconds resolution.
If unit is provided the range is adjusted accodingly.
Compatible with pyarrow.duration(unit). All pyarrow units are valid:
https://arrow.apache.org/docs/python/generated/pyarrow.duration.html
except for 'ns' because it is incompatible with SQLAlchemy types
(backed by python's datetime.timedelta which has up to 'us' resolution)
and it would cause problems when pushing to sql (also SQL duration
types have up to 'us' resolution).
It raises an error if the provided unit is not among
('us', 'ms', 's'). The default unit is 'us'.
"""
if unit is None:
unit = 'us'
default_bounds = DURATION_UNITS_TO_RANGE.get(unit)
if default_bounds is None:
raise ValueError(
f'Duration unit {unit} not recognized. '
f'Only values in {DURATION_UNITS_TO_RANGE.keys()} are allowed'
)
bounds = [
default_bounds[0] if min is None else min,
default_bounds[1] if max is None else max,
]
return Type(
sp.Type(
name='Duration',
duration=sp.Type.Duration(
unit=unit,
min=bounds[0],
max=bounds[1],
possible_values=possible_values,
),
properties=properties,
)
)
def Constrained(
type: st.Type,
constraint: Predicate,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name='Constrained',
constrained=sp.Type.Constrained(
type=type.protobuf(), constraint=constraint._protobuf
),
properties=properties,
)
)
def Hypothesis(
*types: t.Tuple[st.Type, float],
name: str = 'Hypothesis',
properties: t.Optional[t.Mapping[str, str]] = None,
) -> Type:
return Type(
sp.Type(
name=name,
hypothesis=sp.Type.Hypothesis(
types=(
sp.Type.Hypothesis.Scored(type=v.protobuf(), score=s)
for v, s in types
)
),
properties=properties,
)
)
def extract_filter_from_types(
initial_type: st.Type, goal_type: st.Type
) -> st.Type:
class FilterVisitor(st.TypeVisitor):
"""Visitor that select type for filtering, it only takes
the Union types of the goal type and the rest is taken from
the initial type
"""
filter_type = initial_type
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# here select the fields in the goal type
self.filter_type = Union(
fields={
field_name: extract_filter_from_types(
initial_type=initial_type.children()[field_name],
goal_type=field_type,
)
for field_name, field_type in fields.items()
}
)
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# here select the fields in the initial type
self.filter_type = Struct(
fields={
field_name: (
extract_filter_from_types(
initial_type=field_type,
goal_type=fields[field_name],
)
if fields.get(field_name) is not None
else field_type
)
for field_name, field_type in initial_type.children().items() # noqa: E501
}
)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# here it does not change
self.filter_type = Optional(
type=extract_filter_from_types(
initial_type=initial_type.children()[OPTIONAL_VALUE],
goal_type=type,
)
)
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
visitor = FilterVisitor()
goal_type.accept(visitor)
return visitor.filter_type
def extract_project_from_types(
initial_type: st.Type, goal_type: st.Type
) -> st.Type:
class ProjectVisitor(st.TypeVisitor):
"""Visitor that select type for projecting, it only takes
the Project types of the goal type and the rest is taken from
the initial type
"""
project_type = initial_type
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# here select the fields in the initial type
self.project_type = Union(
fields={
field_name: (
extract_filter_from_types(
initial_type=field_type,
goal_type=fields[field_name],
)
if fields.get(field_name) is not None
else field_type
)
for field_name, field_type in initial_type.children().items() # noqa: E501
}
)
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# here select the fields in the goal type
self.project_type = Struct(
fields={
field_name: extract_project_from_types(
initial_type=initial_type.children()[field_name],
goal_type=field_type,
)
for field_name, field_type in fields.items()
}
)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# here it does not change
self.project_type = Optional(
type=extract_filter_from_types(
initial_type=initial_type.children()[OPTIONAL_VALUE],
goal_type=type,
)
)
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = ProjectVisitor()
goal_type.accept(visitor)
return visitor.project_type
def protected_type(input_type: st.Type) -> st.Type:
"""Convert a data Type to a protected Type."""
protection_fields = {
PUBLIC: Boolean(),
USER_COLUMN: Optional(type=Id(base=st.IdBase.STRING, unique=False)),
WEIGHTS: Float(min=0.0, max=np.finfo(np.float64).max), # type: ignore
}
if input_type.has_protection():
# Already protected
return input_type
elif input_type.has_admin_columns():
# Add protection to existing admin columns
fields = {
**input_type.children(),
**protection_fields,
}
return Struct(fields=fields)
else:
# Add admin columns
fields = {
DATA: input_type,
**protection_fields,
}
return Struct(fields=fields)
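# Hedged usage sketch (added for illustration, not part of the original module):
# given any plain column type `plain` without admin columns, protected_type
# wraps it in a Struct carrying the data next to the three protection fields.
# No specific type builder is implied; `plain` is assumed to be any st.Type.
def _protected_type_example(plain: st.Type) -> st.Type:
    protected = protected_type(plain)
    # The result is flagged as protected and exposes the admin fields.
    assert protected.has_protection()
    assert set(protected.children()) == {DATA, PUBLIC, USER_COLUMN, WEIGHTS}
    # Calling protected_type again is a no-op on an already protected type.
    assert protected_type(protected) is protected
    return protected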
if t.TYPE_CHECKING:
test_type: st.Type = Type(sp.Type())
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/type.py
| 0.727879 | 0.16896 |
type.py
|
pypi
|
from __future__ import annotations
from typing import (
Collection,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
cast,
)
import datetime
import typing as t
from sarus_data_spec.base import Referring
from sarus_data_spec.transform import Transform
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
class Scalar(Referring[sp.Scalar]):
"""A python class to describe scalars"""
def __init__(self, protobuf: sp.Scalar, store: bool = True) -> None:
if protobuf.spec.HasField("transformed"):
transformed = protobuf.spec.transformed
self._referred = {
transformed.transform,
*transformed.arguments,
*list(transformed.named_arguments.values()),
}
super().__init__(protobuf=protobuf, store=store)
def prototype(self) -> Type[sp.Scalar]:
"""Return the type of the underlying protobuf."""
return sp.Scalar
def name(self) -> str:
return self._protobuf.name
def doc(self) -> str:
return self._protobuf.doc
def spec(self) -> str:
return str(self._protobuf.spec.WhichOneof('spec'))
def is_transformed(self) -> bool:
"""Is the scalar composed."""
return self._protobuf.spec.HasField("transformed")
def is_remote(self) -> bool:
"""Is the dataspec a remotely defined dataset."""
return self.manager().is_remote(self)
def is_source(self) -> bool:
"""Is the scalar not composed."""
return not self.is_transformed()
def is_privacy_params(self) -> bool:
"""Is the scalar privacy parameters."""
return self._protobuf.spec.HasField("privacy_params")
def is_random_seed(self) -> bool:
"""Is the scalar a random seed."""
if self._protobuf.spec.HasField("random_seed"):
return True
if self.is_transformed():
transform = self.transform()
if transform.protobuf().spec.HasField("derive_seed"):
return True
return False
def is_synthetic_model(self) -> bool:
"""Is the scalar composed."""
return self._protobuf.spec.HasField("synthetic_model")
def is_pep(self) -> bool:
"""Is the scalar PEP."""
return False
def pep_token(self) -> Optional[str]:
"""Returns the scalar PEP token."""
return None
def is_synthetic(self) -> bool:
"""Is the scalar synthetic."""
return self.manager().dataspec_validator().is_synthetic(self)
def is_dp(self) -> bool:
"""Is the dataspec the result of a DP transform"""
return self.manager().dataspec_validator().is_dp(self)
def is_public(self) -> bool:
"""Is the scalar public."""
return self.manager().dataspec_validator().is_public(self)
def status(self, task_names: t.Optional[List[str]]) -> Optional[st.Status]:
"""This method return a status that contains all the
last updates for the task_names required. It returns None if
all the tasks are missing."""
if task_names is None:
task_names = []
if type(task_names) not in [list, set, tuple]:
raise TypeError(
f"Invalid task_names passed to dataset.status {task_names}"
)
last_status = self.manager().status(self)
if last_status is None:
return last_status
if all([last_status.task(task) is None for task in task_names]):
return None
return last_status
def transform(self) -> st.Transform:
return cast(
st.Transform,
self.storage().referrable(
self.protobuf().spec.transformed.transform
),
)
def parents(
self,
) -> Tuple[
List[Union[st.DataSpec, st.Transform]],
Dict[str, Union[st.DataSpec, st.Transform]],
]:
if not self.is_transformed():
return list(), dict()
args_id = self._protobuf.spec.transformed.arguments
kwargs_id = self._protobuf.spec.transformed.named_arguments
args_parents = [
cast(
Union[st.DataSpec, st.Transform],
self.storage().referrable(uuid),
)
for uuid in args_id
]
kwargs_parents = {
name: cast(
Union[st.DataSpec, st.Transform],
self.storage().referrable(uuid),
)
for name, uuid in kwargs_id.items()
}
return args_parents, kwargs_parents
def sources(
self, type_name: t.Optional[str] = sp.type_name(sp.Dataset)
) -> Set[st.DataSpec]:
"""Returns the set of non-transformed datasets that are parents
of the current dataset"""
sources = self.storage().sources(self, type_name=type_name)
return sources
def variant(
self,
kind: st.ConstraintKind,
public_context: Collection[str] = (),
privacy_limit: Optional[st.PrivacyLimit] = None,
salt: Optional[int] = None,
) -> Optional[st.DataSpec]:
return (
self.manager()
.dataspec_rewriter()
.variant(self, kind, public_context, privacy_limit, salt)
)
def variants(self) -> Collection[st.DataSpec]:
return self.manager().dataspec_rewriter().variants(self)
def private_queries(self) -> List[st.PrivateQuery]:
"""Return the list of PrivateQueries used in a Dataspec's transform.
It represents the privacy loss associated with the current computation.
It can be used by Sarus when a user (Access object) reads a DP dataspec
to update its accountant. Note that Private Query objects are generated
with a random uuid so that even if they are submitted multiple times to
an account, they are only accounted once (ask @cgastaud for more on
accounting)."""
return self.manager().dataspec_validator().private_queries(self)
def value(self) -> st.DataSpecValue:
return self.manager().value(self)
async def async_value(self) -> st.DataSpecValue:
return await self.manager().async_value(self)
# A Visitor acceptor
def accept(self, visitor: st.Visitor) -> None:
visitor.all(self)
if self.is_transformed():
visitor.transformed(
self,
cast(
Transform,
self.storage().referrable(
self._protobuf.spec.transformed.transform
),
),
*(
cast(Scalar, self.storage().referrable(arg))
for arg in self._protobuf.spec.transformed.arguments
),
**{
name: cast(Scalar, self.storage().referrable(arg))
for name, arg in self._protobuf.spec.transformed.named_arguments.items() # noqa: E501
},
)
else:
visitor.other(self)
def dot(self) -> str:
"""return a graphviz representation of the scalar"""
class Dot(st.Visitor):
visited: Set[st.DataSpec] = set()
nodes: Dict[str, Tuple[str, str]] = {}
edges: Dict[Tuple[str, str], str] = {}
def transformed(
self,
visited: st.DataSpec,
transform: st.Transform,
*arguments: st.DataSpec,
**named_arguments: st.DataSpec,
) -> None:
if visited not in self.visited:
if visited.prototype() == sp.Dataset:
self.nodes[visited.uuid()] = (
visited.name(),
"Dataset",
)
else:
self.nodes[visited.uuid()] = (visited.name(), "Scalar")
if not visited.is_remote():
for argument in arguments:
self.edges[
(argument.uuid(), visited.uuid())
] = transform.name()
argument.accept(self)
for _, argument in named_arguments.items():
self.edges[
(argument.uuid(), visited.uuid())
] = transform.name()
argument.accept(self)
self.visited.add(visited)
def other(self, visited: st.DataSpec) -> None:
if visited.prototype() == sp.Dataset:
self.nodes[visited.uuid()] = (
visited.name(),
"Dataset",
)
else:
self.nodes[visited.uuid()] = (visited.name(), "Scalar")
visitor = Dot()
self.accept(visitor)
result = 'digraph {'
for uuid, (label, node_type) in visitor.nodes.items():
shape = "polygon" if node_type == "Scalar" else "ellipse"
result += (
f'\n"{uuid}" [label="{label} ({uuid[:2]})", shape={shape}];'
)
for (u1, u2), label in visitor.edges.items():
result += f'\n"{u1}" -> "{u2}" [label="{label} ({uuid[:2]})"];'
result += '}'
return result
def attribute(self, name: str) -> Optional[st.Attribute]:
return self.manager().attribute(name=name, dataspec=self)
def attributes(self, name: str) -> List[st.Attribute]:
return self.manager().attributes(name=name, dataspec=self)
def privacy_budget(privacy_limit: st.PrivacyLimit) -> Scalar:
delta_epsilon_dict = privacy_limit.delta_epsilon_dict()
return Scalar(
sp.Scalar(
name='privacy_budget',
spec=sp.Scalar.Spec(
privacy_params=sp.Scalar.PrivacyParameters(
points=[
sp.Scalar.PrivacyParameters.Point(
epsilon=epsilon, delta=delta
)
for delta, epsilon in delta_epsilon_dict.items()
]
)
),
)
)
def random_seed(value: int) -> Scalar:
return Scalar(
sp.Scalar(
name='seed',
spec=sp.Scalar.Spec(random_seed=sp.Scalar.RandomSeed(value=value)),
)
)
def synthetic_model() -> Scalar:
return Scalar(
sp.Scalar(
name='synthetic_model',
spec=sp.Scalar.Spec(synthetic_model=sp.Scalar.SyntheticModel()),
properties={'creation_time': str(datetime.datetime.now())},
)
)
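# Hedged usage sketch (added for illustration; it assumes a dataspec storage
# and context are configured, since building a Scalar stores it): source
# scalars built with the helpers above are recognised by the predicates
# defined on Scalar.
def _scalar_builders_example() -> None:
    seed = random_seed(42)
    assert seed.is_random_seed() and seed.is_source()
    model = synthetic_model()
    assert model.is_synthetic_model() and not model.is_transformed()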
class Visitor:
"""A visitor class for Scalar"""
def all(self, visited: Scalar) -> None:
return
def transformed(
self,
visited: Scalar,
transform: Transform,
*arguments: Scalar,
**named_arguments: Scalar,
) -> None:
return
def other(self, visited: Scalar) -> None:
return
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/scalar.py
| 0.930703 | 0.362151 |
scalar.py
|
pypi
|
from __future__ import annotations
import typing as t
import pyarrow as pa
from sarus_data_spec.arrow.schema import to_arrow
from sarus_data_spec.base import Referring
from sarus_data_spec.constants import DATA, DATASET_SLUGNAME, PUBLIC
from sarus_data_spec.path import Path, path
from sarus_data_spec.type import Type
import sarus_data_spec.dataset as sd
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
class Schema(Referring[sp.Schema]):
"""A python class to describe schemas"""
def __init__(self, protobuf: sp.Schema, store: bool = True) -> None:
self._referred = {
protobuf.dataset
} # This has to be defined before it is initialized
super().__init__(protobuf, store=store)
self._type = Type(self._protobuf.type)
def prototype(self) -> t.Type[sp.Schema]:
"""Return the type of the underlying protobuf."""
return sp.Schema
def name(self) -> str:
return self._protobuf.name
def dataset(self) -> sd.Dataset:
return t.cast(
sd.Dataset, self.storage().referrable(self._protobuf.dataset)
)
def to_arrow(self) -> pa.Schema:
return to_arrow(self.protobuf())
def type(self) -> Type:
"""Returns the first type level of the schema"""
return self._type
def data_type(self) -> Type:
"""Returns the first type level containing the data,
hence skips the protected_entity struct if there is one"""
return self.type().data_type()
def has_admin_columns(self) -> bool:
return self.type().has_admin_columns()
def is_protected(self) -> bool:
return self.type().has_protection()
def protected_path(self) -> Path:
"""Returns the path to the protected entities"""
return Path(self.protobuf().protected)
# TODO: Add to_parquet, to_tensorflow, to_sql... here?
# The Schema has a manager, it would provide the implementation
def tables(self) -> t.List[st.Path]:
struct_paths = self.data_type().structs()
if struct_paths is None: # there is no struct
return []
if len(struct_paths) == 0: # struct is the first level
return [path(label=DATA)]
return [
path(label=DATA, paths=[t.cast(Path, element)])
for element in struct_paths
]
def private_tables(self) -> t.List[st.Path]:
return [
table
for table in self.tables()
if self.data_type().sub_types(table)[0].properties()[PUBLIC]
!= str(True)
]
def public_tables(self) -> t.List[st.Path]:
return [
table
for table in self.tables()
if self.data_type().sub_types(table)[0].properties()[PUBLIC]
== str(True)
]
# Builder
def schema(
dataset: st.Dataset,
fields: t.Optional[t.Mapping[str, st.Type]] = None,
schema_type: t.Optional[st.Type] = None,
protected_paths: t.Optional[sp.Path] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
name: t.Optional[str] = None,
) -> Schema:
"""A builder to ease the construction of a schema"""
if name is None:
name = dataset.properties().get(
DATASET_SLUGNAME, f'{dataset.name()}_schema'
)
assert name is not None
if fields is not None:
return Schema(
sp.Schema(
dataset=dataset.uuid(),
name=name,
type=sp.Type(
struct=sp.Type.Struct(
fields=[
sp.Type.Struct.Field(
name=name, type=type.protobuf()
)
for name, type in fields.items()
]
)
),
protected=protected_paths,
properties=properties,
)
)
if schema_type is not None:
return Schema(
sp.Schema(
dataset=dataset.uuid(),
name=name,
type=schema_type.protobuf(),
protected=protected_paths,
properties=properties,
)
)
# If none of fields or type is defined, set type to Null
return Schema(
sp.Schema(
dataset=dataset.uuid(),
name=name,
type=sp.Type(name='Null', null=sp.Type.Null()),
protected=protected_paths,
properties=properties,
)
)
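# Hedged usage sketch (added; the sqlite uri and the field name are illustrative
# only, and building the objects assumes a configured dataspec context):
def _schema_builder_example() -> Schema:
    ds = sd.sql(uri='sqlite:///:memory:')
    # Passing `fields` builds a Struct schema; omitting both `fields` and
    # `schema_type` falls back to the Null type as in the last branch above.
    return schema(
        ds, fields={'age': Type(sp.Type(name='Null', null=sp.Type.Null()))}
    )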
if t.TYPE_CHECKING:
test_schema: st.Schema = schema(sd.sql(uri='sqlite:///:memory:'))
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/schema.py
| 0.742888 | 0.33185 |
schema.py
|
pypi
|
from __future__ import annotations
import datetime
import typing as t
from sarus_data_spec.base import Referrable
from sarus_data_spec.json_serialisation import SarusJSONEncoder
from sarus_data_spec.path import straight_path
import sarus_data_spec.dataset as sd
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
class Transform(Referrable[sp.Transform]):
"""A python class to describe transforms"""
def prototype(self) -> t.Type[sp.Transform]:
"""Return the type of the underlying protobuf."""
return sp.Transform
def name(self) -> str:
return self._protobuf.name
def doc(self) -> str:
return self._protobuf.doc
def is_composed(self) -> bool:
"""Is the transform composed."""
return self._protobuf.spec.HasField('composed')
def is_variable(self) -> bool:
"""Is the transform a variable."""
return self._protobuf.spec.HasField('variable')
def spec(self) -> str:
return t.cast(str, self._protobuf.spec.WhichOneof('spec'))
def is_external(self) -> bool:
"""Is the transform an external operation."""
return self._protobuf.spec.HasField("external")
def infer_output_type(
self,
*arguments: t.Union[st.DataSpec, st.Transform],
**named_arguments: t.Union[st.DataSpec, st.Transform],
) -> t.Tuple[str, t.Callable[[st.DataSpec], None]]:
"""Guess if the external transform output is a Dataset or a Scalar.
Registers schema if it is a Dataset and returns the value type.
"""
return self.manager().infer_output_type(
self, *arguments, **named_arguments
)
def transforms(self) -> t.Set[st.Transform]:
"""return all transforms (and avoid infinite recursions/loops)"""
class Transforms(st.TransformVisitor):
visited: t.Set[st.Transform] = set()
def all(self, visited: st.Transform) -> None:
self.visited.add(visited)
def composed(
self,
visited: st.Transform,
transform: st.Transform,
*arguments: st.Transform,
**named_arguments: st.Transform,
) -> None:
self.visited.add(transform)
if transform not in self.visited:
transform.accept(self)
for arg in arguments:
if arg not in self.visited:
arg.accept(self)
for name, arg in named_arguments.items():
if arg not in self.visited:
arg.accept(self)
def other(self, visited: st.Transform) -> None:
raise ValueError(
"A composed transform can only have Variables "
"or Composed ancestors."
)
visitor = Transforms()
self.accept(visitor)
return visitor.visited
def variables(self) -> t.Set[st.Transform]:
"""Return all the variables from a composed transform"""
return {
transform
for transform in self.transforms()
if transform.is_variable()
}
def compose(
self,
*compose_arguments: st.Transform,
**compose_named_arguments: st.Transform,
) -> st.Transform:
class Compose(st.TransformVisitor):
visited: t.Set[st.Transform] = set()
result: st.Transform
def variable(
self,
visited: st.Transform,
name: str,
position: int,
) -> None:
self.result = visited
self.result = compose_named_arguments[name]
def composed(
self,
visited: st.Transform,
transform: st.Transform,
*arguments: st.Transform,
**named_arguments: st.Transform,
) -> None:
if visited not in self.visited:
self.result = composed(
transform,
*(
arg.compose(
*compose_arguments, **compose_named_arguments
)
for arg in arguments
),
**{
name: arg.compose(
*compose_arguments, **compose_named_arguments
)
for name, arg in named_arguments.items()
},
)
self.visited.add(visited)
else:
self.result = visited
def other(self, visited: st.Transform) -> None:
self.result = composed(
visited, *compose_arguments, **compose_named_arguments
)
visitor = Compose()
self.accept(visitor)
return visitor.result
def apply(
self,
*apply_arguments: st.DataSpec,
**apply_named_arguments: st.DataSpec,
) -> st.DataSpec:
class Apply(st.TransformVisitor):
visited: t.Dict[st.Transform, st.DataSpec] = {}
result: st.DataSpec
def variable(
self,
visited: st.Transform,
name: str,
position: int,
) -> None:
self.result = apply_named_arguments[name]
if self.result is None:
raise ValueError("Cannot substitute all variables")
def composed(
self,
visited: st.Transform,
transform: st.Transform,
*arguments: st.Transform,
**named_arguments: st.Transform,
) -> None:
if visited not in self.visited:
self.result = t.cast(
sd.Dataset,
sd.transformed(
transform,
*(
arg.apply(
*apply_arguments, **apply_named_arguments
)
for arg in arguments
),
dataspec_type=None,
dataspec_name=None,
**{
name: arg.apply(
*apply_arguments, **apply_named_arguments
)
for name, arg in named_arguments.items()
},
),
)
self.visited[visited] = self.result
def other(self, visited: st.Transform) -> None:
self.result = sd.transformed(
visited,
*apply_arguments,
dataspec_type=None,
dataspec_name=None,
**apply_named_arguments,
)
visitor = Apply()
self.accept(visitor)
return visitor.result
def abstract(
self,
*arguments: str,
**named_arguments: str,
) -> st.Transform:
return composed(
self,
*(variable(name=arg) for arg in arguments),
**{
name: variable(name=arg)
for name, arg in named_arguments.items()
},
)
def __call__(
self,
*arguments: t.Union[st.Transform, st.DataSpec, int, str],
**named_arguments: t.Union[st.Transform, st.DataSpec, int, str],
) -> t.Union[st.Transform, st.DataSpec]:
"""Applies the transform to another element"""
n_transforms = 0
n_datasets = 0
n_variables = 0
for arg in arguments:
n_transforms += int(isinstance(arg, Transform))
n_datasets += int(isinstance(arg, st.DataSpec))
n_variables += int(isinstance(arg, int) or isinstance(arg, str))
for arg in named_arguments.values():
n_transforms += int(isinstance(arg, Transform))
n_datasets += int(isinstance(arg, st.DataSpec))
n_variables += int(isinstance(arg, int) or isinstance(arg, str))
total = len(arguments) + len(named_arguments)
if total == 0:
# If no argument is passed, we consider that we should apply
return self.apply(
*t.cast(t.Sequence[st.DataSpec], arguments),
**t.cast(t.Mapping[str, st.DataSpec], named_arguments),
)
elif n_transforms == total:
return self.compose(
*t.cast(t.Sequence[Transform], arguments),
**t.cast(t.Mapping[str, Transform], named_arguments),
)
elif n_variables == total:
return self.abstract(
*t.cast(t.Sequence[str], arguments),
**t.cast(t.Mapping[str, str], named_arguments),
)
elif n_transforms + n_datasets == total:
return self.apply(
*t.cast(t.Sequence[st.DataSpec], arguments),
**t.cast(t.Mapping[str, st.DataSpec], named_arguments),
)
return self
def __mul__(self, argument: st.Transform) -> st.Transform:
return self.compose(argument)
# A Visitor acceptor
def accept(self, visitor: st.TransformVisitor) -> None:
visitor.all(self)
if self.is_composed():
visitor.composed(
self,
t.cast(
Transform,
self.storage().referrable(
self._protobuf.spec.composed.transform
),
),
*(
t.cast(Transform, self.storage().referrable(transform))
for transform in self._protobuf.spec.composed.arguments
),
**{
name: t.cast(
Transform, self.storage().referrable(transform)
)
for name, transform in self._protobuf.spec.composed.named_arguments.items() # noqa: E501
},
)
elif self.is_variable():
var = self._protobuf.spec.variable
visitor.variable(self, name=var.name, position=var.position)
else:
visitor.other(self)
def dot(self) -> str:
"""return a graphviz representation of the transform"""
class Dot(st.TransformVisitor):
visited: t.Set[st.Transform] = set()
nodes: t.Dict[str, str] = {}
edges: t.Set[t.Tuple[str, str]] = set()
def variable(
self,
visited: st.Transform,
name: str,
position: int,
) -> None:
self.nodes[visited.uuid()] = f"{name} ({position})"
def composed(
self,
visited: st.Transform,
transform: st.Transform,
*arguments: st.Transform,
**named_arguments: st.Transform,
) -> None:
if visited not in self.visited:
transform.accept(self)
self.nodes[visited.uuid()] = transform.name()
for argument in arguments:
self.edges.add((argument.uuid(), visited.uuid()))
argument.accept(self)
for _, argument in named_arguments.items():
self.edges.add((argument.uuid(), visited.uuid()))
argument.accept(self)
self.visited.add(visited)
def other(self, visited: st.Transform) -> None:
pass
visitor = Dot()
self.accept(visitor)
result = 'digraph {'
for uuid, label in visitor.nodes.items():
result += f'\n"{uuid}" [label="{label} ({uuid[:2]})"];'
for u1, u2 in visitor.edges:
result += f'\n"{u1}" -> "{u2}";'
result += '}'
return result
def transform_to_apply(self) -> st.Transform:
"""Return the transform of a composed transform."""
assert self.is_composed()
uuid = self.protobuf().spec.composed.transform
return t.cast(st.Transform, self.storage().referrable(uuid))
def composed_parents(
self,
) -> t.Tuple[t.List[st.Transform], t.Dict[str, st.Transform]]:
"""Return the parents of a composed transform."""
assert self.is_composed()
args_id = self._protobuf.spec.composed.arguments
kwargs_id = self._protobuf.spec.composed.named_arguments
args_parents = [
t.cast(st.Transform, self.storage().referrable(uuid))
for uuid in args_id
]
kwargs_parents = {
name: t.cast(st.Transform, self.storage().referrable(uuid))
for name, uuid in kwargs_id.items()
}
return args_parents, kwargs_parents
def composed_callable(self) -> t.Callable[..., t.Any]:
"""Return the composed transform's equivalent callable.
The function takes an undefined number of named arguments.
"""
return self.manager().composed_callable(self)
# Builders
def identity() -> Transform:
return Transform(
sp.Transform(
name='Identity',
spec=sp.Transform.Spec(identity=sp.Transform.Identity()),
inversible=True,
schema_preserving=True,
)
)
def variable(name: str, position: int = 0) -> Transform:
return Transform(
sp.Transform(
name='Variable',
spec=sp.Transform.Spec(
variable=sp.Transform.Variable(
name=name,
position=position,
)
),
inversible=True,
schema_preserving=True,
)
)
def composed(
transform: st.Transform,
*arguments: st.Transform,
**named_arguments: st.Transform,
) -> st.Transform:
if transform.is_composed():
# We want to compose simple transforms only
return transform.compose(*arguments, **named_arguments)
return Transform(
sp.Transform(
name='Composed',
spec=sp.Transform.Spec(
composed=sp.Transform.Composed(
transform=transform.uuid(),
arguments=(a.uuid() for a in arguments),
named_arguments={
n: a.uuid() for n, a in named_arguments.items()
},
)
),
)
)
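# Hedged usage sketch (added; the variable name 'x' is illustrative and a
# configured dataspec context is assumed since transforms are stored on
# creation): abstract() wraps arguments into Variable transforms and compose()
# substitutes them with other transforms.
def _compose_example() -> st.Transform:
    f = identity().abstract('x')    # Composed(Identity, Variable('x'))
    return f.compose(x=identity())  # replace the variable with another transform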
def op_identifier_from_id(id: str) -> sp.Transform.External.OpIdentifier:
"""Build an OpIdentifier protobuf message from a string identifier.
Args:
id (str): id in the form library.name (e.g. sklearn.PD_MEAN)
"""
parts = id.split(".")
if len(parts) != 2:
raise ValueError(
f"Transform ID {id} should have the format library.name"
)
library, name = parts
mapping = {
"std": sp.Transform.External.Std,
"sklearn": sp.Transform.External.Sklearn,
"pandas": sp.Transform.External.Pandas,
"pandas_profiling": sp.Transform.External.PandasProfiling,
"numpy": sp.Transform.External.Numpy,
"tensorflow": sp.Transform.External.Tensorflow,
"xgboost": sp.Transform.External.XGBoost,
"skopt": sp.Transform.External.Skopt,
"imblearn": sp.Transform.External.Imblearn,
"shap": sp.Transform.External.Shap,
}
if library not in mapping.keys():
raise ValueError(f"Unsupported library {library}")
MsgClass = mapping[library]
msg = sp.Transform.External.OpIdentifier()
getattr(msg, library).CopyFrom(MsgClass(name=name))
return msg
def transform_id(transform: st.Transform) -> str:
"""Return the transform id."""
spec = transform.protobuf().spec
spec_type = str(spec.WhichOneof("spec"))
if spec_type != "external":
return spec_type
else:
library = str(spec.external.op_identifier.WhichOneof("op"))
op_name = getattr(spec.external.op_identifier, library).name
return f"{library}.{op_name}"
def external(
id: str,
py_args: t.Dict[int, t.Any] = {},
py_kwargs: t.Dict[str, t.Any] = {},
ds_args_pos: t.List[int] = [],
ds_types: t.Dict[t.Union[int, str], str] = {},
) -> Transform:
"""Create an external library transform.
Args:
id (str): id in the form library.name (e.g. sklearn.PD_MEAN)
py_args (Dict[int, Any]):
the Python objects passed as arguments to the transform.
py_kwargs (Dict[str, Any]):
the Python objects passed as keyword arguments to the transform.
ds_args_pos (List[int]):
the positions of Dataspecs passed in args.
ds_types (Dict[int | str, str]):
the types of the Dataspecs passed as arguments.
"""
external = sp.Transform.External(
arguments=SarusJSONEncoder.encode_bytes([]),
named_arguments=SarusJSONEncoder.encode_bytes(
{
"py_args": py_args,
"py_kwargs": py_kwargs,
"ds_args_pos": ds_args_pos,
"ds_types": ds_types,
}
),
op_identifier=op_identifier_from_id(id),
)
return Transform(
sp.Transform(
name=id,
spec=sp.Transform.Spec(
external=external,
),
)
)
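# Hedged usage sketch (added; "pandas.PD_MEAN" is an illustrative op id and a
# configured dataspec context is assumed): the id round-trips through
# transform_id once the external transform is built.
def _external_example() -> Transform:
    tr = external("pandas.PD_MEAN", py_kwargs={"axis": 0}, ds_args_pos=[0])
    assert transform_id(tr) == "pandas.PD_MEAN"
    return tr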
def project(projection: st.Type) -> Transform:
return Transform(
sp.Transform(
name='Project',
spec=sp.Transform.Spec(
project=sp.Transform.Project(projection=projection.protobuf())
),
inversible=False,
schema_preserving=False,
)
)
def filter(filter: st.Type) -> Transform:
return Transform(
sp.Transform(
name='Filter',
spec=sp.Transform.Spec(
filter=sp.Transform.Filter(filter=filter.protobuf())
),
inversible=False,
schema_preserving=False,
)
)
def shuffle() -> Transform:
return Transform(
sp.Transform(
name='Shuffle',
spec=sp.Transform.Spec(shuffle=sp.Transform.Shuffle()),
inversible=False,
schema_preserving=True,
)
)
def join(on: st.Type) -> Transform:
return Transform(
sp.Transform(
name='Join',
spec=sp.Transform.Spec(join=sp.Transform.Join(on=on.protobuf())),
inversible=False,
schema_preserving=False,
)
)
def cast(type: st.Type) -> Transform:
return Transform(
sp.Transform(
name='Cast',
spec=sp.Transform.Spec(
cast=sp.Transform.Cast(type=type.protobuf())
),
inversible=False,
schema_preserving=False,
)
)
def sample(fraction_size: t.Union[float, int], seed: st.Scalar) -> Transform:
"""Transform to sample from a dataspec
- the dataset that needs to be protected as the first arg
- a kwarg seed"""
return Transform(
sp.Transform(
name='Sample',
spec=sp.Transform.Spec(
sample=sp.Transform.Sample(
size=fraction_size,
seed=seed.protobuf(),
)
if isinstance(fraction_size, int)
else sp.Transform.Sample(
fraction=fraction_size, seed=seed.protobuf()
)
),
inversible=False,
schema_preserving=False,
)
)
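# Hedged usage sketch (added; `seed` is assumed to be a seed Scalar, e.g. built
# with scalar.random_seed): an int size builds a fixed-size sample while a
# float builds a fractional one, matching the dispatch above.
def _sample_example(seed: st.Scalar) -> t.Tuple[Transform, Transform]:
    return sample(1_000, seed), sample(0.1, seed)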
def user_settings() -> Transform:
"""Transform to create a dataspec from
a protected one with a new schema. It should
be called on:
- the dataset that needs to be protected as the first arg
- a kwarg user_type: scalar output of automatic_user_setttings"""
return Transform(
sp.Transform(
name='User Settings',
spec=sp.Transform.Spec(user_settings=sp.Transform.UserSettings()),
inversible=False,
schema_preserving=False,
)
)
def automatic_user_settings(max_categories: int = 200) -> Transform:
"""Transform to be called on a protected dataset
we want to change the schema. It creates a scalar
whose value explicits the new type of the schema"""
return Transform(
sp.Transform(
name='automatic_user_settings',
spec=sp.Transform.Spec(
automatic_user_settings=sp.Transform.AutomaticUserSettings(
max_categories=max_categories
)
),
inversible=False,
schema_preserving=False,
properties={'creation_time': str(datetime.datetime.now())},
)
)
def synthetic() -> Transform:
"""Synthetic transform. This transform should be
called on a dataset with the additional following kwargs:
-sampling_ratios: a scalar created by the transform sampling ratios
-synthetic_model: a scalar of type synthetic_model
"""
return Transform(
sp.Transform(
name="Synthetic data",
spec=sp.Transform.Spec(
synthetic=sp.Transform.Synthetic(),
),
inversible=False,
schema_preserving=True,
)
)
def protect() -> Transform:
"""Transform used for protection should be called on:
- the dataset that needs to be protected as the first arg
- a kwarg protected_paths: scalar specifying the paths
to the entities to protect
- a kwarg public_paths: scalar specifying the paths to
the public tables"""
return Transform(
sp.Transform(
name='Protect',
spec=sp.Transform.Spec(protect_dataset=sp.Transform.Protect()),
inversible=True,
schema_preserving=False,
)
)
def transcode() -> st.Transform:
return Transform(
sp.Transform(
name='Transcode',
spec=sp.Transform.Spec(transcode=sp.Transform.Transcode()),
inversible=True,
schema_preserving=False,
)
)
def inverse_transcode() -> st.Transform:
return Transform(
sp.Transform(
name='Inverse Transcoding for synthetic data',
spec=sp.Transform.Spec(
inverse_transcode=sp.Transform.InverseTranscode()
),
inversible=True,
schema_preserving=False,
)
)
def automatic_protected_paths() -> st.Transform:
"""Transform that should be called on the dataset
that needs to be protected, it creates a scalar whose
value will explicit the paths to protect"""
return Transform(
sp.Transform(
name='automatic_protected_paths',
spec=sp.Transform.Spec(
protected_paths=sp.Transform.ProtectedPaths()
),
properties={'creation_time': str(datetime.datetime.now())},
)
)
def automatic_public_paths() -> st.Transform:
"""Transform that should be called on the dataset
that needs to be protected, it creates a scalar whose
value will explicit the paths to public entities"""
return Transform(
sp.Transform(
name='automatic_public_paths',
spec=sp.Transform.Spec(public_paths=sp.Transform.PublicPaths()),
properties={'creation_time': str(datetime.datetime.now())},
)
)
def get_item(path: st.Path) -> st.Transform:
return Transform(
sp.Transform(
name='get_item',
spec=sp.Transform.Spec(
get_item=sp.Transform.GetItem(path=path.protobuf())
),
inversible=False,
schema_preserving=False,
)
)
def assign_budget() -> st.Transform:
"""Transform to assign a given privacy budget to a dataset.
It is used to specify the budget to compute the attributes
size, bounds, marginals"""
return Transform(
sp.Transform(
name='budget_assignment',
spec=sp.Transform.Spec(assign_budget=sp.Transform.AssignBudget()),
)
)
def automatic_budget() -> st.Transform:
"""Transform to create a scalar specifying a budget
automatically from the dataset it is called on.
The rule to fix the budget is set in the corresponding
op.
"""
return Transform(
sp.Transform(
name='automatic_budget',
spec=sp.Transform.Spec(
automatic_budget=sp.Transform.AutomaticBudget()
),
)
)
def attributes_budget() -> st.Transform:
"""Transform to create a scalar specifying an
epsilon,delta budget for the DP attributes of a
dataset. It is called on a scalar specifying a
global budget for attributes+sd."""
return Transform(
sp.Transform(
name='attributes_budget',
spec=sp.Transform.Spec(
attribute_budget=sp.Transform.AttributesBudget()
),
)
)
def sd_budget() -> st.Transform:
"""Transform to create a scalar specifying an
epsilon,delta budget for a synthetic dataset.
It should be called on another scalar that specifies
a global budget (SD+DP attributes)"""
return Transform(
sp.Transform(
name='sd_budget',
spec=sp.Transform.Spec(sd_budget=sp.Transform.SDBudget()),
)
)
def sampling_ratios() -> st.Transform:
"""Transform to create a scalar specifying the sampling ratio for
each table to synthetize from.
It should be called on the dataset to synthetize from."""
return Transform(
sp.Transform(
name='sampling_ratios',
spec=sp.Transform.Spec(
sampling_ratios=sp.Transform.SamplingRatios()
),
)
)
def derive_seed(random_int: int) -> st.Transform:
"""Transform to derive a seed from a master seed"""
return Transform(
sp.Transform(
name='derive_seed',
spec=sp.Transform.Spec(
derive_seed=sp.Transform.DeriveSeed(random_integer=random_int)
),
)
)
def group_by_pe() -> st.Transform:
"""Transform that allows to group fields
by protected entity value. This implies that
the dataset on which the transform is
applied should be PEP"""
return Transform(
sp.Transform(
name='group_by',
spec=sp.Transform.Spec(group_by_pe=sp.Transform.GroupByPE()),
)
)
def differentiated_sample( # type: ignore[no-untyped-def]
fraction_size: t.Union[float, int], seed=st.Scalar
) -> Transform:
return Transform(
sp.Transform(
name='DifferentiatedSample',
spec=sp.Transform.Spec(
differentiated_sample=sp.Transform.DifferentiatedSample(
size=fraction_size, seed=seed.protobuf()
)
if isinstance(fraction_size, int)
else sp.Transform.DifferentiatedSample(
fraction=fraction_size, seed=seed.protobuf()
)
),
)
)
def select_sql(
query: t.Union[str, t.Dict[t.Union[str, t.Tuple[str]], str]],
dialect: t.Optional[st.SQLDialect] = None,
) -> st.Transform:
"""Transform that applies a query or a batch of aliased queries to
a dataset.Calling .schema() or .to_arrow() on a select_sql transformed
dataset the .sql method will be invoked and the query will be executed.
"""
sql_dialect = (
sp.Transform.SQLDialect.POSTGRES
if not dialect
else sp.Transform.SQLDialect.Value(dialect.name)
)
if isinstance(query, str):
select_sql = sp.Transform.SelectSql(
query=query,
sql_dialect=sql_dialect,
)
elif len(query) == 0:
raise ValueError(
"""Transform `SelecltSQL` must be used with
at least one query"""
)
else:
queries = {
straight_path(
list(
name
if isinstance(name, t.Tuple) # type: ignore
else (name,)
)
): qry
for (name, qry) in query.items()
}
select_sql = sp.Transform.SelectSql(
aliased_queries=sp.Transform.AliasedQueries(
aliased_query=(
sp.Transform.AliasedQuery(
path=_path.protobuf(),
query=qry,
)
for (_path, qry) in queries.items()
),
),
sql_dialect=sql_dialect,
)
return Transform(
sp.Transform(
name='select_sql',
spec=sp.Transform.Spec(select_sql=select_sql),
inversible=False,
schema_preserving=False,
)
)
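# Hedged usage sketch (added; the table path and queries are illustrative): a
# plain string builds a single-query transform, while a dict keyed by table
# path builds a batch of aliased queries.
def _select_sql_example() -> t.Tuple[st.Transform, st.Transform]:
    single = select_sql("SELECT COUNT(*) FROM census")
    batch = select_sql({("public", "census"): "SELECT AVG(age) FROM census"})
    return single, batch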
def dp_select_sql(
query: t.Union[str, t.Dict[t.Union[str, t.Tuple[str]], str]],
dialect: t.Optional[st.SQLDialect] = None,
) -> st.Transform:
"""DP variant of select_sql transform. It should be called with a budget
and a seed as every dp transform.
"""
sql_dialect = (
sp.Transform.SQLDialect.POSTGRES
if not dialect
else sp.Transform.SQLDialect.Value(dialect.name)
)
if isinstance(query, str):
proto = sp.Transform.DPSelectSql(
query=query,
sql_dialect=sql_dialect,
)
elif len(query) == 0:
raise ValueError(
"""Transform `SelecltSQL` must be used with
at least one query"""
)
else:
queries = {
straight_path(
list(
name
if isinstance(name, t.Tuple) # type: ignore
else (name,)
)
): qry
for (name, qry) in query.items()
}
proto = sp.Transform.DPSelectSql(
aliased_queries=sp.Transform.AliasedQueries(
aliased_query=(
sp.Transform.AliasedQuery(
path=_path.protobuf(),
query=qry,
)
for (_path, qry) in queries.items()
),
),
sql_dialect=sql_dialect,
)
return Transform(
sp.Transform(
name='dp_select_sql',
spec=sp.Transform.Spec(dp_select_sql=proto),
inversible=False,
schema_preserving=False,
)
)
def extract(
size: int,
) -> st.Transform:
"""Transform that should be called on a dataset from which we want to
extract some rows from according to the size parameter and a kwargs
random_seed, a scalar that is a seed. For now, seed and size are
ignored and iterating on the extract transfomed dataset will be as
iterating over the parent dataset.
"""
return Transform(
sp.Transform(
name='extract',
spec=sp.Transform.Spec(extract=sp.Transform.Extract(size=size)),
inversible=False,
schema_preserving=True,
)
)
def relationship_spec() -> st.Transform:
"""Transform that allows to redefine the primary and foreign keys
of a dataset."""
return Transform(
sp.Transform(
name='relationship_spec',
spec=sp.Transform.Spec(
relationship_spec=sp.Transform.RelationshipSpec()
),
)
)
def validated_user_type() -> st.Transform:
"""Transform that allows to set whether the user has validated
the schema or if some types have to be changed"""
return Transform(
sp.Transform(
name='validated_user_type',
spec=sp.Transform.Spec(
validated_user_type=sp.Transform.ValidatedUserType()
),
)
)
if t.TYPE_CHECKING:
test_transform: st.Transform = Transform(sp.Transform())
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/transform.py
| 0.849582 | 0.39826 |
transform.py
|
pypi
|
from datetime import date, datetime, time, timedelta, timezone
from typing import Any
import json
import sys
from dateutil.parser import parse
import numpy as np
import pandas as pd
class SarusJSONEncoder(json.JSONEncoder):
def default(self, obj: Any) -> Any:
if isinstance(obj, np.ndarray):
return {'_type': 'numpy.ndarray', 'data': obj.tolist()}
elif isinstance(obj, pd.DataFrame):
return {
'_type': 'pandas.DataFrame',
'data': obj.to_json(date_format='iso'),
}
elif isinstance(obj, pd.Series):
return {
'_type': 'pandas.Series',
'data': obj.to_json(date_format='iso'),
}
elif isinstance(obj, pd.Timestamp):
return {'_type': 'pandas.Timestamp', 'data': obj.isoformat()}
elif isinstance(obj, datetime):
return {'_type': 'datetime', 'data': obj.isoformat()}
elif isinstance(obj, timedelta):
return {'_type': 'timedelta', 'data': obj.total_seconds()}
elif isinstance(obj, timezone):
utcoffset_result = obj.utcoffset(None)
if utcoffset_result is not None:
return {
'_type': 'timezone',
'data': utcoffset_result.total_seconds(),
}
else:
raise ValueError("Invalid timezone object")
elif isinstance(obj, time):
return {'_type': 'time', 'data': obj.isoformat()}
elif isinstance(obj, date):
return {'_type': 'date', 'data': obj.isoformat()}
elif isinstance(obj, np.generic):
if np.issubdtype(obj, np.complexfloating):
complex_obj = obj.astype(np.complex128)
return {
'_type': 'numpy.complex',
'data': {
'"real"': complex_obj.real,
'"imag"': complex_obj.imag,
},
}
else:
return {'_type': 'numpy.generic', 'data': obj.item()}
elif isinstance(obj, pd.MultiIndex):
return {
'_type': 'pandas.MultiIndex',
'data': obj.tolist(),
'names': obj.names,
'levels': [level.tolist() for level in obj.levels],
'codes': list(obj.codes),
}
elif isinstance(obj, pd.Index):
return {
'_type': 'pandas.Index',
'class': type(obj).__name__,
'data': obj.tolist(),
'dtype': str(obj.dtype),
}
elif isinstance(obj, pd.Period):
return {
'_type': 'pandas.Period',
'data': str(obj),
'freq': obj.freqstr,
}
elif isinstance(obj, pd.Timedelta):
return {'_type': 'pandas.Timedelta', 'data': obj.value}
elif isinstance(obj, pd.Interval):
return {'_type': 'pandas.Interval', 'data': (obj.left, obj.right)}
elif isinstance(obj, pd.Categorical):
return {
'_type': 'pandas.Categorical',
'data': obj.tolist(),
'categories': obj.categories.tolist(),
'ordered': obj.ordered,
}
elif isinstance(obj, type):
return {
'_type': 'class',
'data': {'"name"': obj.__name__, '"module"': obj.__module__},
}
elif isinstance(obj, pd.api.extensions.ExtensionDtype):
return {'_type': 'dtype', 'data': str(obj)}
elif isinstance(obj, slice):
return {'_type': 'slice', 'data': (obj.start, obj.stop, obj.step)}
elif isinstance(obj, range):
return {
'_type': 'range',
'data': {
'"start"': obj.start,
'"stop"': obj.stop,
'"step"': obj.step,
},
}
return super().default(obj)
def encode_obj(self, obj: Any) -> Any:
if isinstance(obj, tuple):
return {
'_type': 'tuple',
'data': [self.encode_obj(v) for v in obj],
}
elif isinstance(obj, list):
return [self.encode_obj(v) for v in obj]
elif isinstance(obj, dict):
return {self.encode(k): self.encode_obj(v) for k, v in obj.items()}
return obj
def encode(self, obj: Any) -> str:
obj_transformed = self.encode_obj(obj)
return super().encode(obj_transformed)
@classmethod
def encode_bytes(cls, obj: Any) -> bytes:
encoder = cls()
return (encoder.encode(obj)).encode('utf-8')
class SarusJSONDecoder(json.JSONDecoder):
def decode(self, s: str, *args: Any, **kwargs: Any) -> Any:
obj = super().decode(s, *args, **kwargs)
return self.decode_obj(obj)
def decode_obj(self, obj: Any) -> Any:
if isinstance(obj, dict):
if '_type' in obj:
data = self.decode_obj(obj['data'])
if obj['_type'] == 'tuple':
return tuple(self.decode_obj(v) for v in data)
elif obj['_type'] == 'numpy.ndarray':
return np.array(data)
elif obj['_type'] == 'pandas.DataFrame':
return pd.read_json(data, convert_dates=True)
elif obj['_type'] == 'pandas.Series':
return pd.read_json(data, typ='series', convert_dates=True)
elif obj['_type'] == 'pandas.Timestamp':
return pd.Timestamp(data)
elif obj['_type'] == 'datetime':
return parse(data)
elif obj['_type'] == 'timedelta':
return timedelta(seconds=data)
elif obj['_type'] == 'timezone':
return timezone(timedelta(seconds=data))
elif obj['_type'] == 'time':
return parse(data).time()
elif obj['_type'] == 'date':
return parse(data).date()
elif obj['_type'] == 'numpy.generic':
return np.array(data).item()
elif obj['_type'] == 'numpy.complex':
return np.complex128(complex(data['real'], data['imag']))
elif obj['_type'] == 'pandas.Index':
cls = getattr(pd, obj['class'])
return cls(data, dtype=obj['dtype'])
elif obj['_type'] == 'pandas.MultiIndex':
return pd.MultiIndex(
levels=[pd.Index(level) for level in obj['levels']],
codes=self.decode_obj(obj['codes']),
names=obj['names'],
)
elif obj['_type'] == 'pandas.Period':
return pd.Period(data, freq=obj['freq'])
elif obj['_type'] == 'pandas.Timedelta':
return pd.to_timedelta(data)
elif obj['_type'] == 'pandas.Interval':
return pd.Interval(*data)
elif obj['_type'] == 'pandas.Categorical':
return pd.Categorical(
data,
categories=obj['categories'],
ordered=obj['ordered'],
)
elif obj['_type'] == 'class':
if data['module'] in ['builtins', 'numpy', 'pandas']:
cls = getattr(
sys.modules[data['module']], data['name']
)
if isinstance(cls, type):
return cls
else:
raise ValueError("Decoded object is not a type")
else:
raise ValueError("Invalid module name")
elif obj['_type'] == 'dtype':
return pd.api.types.pandas_dtype(data)
elif obj['_type'] == 'slice':
return slice(*data)
elif obj['_type'] == 'range':
return range(data['start'], data['stop'], data['step'])
return {self.decode(k): self.decode_obj(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [self.decode_obj(v) for v in obj]
return obj
@classmethod
def decode_bytes(cls, b: bytes, *args: Any, **kwargs: Any) -> Any:
decoder = cls()
return decoder.decode(b.decode('utf-8'), *args, **kwargs)
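# Hedged usage sketch (added for illustration): a round-trip through the
# encoder/decoder pair above for a payload mixing datetime and numpy values.
def _json_roundtrip_example() -> None:
    payload = {"when": datetime(2020, 1, 1), "values": np.array([1, 2, 3])}
    raw = SarusJSONEncoder.encode_bytes(payload)
    decoded = SarusJSONDecoder.decode_bytes(raw)
    assert decoded["when"] == datetime(2020, 1, 1)
    assert (decoded["values"] == np.array([1, 2, 3])).all()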
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/json_serialisation.py
| 0.605566 | 0.228425 |
json_serialisation.py
|
pypi
|
from __future__ import annotations
from collections import defaultdict
import typing as t
from sarus_data_spec.base import Base
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
class Path(Base[sp.Path]):
"""A python class to describe paths"""
def prototype(self) -> t.Type[sp.Path]:
"""Return the type of the underlying protobuf."""
return sp.Path
def label(self) -> str:
return self._protobuf.label
def sub_paths(self) -> t.List[st.Path]:
return [Path(path) for path in self._protobuf.paths]
def to_strings_list(self) -> t.List[t.List[str]]:
paths = []
proto = self._protobuf
if len(proto.paths) == 0:
return [[proto.label]]
for path in proto.paths:
out = Path(path).to_strings_list()
for el in out:
el.insert(
0,
proto.label,
)
paths.extend(out)
return paths
def to_dict(self) -> t.Dict[str, str]:
list_paths = self.to_strings_list()
return {
'.'.join(path[1:-1]): path[-1] for path in list_paths
} # always start with DATA
def select(self, select_path: st.Path) -> t.List[st.Path]:
"""select_path must be a sub-path of this path, starting
at the same root node. The method returns the list of
sub-paths of this path that continue beyond the leaves of
select_path."""
assert select_path.label() == self.label()
if len(select_path.sub_paths()) == 0:
return self.sub_paths()
final_sub_paths = []
for sub_path in select_path.sub_paths():
should_add = False
for available_sub_path in self.sub_paths():
if available_sub_path.label() == sub_path.label():
should_add = True
break
if should_add:
final_sub_paths.extend(available_sub_path.select(sub_path))
return final_sub_paths
def paths(path_list: t.List[t.List[str]]) -> t.List[Path]:
out = defaultdict(list)
for path in path_list:
try:
first_el = path.pop(0)
except IndexError:
return []
else:
out[first_el].append(path)
return [
Path(
sp.Path(
label=element,
paths=[path.protobuf() for path in paths(path_list)],
)
)
for element, path_list in dict(out).items()
]
def path(paths: t.Optional[t.List[st.Path]] = None, label: str = '') -> Path:
if paths is None:
paths = []
return Path(
sp.Path(label=label, paths=[element.protobuf() for element in paths])
)
def straight_path(nodes: t.List[str]) -> Path:
"""Returns linear path between elements in the list"""
if len(nodes) == 0:
raise ValueError('At least one node must be provided')
curr_sub_path: t.List[st.Path] = []
for el in reversed(nodes):
update = path(label=el, paths=curr_sub_path)
curr_sub_path = [update]
return update
def append_to_straight_path(
curr_path: t.Optional[st.Path], new_element: str
) -> st.Path:
if curr_path is None:
return straight_path([new_element])
else:
return straight_path(
[
*(element for element in curr_path.to_strings_list()[0]),
new_element,
]
)
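# Hedged usage sketch (added; the labels are illustrative): a straight path is
# a single chain of labels, and to_strings_list recovers that chain.
def _straight_path_example() -> st.Path:
    p = straight_path(['data', 'census', 'age'])
    assert p.to_strings_list() == [['data', 'census', 'age']]
    return p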
if t.TYPE_CHECKING:
test_path: st.Path = Path(sp.Path())
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/path.py
| 0.674372 | 0.323948 |
path.py
|
pypi
|
from time import time_ns
from typing import Collection, Dict, List, Optional, Tuple, Union, cast
import logging
from sarus_data_spec.attribute import attach_properties
from sarus_data_spec.constants import VARIANT_UUID
from sarus_data_spec.context import global_context
from sarus_data_spec.dataset import transformed
from sarus_data_spec.dataspec_validator.typing import DataspecValidator
from sarus_data_spec.manager.ops.processor import routing
from sarus_data_spec.scalar import privacy_budget
from sarus_data_spec.variant_constraint import (
dp_constraint,
mock_constraint,
syn_constraint,
)
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
logger = logging.getLogger(__name__)
def attach_variant(
original: st.DataSpec,
variant: st.DataSpec,
kind: st.ConstraintKind,
) -> None:
attach_properties(
original,
properties={
# TODO deprecated in SDS >= 2.0.0 -> use only VARIANT_UUID
kind.name: variant.uuid(),
VARIANT_UUID: variant.uuid(),
},
name=kind.name,
)
def compile(
dataspec_validator: DataspecValidator,
dataspec: st.DataSpec,
kind: st.ConstraintKind,
public_context: Collection[str],
privacy_limit: Optional[st.PrivacyLimit],
salt: Optional[int] = None,
) -> Optional[st.DataSpec]:
"""Returns a compliant Node or None."""
if kind == st.ConstraintKind.SYNTHETIC:
variant, _ = compile_synthetic(
dataspec_validator,
dataspec,
public_context,
)
return variant
elif kind == st.ConstraintKind.MOCK:
mock_variant, _ = compile_mock(
dataspec_validator,
dataspec,
public_context,
salt,
)
return mock_variant
if privacy_limit is None:
raise ValueError(
"Privacy limit must be defined for PEP or DP compilation"
)
if kind == st.ConstraintKind.DP:
variant, _ = compile_dp(
dataspec_validator,
dataspec,
public_context=public_context,
privacy_limit=privacy_limit,
salt=salt,
)
return variant
elif kind == st.ConstraintKind.PEP:
raise NotImplementedError("PEP compilation")
else:
raise ValueError(
f"Privacy policy {kind} compilation not implemented yet"
)
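# Hedged usage sketch (added; `validator` stands for any DataspecValidator
# implementation and the dataspec is assumed to be transformed):
#
#   synthetic_variant = compile(
#       validator, dataspec, st.ConstraintKind.SYNTHETIC,
#       public_context=[], privacy_limit=None,
#   )
#
# For the DP kind a privacy_limit is mandatory; passing None raises ValueError.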
def compile_synthetic(
dataspec_validator: DataspecValidator,
dataspec: st.DataSpec,
public_context: Collection[str],
) -> Tuple[st.DataSpec, Collection[str]]:
# Current dataspec verifies the constraint?
for constraint in dataspec_validator.verified_constraints(dataspec):
if dataspec_validator.verifies(
constraint,
st.ConstraintKind.SYNTHETIC,
public_context,
privacy_limit=None,
):
return dataspec, public_context
# Current dataspec has a variant that verifies the constraint?
for variant in dataspec.variants():
if variant is None:
logger.info(f"Found a None variant for dataspec {dataspec.uuid()}")
continue
for constraint in dataspec_validator.verified_constraints(variant):
if dataspec_validator.verifies(
constraint,
st.ConstraintKind.SYNTHETIC,
public_context,
privacy_limit=None,
):
return variant, public_context
# Derive the SD from the parents SD
if dataspec.is_transformed():
transform = dataspec.transform()
args, kwargs = dataspec.parents()
syn_args: List[Union[st.DataSpec, st.Transform]] = [
compile_synthetic(dataspec_validator, arg, public_context)[0]
if isinstance(arg, st.DataSpec)
else arg
for arg in args
]
syn_kwargs: Dict[str, Union[st.DataSpec, st.Transform]] = {
name: compile_synthetic(dataspec_validator, arg, public_context)[0]
if isinstance(arg, st.DataSpec)
else arg
for name, arg in kwargs.items()
}
syn_variant = cast(
st.DataSpec,
transformed(
transform,
*syn_args,
dataspec_type=sp.type_name(dataspec.prototype()),
dataspec_name=None,
**syn_kwargs,
),
)
syn_constraint(
dataspec=syn_variant, required_context=list(public_context)
)
attach_variant(dataspec, syn_variant, kind=st.ConstraintKind.SYNTHETIC)
return syn_variant, public_context
elif dataspec.is_public():
return dataspec, public_context
else:
raise TypeError(
'Non public source Datasets cannot '
'be compiled to Synthetic, a synthetic variant '
'should have been created downstream in the graph.'
)
def compile_mock(
dataspec_validator: DataspecValidator,
dataspec: st.DataSpec,
public_context: Collection[str],
salt: Optional[int] = None,
) -> Tuple[Optional[st.DataSpec], Collection[str]]:
"""Compile the MOCK variant of a DataSpec.
Note that the MOCK compilation only makes sense for internally transformed
dataspecs. For externally transformed dataspecs, the MOCK is computed
before the dataspec, so we can only fetch it.
"""
for constraint in dataspec_validator.verified_constraints(dataspec):
if dataspec_validator.verifies(
constraint,
st.ConstraintKind.MOCK,
public_context,
privacy_limit=None,
):
return dataspec, public_context
# Current dataspec has a variant that verifies the constraint?
for variant in dataspec.variants():
if variant is None:
logger.info(f"Found a None variant for dataspec {dataspec.uuid()}")
continue
for constraint in dataspec_validator.verified_constraints(variant):
if dataspec_validator.verifies(
constraint,
st.ConstraintKind.MOCK,
public_context,
privacy_limit=None,
):
return variant, public_context
if dataspec.is_public():
return dataspec, public_context
if not dataspec.is_transformed():
raise ValueError(
'Cannot compile the MOCK of a non public source DataSpec. '
'A MOCK should be set manually downstream in the '
'computation graph.'
)
# The DataSpec is the result of an internal transform
transform = dataspec.transform()
args, kwargs = dataspec.parents()
mock_args = [
arg.variant(st.ConstraintKind.MOCK)
if isinstance(arg, st.DataSpec)
else arg
for arg in args
]
named_mock_args = {
name: arg.variant(st.ConstraintKind.MOCK)
if isinstance(arg, st.DataSpec)
else arg
for name, arg in kwargs.items()
}
if any([m is None for m in mock_args]) or any(
[m is None for m in named_mock_args.values()]
):
raise ValueError(
f"Cannot derive a mock for {dataspec} "
"because of of the parent has a None MOCK."
)
typed_mock_args = [cast(st.DataSpec, ds) for ds in mock_args]
typed_named_mock_args = {
name: cast(st.DataSpec, ds) for name, ds in named_mock_args.items()
}
mock: st.DataSpec = transformed(
transform,
*typed_mock_args,
dataspec_type=sp.type_name(dataspec.prototype()),
dataspec_name=None,
**typed_named_mock_args,
)
mock_constraint(mock)
attach_variant(dataspec, mock, st.ConstraintKind.MOCK)
return mock, public_context
def compile_dp(
dataspec_validator: DataspecValidator,
dataspec: st.DataSpec,
public_context: Collection[str],
privacy_limit: st.PrivacyLimit,
salt: Optional[int] = None,
) -> Tuple[st.DataSpec, Collection[str]]:
"""Simple DP compilation.
Only check the dataspec's parents, do not go further up in the graph.
"""
# Current dataspec verifies the constraint?
for constraint in dataspec_validator.verified_constraints(dataspec):
if dataspec_validator.verifies(
variant_constraint=constraint,
kind=st.ConstraintKind.DP,
public_context=public_context,
privacy_limit=privacy_limit,
salt=salt,
):
return dataspec, public_context
# Current dataspec has a variant that verifies the constraint?
for variant in dataspec.variants():
for constraint in dataspec_validator.verified_constraints(variant):
if dataspec_validator.verifies(
variant_constraint=constraint,
kind=st.ConstraintKind.DP,
public_context=public_context,
privacy_limit=privacy_limit,
salt=salt,
):
return variant, public_context
if not dataspec.is_transformed():
return compile_synthetic(dataspec_validator, dataspec, public_context)
# Check that there is a positive epsilon
delta_epsilon_dict = privacy_limit.delta_epsilon_dict()
if len(delta_epsilon_dict) == 1:
epsilon = list(delta_epsilon_dict.values()).pop()
if epsilon == 0:
return compile_synthetic(
dataspec_validator, dataspec, public_context
)
transform = dataspec.transform()
if dataspec.prototype() == sp.Dataset:
dataset = cast(st.Dataset, dataspec)
_, DatasetStaticChecker = routing.get_dataset_op(transform)
is_dp_applicable = DatasetStaticChecker(dataset).is_dp_applicable(
public_context
)
dp_transform = DatasetStaticChecker(dataset).dp_transform()
else:
scalar = cast(st.Scalar, dataspec)
_, ScalarStaticChecker = routing.get_scalar_op(transform)
is_dp_applicable = ScalarStaticChecker(scalar).is_dp_applicable(
public_context
)
dp_transform = ScalarStaticChecker(scalar).dp_transform()
if not is_dp_applicable:
return compile_synthetic(dataspec_validator, dataspec, public_context)
# Create the DP variant
assert dp_transform is not None
budget = privacy_budget(privacy_limit)
if salt is None:
salt = time_ns()
seed = global_context().generate_seed(salt=salt)
args, kwargs = dataspec.parents()
dp_variant = cast(
st.DataSpec,
transformed(
dp_transform,
*args,
dataspec_type=sp.type_name(dataspec.prototype()),
dataspec_name=None,
budget=budget,
seed=seed,
**kwargs,
),
)
dp_constraint(
dataspec=dp_variant,
required_context=list(public_context),
privacy_limit=privacy_limit,
salt=salt,
)
attach_variant(
original=dataspec,
variant=dp_variant,
kind=st.ConstraintKind.DP,
)
# We also attach the dataspec's synthetic variant to be the DP dataspec's
# synthetic variant. This is to avoid to have DP computations in the MOCK.
syn_variant = dataspec.variant(st.ConstraintKind.SYNTHETIC)
if syn_variant is None:
raise ValueError("Could not find a synthetic variant.")
attach_variant(
original=dp_variant,
variant=syn_variant,
kind=st.ConstraintKind.SYNTHETIC,
)
return dp_variant, public_context
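# A minimal usage sketch (illustrative only; the arguments are assumed to be
# provided by the caller): compile_dp returns the DP variant when the
# transform supports DP, otherwise it falls back on the synthetic variant
# through compile_synthetic.
def _example_compile_dp(
    dataspec_validator: DataspecValidator,
    dataspec: st.DataSpec,
    privacy_limit: st.PrivacyLimit,
) -> st.DataSpec:
    variant, _context = compile_dp(
        dataspec_validator,
        dataspec,
        public_context=[],
        privacy_limit=privacy_limit,
    )
    return variant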
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/dataspec_rewriter/simple_rules.py
| 0.817902 | 0.233335 |
simple_rules.py
|
pypi
|
from typing import (
Callable,
Collection,
Dict,
Optional,
Protocol,
Set,
Tuple,
Union,
runtime_checkable,
)
from sarus_data_spec.protobuf.typing import (
ProtobufWithUUID,
ProtobufWithUUIDAndDatetime,
)
from sarus_data_spec.typing import DataSpec, Referrable, Referring
# We want to store objects, be able to filter on their types and keep the last
# added in some type and relating to some object
@runtime_checkable
class Storage(Protocol):
"""Storage protocol
A Storage can store Referrable and Referring values.
"""
def store(self, value: Referrable[ProtobufWithUUID]) -> None:
"""Write a value to store."""
...
def batch_store(
self, values: Collection[Referrable[ProtobufWithUUID]]
) -> None:
"""Store a collection of referrables in the storage.
This method does not require the objects to be provided in the graph
order.
"""
def referrable(self, uuid: str) -> Optional[Referrable[ProtobufWithUUID]]:
"""Read a stored value."""
...
def referring(
self,
referred: Union[
Referrable[ProtobufWithUUID],
Collection[Referrable[ProtobufWithUUID]],
],
type_name: Optional[str] = None,
) -> Collection[Referring[ProtobufWithUUID]]:
"""List all values referring to one referred."""
...
def batch_referring(
self,
collection_referred: Collection[
Union[
Referrable[ProtobufWithUUID],
Collection[Referrable[ProtobufWithUUID]],
]
],
type_names: Optional[Collection[str]] = None,
) -> Optional[Dict[str, Set[Referring[ProtobufWithUUID]]]]:
"""Returns the list of all the referring
(for multiples type_name) of several Referrables."""
...
def sources(
self,
referring: Referrable[ProtobufWithUUID],
type_name: Optional[str] = None,
) -> Set[DataSpec]:
"""List all sources."""
...
def last_referring(
self,
referred_uuid: Collection[str],
type_name: str,
) -> Optional[Referring[ProtobufWithUUIDAndDatetime]]:
"""Last value referring to one referred.
``last_referring`` returns the last
``Referring[ProtobufWithUUIDAndDatetime]``
object whose ``type_name`` corresponds
to the argument ``type_name``.
A typical use is to gather the last ``Status``
of a ``Dataset`` and a ``Manager``.
Note that only time-aware ``Referring``
objects can be accessed this way, as
*last* would not make sense otherwise in the context of Data Spec where
objects are immutable and eternal.
Keyword arguments:
referred_uuid:
The uuids of the ``Referrable`` objects
referred by the object we are trying to retrieve
type_name:
The ``type_name`` of the Data Spec object we are trying to retrieve
"""
...
def update_referring_with_datetime(
self,
referred_uuid: Collection[str],
type_name: str,
update: Callable[
[Referring[ProtobufWithUUIDAndDatetime]],
Tuple[Referring[ProtobufWithUUIDAndDatetime], bool],
],
) -> Tuple[Referring[ProtobufWithUUIDAndDatetime], bool]:
"""Update the last referring value of a type atomically
Update the object ``self.last_referring(referred_uuid, type_name)``
would be returning using the ``update`` function passed as argument.
Note that in Sarus Data Spec, *update* means: creating an object
with a more recent timestamp as objects are all immutable and eternal
to simplify sync, caching and parallelism.
Therefore everything happens as if::
update(self.last_referring(referred_uuid, type_name))
was inserted atomically
(no object can be inserted with a timestamp in-between).
Keyword arguments:
referred_uuid:
The uuids of the ``Referrable`` objects
referred by the object we are trying to update
type_name:
The ``type_name`` of the dataspec object we are trying to update
update:
A callable computing the new object to store
based on the last such object.
"""
...
def create_referring_with_datetime(
self,
value: Referring[ProtobufWithUUIDAndDatetime],
update: Callable[
[Referring[ProtobufWithUUIDAndDatetime]],
Tuple[Referring[ProtobufWithUUIDAndDatetime], bool],
],
) -> Tuple[Referring[ProtobufWithUUIDAndDatetime], bool]:
...
def type_name(
self, type_name: str
) -> Collection[Referrable[ProtobufWithUUID]]:
"""List all values from a given type_name."""
...
def delete(self, uuid: str) -> None:
"""Delete a stored value from the database."""
...
def delete_type(self, type_name: str) -> None:
"""Delete all elements of a given type_name
from the database and all the referrings"""
...
@runtime_checkable
class HasStorage(Protocol):
"""Has a storage for persistent objects."""
def storage(self) -> Storage:
"""Return a storage (usually a singleton)."""
...
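# Because these protocols are runtime_checkable, conformance can be checked
# structurally. A minimal sketch, assuming `obj` is any object (e.g. a
# manager implementation):
def _example_get_storage(obj: object) -> Optional[Storage]:
    if isinstance(obj, HasStorage):
        return obj.storage()
    return None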
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/storage/typing.py
| 0.89951 | 0.553023 |
typing.py
|
pypi
|
from collections import defaultdict
from itertools import chain, combinations
from typing import (
Callable,
Collection,
DefaultDict,
Dict,
Final,
FrozenSet,
List,
MutableMapping,
Optional,
Set,
Tuple,
Union,
cast,
)
from sarus_data_spec.protobuf.typing import (
ProtobufWithUUID,
ProtobufWithUUIDAndDatetime,
)
from sarus_data_spec.storage.utils import sort_dataspecs
from sarus_data_spec.typing import DataSpec, Referrable, Referring
SEP: Final[str] = ','
def referrable_collection_string(
values: Collection[Referrable[ProtobufWithUUID]],
) -> str:
return SEP.join(sorted(value.uuid() for value in values))
def uuid_collection_string(uuids: Collection[str]) -> str:
return SEP.join(sorted(uuid for uuid in uuids))
def referrable_collection_set(
values: Collection[Referrable[ProtobufWithUUID]],
) -> FrozenSet[str]:
return frozenset(value.uuid() for value in values)
class Storage:
"""Simple local Storage."""
def __init__(self) -> None:
# A Store to save (timestamp, type_name, data, relating data)
self._referrables: MutableMapping[
str, Referrable[ProtobufWithUUID]
] = dict()
self._referring: DefaultDict[str, Set[str]] = defaultdict(set)
self._sources: DefaultDict[str, Set[str]] = defaultdict(set)
def store(self, value: Referrable[ProtobufWithUUID]) -> None:
# Checks the value for consistency
assert value._frozen()
self._referrables[value.uuid()] = value
if isinstance(value, Referring):
value = cast(Referring[ProtobufWithUUID], value)
referred_values = value.referred()
# add referring
referred_combinations = chain.from_iterable(
combinations(referred_values, r) for r in range(1, 3)
)
for combination in referred_combinations:
self._referring[referrable_collection_string(combination)].add(
value.uuid()
)
# add sources
self._store_sources(value)
def _store_sources(self, value: Referring[ProtobufWithUUID]) -> None:
"""A helper function to only store the sources of a referrable"""
referred_values = value.referred()
referred_dataspecs = [
referred_value
for referred_value in referred_values
if isinstance(referred_value, DataSpec)
]
if not referred_dataspecs:
self._sources[value.uuid()].add(value.uuid())
return None
# update _sources if missing sources
for referred_dataspec in referred_dataspecs:
if isinstance(referred_dataspec, DataSpec):
if len(self.sources(referred_dataspec)) == 0:
self._store_sources(referred_dataspec)
sources = set()
for referred_dataspec in referred_dataspecs:
sources.update(self._sources[referred_dataspec.uuid()])
if len(sources) == 0:
raise ValueError(
"""Unable to retrieve sources. The computation graph
in storage is incomplete."""
)
self._sources[value.uuid()].update(sources)
def batch_store(
self, values: Collection[Referrable[ProtobufWithUUID]]
) -> None:
"""Store a collection of referrables in the storage.
This method does not require the objects to be provided in the graph
order.
"""
for value in values:
# Add all objects to the referrables first
assert value._frozen()
self._referrables[value.uuid()] = value
for value in values:
# Add referring link in a second time
if isinstance(value, Referring):
value = cast(Referring[ProtobufWithUUID], value)
referred_combinations = chain.from_iterable(
combinations(value.referred(), r) for r in range(1, 3)
)
for combination in referred_combinations:
self._referring[
referrable_collection_string(combination)
].add(value.uuid())
# add sources
sorted_values = self.sort_dataspecs(values)
for value in sorted_values:
self._store_sources(value)
def referrable(self, uuid: str) -> Optional[Referrable[ProtobufWithUUID]]:
"""Read a stored value."""
return self._referrables.get(uuid, None)
def referring_uuid(
self,
referred_uuid: Collection[str],
type_name: Optional[str] = None,
) -> Collection[Referring[ProtobufWithUUID]]:
"""List all values referring to one referred referrable."""
referring_uuids = self._referring[
uuid_collection_string(referred_uuid)
]
referrings = [self.referrable(uuid) for uuid in referring_uuids]
if not all(referring is not None for referring in referrings):
raise ValueError("A referring is not stored.")
if type_name is not None:
referrings = [
referring
for referring in referrings
if (
referring is not None
and referring.type_name() == type_name
)
]
return referrings # type:ignore
def referring(
self,
referred: Union[
Referrable[ProtobufWithUUID],
Collection[Referrable[ProtobufWithUUID]],
],
type_name: Optional[str] = None,
) -> Collection[Referring[ProtobufWithUUID]]:
"""List all values referring to one referred referrable."""
if isinstance(referred, Referrable):
referred_uuid = {referred.uuid()}
else:
referred_uuid = {value.uuid() for value in referred}
return self.referring_uuid(
referred_uuid=referred_uuid, type_name=type_name
)
def batch_referring(
self,
collection_referred: Collection[
Union[
Referrable[ProtobufWithUUID],
Collection[Referrable[ProtobufWithUUID]],
]
],
type_names: Optional[Collection[str]] = None,
) -> Optional[Dict[str, Set[Referring[ProtobufWithUUID]]]]:
referred_strings = []
for referred in collection_referred:
if isinstance(referred, Referrable):
referred_strings.append(
referrable_collection_string([referred])
)
else:
referred_strings.append(referrable_collection_string(referred))
referring_uuids: List[str] = []
for referred_string in referred_strings:
referring_uuids.extend(self._referring[referred_string])
referrings: List[Referring[ProtobufWithUUID]] = []
for uuid in referring_uuids:
ref = self.referrable(uuid)
assert ref is not None
referring = cast(Referring[ProtobufWithUUID], ref)
referrings.append(referring)
# init result dict with types
result_dict: Dict[str, Set[Referring[ProtobufWithUUID]]] = {}
if type_names is not None:
for type_name in type_names:
result_dict[type_name] = set()
for referring in referrings:
typename = referring.type_name()
if typename in result_dict:
result_dict[typename].add(referring)
else:
result_dict[typename] = {referring}
return result_dict
def sources(
self,
value: Referrable[ProtobufWithUUID],
type_name: Optional[str] = None,
) -> Set[DataSpec]:
"""Returns a set of all sources of a referrable."""
if not isinstance(value, Referring):
return set()
value = cast(Referring[ProtobufWithUUID], value)
source_uuids = self._sources[value.uuid()]
sources = {self.referrable(uuid) for uuid in source_uuids}
if not all(source is not None for source in sources):
raise ValueError("A source is not stored.")
if len(sources) > 0:
if type_name is not None:
sources = {
source
for source in sources
if (source is not None and source.type_name() == type_name)
}
return {
cast(
DataSpec,
source,
)
for source in sources
}
else:
self._store_sources(value)
return self.sources(value=value, type_name=type_name)
def last_referring(
self,
referred_uuid: Collection[str],
type_name: str,
) -> Optional[Referring[ProtobufWithUUIDAndDatetime]]:
"""Last value referring to one referred.
This implementation is not very efficient."""
referrings = cast(
Collection[Referring[ProtobufWithUUIDAndDatetime]],
self.referring_uuid(referred_uuid, type_name),
)
if len(referrings) > 0:
return max(referrings, key=lambda r: r.protobuf().datetime)
else:
return None
def update_referring_with_datetime(
self,
referred_uuid: Collection[str],
type_name: str,
update: Callable[
[Referring[ProtobufWithUUIDAndDatetime]],
Tuple[Referring[ProtobufWithUUIDAndDatetime], bool],
],
) -> Tuple[Referring[ProtobufWithUUIDAndDatetime], bool]:
"""
The local storage has no concurrency problem,
simply call last referring and store
"""
value = self.last_referring(referred_uuid, type_name)
assert value is not None
updated, should_update = update(value)
if should_update:
self.store(updated)
return value, True
def create_referring_with_datetime(
self,
value: Referring[ProtobufWithUUIDAndDatetime],
update: Callable[
[Referring[ProtobufWithUUIDAndDatetime]],
Tuple[Referring[ProtobufWithUUIDAndDatetime], bool],
],
) -> Tuple[Referring[ProtobufWithUUIDAndDatetime], bool]:
"""Local storage is process dependent, no concurrency"""
self.store(value)
return value, True
def type_name(
self, type_name: str
) -> Collection[Referrable[ProtobufWithUUID]]:
"""List all values from a given type_name."""
return {
ref
for ref in self._referrables.values()
if ref.type_name() == type_name
}
def all_referrings(self, uuid: str) -> List[str]:
"""Returns a list all items referring to a Referrable recursively."""
target = self.referrable(uuid)
if target is None:
raise ValueError("The referrable object is not stored.")
to_delete, to_check = set(), {target}
while len(to_check) > 0:
node = to_check.pop()
if not node:
continue
to_delete.add(node)
deps = node.referring()
if not deps:
continue
for dep in deps:
if dep not in to_delete:
to_check.add(dep)
return [msg.uuid() for msg in to_delete]
def delete(self, uuid: str) -> None:
"""Delete a Referrable and all elements referring to it to let the
storage in a consistent state."""
uuids_to_delete = set(self.all_referrings(uuid))
self._referrables = {
uuid: referring
for uuid, referring in self._referrables.items()
if uuid not in uuids_to_delete
}
self._referring = defaultdict(
set,
{
uuid: referring_uuids - uuids_to_delete
for uuid, referring_uuids in self._referring.items()
if uuid not in uuids_to_delete
},
)
self._sources = defaultdict(
set,
{
uuid: sources_uuids - uuids_to_delete
for uuid, sources_uuids in self._sources.items()
if uuid not in uuids_to_delete
},
)
def delete_type(self, type_name: str) -> None:
"""Deletes all referrable corresponding to a given type_name and all
the referrings corresponfing to it"""
uuids = [obj.uuid() for obj in self.type_name(type_name)]
uuids_to_delete = set(
chain(*(self.all_referrings(uuid) for uuid in uuids))
)
self._referrables = {
uuid: referring
for uuid, referring in self._referrables.items()
if uuid not in uuids_to_delete
}
self._referring = defaultdict(
set,
{
uuid: referring_uuids - uuids_to_delete
for uuid, referring_uuids in self._referring.items()
if uuid not in uuids_to_delete
},
)
self._sources = defaultdict(
set,
{
uuid: sources_uuids - uuids_to_delete
for uuid, sources_uuids in self._sources.items()
if uuid not in uuids_to_delete
},
)
def sort_dataspecs(
self, values: Collection[Referrable[ProtobufWithUUID]]
) -> Collection[Referring[ProtobufWithUUID]]:
"""Return a sorted list of dataspecs, in the order of the DAG, from the
root to the nodes (the elements of the input list)."""
return sort_dataspecs(self, values)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/storage/local.py
| 0.876383 | 0.460046 |
local.py
|
pypi
|
from typing import Collection, cast
from sarus_data_spec.protobuf.typing import ProtobufWithUUID
from sarus_data_spec.storage.typing import Storage
from sarus_data_spec.typing import Referrable, Referring
def sort_dataspecs(
storage: Storage, values: Collection[Referrable[ProtobufWithUUID]]
) -> Collection[Referring[ProtobufWithUUID]]:
"""Sort a list of dataspecs, in the order of the DAG, from the root to the
nodes (the elements of the input list).
This algorithm is a variation of the depth first search.
It uses a queue, called values_queue, to store all data elements.
A separate set, called visited_values, is used to keep track
of elements that have been processed. The algorithm adds
elements to the values_queue in such a way that when an element
is encountered again (we know it from visited_values),
all of its parent elements have already been added to the
sorted list. This ensures that the final list is sorted
in a way that preserves the hierarchical relationships
between elements.
The worst case time complexity of this algorithm is O(n^2)
where n is the number of elements in the input list.
Args:
values (Collection[Referrable[ProtobufWithUUID]]):
A list of dataspecs that need to be sorted.
Raises:
ValueError: In case the user attempts to add a dataspec
with a reference to another dataspec that is not already stored.
Returns:
Collection[DataSpec]: The sorted list
of dataspecs, in the order of the DAG, from the root to the nodes.
"""
values_queue = []
for value in values:
if isinstance(value, Referring):
value = cast(Referring[ProtobufWithUUID], value)
values_queue.append(value)
sorted_values = []
visited_values = set()
while values_queue:
value = values_queue.pop()
if value not in visited_values:
referred_uuids = value.referred_uuid()
referred_values_to_add = []
for referred_uuid in referred_uuids:
if (referred_uuid not in [v.uuid() for v in values]) and (
not storage.referrable(referred_uuid)
):
raise ValueError(
"""Referenced object not found in
storage or in dataspecs to be stored."""
)
for queued_value in values_queue:
if queued_value.uuid() == referred_uuid:
referred_values_to_add.append(queued_value)
if not referred_values_to_add:
sorted_values.append(value)
visited_values.add(value)
else:
for referred_value in referred_values_to_add:
values_queue.remove(referred_value)
values_queue.extend([value, *referred_values_to_add])
visited_values.add(value)
else:
sorted_values.append(value)
return sorted_values
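# A minimal sketch of the invariant documented above, assuming `storage`
# already holds the sources and `values` holds derived dataspecs in any order:
def _example_sorted_invariant(
    storage: Storage, values: Collection[Referrable[ProtobufWithUUID]]
) -> None:
    seen = set()
    for value in sort_dataspecs(storage, values):
        # each referred value is already stored or appears earlier in the list
        assert all(
            storage.referrable(uuid) is not None or uuid in seen
            for uuid in value.referred_uuid()
        )
        seen.add(value.uuid())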
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/storage/utils.py
| 0.74008 | 0.593963 |
utils.py
|
pypi
|
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type, TypeVar, Union, cast
import base64
from google.protobuf.any_pb2 import Any as AnyProto
from google.protobuf.descriptor_pool import Default
from google.protobuf.json_format import (
MessageToDict,
MessageToJson,
Parse,
ParseDict,
)
from google.protobuf.message_factory import MessageFactory
from sarus_data_spec.protobuf.proto_container_pb2 import ProtoContainer
from sarus_data_spec.protobuf.typing import Protobuf
import sarus_data_spec as s
message_factory = MessageFactory()
def message(type_name: str) -> Protobuf:
"""Return a message instance from a type_name."""
return message_factory.GetPrototype(
Default().FindMessageTypeByName(type_name) # type: ignore
)()
def message_type(type_name: str) -> Type[Protobuf]:
"""Return a message type from a type_name."""
return message_factory.GetPrototype(
Default().FindMessageTypeByName(type_name) # type: ignore
)
def type_name(message: Union[Type[Protobuf], Protobuf]) -> str:
"""Return a type_name from a message."""
return cast(str, message.DESCRIPTOR.full_name)
def wrap(message: Protobuf) -> AnyProto:
"""Wrap a Message into an Any"""
wrapped_message: AnyProto = AnyProto()
wrapped_message.Pack(
message,
type_url_prefix=cast(bytes, s.PACKAGE_NAME),
deterministic=True,
)
return wrapped_message
def unwrap(wrapped_message: AnyProto) -> Protobuf:
"""Unwrap an Any to a Message"""
result = message(wrapped_message.TypeName()) # type: ignore
wrapped_message.Unpack(result)
return result
M = TypeVar('M', bound=Protobuf)
def copy(value: M) -> M:
result = message(value.DESCRIPTOR.full_name)
result.CopyFrom(value)
return cast(M, result)
def serialize(message: M) -> bytes:
return wrap(message).SerializeToString(deterministic=True)
def deserialize(bytes: bytes) -> Protobuf:
wrapped_message: AnyProto = AnyProto()
wrapped_message.MergeFromString(bytes)
return unwrap(wrapped_message)
def json_serialize(message: Protobuf) -> bytes:
return MessageToJson(
wrap(message),
including_default_value_fields=True,
preserving_proto_field_name=True,
sort_keys=True,
).encode('utf8')
def json_deserialize(bytes: bytes) -> Protobuf:
wrapped_message: AnyProto = AnyProto()
Parse(bytes.decode('utf8'), wrapped_message)
return unwrap(wrapped_message)
def dict_serialize(message: Protobuf) -> Dict[str, Any]:
return MessageToDict(
wrap(message),
including_default_value_fields=True,
preserving_proto_field_name=True,
)
def dict_deserialize(dct: Dict[str, Any]) -> Protobuf:
wrapped_message: AnyProto = AnyProto()
ParseDict(dct, wrapped_message)
return unwrap(wrapped_message)
def json(message: Protobuf) -> str:
return MessageToJson(
wrap(message),
including_default_value_fields=True,
preserving_proto_field_name=True,
sort_keys=True,
)
def dejson(string: str) -> Protobuf:
wrapped_message: AnyProto = AnyProto()
Parse(string, wrapped_message)
return unwrap(wrapped_message)
def to_base64(message: Protobuf) -> str:
return base64.b64encode(serialize(message)).decode('ASCII')
def from_base64(string: str, message: Optional[M] = None) -> M:
return cast(M, unwrap(AnyProto().FromString(base64.b64decode(string))))
def serialize_protos_list(protos: List[M]) -> str:
"""stores protos in a container and serialize it to a string"""
return to_base64(
ProtoContainer(protos=[wrap(element) for element in protos])
)
def deserialize_proto_list(string: str) -> List[Protobuf]:
"""deserialize ad hoc proto containers to a list of protobufs"""
proto_cont = from_base64(string, ProtoContainer())
return [unwrap(element) for element in proto_cont.protos]
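# A minimal round-trip sketch, assuming `msg` is any sarus_data_spec protobuf
# message instance:
def _example_roundtrip(msg: Protobuf) -> None:
    data = serialize(msg)                 # wrapped in Any, deterministic bytes
    restored = deserialize(data)          # unwrapped back to its concrete type
    assert type_name(restored) == type_name(msg)
    encoded = to_base64(msg)              # ASCII-safe text form
    assert to_base64(from_base64(encoded)) == encoded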
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/protobuf/utilities.py
| 0.855972 | 0.183009 |
utilities.py
|
pypi
|
import typing as t
import pandas as pd
import pyarrow as pa
from sarus_data_spec.arrow.pandas_utils import (
convert_pandas_metadata_to_admin_columns,
pandas_index_columns,
remove_pandas_index_columns,
)
from sarus_data_spec.constants import DATA, PUBLIC, USER_COLUMN, WEIGHTS
import sarus_data_spec.typing as st
async def async_to_arrow_extract_admin(
dataset: st.Dataset, batch_size: int = 10000
) -> t.Optional[t.AsyncIterator[pa.RecordBatch]]:
"""This function return an async iterator record batches of the
admin data if there is administrative columns.
"""
schema = await dataset.async_schema()
if not schema.has_admin_columns():
return None
# Extract admin data from DATA
batches_async_it = await dataset.async_to_arrow(batch_size)
pe_field_names = [PUBLIC, USER_COLUMN, WEIGHTS]
async def extract_admin_columns(
batches_async_it: t.AsyncIterator[pa.RecordBatch],
) -> t.AsyncIterator[t.Tuple[pa.RecordBatch, t.Optional[pa.RecordBatch]]]:
async for batch in batches_async_it:
field_names = list(batch.schema.names)
index_cols = pandas_index_columns(batch.schema)
pe_batch = pa.RecordBatch.from_arrays(
[
batch.columns[field_names.index(field_name)]
for field_name in pe_field_names + index_cols
],
names=pe_field_names + index_cols,
)
pe_batch = pe_batch.replace_schema_metadata(batch.schema.metadata)
yield pe_batch
return extract_admin_columns(batches_async_it)
async def async_to_arrow_extract_data_only(
dataset: st.Dataset, batch_size: int = 10000
) -> t.AsyncIterator[pa.RecordBatch]:
"""This function return an async iterator of record batches.
The RecordBatches contain the data only, without the admin columns.
"""
batches_async_it = await dataset.async_to_arrow(batch_size)
schema = await dataset.async_schema()
if not schema.has_admin_columns():
return batches_async_it
# Extract PE from DATA
data_cols = list(schema.type().data_type().children().keys())
async def extract_data(
batches_async_it: t.AsyncIterator[pa.RecordBatch],
) -> t.AsyncIterator[pa.RecordBatch]:
async for batch in batches_async_it:
# We add the index columns to the data
field_names = list(batch.schema.names)
index_cols = pandas_index_columns(batch.schema)
index_arrays = [
batch.columns[field_names.index(col)]
for col in pandas_index_columns(batch.schema)
]
data_arrays = batch.columns[field_names.index(DATA)].flatten()
arrays = index_arrays + data_arrays
names = index_cols + data_cols
if len(arrays) != len(names):
raise ValueError(
f"Incompatible number of arrays {len(arrays)} and"
f" names {len(names)}.\n"
f"Names are index cols {index_cols} and data "
f"cols {data_cols}.\n"
f"There are {len(index_arrays)} index arrays "
f"and {len(data_arrays)} data arrays.\n"
f"Arrow batch schema is {batch.schema}."
)
new_struct_array = pa.StructArray.from_arrays(arrays, names)
data_batch = pa.RecordBatch.from_struct_array(new_struct_array)
data_batch = data_batch.replace_schema_metadata(
batch.schema.metadata
)
yield data_batch
return extract_data(batches_async_it)
async def async_admin_data(dataset: st.Dataset) -> t.Optional[pa.Table]:
"""Return the protected entity as a pa.Table if it exists."""
pe_batches_async_it = await async_to_arrow_extract_admin(dataset)
if pe_batches_async_it is None:
return None
pe_batches = [batch async for batch in pe_batches_async_it]
return pa.Table.from_batches(pe_batches)
def merge_schemas_metadata(schema1: pa.Schema, schema2: pa.Schema) -> dict:
"""Merge metadata from two PyArrow schemas."""
metadata1 = schema1.metadata or {}
metadata2 = schema2.metadata or {}
# Combine metadata from both schemas
merged_metadata = {**metadata1, **metadata2}
return merged_metadata
def merge_data_and_admin(
data: pa.Table, admin_data: t.Optional[pa.Table]
) -> pa.Table:
"""Merge a protection and the data.
If the data Table has some pandas metadata attached, we remove them before
merging with the protected entity.
"""
if admin_data is None:
# TODO also wrap the data in an empty protection
return data
data_index_columns = pandas_index_columns(data.schema)
if len(data_index_columns) > 0:
# There are some pandas metadata
assert data_index_columns == pandas_index_columns(admin_data.schema)
data = remove_pandas_index_columns(data)
merged_metadata = merge_schemas_metadata(data.schema, admin_data.schema)
# We merge the protected entity and data in Pyarrow
data_arrays = [
chunked_array.combine_chunks() for chunked_array in data.columns
]
data_array = pa.StructArray.from_arrays(
data_arrays, names=data.column_names
)
merged_table = admin_data.append_column(DATA, data_array)
merged_table = merged_table.replace_schema_metadata(merged_metadata)
return merged_table
def compute_admin_data(
input_admin_data: pa.Table, result: st.DatasetCastable
) -> pa.Table:
"""Compute the output protected entity of an external transform."""
# We guarantee that the data.index is a reliable way to trace how
# the rows were rearranged
if type(result) == pd.DataFrame:
df = t.cast(pd.DataFrame, result)
input_pe_df = input_admin_data.to_pandas()
return pa.Table.from_pandas(input_pe_df.loc[df.index])
elif type(result) == pd.Series:
sr = t.cast(pd.Series, result)
input_pe_df = input_admin_data.to_pandas()
return pa.Table.from_pandas(input_pe_df.loc[sr.index])
elif type(result) == pd.core.groupby.DataFrameGroupBy:
df_grouped_by = t.cast(pd.core.groupby.DataFrameGroupBy, result)
combined_df = df_grouped_by.obj
input_pe_df = input_admin_data.to_pandas()
return pa.Table.from_pandas(input_pe_df.loc[combined_df.index])
elif type(result) == pd.core.groupby.SeriesGroupBy:
series_grouped_by = t.cast(pd.core.groupby.SeriesGroupBy, result)
combined_series = series_grouped_by.obj
input_pe_df = input_admin_data.to_pandas()
return pa.Table.from_pandas(input_pe_df.loc[combined_series.index])
else:
raise TypeError(
f"Cannot compute the admin data for type {type(result)}"
)
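# A minimal sketch (hypothetical column names): after an external transform
# filters rows, the admin data follows the surviving pandas index.
def _example_compute_admin_data() -> pa.Table:
    data = pd.DataFrame({"age": [20, 35, 50]})
    admin = pa.Table.from_pandas(pd.DataFrame({"user": ["u0", "u1", "u2"]}))
    filtered = data[data["age"] > 30]            # keeps rows 1 and 2
    return compute_admin_data(admin, filtered)   # admin rows for "u1" and "u2"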
def validate_admin_data(
admin_data: t.List[pa.Table],
) -> pa.Table:
"""Check that all admin data are equal."""
# If we reach this part then there should be only one input admin data
if len(admin_data) == 0:
raise ValueError("The list of input admin data is empty.")
pe = next(iter(admin_data), None)
if pe is None:
raise ValueError(
"The dataset was infered PEP but has no input admin data"
)
if not all([candidate.equals(pe) for candidate in admin_data]):
raise ValueError(
"The dataset is PEP but has several differing input admin "
"data values"
)
return pe
def create_admin_columns(table: pa.Table) -> pa.Table:
"""Isolate special columns to admin columns."""
return convert_pandas_metadata_to_admin_columns(table)
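# A minimal sketch (hypothetical values) of recombining data and admin
# columns: the data columns end up nested under DATA while the admin columns
# stay at the top level.
def _example_merge_data_and_admin() -> pa.Table:
    data = pa.table({"age": [32, 57], "city": ["Paris", "Lyon"]})
    admin = pa.table(
        {PUBLIC: [False, False], USER_COLUMN: ["u1", "u2"], WEIGHTS: [1.0, 1.0]}
    )
    return merge_data_and_admin(data, admin)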
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/arrow/admin_utils.py
| 0.589953 | 0.466542 |
admin_utils.py
|
pypi
|
import typing as t
import pyarrow as pa
import sarus_data_spec.type as sdt
import sarus_data_spec.typing as st
INTBASE_TO_ARROW = {
st.IntegerBase.INT64: pa.int64(),
st.IntegerBase.INT32: pa.int32(),
st.IntegerBase.INT16: pa.int16(),
st.IntegerBase.INT8: pa.int8(),
st.IntegerBase.UINT64: pa.uint64(),
st.IntegerBase.UINT32: pa.uint32(),
st.IntegerBase.UINT16: pa.uint16(),
st.IntegerBase.UINT8: pa.uint8(),
}
IDBASE_TO_ARROW = {
st.IdBase.INT64: pa.int64(),
st.IdBase.INT32: pa.int32(),
st.IdBase.INT16: pa.int16(),
st.IdBase.INT8: pa.int8(),
st.IdBase.STRING: pa.string(),
st.IdBase.BYTES: pa.binary(),
}
FLOATBASE_TO_ARROW = {
st.FloatBase.FLOAT64: pa.float64(),
st.FloatBase.FLOAT32: pa.float32(),
st.FloatBase.FLOAT16: pa.float16(),
}
DATETIMEBASE_TO_ARROW = {
st.DatetimeBase.INT64_NS: pa.timestamp('ns'),
st.DatetimeBase.INT64_MS: pa.timestamp('ms'),
st.DatetimeBase.STRING: pa.string(),
}
DATEBASE_TO_ARROW = {
st.DateBase.INT32: pa.date32(),
st.DateBase.STRING: pa.string(),
}
TIMEBASE_TO_ARROW = {
st.TimeBase.INT64_US: pa.time64('us'),
st.TimeBase.INT32_MS: pa.time32('ms'),
st.TimeBase.STRING: pa.string(),
}
def to_arrow(
_type: st.Type,
nullable: bool = True,
) -> pa.DataType:
"""Visitor that maps sarus types to pa types
See https://arrow.apache.org/docs/python/api/datatypes.html
"""
class ToArrow(st.TypeVisitor):
pa_type: pa.DataType
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.pa_type = pa.null()
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.pa_type = pa.null()
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.pa_type = pa.bool_()
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if base is not None:
self.pa_type = IDBASE_TO_ARROW[base]
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = INTBASE_TO_ARROW[base]
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = pa.string()
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = FLOATBASE_TO_ARROW[base]
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = pa.string()
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
self.pa_type = pa.binary()
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = pa.struct(
[
pa.field(
name=name,
type=to_arrow(field),
nullable=field.protobuf().HasField('optional')
or field.protobuf().HasField('unit'),
)
for name, field in fields.items()
]
)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = pa.struct(
[
pa.field(
name=name,
type=to_arrow(field),
nullable=True,
)
for name, field in fields.items()
]
+ [
pa.field(
name='field_selected', type=pa.string(), nullable=False
)
]
)
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = to_arrow(type, nullable=True)
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = pa.list_(to_arrow(type), max_size)
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = DATETIMEBASE_TO_ARROW[base]
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = TIMEBASE_TO_ARROW[base]
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = DATEBASE_TO_ARROW[base]
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.pa_type = pa.duration(unit)
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = ToArrow()
_type.accept(visitor)
return visitor.pa_type
def from_arrow(type: pa.DataType) -> sdt.Type:
# Integers
if pa.types.is_int8(type):
return sdt.Integer(base=st.IntegerBase.INT8)
if pa.types.is_int16(type):
return sdt.Integer(base=st.IntegerBase.INT16)
if pa.types.is_int32(type):
return sdt.Integer(base=st.IntegerBase.INT32)
if pa.types.is_int64(type):
return sdt.Integer(base=st.IntegerBase.INT64)
if pa.types.is_uint8(type):
return sdt.Integer(base=st.IntegerBase.UINT8)
if pa.types.is_uint16(type):
return sdt.Integer(base=st.IntegerBase.UINT16)
if pa.types.is_uint32(type):
return sdt.Integer(base=st.IntegerBase.UINT32)
if pa.types.is_uint64(type):
return sdt.Integer(base=st.IntegerBase.UINT64)
# Floats
if pa.types.is_float16(type):
return sdt.Float(base=st.FloatBase.FLOAT16)
if pa.types.is_float32(type):
return sdt.Float(base=st.FloatBase.FLOAT32)
if pa.types.is_float64(type):
return sdt.Float(base=st.FloatBase.FLOAT64)
if pa.types.is_string(type):
return sdt.Text()
if pa.types.is_boolean(type):
return sdt.Boolean()
# Temporal
if pa.types.is_temporal(type):
# Return True if value is an instance of date, time,
# timestamp or duration.
if pa.types.is_timestamp(type):
return sdt.Datetime(base=st.DatetimeBase.INT64_NS)
# TODO: when we support different bases for datetime
# we need to remove the asserts in the Datetime builder and
# the error raised in the check_visitor in user_settings
if pa.types.is_time(type):
if type.unit in ['ns', 'us']:
return sdt.Time(base=st.TimeBase.INT64_US)
else:
return sdt.Time(base=st.TimeBase.INT32_MS)
if pa.types.is_date(type):
return sdt.Date()
else:
# It is a duration
if type.unit in ['s', 'ms', 'us']:
return sdt.Duration(unit=type.unit)
else:
raise ValueError(
'Duration type with nanosecond resolution not supported'
)
if pa.types.is_null(type):
return sdt.Unit()
if pa.types.is_struct(type):
struct_type = t.cast(pa.StructType, type)
return sdt.Struct(
{
struct_type.field(i).name: type_from_arrow(
struct_type.field(i).type, nullable=False
)
for i in range(struct_type.num_fields)
}
)
if pa.types.is_list(type):
list_type = t.cast(pa.ListType, type)
return sdt.List(
type=type_from_arrow(list_type.value_type, nullable=False)
)
raise NotImplementedError(f'Type {type} not implemented')
def type_from_arrow(
arrow_type: pa.DataType,
nullable: bool,
) -> st.Type:
if nullable and not (pa.types.is_null(arrow_type)):
return sdt.Optional(type=from_arrow(arrow_type))
return from_arrow(arrow_type)
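# A minimal round-trip sketch: an arrow int64 maps to a Sarus Integer and
# back, and a nullable arrow type is wrapped in an Optional.
def _example_type_mapping() -> None:
    sarus_integer = from_arrow(pa.int64())            # sdt.Integer(base=INT64)
    assert to_arrow(sarus_integer) == pa.int64()
    optional_text = type_from_arrow(pa.string(), nullable=True)  # Optional(Text)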
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/arrow/type.py
| 0.581897 | 0.281286 |
type.py
|
pypi
|
import typing as t
import pyarrow as pa
from sarus_data_spec.type import Struct, Type
import sarus_data_spec.arrow.type as arrow_type
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
def to_arrow(schema: sp.Schema) -> pa.schema:
"""Convert Sarus schema to pyarrow schema."""
return arrow_schema(Type(schema.type))
def arrow_schema(_type: st.Type) -> pa.Schema:
"""Visitor that returns the schema Arrow given the Sarus Type
#TODO: Currently only Struct and Unions are supported
"""
class SchemaVisitor(st.TypeVisitor):
schema: pa.Schema = pa.schema(fields=[])
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
self.schema = pa.schema(
fields=[
pa.field(
name=field_name,
type=arrow_type.to_arrow(fields[field_name]),
)
for field_name in fields.keys()
]
)
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# TODO
pass
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# TODO
pass
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
arrow_fields = [
pa.field(
name=field_name,
type=arrow_type.to_arrow(field_type),
)
for field_name, field_type in fields.items()
]
arrow_fields.append(
pa.field(name='field_selected', type=pa.string())
)
self.schema = pa.schema(fields=arrow_fields)
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
visitor = SchemaVisitor()
_type.accept(visitor)
return visitor.schema
def type_from_arrow_schema(schema: pa.Schema) -> st.Type:
"""Convert a Pyarrow schema to a Sarus Type.
NB: This does not handle the multitable case.
"""
fields = {
name: arrow_type.type_from_arrow(data_type, nullable=False)
for name, data_type in zip(schema.names, schema.types)
}
return Struct(fields=fields)
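# A minimal sketch (hypothetical field names): building a Sarus Struct type
# from a flat pyarrow schema.
def _example_schema_conversion() -> st.Type:
    pa_schema = pa.schema(
        [pa.field("id", pa.int64()), pa.field("name", pa.string())]
    )
    return type_from_arrow_schema(pa_schema)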
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/arrow/schema.py
| 0.41324 | 0.402099 |
schema.py
|
pypi
|
import typing as t
import pyarrow as pa
from sarus_data_spec.constants import DATA
def pandas_index_columns(schema: pa.Schema) -> t.List[str]:
"""Return the list of columns that have to be considered as Pandas index
columns and ignored by the Sarus type.
"""
pandas_metadata = schema.pandas_metadata
if pandas_metadata is None:
return []
def column_name(index: t.Any) -> t.Optional[str]:
if isinstance(index, str):
return index
elif isinstance(index, dict):
return t.cast(t.Optional[str], index["name"])
else:
raise ValueError("Unrecognized Arrow `index_column` format")
columns = [
column_name(index) for index in pandas_metadata["index_columns"]
]
return [col for col in columns if col is not None]
def remove_pandas_index_columns(table: pa.Table) -> pa.Table:
"""Remove pandas metadata and drop additional
index columns used for Pandas indexing.
"""
index_columns_names = pandas_index_columns(table.schema)
return table.drop(index_columns_names).replace_schema_metadata(None)
def convert_pandas_metadata_to_admin_columns(table: pa.Table) -> pa.Table:
"""Isolate the pandas index from the data."""
index_columns = pandas_index_columns(table.schema)
if len(index_columns) == 0:
return table
# Create admin columns
data_columns = [
col for col in table.column_names if col not in index_columns
]
data_arrays = [
chunked_array.combine_chunks()
for name, chunked_array in zip(table.column_names, table.columns)
if name in data_columns
]
index_arrays = [
chunked_array.combine_chunks()
for name, chunked_array in zip(table.column_names, table.columns)
if name in index_columns
]
data_array = pa.StructArray.from_arrays(data_arrays, names=data_columns)
new_table = pa.Table.from_arrays(index_arrays, names=index_columns)
new_table = new_table.append_column(DATA, data_array)
return new_table.replace_schema_metadata(table.schema.metadata)
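# A minimal sketch: a table converted from pandas keeps its index as extra
# columns plus schema metadata; the helpers above detect, isolate or drop it.
def _example_pandas_index() -> None:
    import pandas as pd  # assumed available alongside pyarrow

    df = pd.DataFrame({"a": [1, 2]}, index=pd.Index([10, 11], name="idx"))
    table = pa.Table.from_pandas(df, preserve_index=True)
    assert pandas_index_columns(table.schema) == ["idx"]
    wrapped = convert_pandas_metadata_to_admin_columns(table)  # "a" under DATA
    plain = remove_pandas_index_columns(table)  # drops "idx" and the metadata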
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/arrow/pandas_utils.py
| 0.617051 | 0.477432 |
pandas_utils.py
|
pypi
|
from __future__ import annotations
from typing import (
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Protocol,
Tuple,
runtime_checkable,
)
import typing as t
import pandas as pd
import pyarrow as pa
try:
import tensorflow as tf
except ModuleNotFoundError:
pass # Warning is displayed by typing.py
import warnings
from sarus_data_spec.storage.typing import HasStorage
import sarus_data_spec.dataspec_rewriter.typing as sdrt
import sarus_data_spec.dataspec_validator.typing as sdvt
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
try:
import sqlalchemy as sa
sa_engine = sa.engine.Engine
except ModuleNotFoundError:
warnings.warn("Sqlalchemy not installed, cannot send sql queries")
sa_engine = t.Any # type: ignore
@runtime_checkable
class Manager(st.Referrable[sp.Manager], HasStorage, Protocol):
"""Provide the dataset functionalities"""
def to_arrow(
self, dataset: st.Dataset, batch_size: int
) -> t.Iterator[pa.RecordBatch]:
"""Synchronous method based on async_to_arrow
that returns an iterator of arrow batches
for the input dataset"""
...
async def async_to_arrow(
self, dataset: st.Dataset, batch_size: int
) -> AsyncIterator[pa.RecordBatch]:
"""Asynchronous method. It orchestrates how
the iterator is obtained: it can either be delegated
via arrow_task and the result polled, or computed directly
via the op"""
...
def schema(self, dataset: st.Dataset) -> st.Schema:
"""Synchronous method that returns the schema of a
dataspec. Based on the asynchronous version"""
...
async def async_schema(self, dataset: st.Dataset) -> st.Schema:
"""Asynchronous method that returns the schema of a
dataspec. The computation can be either delegated to
another manager via schema_task and the result polled
or executed directly via async_schema_ops"""
...
def value(self, scalar: st.Scalar) -> st.DataSpecValue:
"""Synchronous method that returns the value of a
scalar. Based on the asynchronous version"""
...
async def async_value(self, scalar: st.Scalar) -> st.DataSpecValue:
"""Asynchronous method that returns the value of a
scalar. The computation can be either delegated to
another manager via value_task and the result polled
or executed directly via async_value_ops"""
...
def prepare(self, dataspec: st.DataSpec) -> None:
"""Make sure a Dataspec is ready."""
...
async def async_prepare(self, dataspec: st.DataSpec) -> None:
"""Make sure a Dataspec is ready asynchronously."""
...
async def async_prepare_parents(self, dataspec: st.DataSpec) -> None:
"""Prepare all the parents of a Dataspec."""
...
def sql_prepare(self, dataset: st.Dataset) -> None:
"""Make sure a dataset is sql ready"""
...
async def async_sql_prepare(self, dataset: st.Dataset) -> None:
"""Make sure a dataset is sql ready asynchronously."""
...
async def async_sql_prepare_parents(self, dataset: st.Dataset) -> None:
"""SQL prepare all the parents of a dataset. It should sql_prepare
dataset parents and prepare Scalars parents.
"""
...
def cache_scalar(self, scalar: st.Scalar) -> None:
"""Synchronous scalar caching"""
...
async def async_cache_scalar(self, scalar: st.Scalar) -> None:
"""Asynchronous scalar caching"""
...
def to_parquet(self, dataset: st.Dataset) -> None:
"""Synchronous parquet caching"""
...
async def async_to_parquet(self, dataset: st.Dataset) -> None:
"""Asynchronous parquet caching"""
...
def parquet_dir(self) -> str:
...
def marginals(self, dataset: st.Dataset) -> st.Marginals:
...
async def async_marginals(self, dataset: st.Dataset) -> st.Marginals:
...
def bounds(self, dataset: st.Dataset) -> st.Bounds:
...
async def async_bounds(self, dataset: st.Dataset) -> st.Bounds:
...
def size(self, dataset: st.Dataset) -> st.Size:
...
async def async_size(self, dataset: st.Dataset) -> st.Size:
...
def multiplicity(self, dataset: st.Dataset) -> st.Multiplicity:
...
async def async_multiplicity(self, dataset: st.Dataset) -> st.Multiplicity:
...
def to_pandas(self, dataset: st.Dataset) -> pd.DataFrame:
...
async def async_to_pandas(self, dataset: st.Dataset) -> pd.DataFrame:
...
async def async_to(
self, dataset: st.Dataset, kind: t.Type, drop_admin: bool = True
) -> st.DatasetCastable:
"""Casts a Dataset to a Python type passed as argument."""
...
def to(
self, dataset: st.Dataset, kind: t.Type, drop_admin: bool = True
) -> st.DatasetCastable:
...
def to_tensorflow(self, dataset: st.Dataset) -> tf.data.Dataset:
...
async def async_to_tensorflow(
self, dataset: st.Dataset
) -> tf.data.Dataset:
...
def to_sql(self, dataset: st.Dataset) -> None:
...
async def async_to_sql(self, dataset: st.Dataset) -> None:
...
def status(
self, dataspec: st.DataSpec, task_name: t.Optional[str] = None
) -> t.Optional[st.Status]:
...
def dataspec_rewriter(self) -> sdrt.DataspecRewriter:
...
def dataspec_validator(self) -> sdvt.DataspecValidator:
...
def is_remote(self, dataspec: st.DataSpec) -> bool:
"""Is the dataspec a remotely defined dataset."""
...
def infer_output_type(
self,
transform: st.Transform,
*arguments: t.Union[st.DataSpec, st.Transform],
**named_arguments: t.Union[st.DataSpec, st.Transform],
) -> Tuple[str, Callable[[st.DataSpec], None]]:
...
def foreign_keys(self, dataset: st.Dataset) -> Dict[st.Path, st.Path]:
...
async def async_foreign_keys(
self, dataset: st.Dataset
) -> Dict[st.Path, st.Path]:
...
async def async_primary_keys(self, dataset: st.Dataset) -> List[st.Path]:
...
def primary_keys(self, dataset: st.Dataset) -> List[st.Path]:
...
def sql(
self,
dataset: st.Dataset,
query: t.Union[str, t.Dict[str, t.Any]],
dialect: Optional[st.SQLDialect] = None,
batch_size: int = 10000,
) -> Iterator[pa.RecordBatch]:
...
async def async_sql(
self,
dataset: st.Dataset,
query: t.Union[str, t.Dict[str, t.Any]],
dialect: Optional[st.SQLDialect] = None,
batch_size: int = 10000,
result_type: t.Optional[st.Type] = None,
) -> AsyncIterator[pa.RecordBatch]:
...
async def execute_sql_query(
self,
dataset: st.Dataset,
caching_properties: t.Mapping[str, str],
query: t.Union[str, t.Dict[str, t.Any]],
dialect: t.Optional[st.SQLDialect] = None,
batch_size: int = 10000,
result_type: t.Optional[st.Type] = None,
) -> t.AsyncIterator[pa.RecordBatch]:
...
async def async_sql_op(
self,
dataset: st.Dataset,
query: t.Union[str, t.Dict[str, t.Any]],
dialect: t.Optional[st.SQLDialect] = None,
batch_size: int = 10000,
result_type: t.Optional[st.Type] = None,
) -> t.AsyncIterator[pa.RecordBatch]:
...
def is_big_data(self, dataset: st.Dataset) -> bool:
...
def is_cached(self, dataspec: st.DataSpec) -> bool:
"""Returns whether a dataspec should be cached
or not"""
...
def is_pushed_to_sql(self, dataspec: st.DataSpec) -> bool:
"""Returns whether a dataspec should be pushed to sql
or not"""
...
def attribute(
self, name: str, dataspec: st.DataSpec
) -> t.Optional[st.Attribute]:
...
def attributes(
self, name: str, dataspec: st.DataSpec
) -> t.List[st.Attribute]:
...
def links(self, dataset: st.Dataset) -> st.Links:
...
async def async_links(self, dataset: st.Dataset) -> st.Links:
...
def sql_pushing_schema_prefix(self, dataset: st.Dataset) -> str:
...
def engine(self, uri: str) -> sa_engine:
...
def mock_value(
self,
transform: st.Transform,
*arguments: st.DataSpec,
**named_arguments: st.DataSpec,
) -> t.Any:
"""Compute the mock value of an external transform applied on
Dataspecs.
"""
def composed_callable(
self, transform: st.Transform
) -> t.Callable[..., t.Any]:
"""Return a Python callable of a composed transform."""
@runtime_checkable
class HasManager(Protocol):
"""Has a manager."""
def manager(self) -> Manager:
"""Return a manager (usually a singleton)."""
...
T = t.TypeVar("T", covariant=True)
class Computation(t.Protocol[T]):
"""Protocol for classes that perform tasks computations.
It sets how computations are scheduled and launched
depending on statuses. A computation is mainly defined by two methods:
- launch : a method that does not return a value but
that only has side effects, changing either the storage or the cache
and that updates statuses during the process
- result: a method that returns the value of the computation
either by reading the cache/storage or via the ops.
Furthermore, a computation has a method to monitor task completion.
"""
task_name: str = ''
def launch_task(self, dataspec: st.DataSpec) -> t.Optional[t.Awaitable]:
"""This methods launches a task in the background
but returns immediately without waiting for the
result. It updates the statuses during its process."""
...
async def task_result(self, dataspec: st.DataSpec, **kwargs: t.Any) -> T:
"""Returns the value for the given computed task. It either
retrieves it from the cache or computes it via the ops."""
...
async def complete_task(self, dataspec: st.DataSpec) -> st.Status:
"""Monitors a task: it launches it if there is no status
and then polls until it is ready/error"""
...
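# A minimal sketch (illustrative only, not part of the library) of the
# contract described above, for a computation whose value is already cached:
class _ExampleComputation:
    task_name = "example_task"

    def launch_task(self, dataspec: st.DataSpec) -> t.Optional[t.Awaitable]:
        return None  # assumed: nothing to schedule, the value is cached

    async def task_result(self, dataspec: st.DataSpec, **kwargs: t.Any) -> int:
        return 0  # assumed: read back from the cache

    async def complete_task(self, dataspec: st.DataSpec) -> st.Status:
        raise NotImplementedError  # assumed: poll statuses until ready/error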
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/typing.py
| 0.854642 | 0.476032 |
typing.py
|
pypi
|
from collections import defaultdict
import typing as t
from sarus_sql import ast_utils, rename_tables, translator
from sarus_data_spec.path import Path
import sarus_data_spec.type as sdt
import sarus_data_spec.typing as st
def rename_and_compose_queries(
query_or_dict: t.Union[str, t.Dict[str, t.Any]],
curr_path: t.List[str],
queries_transform: t.Optional[t.Dict[str, str]],
table_map: t.Dict[Path, t.Tuple[str, ...]],
) -> t.Union[str, t.Dict[str, t.Any]]:
"""Composition is done by first updating the table names
in the leaves of query_or_dict and then composing with
all the queries in queries_transform
"""
if isinstance(query_or_dict, str):
# type ignore because issue of typing in sarus_sql
updated_query = rename_tables.rename_tables(query_or_dict, table_map) # type: ignore # noqa : E501
if queries_transform is not None:
updated_query = ast_utils.compose_query(
queries_transform, updated_query
)
return updated_query
else:
new_queries = {}
for name, sub_queries_or_dict in query_or_dict.items():
new_queries[name] = rename_and_compose_queries(
query_or_dict=sub_queries_or_dict,
curr_path=[*curr_path, name],
queries_transform=queries_transform,
table_map=table_map,
)
return new_queries
def flatten_queries_dict(
queries: t.Dict[str, t.Any]
) -> t.Dict[t.Tuple[str, ...], str]:
"""Transform nested dict in linear dict where each
key is the tuple of the nesting path"""
final_dict: t.Dict[t.Tuple[str, ...], str] = {}
def update_dict(
curr_path: t.List[str],
dict_to_update: t.Dict[t.Tuple[str, ...], t.Any],
query_or_dict: t.Union[t.Dict[str, t.Any], t.Any],
) -> None:
if isinstance(query_or_dict, dict):
for name, sub_query in query_or_dict.items():
update_dict(
curr_path=[*curr_path, name],
dict_to_update=dict_to_update,
query_or_dict=sub_query,
)
else:
dict_to_update[tuple(curr_path)] = t.cast(str, query_or_dict)
return
for name, query_or_dict in queries.items():
update_dict(
query_or_dict=query_or_dict,
curr_path=[name],
dict_to_update=final_dict,
)
return final_dict
def rename_and_translate_query(
old_query: str,
dialect: st.SQLDialect,
destination_dialect: st.SQLDialect,
table_mapping: t.Dict[st.Path, t.Tuple[str]],
extended_table_mapping: t.Optional[t.List[str]],
) -> str:
"""Converts to postgres, parses query and then
reconverts if needed"""
# Translate to postgres
new_query = str(
translator.translate_to_postgres(
ast_utils.parse_query(old_query),
dialect,
)
)
# Rename tables
new_query = rename_tables.rename_tables(
query_str=new_query,
table_mapping=t.cast(
t.Dict[st.Path, t.Tuple[str, ...]], table_mapping
),
extended_table_mapping=extended_table_mapping,
)
if destination_dialect != st.SQLDialect.POSTGRES:
new_query = str(
translator.translate_to_dialect(
ast_utils.parse_query(new_query),
destination_dialect,
)
)
return new_query
def nest_queries(
queries: t.Dict[t.Tuple[str, ...], str]
) -> t.Dict[str, t.Any]:
"""It transform the dict of queries according to the tuple keys:
if queries = {
('a','b'):'q',
('a','c'):'q'
}
the result would be: {a: {b: 'q', c: 'q'}}
if queries = {
('a','b'):'q',
('e','c'):'q'
}
the result would be: {a: {b: 'q'}, e: {c: 'q'}}
"""
intermediate: t.Dict[str, t.Dict[t.Tuple[str, ...], t.Any]] = defaultdict(
dict
)
final: t.Dict[str, t.Any] = {}
for query_path, query in queries.items():
if len(query_path) == 1:
final[query_path[0]] = query
else:
intermediate[query_path[0]][query_path[1:]] = query
for name, subdict in intermediate.items():
final[name] = nest_queries(subdict)
return final
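# A minimal round-trip sketch between the flat and the nested representations:
def _example_query_nesting() -> None:
    nested = {"a": {"b": "SELECT 1", "c": "SELECT 2"}}
    flat = flatten_queries_dict(nested)
    assert flat == {("a", "b"): "SELECT 1", ("a", "c"): "SELECT 2"}
    assert nest_queries(flat) == nested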
def nested_dict_of_types(
types: t.Dict[t.Tuple[str, ...], st.Type]
) -> t.Dict[str, t.Any]:
"""Similar to nest_queries but values are sarus types instead of strings"""
intermediate: t.Dict[str, t.Dict[t.Tuple[str, ...], t.Any]] = defaultdict(
dict
)
final: t.Dict[str, t.Any] = {}
for type_path, type in types.items():
if len(type_path) == 1:
final[type_path[0]] = type
else:
intermediate[type_path[0]][type_path[1:]] = type
for name, subdict in intermediate.items():
final[name] = nested_dict_of_types(subdict)
return final
def nested_unions_from_nested_dict_of_types(
nested_types: t.Dict[str, t.Any]
) -> t.Dict[str, st.Type]:
"""create unions out of nested_types"""
fields: t.Dict[str, st.Type] = {}
for path_string, type_or_dict in nested_types.items():
if isinstance(type_or_dict, dict):
fields[path_string] = sdt.Union(
nested_unions_from_nested_dict_of_types(type_or_dict)
)
else:
fields[path_string] = type_or_dict
return fields
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/sql_utils/queries.py
| 0.730097 | 0.35095 |
queries.py
|
pypi
|
import typing as t
import warnings
import pyarrow as pa
from sarus_data_spec.attribute import attach_properties
from sarus_data_spec.bounds import bounds as bounds_builder
from sarus_data_spec.marginals import marginals as marginals_builder
from sarus_data_spec.multiplicity import multiplicity as multiplicity_builder
from sarus_data_spec.size import size as size_builder
import sarus_data_spec.typing as st
try:
from sarus_synthetic_data.synthetic_generator.generator import (
SyntheticGenerator,
)
except ModuleNotFoundError:
warnings.warn(
'sarus-synthetic-data module not found, synthetic data operations not '
'available'
)
from sarus_data_spec.constants import (
DATASET_SLUGNAME,
SYNTHETIC_MODEL,
SYNTHETIC_TASK,
TRAIN_CORRELATIONS,
)
from sarus_data_spec.dataset import Dataset
try:
from sarus_data_spec.manager.ops.source.query_builder import (
synthetic_parameters,
)
except ModuleNotFoundError:
warnings.warn(
"synthetic_parameters not found, "
"synthetic data operations not available "
)
from sarus_data_spec.scalar import Scalar
from sarus_data_spec.schema import schema
from .standard_op import (
StandardDatasetImplementation,
StandardDatasetStaticChecker,
StandardScalarImplementation,
StandardScalarStaticChecker,
)
MAX_SIZE = 1e6 # TODO: in sarus_data_spec.constants ?
def convert_array_to_table(
    schema_type: st.Type, arrow_data: pa.Array
) -> pa.Table:
"""Given a PyArrow array, returns a correctly-defined Table."""
class ArrayToTable(st.TypeVisitor):
"""Handles both configuration: a dataset as a Struct or as an Union."""
result = None
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
names = list(fields.keys())
self.result = pa.Table.from_arrays(
arrays=arrow_data.flatten(), names=names
)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
names = list(fields.keys())
arrs = arrow_data.flatten()
schema = pa.schema(
[
pa.field(name, type=arr.type)
for arr, name in zip(arrs[:-1], names)
]
)
schema = schema.append(
pa.field('field_selected', type=pa.string(), nullable=False)
)
self.result = pa.Table.from_arrays(
arrays=arrow_data.flatten(), schema=schema
)
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
raise NotImplementedError
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
visitor = ArrayToTable()
schema_type.accept(visitor)
return visitor.result
async def async_iter_arrow(
iterator: t.Iterator[pa.RecordBatch],
) -> t.AsyncIterator[pa.RecordBatch]:
"""Async generator from the synthetic data iterator."""
for batch in iterator:
yield batch
return
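# A hedged consumption sketch (assumes a running event loop and an existing
# pyarrow Table named `table`, both placeholders for this illustration):
#
#     async for batch in async_iter_arrow(table.to_batches(max_chunksize=1000)):
#         ...  # each item is a pa.RecordBatch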
class SyntheticStaticChecker(StandardDatasetStaticChecker):
def pep_token(self, public_context: t.Collection[str]) -> t.Optional[str]:
# TODO add pep token when the synthetic data is actually protected
return None
async def schema(self) -> st.Schema:
parent_schema = await self.parent_schema()
return schema(
self.dataset,
schema_type=parent_schema.data_type(),
properties=parent_schema.properties(),
name=self.dataset.properties().get(DATASET_SLUGNAME, None),
)
class Synthetic(StandardDatasetImplementation):
"""Create a Synthetic op class for is_pep."""
async def to_arrow(
self, batch_size: int
) -> t.AsyncIterator[pa.RecordBatch]:
dataset = self.dataset
parents, parents_dict = dataset.parents()
# Forcing the marginals to be computed first
parent = t.cast(Dataset, parents[0])
_ = await parent.manager().async_marginals(parent)
# Budget
budget_param = parents_dict['sd_budget']
budget = t.cast(
t.Tuple[float, float],
await dataset.manager().async_value(t.cast(Scalar, budget_param)),
)
# Model
correlations_scalar = t.cast(Scalar, parents_dict['synthetic_model'])
train_correlations = t.cast(
bool, await dataset.manager().async_value(correlations_scalar)
)
# Generator params
generator_params = await synthetic_parameters(
dataset,
sd_budget=budget,
task=SYNTHETIC_TASK,
train_correlations=train_correlations,
)
# Links computation
_ = await self.dataset.manager().async_links(self.dataset)
# compute
generator = SyntheticGenerator(dataset, generator_params.generator)
dataset_schema = await dataset.manager().async_schema(dataset)
datatype = dataset_schema.type()
generator.train()
sample = generator.sample()
table = convert_array_to_table(datatype, sample)
return async_iter_arrow(table.to_batches(max_chunksize=batch_size))
async def size(self) -> st.Size:
parent_size = await self.parent_size()
return size_builder(self.dataset, parent_size.statistics())
async def multiplicity(self) -> st.Multiplicity:
parent_multiplicity = await self.parent_multiplicity()
return multiplicity_builder(
self.dataset, parent_multiplicity.statistics()
)
async def bounds(self) -> st.Bounds:
parent_bounds = await self.parent_bounds()
return bounds_builder(self.dataset, parent_bounds.statistics())
async def marginals(self) -> st.Marginals:
parent_marginals = await self.parent_marginals()
return marginals_builder(self.dataset, parent_marginals.statistics())
class SamplingRatiosStaticChecker(StandardScalarStaticChecker):
...
class SamplingRatios(StandardScalarImplementation):
"""Computes the sampling ratios for the SD
of the dataspec given the total budget"""
async def value(self) -> t.Any:
dataset = t.cast(st.Dataset, self.parent())
out = {}
for table_path in (await self.parent_schema()).tables():
sizes = await dataset.manager().async_size(dataset)
assert sizes
stat = sizes.statistics().nodes_statistics(table_path)[0]
out[table_path] = min(1, MAX_SIZE / stat.size())
return out
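# Worked example of the ratio computed above (placeholder numbers, for
# illustration only): with MAX_SIZE = 1e6, a table of 4,000,000 rows gets a
# sampling ratio of min(1, 1e6 / 4e6) = 0.25, while a 200,000-row table keeps
# a ratio of 1.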
class SyntheticModelStaticChecker(StandardScalarStaticChecker):
...
class SyntheticModel(StandardScalarImplementation):
"""Computes the synthetic model to use"""
async def value(self) -> t.Any:
attribute = self.scalar.attribute(name=SYNTHETIC_MODEL)
if attribute is None:
attach_properties(
self.scalar,
name=SYNTHETIC_MODEL,
properties={TRAIN_CORRELATIONS: str(True)},
)
return True
return attribute.properties()[TRAIN_CORRELATIONS] == str(True)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/standard/synthetic.py
| 0.442155 | 0.245367 |
synthetic.py
|
pypi
|
import typing as t
import pyarrow as pa
from sarus_data_spec.arrow.array import convert_record_batch
from sarus_data_spec.bounds import bounds as bounds_builder
from sarus_data_spec.constants import (
DATASET_SLUGNAME,
OPTIONAL_VALUE,
PRIMARY_KEYS,
)
from sarus_data_spec.manager.ops.processor.standard.standard_op import ( # noqa: E501
StandardDatasetImplementation,
StandardDatasetStaticChecker,
)
from sarus_data_spec.manager.ops.processor.standard.visitor_selector import ( # noqa: E501
filter_primary_keys,
select_columns,
update_fks,
)
from sarus_data_spec.marginals import marginals as marg_builder
from sarus_data_spec.multiplicity import multiplicity as multiplicity_builder
from sarus_data_spec.schema import schema
from sarus_data_spec.size import size as size_builder
import sarus_data_spec.statistics as sds
import sarus_data_spec.type as sdt
import sarus_data_spec.typing as st
class ProjectStaticChecker(StandardDatasetStaticChecker):
def pep_token(self, public_context: t.Collection[str]) -> t.Optional[str]:
"""This transform doesn't change the PEP alignment."""
return self.parent().pep_token()
async def schema(self) -> st.Schema:
new_type = sdt.Type(
self.dataset.transform().protobuf().spec.project.projection
)
parent_schema = await self.parent_schema()
new_type = update_fks(
curr_type=new_type, original_type=new_type # type:ignore
)
old_properties = parent_schema.properties()
if PRIMARY_KEYS in old_properties.keys():
new_pks = filter_primary_keys(
old_properties[PRIMARY_KEYS],
new_type,
)
old_properties[PRIMARY_KEYS] = new_pks # type:ignore
return schema(
self.dataset,
schema_type=new_type,
protected_paths=parent_schema.protected_path().protobuf(),
properties=old_properties,
name=self.dataset.properties().get(DATASET_SLUGNAME, None),
)
class Project(StandardDatasetImplementation):
"""Computes schema and arrow
batches for a dataspec transformed by
a user_settings transform
"""
async def to_arrow(
self, batch_size: int
) -> t.AsyncIterator[pa.RecordBatch]:
schema = await self.dataset.manager().async_schema(
dataset=self.dataset
)
parent_schema = await self.parent_schema()
async def async_generator(
parent_iter: t.AsyncIterator[pa.RecordBatch],
) -> t.AsyncIterator[pa.RecordBatch]:
async for batch in parent_iter:
updated_array = select_columns(
schema.type(),
convert_record_batch(
record_batch=batch, _type=parent_schema.type()
),
)
yield pa.RecordBatch.from_struct_array(updated_array)
return async_generator(
parent_iter=await self.parent_to_arrow(batch_size=batch_size)
)
async def size(self) -> st.Size:
schema = await self.dataset.manager().async_schema(self.dataset)
sizes = await self.parent_size()
new_stats = update_statistics(
stats=sizes.statistics(), new_type=schema.data_type()
)
return size_builder(dataset=self.dataset, statistics=new_stats)
async def multiplicity(self) -> st.Multiplicity:
schema = await self.dataset.manager().async_schema(self.dataset)
multiplicities = await self.parent_multiplicity()
new_stats = update_statistics(
stats=multiplicities.statistics(), new_type=schema.data_type()
)
return multiplicity_builder(dataset=self.dataset, statistics=new_stats)
async def bounds(self) -> st.Bounds:
schema = await self.dataset.manager().async_schema(self.dataset)
bounds = await self.parent_bounds()
new_stats = update_statistics(
stats=bounds.statistics(), new_type=schema.data_type()
)
return bounds_builder(dataset=self.dataset, statistics=new_stats)
async def marginals(self) -> st.Marginals:
schema = await self.dataset.manager().async_schema(self.dataset)
marginals = await self.parent_marginals()
new_stats = update_statistics(
stats=marginals.statistics(), new_type=schema.data_type()
)
return marg_builder(dataset=self.dataset, statistics=new_stats)
def update_statistics(
stats: st.Statistics, new_type: st.Type
) -> st.Statistics:
"""Visitor to update recursively the stats object via the new_type.
Sub_statistics whose corresponding type is absent in new_type are removed.
"""
class Updater(st.StatisticsVisitor):
result = stats
def Struct(
self,
fields: t.Mapping[str, st.Statistics],
size: int,
multiplicity: float,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
# project does affect structs
children_type = new_type.children()
children_stats = self.result.children()
new_struct = sds.Struct(
fields={
fieldname: update_statistics(
children_stats[fieldname], fieldtype
)
for fieldname, fieldtype in children_type.items()
},
size=size,
multiplicity=multiplicity,
name=name,
properties=self.result.properties(),
)
self.result = new_struct
def Union(
self,
fields: t.Mapping[str, st.Statistics],
size: int,
multiplicity: float,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
children_type = new_type.children()
new_fields = {
fieldname: update_statistics(
fieldstats, children_type[fieldname]
)
for fieldname, fieldstats in self.result.children().items()
}
self.result = sds.Union(
fields=new_fields,
size=size,
multiplicity=multiplicity,
name=name if name is not None else 'Union',
properties=self.result.properties(),
)
def Optional(
self, statistics: st.Statistics, size: int, multiplicity: float
) -> None:
self.result = sds.Optional(
statistics=update_statistics(
self.result.children()[OPTIONAL_VALUE],
new_type.children()[OPTIONAL_VALUE],
),
size=size,
multiplicity=multiplicity,
properties=self.result.properties(),
)
def Null(self, size: int, multiplicity: float) -> None:
pass
def Unit(self, size: int, multiplicity: float) -> None:
pass
def Boolean(
self,
size: int,
multiplicity: float,
probabilities: t.Optional[t.List[float]] = None,
names: t.Optional[t.List[bool]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
pass
def Id(self, size: int, multiplicity: float) -> None:
pass
def Integer(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
pass
def Enum(
self,
size: int,
multiplicity: float,
probabilities: t.Optional[t.List[float]] = None,
names: t.Optional[t.List[str]] = None,
values: t.Optional[t.List[float]] = None,
name: str = 'Enum',
) -> None:
pass
def Float(
self,
size: int,
multiplicity: float,
min_value: float,
max_value: float,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[float]] = None,
) -> None:
pass
def Text(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
example: str = '',
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
pass
def Bytes(self, size: int, multiplicity: float) -> None:
pass
def List(
self,
statistics: st.Statistics,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
name: str = 'List',
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
raise NotImplementedError
def Array(
self,
statistics: st.Statistics,
size: int,
multiplicity: float,
min_values: t.Optional[t.List[float]] = None,
max_values: t.Optional[t.List[float]] = None,
name: str = 'Array',
probabilities: t.Optional[t.List[t.List[float]]] = None,
values: t.Optional[t.List[t.List[float]]] = None,
) -> None:
raise NotImplementedError
def Datetime(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
pass
def Date(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
pass
def Time(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
pass
def Duration(
self,
size: int,
multiplicity: float,
min_value: int,
max_value: int,
probabilities: t.Optional[t.List[float]] = None,
values: t.Optional[t.List[int]] = None,
) -> None:
pass
def Constrained(
self, statistics: st.Statistics, size: int, multiplicity: float
) -> None:
raise NotImplementedError
visitor = Updater()
stats.accept(visitor)
return visitor.result
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/standard/project.py
| 0.722723 | 0.160727 |
project.py
|
pypi
|
import typing as t
import pyarrow as pa
from sarus_data_spec.arrow.array import convert_record_batch
from sarus_data_spec.bounds import bounds as bounds_builder
from sarus_data_spec.constants import DATA, DATASET_SLUGNAME
from sarus_data_spec.dataset import Dataset
from sarus_data_spec.manager.ops.processor.standard.standard_op import ( # noqa: E501
StandardDatasetImplementation,
StandardDatasetStaticChecker,
)
from sarus_data_spec.manager.ops.processor.standard.visitor_selector import ( # noqa : E501
select_rows,
)
from sarus_data_spec.marginals import marginals as marg_builder
from sarus_data_spec.multiplicity import multiplicity as multiplicity_builder
from sarus_data_spec.path import Path
from sarus_data_spec.schema import schema
from sarus_data_spec.size import size as size_builder
import sarus_data_spec.type as sdt
import sarus_data_spec.typing as st
class GetItemStaticChecker(StandardDatasetStaticChecker):
async def schema(self) -> st.Schema:
parent_schema = await self.parent_schema()
path = Path(self.dataset.transform().protobuf().spec.get_item.path)
sub_types = parent_schema.data_type().sub_types(path)
assert len(sub_types) == 1
new_type = sub_types[0]
# TODO: update foreign_keys/primary_keys in the type
previous_fields = parent_schema.type().children()
if DATA in previous_fields.keys():
previous_fields[DATA] = new_type
new_type = sdt.Struct(fields=previous_fields)
return schema(
self.dataset,
schema_type=new_type,
protected_paths=parent_schema.protobuf().protected,
name=self.dataset.properties().get(DATASET_SLUGNAME, None),
)
class GetItem(StandardDatasetImplementation):
"""Computes schema and arrow
batches for a dataspec transformed by
a get_item transform
"""
async def to_arrow(
self, batch_size: int
) -> t.AsyncIterator[pa.RecordBatch]:
previous_ds = t.cast(Dataset, self.parent())
path = Path(self.dataset.transform().protobuf().spec.get_item.path)
parent_schema = await self.parent_schema()
async def get_item_func(batch: pa.RecordBatch) -> pa.Array:
array = convert_record_batch(
record_batch=batch, _type=parent_schema.type()
)
# VERY UGLY SHOULD BE REMOVED WHEN WE HAVE PROTECTED TYPE
if DATA in parent_schema.type().children():
old_arrays = array.flatten()
idx_data = array.type.get_field_index(DATA)
array = old_arrays[idx_data]
updated_array = get_items(
_type=parent_schema.data_type(),
array=array,
path=path,
)
old_arrays[idx_data] = updated_array
return pa.StructArray.from_arrays(
old_arrays,
names=list(parent_schema.type().children().keys()),
)
updated_array = get_items(
_type=parent_schema.data_type(),
array=array,
path=path,
)
if isinstance(updated_array, pa.StructArray):
return updated_array
return pa.StructArray.from_arrays(
[updated_array],
names=[path.to_strings_list()[0][-1]],
)
return await self.ensure_batch_correct(
async_iterator=await previous_ds.async_to_arrow(
batch_size=batch_size
),
batch_size=batch_size,
func_to_apply=get_item_func,
)
async def size(self) -> st.Size:
sizes = await self.parent_size()
path = Path(self.dataset.transform().protobuf().spec.get_item.path)
new_stats = sizes.statistics().nodes_statistics(path)
assert len(new_stats) == 1
return size_builder(dataset=self.dataset, statistics=new_stats[0])
async def multiplicity(self) -> st.Multiplicity:
multiplicities = await self.parent_multiplicity()
path = Path(self.dataset.transform().protobuf().spec.get_item.path)
new_stats = multiplicities.statistics().nodes_statistics(path)
assert len(new_stats) == 1
return multiplicity_builder(
dataset=self.dataset, statistics=new_stats[0]
)
async def bounds(self) -> st.Bounds:
bounds = await self.parent_bounds()
path = Path(self.dataset.transform().protobuf().spec.get_item.path)
new_stats = bounds.statistics().nodes_statistics(path)
assert len(new_stats) == 1
return bounds_builder(dataset=self.dataset, statistics=new_stats[0])
async def marginals(self) -> st.Marginals:
marginals = await self.parent_marginals()
path = Path(self.dataset.transform().protobuf().spec.get_item.path)
new_stats = marginals.statistics().nodes_statistics(path)
assert len(new_stats) == 1
return marg_builder(dataset=self.dataset, statistics=new_stats[0])
def get_items(array: pa.Array, path: st.Path, _type: st.Type) -> pa.Array:
"""Visitor selecting columns based on the type.
The idea is that at each level,
the filter for the array is computed, and for the union,
we remove the fields that we want to filter among
the columns
"""
class ItemSelector(st.TypeVisitor):
batch_array: pa.Array = array
def Struct(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(path.sub_paths()) > 0:
sub_path = path.sub_paths()[0]
idx = array.type.get_field_index(sub_path.label())
self.batch_array = get_items(
array=array.flatten()[idx],
path=sub_path,
_type=fields[sub_path.label()],
)
def Constrained(
self,
type: st.Type,
constraint: st.Predicate,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Optional(
self,
type: st.Type,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
idx = self.batch_array.type.get_field_index(path.label())
            array = self.batch_array.flatten()[idx]
if len(path.sub_paths()) == 0:
self.batch_array = array
else:
self.batch_array = get_items(
array=array, path=path.sub_paths()[0], _type=type
)
def Union(
self,
fields: t.Mapping[str, st.Type],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
if len(path.sub_paths()) == 0:
self.batch_array = array
else:
sub_path = path.sub_paths()[0]
idx = array.type.get_field_index(sub_path.label())
self.batch_array = get_items(
array=array.flatten()[idx],
path=sub_path,
_type=fields[sub_path.label()],
)
def Array(
self,
type: st.Type,
shape: t.Tuple[int, ...],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def List(
self,
type: st.Type,
max_size: int,
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
raise NotImplementedError
def Boolean(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Bytes(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Unit(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Date(
self,
format: str,
min: str,
max: str,
base: st.DateBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Time(
self,
format: str,
min: str,
max: str,
base: st.TimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Datetime(
self,
format: str,
min: str,
max: str,
base: st.DatetimeBase,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Duration(
self,
unit: str,
min: int,
max: int,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Enum(
self,
name: str,
name_values: t.Sequence[t.Tuple[str, int]],
ordered: bool,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Text(
self,
encoding: str,
possible_values: t.Iterable[str],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Hypothesis(
self,
*types: t.Tuple[st.Type, float],
name: t.Optional[str] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Id(
self,
unique: bool,
reference: t.Optional[st.Path] = None,
base: t.Optional[st.IdBase] = None,
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Integer(
self,
min: int,
max: int,
base: st.IntegerBase,
possible_values: t.Iterable[int],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
def Null(
self, properties: t.Optional[t.Mapping[str, str]] = None
) -> None:
pass
def Float(
self,
min: float,
max: float,
base: st.FloatBase,
possible_values: t.Iterable[float],
properties: t.Optional[t.Mapping[str, str]] = None,
) -> None:
pass
visitor = ItemSelector()
_type.accept(visitor)
return visitor.batch_array
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/standard/get_item.py
| 0.459319 | 0.308972 |
get_item.py
|
pypi
|
import hashlib
import logging
import typing as t
import warnings
import pyarrow as pa
from sarus_data_spec.dataset import Dataset
from sarus_data_spec.manager.ops.base import (
DatasetImplementation,
DatasetStaticChecker,
DataspecStaticChecker,
ScalarImplementation,
_ensure_batch_correct,
)
try:
from sarus_data_spec.manager.ops.sql_utils.table_mapping import (
name_encoder,
table_mapping,
)
except ModuleNotFoundError:
warnings.warn('table_mapping not found. Cannot send sql queries.')
try:
from sarus_data_spec.manager.ops.sql_utils.queries import (
rename_and_compose_queries,
)
except ModuleNotFoundError:
warnings.warn('Queries composition not available.')
from sarus_data_spec.scalar import Scalar
import sarus_data_spec.typing as st
logger = logging.getLogger(__name__)
class StandardDatasetStaticChecker(DatasetStaticChecker):
def parent(self, kind: str = 'dataset') -> t.Union[st.Dataset, st.Scalar]:
return parent(self.dataset, kind=kind)
async def parent_schema(self) -> st.Schema:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_schema(parent)
async def parent_marginals(self) -> st.Marginals:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_marginals(parent)
def pep_token(self, public_context: t.Collection[str]) -> t.Optional[str]:
"""By default we implement that the transform inherits the PEP status
but changes the PEP token."""
parent_token = self.parent().pep_token()
if parent_token is None:
return None
transform = self.dataset.transform()
h = hashlib.md5()
h.update(parent_token.encode("ascii"))
h.update(transform.protobuf().SerializeToString())
return h.hexdigest()
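# Illustrative token derivation mirroring the method above (the parent token
# and serialized transform bytes are placeholders for this sketch):
#
#     h = hashlib.md5()
#     h.update(b"parent-token")
#     h.update(b"serialized-transform-protobuf")
#     new_token = h.hexdigest()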
class StandardDatasetImplementation(DatasetImplementation):
"""Object that executes first routing among ops between
transformed/source and processor
"""
def parents(self) -> t.List[t.Union[st.DataSpec, st.Transform]]:
return parents(self.dataset)
def parent(self, kind: str = 'dataset') -> t.Union[st.Dataset, st.Scalar]:
return parent(self.dataset, kind=kind)
async def parent_to_arrow(
self, batch_size: int = 10000
) -> t.AsyncIterator[pa.RecordBatch]:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
parent_iterator = await parent.manager().async_to_arrow(
parent, batch_size=batch_size
)
return await self.decoupled_async_iter(parent_iterator)
async def parent_schema(self) -> st.Schema:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_schema(parent)
async def parent_value(self) -> t.Any:
parent = self.parent(kind='scalar')
assert isinstance(parent, Scalar)
return await parent.manager().async_value(parent)
async def parent_size(self) -> st.Size:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_size(parent)
async def parent_multiplicity(self) -> st.Multiplicity:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_multiplicity(parent)
async def parent_bounds(self) -> st.Bounds:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_bounds(parent)
async def parent_marginals(self) -> st.Marginals:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_marginals(parent)
async def ensure_batch_correct(
self,
async_iterator: t.AsyncIterator[pa.RecordBatch],
func_to_apply: t.Callable,
batch_size: int,
) -> t.AsyncIterator[pa.RecordBatch]:
"""Method that executes func_to_apply on each batch
of the async_iterator but rather than directly returning
the result, it accumulates them and returns them progressively
so that each new batch has batch_size."""
return _ensure_batch_correct(async_iterator, func_to_apply, batch_size)
async def sql_implementation(
self,
) -> t.Optional[t.Dict[t.Tuple[str, ...], str]]:
"""Returns a dict of queries equivalent to the current transform.
If the the transform does not change the schema, then return None"""
raise NotImplementedError(
"No SQL implementation for dataset issued from"
f" {self.dataset.transform().spec()} transform."
)
async def sql(
self,
query: t.Union[str, t.Dict[str, t.Any]],
dialect: t.Optional[st.SQLDialect] = None,
batch_size: int = 10000,
result_type: t.Optional[st.Type] = None,
) -> t.AsyncIterator[pa.RecordBatch]:
"""It rewrites and/or composes the query and sends it to the parent."""
queries_transform = await self.sql_implementation()
current_schema = await self.dataset.manager().async_schema(
self.dataset
)
parent_schema = await self.parent_schema()
if (
queries_transform is None
and current_schema.name() == parent_schema.name()
):
parent_query = query
else:
table_map = {}
if queries_transform is not None:
table_map = table_mapping(
tables=current_schema.tables(),
sarus_schema_name=current_schema.name(),
encoded_name_length=10,
encoder_prefix_name=self.dataset.uuid(),
)
updated_queries_transform = (
{
name_encoder(
names=(self.dataset.uuid(), *tab_name),
length=10,
): query_str
for tab_name, query_str in queries_transform.items()
}
if queries_transform is not None
else None
)
parent_query = rename_and_compose_queries(
query_or_dict=query,
curr_path=[],
queries_transform=updated_queries_transform,
table_map=table_map,
)
parent_ds = t.cast(st.Dataset, self.parent(kind='dataset'))
logger.debug(
f"query {parent_query} sent to the "
f"parent dataset {parent_ds.uuid()}"
)
return await parent_ds.manager().async_sql(
dataset=parent_ds,
query=parent_query,
dialect=dialect,
batch_size=batch_size,
result_type=result_type,
)
class StandardScalarStaticChecker(DataspecStaticChecker):
...
class StandardScalarImplementation(ScalarImplementation):
def parent(self, kind: str = 'dataset') -> st.DataSpec:
return parent(self.scalar, kind=kind)
def parents(self) -> t.List[t.Union[st.DataSpec, st.Transform]]:
return parents(self.scalar)
async def parent_to_arrow(
self, batch_size: int = 10000
) -> t.AsyncIterator[pa.RecordBatch]:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
parent_iterator = await parent.manager().async_to_arrow(
parent, batch_size=batch_size
)
return await self.decoupled_async_iter(parent_iterator)
async def parent_schema(self) -> st.Schema:
parent = self.parent(kind='dataset')
assert isinstance(parent, Dataset)
return await parent.manager().async_schema(parent)
async def parent_value(self) -> t.Any:
parent = self.parent(kind='scalar')
assert isinstance(parent, Scalar)
return await parent.manager().async_value(parent)
def parent(dataspec: st.DataSpec, kind: str) -> t.Union[st.Dataset, st.Scalar]:
pars = parents(dataspec)
if kind == 'dataset':
parent: t.Union[t.List[Scalar], t.List[Dataset]] = [
element for element in pars if isinstance(element, Dataset)
]
else:
parent = [element for element in pars if isinstance(element, Scalar)]
assert len(parent) == 1
return parent[0]
def parents(
dataspec: st.DataSpec,
) -> t.List[t.Union[st.DataSpec, st.Transform]]:
parents_args, parents_kwargs = dataspec.parents()
parents_args.extend(parents_kwargs.values())
return parents_args
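# A hedged illustration of how the two helpers above are typically combined
# (`dataspec` is a placeholder and must have exactly one parent of each kind
# for the `parent` calls to succeed):
#
#     all_parents = parents(dataspec)
#     dataset_parent = parent(dataspec, kind='dataset')
#     scalar_parent = parent(dataspec, kind='scalar')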
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/standard/standard_op.py
| 0.750827 | 0.374991 |
standard_op.py
|
pypi
|
from __future__ import annotations
import typing as t
from sarus_data_spec.dataspec_validator.signature import SarusBoundSignature
from sarus_data_spec.dataspec_validator.typing import PEPKind
import sarus_data_spec.typing as st
NO_TRANSFORM_ID = "no_transform_id"
@t.runtime_checkable
class ExternalOpImplementation(t.Protocol):
"""External Op implementation class.
The `allowed_pep_args` is a list of combinations of arguments' names which
are managed by the Op. The result of the Op will be PEP only if the set of
PEP arguments passed to the Op are in this list.
For instance, if we have an op that takes 3 arguments `a`, `b` and `c` and
the `allowed_pep_args` are [{'a'}, {'b'}, {'a','b'}] then the following
combinations will yield a PEP output:
- `a` is a PEP dataspec, `b` and `c` are either not dataspecs or public
dataspecs
- `b` is a PEP dataspec, `a` and `c` are either not dataspecs or public
dataspecs
- `a` and `b` are PEP dataspecs, `c` is either not a dataspec or a
public dataspec
"""
def transform_id(self) -> str:
...
def dp_equivalent_id(self) -> t.Optional[str]:
...
def dp_equivalent(self) -> t.Optional[ExternalOpImplementation]:
...
async def call(self, bound_signature: SarusBoundSignature) -> t.Any:
"""Compute the external op output value.
DP implementation take additional arguments:
- `seed` an integer used to parametrize rangom number generators
- `budget` the privacy budget that can be spend in the op
- `pe` the protected entity information of each row
"""
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""Return the PEP properties of the transform.
It takes the transform arguments as input because it can depend on some
transform parameters. For instance, it is not PEP if we are aggregating
the rows (axis=0) and it is PEP if we are aggregating the columns
(axis=1).
"""
def is_dp(self, bound_signature: SarusBoundSignature) -> bool:
"""Return True if the DP transform is compatible with the arguments.
It takes the transform arguments as input because it can depend on some
transform parameters. For instance, if we are aggregating the rows
(axis=0), then there might be an equivalent DP transform but if we are
aggregating the columns there might not (axis=1).
"""
async def private_queries(
self, signature: SarusBoundSignature
) -> t.List[st.PrivateQuery]:
"""Return the PrivateQueries summarizing DP characteristics."""
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/typing.py
| 0.901834 | 0.62986 |
typing.py
|
pypi
|
from __future__ import annotations
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
import numpy as np
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from .external_op import ExternalOpImplementation
try:
from imblearn import over_sampling, pipeline, under_sampling
except ModuleNotFoundError:
pass # error message in typing.py
SamplingStrategy = Literal[
'majority', 'not minority', 'not majority', 'all', 'auto'
]
# ------ CONSTRUCTORS ------
class imb_pipeline(ExternalOpImplementation):
_transform_id = "imblearn.IMB_PIPELINE"
_signature = SarusSignature(
SarusParameter(
name="steps",
annotation=List[Tuple[str, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="memory",
annotation=Optional,
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="verbose",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return pipeline.Pipeline(**kwargs)
class imb_random_under_sampler(ExternalOpImplementation):
_transform_id = "imblearn.IMB_RANDOM_UNDER_SAMPLER"
_signature = SarusSignature(
SarusParameter(
name="sampling_strategy",
annotation=Union[float, SamplingStrategy, Callable, Dict],
default="auto",
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="replacement",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return under_sampling.RandomUnderSampler(**kwargs)
class imb_smotenc(ExternalOpImplementation):
_transform_id = "imblearn.IMB_SMOTENC"
_signature = SarusSignature(
SarusParameter(
name="categorical_features",
annotation=Union[List[int], List[bool]],
),
SarusParameter(
name="sampling_strategy",
annotation=Union[float, SamplingStrategy, Callable, Dict],
default="auto",
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="k_neighbors",
annotation=Union[int, object],
default=5,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return over_sampling.SMOTENC(**kwargs)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/imblearn.py
| 0.890163 | 0.226495 |
imblearn.py
|
pypi
|
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
import numpy as np
import pandas as pd
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from .external_op import ExternalOpImplementation
try:
from xgboost import XGBClassifier, XGBRegressor
except ModuleNotFoundError:
pass # error message in sarus_data_spec.typing
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
# ------ CONSTRUCTORS -------
class xgb_classifier(ExternalOpImplementation):
_transform_id = "xgboost.XGB_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="max_leaves",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="max_bin",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="grow_policy",
annotation=Optional[Literal[0, 1]],
default=None,
),
SarusParameter(
name="learning_rate",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="verbosity",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="objective",
annotation=Optional[Union[str, Callable]],
default=None,
),
SarusParameter(
name="booster",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="tree_method",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="gamma",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="min_child_weight",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="max_delta_step",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="subsample",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="colsample_bytree",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="colsample_bylevel",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="colsample_bynode",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="reg_alpha",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="reg_lambda",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="scale_pos_weight",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="base_score",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[np.random.RandomState, int]],
default=None,
),
SarusParameter(
name="missing",
annotation=float,
default=np.nan,
),
SarusParameter(
name="num_parallel_tree",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="monotone_constraints",
annotation=Optional[Union[Dict[str, int], str]],
default=None,
),
SarusParameter(
name="interaction_constraints",
annotation=Optional[Union[str, List[Tuple[str]]]],
default=None,
),
SarusParameter(
name="importance_type",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="gpu_id",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="validate_parameters",
annotation=Optional[bool],
default=None,
),
SarusParameter(
name="predictor",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="enable_categorical",
annotation=bool,
default=False,
),
SarusParameter(
name="max_cat_to_onehot",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="max_cat_threshold",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="eval_metric",
annotation=Optional[Union[str, List[str], Callable]],
default=None,
),
SarusParameter(
name="early_stopping_rounds",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="callbacks",
annotation=Optional[List[Callable]],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="kwargs",
annotation=Optional[Dict],
default=None,
),
SarusParameter(
name="use_label_encoder",
annotation=Optional[bool],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return XGBClassifier(**kwargs)
class xgb_regressor(ExternalOpImplementation):
_transform_id = "xgboost.XGB_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="max_leaves",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="max_bin",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="grow_policy",
annotation=Optional[Literal[0, 1]],
default=None,
),
SarusParameter(
name="learning_rate",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="verbosity",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="objective",
annotation=Optional[
Union[
str,
Callable[
[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]
],
None,
]
],
default=None,
),
SarusParameter(
name="booster",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="tree_method",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="gamma",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="min_child_weight",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="max_delta_step",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="subsample",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="colsample_bytree",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="colsample_bylevel",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="colsample_bynode",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="reg_alpha",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="reg_lambda",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="scale_pos_weight",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="base_score",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[np.random.RandomState, int]],
default=None,
),
SarusParameter(
name="missing",
annotation=float,
default=np.nan,
),
SarusParameter(
name="num_parallel_tree",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="monotone_constraints",
annotation=Optional[Union[Dict[str, int], str]],
default=None,
),
SarusParameter(
name="interaction_constraints",
annotation=Optional[Union[str, List[Tuple[str]]]],
default=None,
),
SarusParameter(
name="importance_type",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="gpu_id",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="validate_parameters",
annotation=Optional[bool],
default=None,
),
SarusParameter(
name="predictor",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="enable_categorical",
annotation=bool,
default=False,
),
SarusParameter(
name="max_cat_to_onehot",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="max_cat_threshold",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="eval_metric",
annotation=Optional[Union[str, List[str], Callable]],
default=None,
),
SarusParameter(
name="early_stopping_rounds",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="callbacks",
annotation=Optional[List[Callable]],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="kwargs",
annotation=Optional[Dict],
default=None,
),
SarusParameter(
name="use_label_encoder",
annotation=Optional[bool],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return XGBRegressor(**kwargs)
# ------ METHODS ------
class xgb_fit(ExternalOpImplementation):
_transform_id = "xgboost.XGB_FIT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
condition=DATASPEC,
),
SarusParameter(
name="X",
annotation=Union[pd.DataFrame, np.ndarray],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y",
annotation=Union[pd.Series, np.ndarray],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="sample_weight",
annotation=Optional[np.ndarray],
default=None,
),
SarusParameter(
name="base_margin",
annotation=Optional[np.ndarray],
default=None,
),
SarusParameter(
name="eval_set",
annotation=Optional[List[Tuple[np.ndarray, np.ndarray]]],
default=None,
),
SarusParameter(
name="verbose",
annotation=Union[bool, int, None],
default=True,
),
SarusParameter(
name="xgb_model",
annotation=Any,
default=None,
),
SarusParameter(
name="sample_weight_eval_set",
annotation=Optional[List[np.ndarray]],
default=None,
),
SarusParameter(
name="base_margin_eval_set",
annotation=Optional[List[np.ndarray]],
default=None,
),
SarusParameter(
name="feature_weights",
annotation=Optional[np.ndarray],
default=None,
),
SarusParameter(
name="callbacks",
annotation=Optional[List[Callable]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.fit(**kwargs)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/xgboost.py
| 0.784773 | 0.182589 |
xgboost.py
|
pypi
|
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from .external_op import ExternalOpImplementation
try:
from sklearn.base import BaseEstimator
from sklearn.model_selection import BaseCrossValidator
import skopt
except ModuleNotFoundError:
BaseEstimator = Any
BaseCrossValidator = Any
class skopt_bayes_search_cv(ExternalOpImplementation):
_transform_id = "skopt.SKOPT_BAYES_SEARCH_CV"
_signature = SarusSignature(
SarusParameter(
name="estimator",
annotation=BaseEstimator,
),
SarusParameter(
name="search_spaces",
annotation=Union[Dict, List[Union[Dict, Tuple]]],
),
SarusParameter(
name="n_iter",
annotation=int,
default=50,
),
SarusParameter(
name="optimizer_kwargs",
annotation=Optional[Dict],
default=None,
),
SarusParameter(
name="scoring",
annotation=Optional[Union[str, Callable]],
default=None,
),
SarusParameter(
name="fit_params",
annotation=Optional[Dict],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=int,
default=1,
predicate=lambda x: bool(x < 4),
),
SarusParameter(
name="n_points",
annotation=int,
default=1,
),
SarusParameter(
name="pre_dispatch",
annotation=Optional[Union[int, str]],
default="2*n_jobs",
),
SarusParameter(
name="iid",
annotation=bool,
default=True,
),
SarusParameter(
name="cv",
annotation=Optional[Union[int, BaseCrossValidator, Iterable]],
default=None,
),
SarusParameter(
name="refit",
annotation=bool,
default=True,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="random_state",
annotation=Union[int, np.random.RandomState],
default=None,
),
SarusParameter(
name="error_score",
annotation=Union[str, float],
default="raise",
),
SarusParameter(
name="return_train_score",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return skopt.BayesSearchCV(**kwargs)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/skopt.py
| 0.823399 | 0.212681 |
skopt.py
|
pypi
|
from importlib import import_module
from typing import Dict, List, Optional, Type, cast
import inspect
from .typing import NO_TRANSFORM_ID, ExternalOpImplementation
MODULES = [
"pandas",
"sklearn",
"numpy",
"imblearn",
"pandas_profiling",
"skopt",
"std",
"xgboost",
"shap",
]
def op_name(module_name: str, transform_id: str) -> str:
"""Extract the last part of a transform ID.
Example: sklearn.SK_FIT -> SK_FIT.
"""
mod_name, op_name = transform_id.split(".")
assert module_name == mod_name
return op_name
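# Doctest-style illustration matching the docstring above:
#
#     >>> op_name("sklearn", "sklearn.SK_FIT")
#     'SK_FIT'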
def valid_ops(module_name: str) -> List[ExternalOpImplementation]:
"""Get all ExternalOpImplementation from a module."""
module = import_module(
f"sarus_data_spec.manager.ops.processor.external.{module_name}"
)
members = inspect.getmembers(
module,
lambda __obj: inspect.isclass(__obj)
and issubclass(__obj, ExternalOpImplementation),
)
ops_classes = [
cast(Type[ExternalOpImplementation], op) for _, op in members
]
ops_instances = [op_class() for op_class in ops_classes]
valid_ops = [
op for op in ops_instances if op.transform_id() != NO_TRANSFORM_ID
]
return valid_ops
def ops_mapping(module_name: str) -> Dict[str, ExternalOpImplementation]:
"""Get all ExternalOpImplementation from a module.
Return a Mapping (op_name -> op_implementation).
"""
ops_mapping = {
op_name(module_name, op.transform_id()): op
for op in valid_ops(module_name)
}
return ops_mapping
ROUTING = {module_name: ops_mapping(module_name) for module_name in MODULES}
# These are the PEP and DP transforms, given as mappings {OP_ID: DOCSTRING}.
# The docstrings are displayed in the documentation to explain under which
# conditions the op is PEP or DP.
def replace_none(x: Optional[str]) -> str:
return x if x else ""
ALL_OPS: List[ExternalOpImplementation] = sum(
[valid_ops(module_name) for module_name in MODULES], []
)
PEP_TRANSFORMS = {
op.transform_id(): replace_none(op.pep_kind.__doc__)
for op in ALL_OPS
if op.pep_kind.__doc__ != ExternalOpImplementation.pep_kind.__doc__
}
DP_TRANSFORMS = {
op.transform_id(): replace_none(
cast(ExternalOpImplementation, op.dp_equivalent()).is_dp.__doc__
)
for op in ALL_OPS
if op.dp_equivalent_id() is not None
}
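# A hedged lookup sketch: ROUTING maps a module name to {op_name: op
# implementation}. The example below uses the XGB_FIT op defined in the
# xgboost module; any other registered op name would work the same way.
#
#     impl = ROUTING["xgboost"]["XGB_FIT"]
#     impl.transform_id()  # -> "xgboost.XGB_FIT"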
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/__init__.py
| 0.809351 | 0.357259 |
__init__.py
|
pypi
|
from __future__ import annotations
import hashlib
import typing as t
import pandas as pd
import pyarrow as pa
from sarus_data_spec.arrow.admin_utils import (
compute_admin_data,
create_admin_columns,
merge_data_and_admin,
)
from sarus_data_spec.arrow.conversion import to_pyarrow_table
from sarus_data_spec.arrow.schema import type_from_arrow_schema
from sarus_data_spec.dataspec_validator.signature import (
SarusBoundSignature,
SarusSignature,
SarusSignatureValue,
)
from sarus_data_spec.dataspec_validator.typing import PEPKind
from sarus_data_spec.manager.async_utils import async_iter
from sarus_data_spec.manager.ops.base import (
DatasetImplementation,
DatasetStaticChecker,
DataspecStaticChecker,
ScalarImplementation,
)
from sarus_data_spec.schema import schema as schema_builder
from sarus_data_spec.transform import external, transform_id
import sarus_data_spec.protobuf as sp
import sarus_data_spec.type as sdt
import sarus_data_spec.typing as st
from .typing import NO_TRANSFORM_ID
from .utils import static_arguments
class ExternalScalarStaticChecker(DataspecStaticChecker):
async def private_queries(self) -> t.List[st.PrivateQuery]:
"""Return the PrivateQueries summarizing DP characteristics."""
implementation = external_implementation(self.dataspec.transform())
bound_signature = implementation.signature().bind_dataspec(
self.dataspec
)
return await implementation.private_queries(bound_signature)
def is_dp(self) -> bool:
"""Checks if the transform is DP and compatible with the arguments."""
implementation = external_implementation(self.dataspec.transform())
bound_signature = implementation.signature().bind_dataspec(
self.dataspec
)
return implementation.is_dp(bound_signature)
def is_dp_applicable(self, public_context: t.Collection[str]) -> bool:
"""Statically check if a DP transform is applicable in this position.
This verification is common to all dataspecs and is true if:
- the dataspec is transformed and its transform has an equivalent
DP transform
- the DP transform's required PEP arguments are PEP and aligned
(i.e. same PEP token)
        - other dataspec arguments are public
"""
transform = self.dataspec.transform()
implementation = external_implementation(transform)
dp_implementation = implementation.dp_equivalent()
bound_signature = implementation.signature().bind_dataspec(
self.dataspec
)
if dp_implementation is None or not dp_implementation.is_dp(
bound_signature
):
return False
return bound_signature.pep_token() is not None
def dp_transform(self) -> t.Optional[st.Transform]:
"""Return the dataspec's DP equivalent transform if existing."""
transform = self.dataspec.transform()
op_implementation = external_implementation(transform)
py_args, py_kwargs, ds_args_pos, ds_types = static_arguments(transform)
dp_implementation = op_implementation.dp_equivalent()
if dp_implementation is None:
return None
dp_transform_id = dp_implementation.transform_id()
assert dp_transform_id is not None
# Types won't be used since budget & seed are scalars
ds_types["budget"] = ""
ds_types["seed"] = ""
return external(
dp_transform_id,
py_args=py_args,
py_kwargs=py_kwargs,
ds_args_pos=ds_args_pos,
ds_types=ds_types,
)
class ExternalDatasetStaticChecker(
ExternalScalarStaticChecker, DatasetStaticChecker
):
def __init__(self, dataset: st.Dataset):
super().__init__(dataset)
self.dataset = dataset
def pep_token(self, public_context: t.Collection[str]) -> t.Optional[str]:
"""Return the dataspec's PEP token."""
transform = self.dataspec.transform()
implementation = external_implementation(transform)
bound_signature = implementation.signature().bind_dataspec(
self.dataspec
)
input_token = bound_signature.pep_token()
if input_token is None:
return None
pep_kind = implementation.pep_kind(bound_signature)
if pep_kind == PEPKind.NOT_PEP:
return None
elif pep_kind == PEPKind.TOKEN_PRESERVING:
return input_token
else: # PEP or ROW
h = hashlib.md5()
h.update(input_token.encode("ascii"))
h.update(transform.protobuf().SerializeToString())
new_token = h.hexdigest()
return new_token
async def schema(self) -> st.Schema:
"""Computes the schema of the dataspec.
The schema is computed by computing the synthetic data value and
        converting the PyArrow schema to a Sarus schema.
"""
syn_variant = self.dataset.variant(kind=st.ConstraintKind.SYNTHETIC)
assert syn_variant is not None
assert syn_variant.prototype() == sp.Dataset
syn_dataset = t.cast(st.Dataset, syn_variant)
arrow_iterator = await syn_dataset.async_to_arrow(batch_size=1)
first_batch = await arrow_iterator.__anext__()
schema = first_batch.schema
schema_type = type_from_arrow_schema(schema)
if self.dataset.is_pep():
# If the dataset is PEP then the schema of the real data should
# have protection but the synthetic data might not have it. We
# need to add it manually.
schema_type = sdt.protected_type(schema_type)
return schema_builder(self.dataset, schema_type=schema_type)
class ExternalDatasetOp(DatasetImplementation):
async def to_arrow(
self, batch_size: int
) -> t.AsyncIterator[pa.RecordBatch]:
transform = self.dataset.transform()
implementation = external_implementation(transform)
bound_signature = implementation.signature().bind_dataspec(
self.dataset
)
bound_signature.static_validation()
if self.dataset.is_pep():
result = await implementation.compute(bound_signature)
if (
isinstance(result, pd.Series)
and implementation.pep_kind(bound_signature) == PEPKind.ROW
):
# Reformat the series as a dataframe with a single row
result = result.to_frame().transpose()
ds_result = t.cast(st.DatasetCastable, result)
admin_data = await bound_signature.admin_data()
output_admin_data = compute_admin_data(admin_data, ds_result)
data_table = to_pyarrow_table(ds_result)
table = merge_data_and_admin(data_table, output_admin_data)
else:
result = await implementation.compute(bound_signature)
data_table = to_pyarrow_table(result)
table = create_admin_columns(data_table)
return async_iter(table.to_batches(max_chunksize=batch_size))
class ExternalScalarOp(ScalarImplementation):
async def value(self) -> t.Any:
transform = self.scalar.transform()
ds_args, ds_kwargs = self.scalar.parents()
return await async_compute_external_value(
transform, *ds_args, **ds_kwargs
)
async def async_compute_external_value(
transform: st.Transform,
*ds_args: t.Union[st.DataSpec, st.Transform],
**ds_kwargs: t.Union[st.DataSpec, st.Transform],
) -> t.Any:
"""Compute the value of an external transform applied on Dataspecs.
This function computes the output value without manipulating the
corresponding Dataspec. This is useful when we need to have access
to the value of a Dataspec before its creation:
- for computing a Mock value and inferring if the result is
a Scalar or a Dataset.
"""
implementation = external_implementation(transform)
bound_signature = implementation.signature().bind(
transform, *ds_args, **ds_kwargs
)
bound_signature.static_validation()
data = await implementation.compute(bound_signature)
return data
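# Usage sketch (hypothetical variables): this coroutine is typically awaited
# before the resulting DataSpec exists, e.g. to compute a mock value and decide
# whether the output should be wrapped as a Scalar or a Dataset:
#     value = await async_compute_external_value(transform, *mock_parents)
#     wrap_as_dataset = isinstance(value, (pd.DataFrame, pa.Table))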
class ExternalOpImplementation:
"""External PEP op implementation class.
This class wraps together several elements of an external op
implementation:
- `call` is the function that computes the output value from the
input(s) value(s).
"""
_transform_id: str = NO_TRANSFORM_ID
_dp_equivalent_id: t.Optional[str] = None
_non_dp_equivalent_id: t.Optional[str] = None
_signature: t.Optional[SarusSignature] = None
def transform_id(self) -> str:
return self._transform_id
def dp_equivalent_id(self) -> t.Optional[str]:
return self._dp_equivalent_id
def dp_equivalent(self) -> t.Optional[ExternalOpImplementation]:
if not self._dp_equivalent_id:
return None
return external_implementation_from_id(self._dp_equivalent_id)
def signature(self) -> SarusSignature:
if self._signature is not None:
return self._signature
if self._non_dp_equivalent_id is None:
raise ValueError(
f"External implementation {self.transform_id()} has no "
"signature defined and no non-DP equivalent."
)
non_dp_signature = external_implementation_from_id(
self._non_dp_equivalent_id
).signature()
return non_dp_signature.make_dp()
async def compute(self, bound_signature: SarusBoundSignature) -> t.Any:
if self.is_dp(bound_signature):
return await self.call_dp(bound_signature)
else:
signature_value = await bound_signature.collect_signature_value()
return self.call(signature_value)
def call(self, signature_value: SarusSignatureValue) -> t.Any:
raise NotImplementedError
async def call_dp(self, bound_signature: SarusBoundSignature) -> t.Any:
"""DP ops `call` need to be async to compute schema, tasks, etc"""
raise NotImplementedError
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""Return the PEP properties of the transform.
It takes the transform arguments as input because it can depend on some
transform parameters. For instance, it is not PEP if we are aggregating
the rows (axis=0) and it is PEP if we are aggregating the columns
(axis=1).
"""
# Default implementation
return PEPKind.NOT_PEP
def is_dp(self, bound_signature: SarusBoundSignature) -> bool:
"""Return True if the DP transform is compatible with the arguments.
It takes the transform arguments as input because it can depend on some
        transform parameters. For instance, there might be an equivalent DP
        transform when aggregating rows (axis=0) but not when aggregating
        columns (axis=1).
"""
# Default implementation
return False
async def private_queries(
self, signature: SarusBoundSignature
) -> t.List[st.PrivateQuery]:
"""Takes as input the args of the transform (static and dynamic)."""
if not signature.is_dp():
return []
queries, _ = await self.private_queries_and_task(signature)
return queries
async def private_queries_and_task(
self, signature: SarusBoundSignature
) -> t.Tuple[t.List[st.PrivateQuery], st.Task]:
raise NotImplementedError
def callable(
self, composed_transform: st.Transform
) -> t.Callable[..., t.Awaitable[t.Any]]:
"""Build the transform's async callable.
The function takes an undefined number of named arguments.
It first collects the current transform's signature concrete values
using the passed variables' values. The concrete values are stored in a
SarusBoundSignature object so we can compute the current transform's
output by simply using the implementation's `call` method.
"""
lambda_signature = self.signature().bind_composed(composed_transform)
previous_callable = lambda_signature.callable()
def composed_callable(*vars: t.Any, **kwvars: t.Any) -> t.Any:
signature_value = previous_callable(*vars, **kwvars)
return self.call(signature_value)
return composed_callable
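# Illustrative sketch (not part of the library): a minimal concrete op showing
# how the pieces of ExternalOpImplementation fit together. The transform id
# "example.IDENTITY" and the class are hypothetical; SarusParameter is imported
# here only for the sketch, from the same module the concrete op files use.
from sarus_data_spec.dataspec_validator.signature import SarusParameter
class example_identity(ExternalOpImplementation):
    _transform_id = "example.IDENTITY"
    _signature = SarusSignature(
        SarusParameter(name="this", annotation=t.Any),
    )
    def call(self, signature_value: SarusSignatureValue) -> t.Any:
        # `collect_args` returns the evaluated argument values in order.
        (this,) = signature_value.collect_args()
        return this
    def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
        # The output rows stay aligned with the input rows, so the input PEP
        # token can be preserved.
        return PEPKind.TOKEN_PRESERVING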
def external_implementation(
transform: st.Transform,
) -> ExternalOpImplementation:
"""Return the implementation of an external op from a DataSpec.
The mapping is done by the config file.
"""
assert transform and transform.is_external()
id = transform_id(transform)
return external_implementation_from_id(id)
def external_implementation_from_id(id: str) -> ExternalOpImplementation:
# Imported here to avoid circular imports
from . import ROUTING
library, op_name = id.split(".")
op_implementation = t.cast(
ExternalOpImplementation, ROUTING[library][op_name]
)
return op_implementation
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/external_op.py
from typing import Any, List, Literal, Optional, Tuple, Union
from numpy.typing import ArrayLike, DTypeLike
import numpy as np
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusParameterArray,
SarusSignature,
SarusSignatureValue,
)
from .external_op import ExternalOpImplementation
Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"]
Order = Literal["K", "A", "C", "F"]
# ------ CONSTRUCTORS ------
class np_array(ExternalOpImplementation):
_transform_id = "numpy.NP_ARRAY"
_signature = SarusSignature(
SarusParameter(
name="object",
annotation=ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="dtype",
annotation=Optional[DTypeLike],
default=None,
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
SarusParameter(
name="order",
annotation=Order,
default="K",
),
SarusParameter(
name="subok",
annotation=bool,
default=False,
),
SarusParameter(
name="ndmin",
annotation=int,
default=0,
),
SarusParameter(
name="like",
annotation=Optional[ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
if kwargs["like"] is None:
del kwargs["like"]
return np.array(**kwargs)
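# Note on np_array.call above: the `like` keyword is dropped when left unset so
# that np.array's default dispatch applies; it is only forwarded when a value
# was explicitly provided.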
# ------ FUNCTIONS ------
class np_ceil(ExternalOpImplementation):
_transform_id = "numpy.NP_CEIL"
_signature = SarusSignature(
SarusParameter(
name="x",
annotation=ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="out",
annotation=Optional[ArrayLike],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="where",
annotation=Optional[Union[bool, ArrayLike]],
default=True,
),
SarusParameter(
name="casting",
annotation=Casting,
default="same_kind",
),
SarusParameter(
name="order",
annotation=Order,
default="K",
),
SarusParameter(
name="dtype",
annotation=Optional[DTypeLike],
default=None,
),
SarusParameter(
name="subok",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
x, kwargs = signature.collect_kwargs_method()
return np.ceil(x, **kwargs)
class np_floor(ExternalOpImplementation):
_transform_id = "numpy.NP_FLOOR"
_signature = SarusSignature(
SarusParameter(
name="x",
annotation=ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="out",
annotation=Optional[ArrayLike],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="where",
annotation=Optional[Union[bool, ArrayLike]],
default=True,
),
SarusParameter(
name="casting",
annotation=Casting,
default="same_kind",
),
SarusParameter(
name="order",
annotation=Order,
default="K",
),
SarusParameter(
name="dtype",
annotation=Optional[DTypeLike],
default=None,
),
SarusParameter(
name="subok",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
x, kwargs = signature.collect_kwargs_method()
return np.floor(x, **kwargs)
class np_mean(ExternalOpImplementation):
_transform_id = "numpy.NP_MEAN"
_signature = SarusSignature(
SarusParameter(
name="a",
annotation=ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="axis",
annotation=Optional[Union[int, Tuple[int]]],
default=None,
),
SarusParameter(
name="dtype",
annotation=Optional[DTypeLike],
default=None,
),
SarusParameter(
name="out",
annotation=Optional[ArrayLike],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="keepdims",
annotation=bool,
default=False,
),
SarusParameter(
name="where",
annotation=Optional[Union[bool, ArrayLike]],
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return np.mean(**kwargs)
class np_std(ExternalOpImplementation):
_transform_id = "numpy.NP_STD"
_signature = SarusSignature(
SarusParameter(
name="a",
annotation=ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="axis",
annotation=Optional[Union[int, Tuple[int]]],
default=None,
),
SarusParameter(
name="dtype",
annotation=Optional[DTypeLike],
default=None,
),
SarusParameter(
name="out",
annotation=Optional[ArrayLike],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="ddof",
annotation=int,
default=0,
),
SarusParameter(
name="keepdims",
annotation=bool,
default=False,
),
SarusParameter(
name="where",
annotation=Optional[Union[bool, ArrayLike]],
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return np.std(**kwargs)
class np_rand(ExternalOpImplementation):
_transform_id = "numpy.NP_RAND"
_signature = SarusSignature(
SarusParameterArray(
name="size",
annotation=Optional[Union[int, List[int]]],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
sizes = signature.collect_args()
return np.random.random_sample(sizes)
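# Note on np_rand above: SarusParameterArray collects the variadic `size`
# arguments into a list, which is passed as the output shape to
# np.random.random_sample.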
# ------ METHODS ------
class np_astype(ExternalOpImplementation):
_transform_id = "numpy.NP_ASTYPE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=np.ndarray,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="dtype",
annotation=DTypeLike,
),
SarusParameter(
name="order",
annotation=Order,
default="K",
),
SarusParameter(
name="casting",
annotation=Casting,
default="unsafe",
),
SarusParameter(
name="subok",
annotation=bool,
default=False,
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.astype(**kwargs)
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/numpy.py
from typing import Any, Dict, Iterable, List, Optional
import numpy as np
from sarus_data_spec.dataspec_validator.parameter_kind import STATIC
from sarus_data_spec.dataspec_validator.privacy_limit import DeltaEpsilonLimit
from sarus_data_spec.dataspec_validator.signature import (
SarusBoundSignature,
SarusParameter,
SarusParameterArray,
SarusParameterMapping,
SarusSignature,
SarusSignatureValue,
)
from sarus_data_spec.dataspec_validator.typing import PEPKind
from sarus_data_spec.status import DataSpecErrorStatus
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
from .external_op import ExternalOpImplementation
class add(ExternalOpImplementation):
_transform_id = "std.ADD"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this + other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class sub(ExternalOpImplementation):
_transform_id = "std.SUB"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
name="substract",
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this - other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class rsub(ExternalOpImplementation):
_transform_id = "std.RSUB"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return other - this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class mul(ExternalOpImplementation):
_transform_id = "std.MUL"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
name="multiply",
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this * other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class div(ExternalOpImplementation):
_transform_id = "std.DIV"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this / other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class rdiv(ExternalOpImplementation):
_transform_id = "std.RDIV"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return other / this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class invert(ExternalOpImplementation):
_transform_id = "std.INVERT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return ~this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class length(ExternalOpImplementation):
_transform_id = "std.LEN"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return len(this)
class getitem(ExternalOpImplementation):
_transform_id = "std.GETITEM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="key",
annotation=Any,
),
name=_transform_id,
)
def call(self, signature: SarusSignatureValue) -> Any:
this, key = signature.collect_args()
return this[key]
class setitem(ExternalOpImplementation):
_transform_id = "std.SETITEM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="key",
annotation=Any,
),
SarusParameter(
name="value",
annotation=Any,
),
name=_transform_id,
)
def call(self, signature: SarusSignatureValue) -> Any:
this, key, value = signature.collect_args()
this[key] = value
return this
class greater_than(ExternalOpImplementation):
_transform_id = "std.GT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this > other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class greater_equal(ExternalOpImplementation):
_transform_id = "std.GE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this >= other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class lower_than(ExternalOpImplementation):
_transform_id = "std.LT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this < other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class lower_equal(ExternalOpImplementation):
_transform_id = "std.LE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this <= other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class not_equal(ExternalOpImplementation):
_transform_id = "std.NE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this != other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class equal(ExternalOpImplementation):
_transform_id = "std.EQ"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this == other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class neg(ExternalOpImplementation):
_transform_id = "std.NEG"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return -this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pos(ExternalOpImplementation):
_transform_id = "std.POS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return +this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class _abs(ExternalOpImplementation):
_transform_id = "std.ABS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return abs(this)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class _round(ExternalOpImplementation):
_transform_id = "std.ROUND"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return round(this)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class modulo(ExternalOpImplementation):
_transform_id = "std.MOD"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this % other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class rmodulo(ExternalOpImplementation):
_transform_id = "std.RMOD"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return other % this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class _or(ExternalOpImplementation):
_transform_id = "std.OR"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this | other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class ror(ExternalOpImplementation):
_transform_id = "std.ROR"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return other | this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class _and(ExternalOpImplementation):
_transform_id = "std.AND"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return this & other
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class rand(ExternalOpImplementation):
_transform_id = "std.RAND"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, other = signature.collect_args()
return other & this
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class _int(ExternalOpImplementation):
_transform_id = "std.INT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return int(this)
class _float(ExternalOpImplementation):
_transform_id = "std.FLOAT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return float(this)
class _list(ExternalOpImplementation):
_transform_id = "std.LIST"
_signature = SarusSignature(
SarusParameterArray(
name="elem",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
elems = signature.collect_args()
return list(elems)
class _dict(ExternalOpImplementation):
_transform_id = "std.DICT"
_signature = SarusSignature(
SarusParameterMapping(
name="elem",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
_, elems = signature.collect()
return dict(**elems)
class _slice(ExternalOpImplementation):
_transform_id = "std.SLICE"
_signature = SarusSignature(
SarusParameter(
name="start",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="stop",
annotation=int,
default=-1,
),
SarusParameter(
name="step",
annotation=Optional[int],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(start, stop, step) = signature.collect_args()
return slice(start, stop, step)
class _set(ExternalOpImplementation):
_transform_id = "std.SET"
_signature = SarusSignature(
SarusParameterArray(
name="elem",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
elems = signature.collect_args()
return set(elems)
class _tuple(ExternalOpImplementation):
_transform_id = "std.TUPLE"
_signature = SarusSignature(
SarusParameterArray(
name="elem",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
elems = signature.collect_args()
return tuple(elems)
class _string(ExternalOpImplementation):
_transform_id = "std.STRING"
_signature = SarusSignature(
SarusParameterArray(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return str(this)
class _bool(ExternalOpImplementation):
_transform_id = "std.BOOL"
_signature = SarusSignature(
SarusParameterArray(
name="this",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return bool(this)
class keys(ExternalOpImplementation):
_transform_id = "std.KEYS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Dict,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return list(this.keys())
class values(ExternalOpImplementation):
_transform_id = "std.VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Dict,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return list(this.values())
class sudo(ExternalOpImplementation):
_transform_id = "std.SUDO"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
name="sudo",
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this
class error(ExternalOpImplementation):
_transform_id = "std.ERROR"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Any,
),
SarusParameter(
name="epsilon",
annotation=float,
condition=STATIC,
predicate=lambda x: bool(x > 0),
),
SarusParameter(
name="delta",
annotation=float,
condition=STATIC,
predicate=lambda x: bool(0 < x < 1),
),
SarusParameter(
name="confidence_level",
annotation=float,
default=0.95,
condition=STATIC, # so that the parameter cannot be a DataSpec
predicate=lambda x: bool(0.5 < x < 1),
),
SarusParameter(
name="sample_size",
annotation=int,
default=10,
condition=STATIC,
predicate=lambda x: bool(x >= 1),
),
name="error",
)
def is_dp(self, signature: SarusBoundSignature) -> bool:
return True
    async def call_dp(self, signature: SarusBoundSignature) -> Any:
        """Estimate the error of a DP evaluation of `this`.
        Draws `sample_size` DP variants of the dataspec with the given
        (epsilon, delta) budget, compares their values with the true value and
        returns the empirical [1 - confidence_level, confidence_level]
        quantiles of the differences.
        """
dataspec = signature.static_kwargs()["this"]
assert dataspec.prototype() == sp.Scalar
(
true_value,
epsilon,
delta,
confidence_level,
sample_size,
) = await signature.collect_args()
privacy_limit = DeltaEpsilonLimit({delta: epsilon})
dp_values = []
for i in range(sample_size):
dp_dataspec = dataspec.variant(
kind=st.ConstraintKind.DP,
public_context=[],
privacy_limit=privacy_limit,
salt=np.random.randint(np.iinfo(np.int32).max),
)
if i == 0 and not dp_dataspec.is_dp():
raise DataSpecErrorStatus(
(
False,
"The dataspec could not be compiled in DP. Either "
"`error` is not whitelisted or the dataspec is not "
"PEP.",
)
)
dp_value = await dp_dataspec.async_value()
dp_values.append(dp_value)
interval = np.quantile(
np.array(dp_values) - true_value,
[1 - confidence_level, confidence_level],
)
return interval
class extend(ExternalOpImplementation):
_transform_id = "std.EXTEND"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=List,
),
SarusParameter(
name="other",
annotation=List,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, other) = signature.collect_args()
this.extend(other)
return this
class append(ExternalOpImplementation):
_transform_id = "std.APPEND"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=List,
),
SarusParameter(
name="other",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, other) = signature.collect_args()
this.append(other)
return this
class pop(ExternalOpImplementation):
_transform_id = "std.POP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=List,
),
SarusParameter(
name="index",
annotation=int,
default=-1,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, index) = signature.collect_args()
return this.pop(index)
class split(ExternalOpImplementation):
_transform_id = "std.SPLIT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="separator",
annotation=str,
default=" ",
),
SarusParameter(
name="maxsplit",
annotation=int,
default=-1,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, separator, maxsplit = signature.collect_args()
return this.split(separator, maxsplit)
class join(ExternalOpImplementation):
_transform_id = "std.JOIN"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="iterable",
annotation=Iterable,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, iterable = signature.collect_args()
return this.join(iterable)
class capitalize(ExternalOpImplementation):
_transform_id = "std.CAPITALIZE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.capitalize()
class casefold(ExternalOpImplementation):
_transform_id = "std.CASEFOLD"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.casefold()
class center(ExternalOpImplementation):
_transform_id = "std.CENTER"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="width",
annotation=int,
),
SarusParameter(name="fillchar", annotation=str, default=" "),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, width, fillchar) = signature.collect_args()
return this.center(width, fillchar)
class expandtabs(ExternalOpImplementation):
_transform_id = "std.EXPANDTABS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="tabsize",
annotation=int,
default=8,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, tabsize) = signature.collect_args()
return this.expandtabs(tabsize)
class lower(ExternalOpImplementation):
_transform_id = "std.LOWER"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.lower()
class upper(ExternalOpImplementation):
_transform_id = "std.UPPER"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.upper()
class lstrip(ExternalOpImplementation):
_transform_id = "std.LSTRIP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="chars",
annotation=str,
default=" ",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, chars) = signature.collect_args()
return this.lstrip(chars)
class rstrip(ExternalOpImplementation):
_transform_id = "std.RSTRIP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="chars",
annotation=str,
default=" ",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, chars) = signature.collect_args()
return this.rstrip(chars)
class strip(ExternalOpImplementation):
_transform_id = "std.STRIP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="chars",
annotation=str,
default=" ",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, chars) = signature.collect_args()
return this.strip(chars)
class replace(ExternalOpImplementation):
_transform_id = "std.REPLACE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
SarusParameter(
name="old",
annotation=str,
),
SarusParameter(
name="new",
annotation=str,
),
SarusParameter(
name="count",
annotation=int,
default=-1,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, old, new, count) = signature.collect_args()
return this.replace(old, new, count)
class splitlines(ExternalOpImplementation):
_transform_id = "std.SPLITLINES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.splitlines()
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/std.py