code
stringlengths 1
5.19M
| package
stringlengths 1
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
# -*- coding: utf-8 -*-
import pickle
from . import logger
from .types import FilePath
from .zc_debug_pickler import find_pickling_error
from .zc_safe_write import safe_read, safe_write
__all__ = ["safe_pickle_dump", "safe_pickle_load"]
debug_pickling = False
def safe_pickle_dump(
    value: object, filename: FilePath, protocol=3, **safe_write_options,  # pickle.HIGHEST_PROTOCOL,
) -> None:
    """Pickle ``value`` into ``filename`` through ``safe_write``.

    On any pickling failure the offending type is logged (and, when the
    module flag ``debug_pickling`` is set, a detailed diagnosis obtained via
    ``find_pickling_error`` is logged as well) before the exception is
    re-raised. KeyboardInterrupt is always propagated untouched.
    """
    # sys.setrecursionlimit(15000)
    with safe_write(filename, **safe_write_options) as f:
        try:
            pickle.dump(value, f, protocol)
        except KeyboardInterrupt:
            raise
        except BaseException:
            logger.error(f"Cannot pickle object of class {type(value)}.")
            if debug_pickling:
                logger.error(find_pickling_error(value, protocol))
            raise
def safe_pickle_load(filename: FilePath) -> object:
    """Load and return the pickled object stored in ``filename`` (read via ``safe_read``)."""
    # TODO: add debug check
    with safe_read(filename) as stream:
        return pickle.load(stream)
# TODO: add pickling debug
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/fs/zc_safe_pickling.py
|
zc_safe_pickling.py
|
# -*- coding: utf-8 -*-
import codecs
import os
from typing import cast
from zuper_commons.types import check_isinstance
from . import logger
from .types import FilePath
from .zc_friendly_path import friendly_path
from .zc_mkdirs import make_sure_dir_exists
from .zc_path_utils import expand_all
__all__ = [
"read_bytes_from_file",
"write_bytes_to_file",
"read_ustring_from_utf8_file",
"read_ustring_from_utf8_file_lenient",
"write_ustring_to_utf8_file",
]
def read_bytes_from_file(filename: FilePath) -> bytes:
    """Return the raw contents of ``filename`` as bytes (checks existence first)."""
    _check_exists(filename)
    with open(filename, "rb") as stream:
        contents = stream.read()
    return contents
def read_ustring_from_utf8_file(filename: FilePath) -> str:
    """Return the contents of ``filename`` decoded as UTF-8.

    Raises UnicodeError (chained to the underlying UnicodeDecodeError) when
    the file is not valid UTF-8.
    """
    _check_exists(filename)
    with codecs.open(filename, encoding="utf-8") as stream:
        try:
            return stream.read()
        except UnicodeDecodeError as e:
            raise UnicodeError(f"Could not successfully decode file {filename!r}") from e
def read_ustring_from_utf8_file_lenient(filename: FilePath) -> str:
    """ Ignores decoding errors """
    _check_exists(filename)
    with codecs.open(filename, encoding="utf-8", errors="ignore") as f:
        try:
            return f.read()
        except UnicodeDecodeError as e:
            # NOTE(review): with errors="ignore" the decoder should never raise
            # UnicodeDecodeError, so this handler is believed to be dead code;
            # kept for symmetry with read_ustring_from_utf8_file.
            msg = f"Could not successfully decode file {filename!r}"
            raise UnicodeError(msg) from e
def _check_exists(filename: FilePath) -> None:
    """Raise ValueError if ``filename`` does not exist.

    Distinguishes a dangling symlink (the link exists but its target does
    not: reports the target via os.readlink) from a plainly missing file
    (reports the current working directory to aid debugging).
    """
    if not os.path.exists(filename):
        if os.path.lexists(filename):
            # BUG FIX: the message was the placeholder-less f-string
            # "The link (unknown) does not exist." — it never named the path.
            msg = f"The link {filename!r} does not exist."
            msg += f" it links to {os.readlink(filename)}"
            raise ValueError(msg)
        else:
            msg = f"Could not find file {filename!r}"
            msg += f" from directory {os.getcwd()}"
            raise ValueError(msg)
def write_ustring_to_utf8_file(data: str, filename: FilePath, quiet: bool = False) -> None:
    """
    Encode ``data`` as UTF-8 and write it to ``filename``.

    It also creates the directory if it does not exist.
    :param data: text to write (must be str)
    :param filename: destination path
    :param quiet: suppress the "written" log message
    :return: None
    """
    check_isinstance(data, str)
    encoded = data.encode("utf-8")  # OK
    return write_bytes_to_file(encoded, filename, quiet=quiet)
def write_bytes_to_file(data: bytes, filename: FilePath, quiet: bool = False) -> None:
    """
    Writes the data to the given filename.
    If the data did not change, the file is not touched.
    """
    check_isinstance(data, bytes)
    L = len(filename)
    # Guard against swapped arguments: a "filename" longer than 1024 chars is
    # almost certainly the payload passed in the wrong position.
    if L > 1024:
        msg = f"Invalid argument filename: too long at {L}. Did you confuse it with data?\n{filename[:1024]}"
        raise ValueError(msg)
    filename = cast(FilePath, expand_all(filename))
    make_sure_dir_exists(filename)
    if os.path.exists(filename):
        with open(filename, "rb") as _:
            current = _.read()
        if current == data:
            # Content unchanged: skip the write entirely. The log line is
            # suppressed for paths containing "assets" (too noisy).
            if not "assets" in filename:
                if not quiet:
                    logger.debug("already up to date %s" % friendly_path(filename))
            return
    with open(filename, "wb") as f:
        f.write(data)
    # Writes under /tmp are treated as scratch output: never logged.
    if filename.startswith("/tmp"):
        quiet = True
    if not quiet:
        mbs = len(data) / (1024 * 1024)
        size = f"{mbs:.1f}MB"
        logger.debug(f"Written {size} to: {friendly_path(filename)}")
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/fs/zc_fileutils.py
|
zc_fileutils.py
|
from .. import logger
logger = logger.getChild("fs")
from .zc_dir_from_package_nam import *
from .zc_fileutils import *
from .zc_locate_files_imp import *
from .zc_mkdirs import *
from .zc_safe_pickling import *
from .zc_path_utils import *
from .zc_safe_write import *
from .zc_friendly_path import *
from .zc_debug_pickler import *
from .types import *
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/fs/__init__.py
|
__init__.py
|
from typing import Callable, Optional
import termcolor
import webcolors
from xtermcolor import colorize
__all__ = [
"color_blue",
"color_blue_light",
"color_brown",
"color_constant",
"color_float",
"color_green",
"color_int",
"color_ops",
"color_orange",
"color_orange_dark",
"color_par",
"color_pink",
"color_synthetic_types",
"color_pink2",
"color_typename",
"color_typename2",
"colorize_rgb",
"get_colorize_function",
]
from zuper_commons.types import ZValueError, ZAssertionError
# Hex palette used by the colorize helpers and the ready-made colorizers below.
color_orange = "#ffb342"
color_orange_dark = "#cfa342"
color_blue = "#42a0ff"
# color_blue_light = "#62c0ff"
color_blue_light = "#c2a0ff"
color_green = "#42ffa0"
color_pink = "#FF69B4"
color_pink2 = "#FF1493"
color_magenta_1 = "#a000a0"
color_brown = "#b08100"
def colorize_rgb(x: str, rgb: str, bg_color: Optional[str] = None) -> str:
    """Colorize string ``x`` with foreground ``rgb`` and optional background.

    Colors may be "#rrggbb" hex strings or CSS color names (normalized via
    ``interpret_color``). Raises ZAssertionError if ``rgb`` is empty/None,
    ZValueError if the underlying colorize call fails.
    """
    rgb = interpret_color(rgb)
    bg_color = interpret_color(bg_color)
    if rgb is None:
        msg = "We do not support rgb=None"
        raise ZAssertionError(msg, rgb=rgb, bg_color=bg_color)
    # CLEANUP: past this point rgb is a "#rrggbb" string; the original also
    # checked `rgb is None and bg_color is None` / `fg_int is None` here,
    # but those branches were unreachable after the raise above.
    fg_int = int(rgb[1:], 16)
    bg_int = int(bg_color[1:], 16) if bg_color is not None else None
    try:
        r = colorize(x, rgb=fg_int, bg=bg_int)
    except Exception as e:
        raise ZValueError(x=x, rgb=rgb, bg_color=bg_color) from e
    if r is None:
        raise NotImplementedError()
    return r
def interpret_color(x: Optional[str]) -> Optional[str]:
    """Normalize a color spec: falsy -> None, "#..." kept as-is, names -> hex."""
    if not x:
        return None
    return x if x.startswith('#') else webcolors.name_to_hex(x)
def get_colorize_function(rgb: str, bg_color: Optional[str] = None) -> Callable[[str], str]:
    """Precompute the ANSI prefix/suffix for (rgb, bg_color) and return a
    cheap closure that wraps any string in them."""
    marker = "template"
    colored = colorize_rgb(marker, rgb, bg_color)
    prefix, _, suffix = colored.partition(marker)

    def apply(s: str) -> str:
        return prefix + s + suffix

    return apply
# Ready-made colorizers for the semantic roles used by zuper pretty-printing
# (operators, synthetic types, numeric literals, type names, constants, ...).
color_ops = get_colorize_function(color_blue)
color_ops_light = get_colorize_function(color_blue_light)
color_synthetic_types = get_colorize_function(color_green)
color_int = get_colorize_function(color_pink)
color_float = get_colorize_function(color_pink2)
color_typename = get_colorize_function(color_orange)
color_typename2 = get_colorize_function(color_orange_dark)
color_constant = get_colorize_function(color_pink2)
color_magenta = get_colorize_function(color_magenta_1)
#
# def color_ops(x):
# return colorize_rgb(x, color_blue)
#
#
# def color_synthetic_types(x):
# return colorize_rgb(x, color_green)
#
#
# def color_int(x):
# return colorize_rgb(x, color_pink)
#
#
# def color_float(x):
# return colorize_rgb(x, color_pink2)
#
#
# def color_typename(x):
# return colorize_rgb(x, color_orange)
def color_par(x):
    """Render ``x`` with the "dark" (dim) terminal attribute."""
    return termcolor.colored(x, attrs=["dark"])
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/ui/colors.py
|
colors.py
|
from .. import logger
logger = logger.getChild("ui")
from .zc_duration_hum import *
from .colors import *
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/ui/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
import math
__all__ = ["duration_compact"]
def duration_compact(seconds: float) -> str:
    """Format a duration as a compact string like "2d 5h" or "1m 30s".

    The duration is rounded up to whole seconds. A unit is shown only while
    the next-larger unit is small (< 3) and the one above that is zero, so at
    most two adjacent units appear.
    """
    remaining = int(math.ceil(seconds))
    remaining, secs = divmod(remaining, 60)   # remaining now in minutes
    remaining, mins = divmod(remaining, 60)   # remaining now in hours
    remaining, hrs = divmod(remaining, 24)    # remaining now in days
    yrs, dys = divmod(remaining, 365)

    parts = []
    if yrs > 0:
        parts.append("%dy" % yrs)
    else:
        if dys > 0:
            parts.append("%dd" % dys)
        if dys < 3 and yrs == 0:
            if hrs > 0:
                parts.append("%dh" % hrs)
            if hrs < 3 and dys == 0:
                if mins > 0:
                    parts.append("%dm" % mins)
                if mins < 3 and hrs == 0:
                    if secs > 0:
                        parts.append("%ds" % secs)
    return " ".join(parts)
#
# def duration_human(seconds):
# ''' Code modified from
# http://darklaunch.com/2009/10/06
# /python-time-duration-human-friendly-timestamp
# '''
# seconds = int(math.ceil(seconds))
# minutes, seconds = divmod(seconds, 60)
# hours, minutes = divmod(minutes, 60)
# days, hours = divmod(hours, 24)
# years, days = divmod(days, 365.242199)
#
# minutes = int(minutes)
# hours = int(hours)
# days = int(days)
# years = int(years)
#
# duration = []
# if years > 0:
# duration.append('%d year' % years + 's' * (years != 1))
# else:
# if days > 0:
# duration.append('%d day' % days + 's' * (days != 1))
# if (days < 3) and (years == 0):
# if hours > 0:
# duration.append('%d hour' % hours + 's' * (hours != 1))
# if (hours < 3) and (days == 0):
# if minutes > 0:
# duration.append('%d min' % minutes +
# 's' * (minutes != 1))
# if (minutes < 3) and (hours == 0):
# if seconds > 0:
# duration.append('%d sec' % seconds +
# 's' * (seconds != 1))
#
# return ' '.join(duration)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/ui/zc_duration_hum.py
|
zc_duration_hum.py
|
from datetime import datetime
import pytz
__all__ = ["now_utc"]
def now_utc():
    """Return the current time as a timezone-aware datetime in UTC."""
    # stdlib timezone.utc replaces the third-party pytz.utc: both yield an
    # aware UTC datetime with zero offset, so callers are unaffected.
    from datetime import timezone

    return datetime.now(tz=timezone.utc)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/timing/dates.py
|
dates.py
|
import time
from contextlib import contextmanager
from logging import Logger
from typing import Callable, Optional, Union
from zuper_commons.logs import ZLogger
__all__ = ["timeit_clock", "timeit_wall"]
class Stack:
    # Class-level (shared) stack of the descriptions of currently-open
    # timeit contexts; its depth drives the indentation of nested messages.
    stack = []
@contextmanager
def timeit_generic(
    desc: str, minimum: Optional[float], time_function: Callable[[], float], logger: Union[Logger, ZLogger],
):
    """Context manager: time the enclosed block with ``time_function`` and log it.

    :param desc: human-readable description of the timed block
    :param minimum: if given, only log when the elapsed time exceeds it (seconds)
    :param time_function: clock to sample (e.g. time.time or thread_time)
    :param logger: destination; a ZLogger additionally gets a stacklevel hint
    """
    # logger.debug('timeit %s ...' % desc)
    t0 = time_function()
    try:
        Stack.stack.append(desc)
        yield
    finally:
        Stack.stack.pop()
    t1 = time_function()
    delta = t1 - t0
    if minimum is not None:
        if delta < minimum:
            return
    show_timeit_benchmarks = True
    if show_timeit_benchmarks or (minimum is not None):
        # indent the message proportionally to the nesting depth
        pre = " " * len(Stack.stack)
        msg = "timeit_clock: %s %6.2f ms for %s" % (pre, delta * 1000, desc)
        # t0 = time_function()
        if isinstance(logger, ZLogger):
            logger.info(msg, stacklevel=4)
        else:
            logger.info(msg)
        # t1 = time_function()
        # delta = t1 - t0
# Prefer time.thread_time (Python >= 3.7) as the per-thread CPU clock;
# fall back to the deprecated time.clock on older interpreters.
try:
    from time import thread_time as measure_thread_time
except ImportError:
    from time import clock as measure_thread_time
@contextmanager
def timeit_clock(desc: Optional[str], logger: Logger, minimum: Optional[float] = None):
    # Time the enclosed block using per-thread CPU time (measure_thread_time).
    with timeit_generic(desc=desc, minimum=minimum, time_function=measure_thread_time, logger=logger):
        yield
@contextmanager
def timeit_wall(desc: Optional[str], logger: Logger, minimum: Optional[float] = None):
    # Time the enclosed block using wall-clock time (time.time).
    with timeit_generic(desc=desc, minimum=minimum, time_function=time.time, logger=logger):
        yield
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/timing/timeit_.py
|
timeit_.py
|
from .timeit_ import *
from .dates import *
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/timing/__init__.py
|
__init__.py
|
__all__ = ["describe_type"]
def describe_type(x: object) -> str:
    """ Returns a friendly description of the type of x. """
    if hasattr(x, "__class__"):
        c = x.__class__
        # BUG FIX: the original tested hasattr(x, "__name__") — the instance,
        # not its class — so ordinary instances always fell through to
        # str(c) ("<class 'int'>") instead of the friendly name ("int").
        if hasattr(c, "__name__"):
            class_name = "%s" % c.__name__
        else:
            class_name = str(c)
    else:
        # for extension classes (spmatrix)
        class_name = str(type(x))
    return class_name
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/types/zc_describe_type.py
|
zc_describe_type.py
|
from typing import NoReturn, Tuple, Type, Union
from .exceptions import ZValueError
from zuper_commons.text import indent, pretty_msg
__all__ = ["check_isinstance", "raise_wrapped", "raise_desc"]
def check_isinstance(ob: object, expected: Union[type, Tuple[type, ...]], **kwargs: object) -> None:
    """Assert-like isinstance check; delegates to raise_type_mismatch on failure."""
    if isinstance(ob, expected):
        return
    kwargs["object"] = ob
    raise_type_mismatch(ob, expected, **kwargs)
def raise_type_mismatch(ob: object, expected: type, **kwargs: object) -> NoReturn:
    """ Raises an exception concerning ob having the wrong type. """
    # The expected/obtained details travel as structured ZValueError payload
    # rather than being formatted into the message string.
    msg = "Object not of expected type:"
    raise ZValueError(msg, expected=expected, obtained=type(ob), **kwargs)
def raise_desc(etype: Type[BaseException], msg: str, args_first: bool = False, **kwargs: object) -> NoReturn:
    """
    Raise ``etype`` with ``msg`` plus a pretty-printed rendering of ``kwargs``.

    Example:
        raise_desc(ValueError, "I don't know", a=a, b=b)
    """
    assert isinstance(msg, str), type(msg)
    rendered = pretty_msg("", **kwargs) if kwargs else ""
    pieces = (rendered, msg) if args_first else (msg, rendered)
    raise etype("\n".join(pieces))
def raise_wrapped(
    etype: Type[BaseException], e: BaseException, msg: str, compact: bool = False, **kwargs: object
) -> NoReturn:
    # Raise a new etype chained ("from e") to the original exception; when
    # compact, the original's text is also inlined, indented with "| ".
    s = pretty_msg(msg, **kwargs)
    if compact:
        s += "\n" + indent(str(e), "| ")
    raise etype(s) from e
    # if not compact:
    #     raise etype(s) from e
    # else:
    #     e2 = etype(s)
    #     raise e2 from e
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/types/zc_checks.py
|
zc_checks.py
|
from .. import logger
logger = logger.getChild("types")
from .zc_checks import *
from .zc_describe_type import *
from .zc_describe_values import *
from .exceptions import *
from .recsize import *
from .zc_import import *
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/types/__init__.py
|
__init__.py
|
def describe_value(x: object, clip: int = 80) -> str:
    """ Describes an object, for use in the error messages.
    Short description, no multiline.
    """
    # Duck-typed detection of numpy-like arrays (shape + dtype attributes).
    if hasattr(x, "shape") and hasattr(x, "dtype"):
        shape_desc = "x".join(str(i) for i in x.shape)
        desc = f"array[{shape_desc!r}]({x.dtype}) "
    else:
        from .zc_describe_type import describe_type

        desc = f"Instance of {describe_type(x)}: "
    final = desc + clipped_repr(x, clip - len(desc))
    return remove_newlines(final)
def clipped_repr(x: object, clip: int) -> str:
    """repr(x), truncated to at most ``clip`` characters with a "... [clip]" marker."""
    full = repr(x)
    if len(full) <= clip:
        return full
    tag = "... [clip]"
    return full[: clip - len(tag)] + tag
def remove_newlines(s: str) -> str:
    """Collapse a multi-line string onto one line (each newline becomes a space)."""
    return " ".join(s.split("\n"))
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/types/zc_describe_values.py
|
zc_describe_values.py
|
import os
from typing import Callable, ClassVar, Dict, Optional
__all__ = [
"ZException",
"ZValueError",
"ZTypeError",
"ZAssertionError",
"ZNotImplementedError",
"ZKeyError",
]
# Exceptions that the formatting code must never swallow: always re-raised.
PASS_THROUGH = (KeyboardInterrupt,)
class ZException(Exception):
    """Base exception carrying a message plus named "info" payload entries.

    The string form is rendered lazily by __str__ and cached in ``self.st``.
    """

    msg: Optional[str] = None
    info: Optional[Dict[str, object]] = None
    # Hook used to render each info value; applications may override it
    # class-wide (e.g. with a richer pretty-printer than repr).
    entries_formatter: ClassVar[Callable[[object], str]] = repr

    def __init__(self, msg: Optional[str] = None, **info: object):
        # st caches the rendered string; computed on first __str__ call
        self.st = None
        assert isinstance(msg, (str, type(None))), msg
        self.msg = msg
        self.info = info
        # self.__str__()

    def __str__(self) -> str:
        if self.st is None:
            try:
                self.st = self.get_str()
            except PASS_THROUGH:  # pragma: no cover
                raise
            #
            # except BaseException as e:
            #     self.st = f"!!! could not print: {e}"
        return self.st

    def get_str(self) -> str:
        # Render msg + formatted info entries; a broken repr of a value must
        # never escape — it is replaced with a "cannot print" placeholder.
        entries = {}
        for k, v in self.info.items():
            try:
                # noinspection PyCallByClass
                entries[k] = ZException.entries_formatter(v)
            except Exception as e:
                try:
                    entries[k] = f"!!! cannot print: {e}"
                except:
                    entries[k] = f"!!! cannot print, and cannot print exception."
        if not self.msg:
            self.msg = "\n"
        from zuper_commons.text import pretty_dict

        if len(entries) == 1:
            # single entry: simple "name:\npayload" layout, no table
            first = list(entries)[0]
            payload = entries[first]
            s = self.msg + f'\n{first}:\n{payload}'
        elif entries:
            s = pretty_dict(self.msg, entries)
        else:
            s = self.msg
        s = sanitize_circle_ci(s)
        return s

    def __repr__(self) -> str:
        return self.__str__()
def disable_colored() -> bool:
    """True when running under CircleCI (CIRCLE_JOB env var is set): disable colors."""
    return os.environ.get("CIRCLE_JOB", None) is not None
def sanitize_circle_ci(s: str) -> str:
    """On CircleCI, strip ANSI escapes and glyphs that confuse its log viewer;
    elsewhere return ``s`` unchanged."""
    if not disable_colored():
        return s
    from zuper_commons.text.coloring import remove_escapes

    s = remove_escapes(s)
    for c in ["┋"]:
        s = s.replace(c, "")
    return s
# Variants of ZException that also subclass the corresponding builtin
# exception, so callers can catch either the Z* type or the builtin.
class ZTypeError(ZException, TypeError):
    pass


class ZValueError(ZException, ValueError):
    pass


class ZKeyError(ZException, KeyError):
    pass


class ZAssertionError(ZException, AssertionError):
    pass


class ZNotImplementedError(ZException, NotImplementedError):
    pass
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/types/exceptions.py
|
exceptions.py
|
# -*- coding: utf-8 -*-
import traceback
from zuper_commons.text import indent
__all__ = ["import_name"]
def import_name(name: str) -> object:
    """
    Loads the python object with the given name.

    Note that "name" might be "module.module.name" as well.
    """
    try:
        # First attempt: treat the whole `name` as an importable module path.
        return __import__(name, fromlist=["dummy"])
    except ImportError:
        # split in (module, name) if we can
        if "." in name:
            tokens = name.split(".")
            field = tokens[-1]
            module_name = ".".join(tokens[:-1])
            if False:  # previous method
                pass
                # try:
                #     module = __import__(module_name, fromlist=['dummy'])
                # except ImportError as e:
                #     msg = ('Cannot load %r (tried also with %r):\n' %
                #            (name, module_name))
                #     msg += '\n' + indent(
                #         '%s\n%s' % (e, traceback.format_exc(e)), '> ')
                #     raise ValueError(msg)
                #
                # if not field in module.__dict__:
                #     msg = 'No field %r\n' % field
                #     msg += ' found in %r.' % module
                #     raise ValueError(msg)
                #
                # return module.__dict__[field]
            else:
                # other method, don't assume that in "M.x", "M" is a module.
                # It could be a class as well, and "x" be a staticmethod.
                try:
                    # recurse: resolve the prefix, then extract the last field
                    module = import_name(module_name)
                except ImportError as e:
                    msg = "Cannot load %r (tried also with %r):\n" % (name, module_name)
                    msg += "\n" + indent("%s\n%s" % (e, traceback.format_exc()), "> ")
                    raise ValueError(msg) from None
                if isinstance(module, type):
                    # the prefix resolved to a class: use attribute lookup
                    if hasattr(module, field):
                        return getattr(module, field)
                    else:
                        msg = f"No field {field!r} found in type {module!r}."
                        raise KeyError(msg) from None
                if not field in module.__dict__:
                    msg = f"No field {field!r} found in module {module!r}."
                    msg += f'Known: {sorted(module.__dict__)} '
                    raise KeyError(msg) from None
                f = module.__dict__[field]
                # "staticmethod" are not functions but descriptors, we need
                # extra magic
                if isinstance(f, staticmethod):
                    return f.__get__(module, None)
                else:
                    return f
        else:
            # No dot: nothing more to try; report the original import failure.
            msg = "Cannot import name %r." % name
            msg += indent(traceback.format_exc(), "> ")
            raise ValueError(msg)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/types/zc_import.py
|
zc_import.py
|
import sys
from collections import defaultdict
from dataclasses import dataclass
from decimal import Decimal
from typing import Dict, Set, Tuple
__all__ = ["get_rec_size"]
from zuper_commons.types import ZException
@dataclass
class RSize:
    # Per-type accumulator populated by _get_rec_size during traversal.
    nbytes: int = 0  # total shallow bytes over all instances seen
    nobjects: int = 0  # number of instances seen
    max_size: int = 0  # shallow size of the single largest instance
    largest: object = -1  # the largest instance itself (-1 = none yet)
    largest_prefix: Tuple[str, ...] = ()  # traversal path where it was found
@dataclass
class RecSize:
    # Mapping from type (each concrete type plus its bases, so `object`
    # accumulates the grand total) to its RSize statistics.
    sizes: Dict[type, RSize]
def friendly_mb(s: int) -> str:
    """Format a byte count as megabytes with 3 decimals, e.g. "1.000 MB"."""
    quantized = (Decimal(s) / Decimal(1024 * 1024)).quantize(Decimal(".001"))
    return f"{quantized} MB"
def friendly_kb(s: int) -> str:
    """Format a byte count as kilobytes with 3 decimals, e.g. "1.000 KB"."""
    quantized = (Decimal(s) / Decimal(1024)).quantize(Decimal(".001"))
    return f"{quantized} KB"
def visualize(rs: RecSize, percentile=0.95, min_rows: int = 5) -> str:
    """Render a table of per-type memory usage, biggest consumers first.

    Rows are emitted until ``percentile`` of the total bytes is covered (but
    at least ``min_rows`` rows). For str/bytes rows, also shows where the
    largest instance was found and a 100-char preview of it.
    """
    types = list(rs.sizes)
    sizes = list(rs.sizes.values())
    indices = list(range(len(types)))

    # order the indices by size; the `object` entry (the grand total) is
    # forced to sort last so it doesn't dominate the listing
    def key(i: int) -> int:
        # BUG FIX: the original tested `sizes[i] is object` — comparing the
        # RSize stats instance, which is never `object` — so the total row
        # sorted by its (maximal) byte count and always appeared first.
        if types[i] is object:
            return -1
        return sizes[i].nbytes

    indices = sorted(indices, key=key, reverse=True)
    tot_bytes = rs.sizes[object].nbytes
    stop_at = percentile * tot_bytes
    cells = {}
    row = 0
    so_far = 0
    # header row followed by a separator row
    cells[(row, 0)] = "type"
    cells[(row, 1)] = "# objects"
    cells[(row, 2)] = "bytes"
    cells[(row, 3)] = "max size of 1 ob"
    row += 1
    cells[(row, 0)] = "-"
    cells[(row, 1)] = "-"
    cells[(row, 2)] = "-"
    cells[(row, 3)] = "-"
    row += 1
    for j, i in enumerate(indices):
        Ti = types[i]
        rsi = sizes[i]
        db = ZException.entries_formatter
        cells[(row, 0)] = db(Ti)
        cells[(row, 1)] = db(rsi.nobjects)
        # cells[(row, 2)] = db(rsi.nbytes)
        cells[(row, 2)] = friendly_mb(rsi.nbytes)
        cells[(row, 3)] = friendly_kb(rsi.max_size)
        if Ti in (bytes, str):
            cells[(row, 4)] = "/".join(rsi.largest_prefix)
            cells[(row, 5)] = db(rsi.largest)[:100]
        row += 1
        # don't double-count the `object` total when tracking coverage
        if Ti is not object:
            so_far += rsi.nbytes
        if j > min_rows and so_far > stop_at:
            break
    from zuper_commons.text import format_table, Style

    align_right = Style(halign="right")
    col_style: Dict[int, Style] = {2: align_right, 3: align_right}
    res = format_table(cells, style="spaces", draw_grid_v=False, col_style=col_style)
    return res
def get_rec_size(ob: object) -> RecSize:
    """Recursively finds size of objects. Traverses mappings and iterables. """
    rec_size = RecSize(defaultdict(RSize))
    _get_rec_size(rec_size, ob, set(), ())
    return rec_size
def _get_rec_size(rs: RecSize, obj: object, seen: Set[int], prefix: Tuple[str, ...]) -> None:
    """Accumulate obj's shallow size into rs — under its concrete type and
    each direct base class — then recurse into its contents.

    ``seen`` holds ids of already-visited objects; ``prefix`` is the
    traversal path used to label the largest instance per type.
    """
    size = sys.getsizeof(obj)
    obj_id = id(obj)
    if obj_id in seen:
        return
    # Important mark as seen *before* entering recursion to gracefully handle
    # self-referential objects
    seen.add(obj_id)
    T = type(obj)
    bases = T.__bases__
    Ks = bases + (T,)
    for K in Ks:
        _ = rs.sizes[K]
        _.nbytes += size
        _.nobjects += 1
        if size > _.max_size:
            _.max_size = size
            _.largest = obj
            _.largest_prefix = prefix

    def rec(x: object, p: Tuple[str, ...]):
        _get_rec_size(rs, x, seen, p)

    # Traverse contents: dict keys/values, then instance __dict__, then
    # generic iterables (excluding string-like types, which are leaves).
    if isinstance(obj, dict):
        for i, (k, v) in enumerate(obj.items()):
            rec(k, p=prefix + (f"key{i}",))
            rec(v, p=prefix + (f"val{i}",))
    elif hasattr(obj, "__dict__"):
        for k, v in obj.__dict__.items():
            rec(v, p=prefix + (k,))
    elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
        # noinspection PyTypeChecker
        for i, v in enumerate(obj):
            rec(v, p=prefix + (str(i),))
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/types/recsize.py
|
recsize.py
|
from functools import wraps
from typing import Tuple
from unittest import SkipTest
from nose.plugins.attrib import attr
from ..types import ZValueError, ZAssertionError
__all__ = ["known_failure", "relies_on_missing_features", "my_assert_equal"]
def known_failure(f, forbid: Tuple[type, ...] = ()):  # pragma: no cover
    """Decorator: the test is expected to fail; skip when it does, error when
    it unexpectedly passes.

    If ``forbid`` is given and the failure is one of those exception types,
    that is itself an error (the failure mode is not supposed to occur).
    """

    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except BaseException as e:
            if forbid and isinstance(e, forbid):
                msg = f"Known failure test is not supposed to raise {type(e).__name__}"
                raise AssertionError(msg) from e
            raise SkipTest("Known failure test failed: " + str(e))
        raise AssertionError("test passed but marked as work in progress")

    return attr("known_failure")(run_test)
def relies_on_missing_features(f):
    """Decorator: mark a test as depending on a not-yet-implemented feature;
    it is skipped when it fails and errors when it unexpectedly passes."""
    msg = "Test relying on not implemented feature."

    @wraps(f)
    def run_test(*args, **kwargs):  # pragma: no cover
        try:
            f(*args, **kwargs)
        except BaseException as e:
            raise SkipTest(msg) from e
        else:
            raise AssertionError("test passed but marked as work in progress")

    return attr("relies_on_missing_features")(run_test)
def my_assert_equal(a, b, msg=None):
    """Equality assertion raising ZValueError with both values attached."""
    if a == b:
        return
    m = "Not equal" if msg is None else "Not equal: " + msg
    raise ZValueError(m, a=a, b=b)
def assert_isinstance(a, C):
    """Raise ZAssertionError (with rich context) unless isinstance(a, C)."""
    if isinstance(a, C):
        return
    raise ZAssertionError(  # pragma: no cover
        "not isinstance", a=a, type_a=type(a), type_type_a=type(type(a)), C=C, type_C=type(C),
    )
def assert_issubclass(A, C):
    """Raise ZAssertionError (with rich context) unless issubclass(A, C)."""
    if issubclass(A, C):
        return
    raise ZAssertionError("not issubclass", A=A, C=C, type_A=type(A), type_C=type(C))  # pragma: no cover
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/test_utils/decorators.py
|
decorators.py
|
from .decorators import *
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/test_utils/__init__.py
|
__init__.py
|
import io
import logging
import traceback
from logging import currentframe
from os.path import normcase
__all__ = ["monkeypatch_findCaller"]
def monkeypatch_findCaller():
    """Replace logging.Logger.findCaller so that frames belonging to this
    wrapper module, the logging package itself, or any file whose path
    contains "zlogger" are skipped when attributing a record to a source
    location.

    NOTE(review): the replacement signature (self, stack_info=False) matches
    older Pythons; newer interpreters also pass ``stacklevel`` — confirm
    against the target Python version.
    """
    # Normalize this file's own path so it can be compared with the
    # (normcase'd) filenames found on stack frames.
    if __file__.lower()[-4:] in [".pyc", ".pyo"]:
        _wrapper_srcfile = __file__.lower()[:-4] + ".py"
    else:
        _wrapper_srcfile = __file__
    _wrapper_srcfile = normcase(_wrapper_srcfile)

    def findCaller(self, stack_info=False):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        # On some versions of IronPython, currentframe() returns None if
        # IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)", None
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = normcase(co.co_filename)
            # print(filename)
            # Skip frames that belong to the logging machinery itself.
            if filename == _wrapper_srcfile or filename == logging._srcfile or "zlogger" in filename:
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                # capture a textual stack trace, trimming the trailing newline
                sio = io.StringIO()
                sio.write("Stack (most recent call last):\n")
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == "\n":
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        # print(rv)
        return rv

    logging.Logger.findCaller = findCaller
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/logs/hacks.py
|
hacks.py
|
# coding=utf-8
import logging
from typing import cast
import termcolor
__all__ = ["setup_logging_color", "setup_logging_format", "setup_logging"]
def get_FORMAT_datefmt():
    """Return the (format, datefmt) pair for logging: a dimmed location
    header line followed by the message on its own line."""
    pre = "%(asctime)s|%(name)s|%(filename)s:%(lineno)s|%(funcName)s:"
    pre = termcolor.colored(pre, attrs=["dark"])
    return pre + "\n" + "%(message)s", "%H:%M:%S"
def setup_logging_format():
    """Install the zuper log format on the root logger and on any
    StreamHandlers it already has."""
    from logging import Logger, StreamHandler, Formatter
    import logging

    FORMAT, datefmt = get_FORMAT_datefmt()
    logging.basicConfig(format=FORMAT, datefmt=datefmt)
    # noinspection PyUnresolvedReferences
    root = cast(Logger, Logger.root)
    if root.handlers:
        # retrofit the format onto handlers created before this call
        for handler in root.handlers:
            if isinstance(handler, StreamHandler):
                formatter = Formatter(FORMAT, datefmt=datefmt)
                handler.setFormatter(formatter)
    else:
        # NOTE(review): basicConfig was already called above, so this branch
        # appears redundant; kept as-is.
        logging.basicConfig(format=FORMAT, datefmt=datefmt)
def add_coloring_to_emit_ansi(fn):
    """Wrap a StreamHandler.emit function so each record's message lines are
    prefixed with a colored one-character "gutter" chosen by log level.

    The returned wrapper is named "new"; setup_logging_color uses that name
    to detect that the patch is already installed.
    """
    # add methods we need to the class
    from zuper_commons.text import get_length_on_screen
    from zuper_commons.ui.colors import colorize_rgb, get_colorize_function

    RED = "#ff0000"
    GREEN = "#00ff00"
    LGREEN = "#77ff77"
    PINK = "#FFC0CB"
    YELLOW = "#FFFF00"
    # level-name -> function that colorizes a full line of text
    colorizers = {
        "red": get_colorize_function(RED),
        "green": get_colorize_function(LGREEN),
        "pink": get_colorize_function(PINK),
        "yellow": get_colorize_function(YELLOW),
        "normal": (lambda x: x),
    }
    # level-name -> one-character colored gutter prefix
    prefixes = {
        "red": colorize_rgb(" ", "#000000", bg_color=RED),
        "green": colorize_rgb(" ", "#000000", bg_color=LGREEN),
        "pink": colorize_rgb(" ", "#000000", bg_color=PINK),
        "yellow": colorize_rgb(" ", "#000000", bg_color=YELLOW),
        "normal": " ",
    }

    def new(*args):
        # args is (handler_self, record); map the record's numeric level to
        # a color name. NOTE(review): `color` is computed but never used.
        levelno = args[1].levelno
        if levelno >= 50:
            ncolor = "red"
            color = "\x1b[31m"  # red
        elif levelno >= 40:
            ncolor = "red"
            color = "\x1b[31m"  # red
        elif levelno >= 30:
            ncolor = "yellow"
            color = "\x1b[33m"  # yellow
        elif levelno >= 20:
            ncolor = "green"
            color = "\x1b[32m"  # green
        elif levelno >= 10:
            ncolor = "pink"
            color = "\x1b[35m"  # pink
        else:
            ncolor = "normal"
            color = "\x1b[0m"  # normal
        msg = str(args[1].msg)
        lines = msg.split("\n")
        # A line whose on-screen length differs from len() already contains
        # ANSI escapes, i.e. the message is pre-colored.
        any_color_inside = any(get_length_on_screen(l) != len(l) for l in lines)
        #
        # if any_color_inside:
        #     print(msg.__repr__())
        do_color_lines_inside = False

        def color_line(l):
            if do_color_lines_inside and not any_color_inside:
                return prefixes[ncolor] + " " + colorizers[ncolor](l)
            else:
                return prefixes[ncolor] + " " + l
            # return "%s%s%s" % (color, levelno, "\x1b[0m") + ' ' + l  # normal

        lines = list(map(color_line, lines))
        msg = "\n".join(lines)
        # if len(lines) > 1:
        #     msg = "\n" + msg
        args[1].msg = msg
        return fn(*args)

    return new
def setup_logging_color() -> None:
    """Wrap StreamHandler.emit so records get level-based colors (skipped on
    Windows). Idempotent: does nothing if the wrapper is already installed."""
    import platform

    if platform.system() == "Windows":
        return
    current_emit = logging.StreamHandler.emit
    if getattr(current_emit, "__name__", "") != "new":
        # print(f'now changing {logging.StreamHandler.emit} -> {emit2}')
        logging.StreamHandler.emit = add_coloring_to_emit_ansi(current_emit)
def setup_logging() -> None:
    # One-shot logging setup: install level colors, then the zuper format.
    # logging.basicConfig()
    setup_logging_color()
    setup_logging_format()
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/logs/col_logging.py
|
col_logging.py
|
import inspect
import logging
from abc import ABC, abstractmethod
from typing import Dict, Union
import termcolor
__all__ = ["ZLogger", "ZLoggerInterface"]
class ZLoggerInterface(ABC):
    """Interface of zuper loggers: each level method accepts a message,
    positional values and keyword payload entries (rendered by the
    implementation) plus a ``stacklevel`` hint for caller attribution."""

    @abstractmethod
    def info(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
        ...

    @abstractmethod
    def debug(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
        ...

    @abstractmethod
    def warn(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs) -> None:
        ...

    @abstractmethod
    def warning(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs) -> None:
        ...

    @abstractmethod
    def error(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
        ...

    @abstractmethod
    def getChild(self, child_name: str) -> "ZLoggerInterface":
        ...
class ZLogger(ZLoggerInterface):
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
logger: logging.Logger
debug_print = str
def __init__(self, logger: Union[str, logging.Logger]):
if isinstance(logger, str):
logger = logger.replace("zuper_", "")
logger = logging.getLogger(logger)
logger.setLevel(logging.DEBUG)
self.logger = logger
else:
self.logger = logger
from zuper_commons.text import pretty_dict
self.pretty_dict = pretty_dict
self.debug_print = None
# monkeypatch_findCaller()
def info(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
level = logging.INFO
return self._log(level=level, msg=_msg, args=args, stacklevel=stacklevel, kwargs=kwargs)
def debug(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
level = logging.DEBUG
return self._log(level=level, msg=_msg, args=args, stacklevel=stacklevel, kwargs=kwargs)
def warn(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs) -> None:
level = logging.WARN
return self._log(level=level, msg=_msg, args=args, stacklevel=stacklevel, kwargs=kwargs)
def warning(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs) -> None:
level = logging.WARN
return self._log(level=level, msg=_msg, args=args, stacklevel=stacklevel, kwargs=kwargs)
def error(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
level = logging.ERROR
return self._log(level=level, msg=_msg, args=args, stacklevel=stacklevel, kwargs=kwargs)
def _log(self, level: int, msg: str, args, stacklevel: int, kwargs: Dict[str, object]):
    """
    Common implementation behind info/debug/warn/error.

    Builds a log record at *level*: the caller's frame is located via
    inspect.stack() (offset by *stacklevel*), positional/keyword extras are
    pretty-printed and appended to *msg*, and the record is handed to the
    wrapped logging.Logger.

    Returns the emitted LogRecord, or None if *level* is not enabled.
    """
    # Lazily resolve the pretty-printer; fall back to str() if zuper_typing
    # is not installed.
    if self.debug_print is None:
        try:
            # noinspection PyUnresolvedReferences
            from zuper_typing import debug_print

            self.debug_print = debug_print
        except ImportError:
            self.debug_print = str
    if not self.logger.isEnabledFor(level):
        return
    do_inspect = True
    if do_inspect:
        try:
            # Frame 0 is this method, frame 1 is the public wrapper
            # (info/debug/...); the caller is 2 frames up plus the
            # user-requested offset.
            stacklevel += 2
            stack = inspect.stack()
            frame = stack[stacklevel]
            pathname = frame.filename
            lineno = frame.lineno
            funcname = str(frame.function)
            frame_locals = frame[0].f_locals  # renamed: `locals` shadowed the builtin
        except KeyboardInterrupt:
            raise
        except BaseException:
            # Best effort: inspection can fail (e.g. exotic interpreters);
            # fall back to placeholder location info.
            frame_locals = {}
            funcname = "!!!could not inspect()!!!"
            pathname = "!!!"
            lineno = -1
        if "self" in frame_locals:
            # Prefix the method name with the caller's class name.
            typename = frame_locals["self"].__class__.__name__
            funcname = typename + ":" + funcname
    else:
        frame_locals = {}
        funcname = "n/a"
        pathname = "n/a"
        lineno = -1
    res = {}

    def lab(x):
        return x
        # return termcolor.colored(x, attrs=["dark"])

    # Name each positional extra after the caller's local variable that holds
    # the same object (identity match), else after its position.
    for i, a in enumerate(args):
        for k, v in frame_locals.items():
            if a is v:
                use = k
                break
        else:
            use = str(i)
        # FIX: use the instance attribute set above; the original called
        # ZLogger.debug_print, bypassing the lazily-resolved printer.
        res[lab(use)] = self.debug_print(a)
    for k, v in kwargs.items():
        res[lab(k)] = self.debug_print(v)
    if res:
        s = self.pretty_dict(msg, res, leftmargin="  ")
    else:
        s = msg
    record = self.logger.makeRecord(
        self.logger.name,
        level,
        pathname,
        lineno,
        s,
        (),
        exc_info=None,
        func=funcname,
        extra=None,
        sinfo=None,
    )
    self.logger.handle(record)
    return record
def getChild(self, child_name: str) -> "ZLogger":
    """Return a ZLogger wrapping the correspondingly-named child of the underlying logger."""
    return ZLogger(self.logger.getChild(child_name))
def setLevel(self, level: int) -> None:
    """Set the threshold of the underlying logging.Logger to *level*."""
    self.logger.setLevel(level)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/logs/zlogger.py
|
zlogger.py
|
from typing import Optional
from zuper_commons.logs.zlogger import ZLoggerInterface
class ZLoggerStore(ZLoggerInterface):
    """
    A logger that records every call (message, extras, timestamp) in
    ``self.records`` and optionally forwards it to an underlying logger ``up``.
    """

    def __init__(self, up: Optional[ZLoggerInterface]):
        self.up = up          # optional logger to forward calls to
        self.records = []     # list of dicts: msg, args, kwargs, t, [record]
        self.record = True    # set False to stop accumulating records

    def _capture(self, method_name: str, _msg, args, stacklevel: int, kwargs) -> None:
        """Record one logging call and forward it to ``self.up.<method_name>``.

        Shared by all level methods below (they were five identical copies).
        """
        from zuper_commons.timing import now_utc

        record = {"msg": _msg, "args": list(args), "kwargs": kwargs, "t": now_utc()}
        if self.up:
            # Pass _msg positionally: the original `f(_msg=_msg, *args, ...)`
            # raised TypeError ("multiple values for argument '_msg'")
            # whenever args was non-empty.
            r = getattr(self.up, method_name)(_msg, *args, stacklevel=stacklevel + 1, **kwargs)
            record["record"] = r
        if self.record:
            self.records.append(record)

    def info(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
        self._capture("info", _msg, args, stacklevel, kwargs)

    def debug(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
        self._capture("debug", _msg, args, stacklevel, kwargs)

    def warn(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs) -> None:
        self._capture("warn", _msg, args, stacklevel, kwargs)

    def warning(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs) -> None:
        self._capture("warning", _msg, args, stacklevel, kwargs)

    def error(self, _msg: str = None, *args, stacklevel: int = 0, **kwargs: object) -> None:
        self._capture("error", _msg, args, stacklevel, kwargs)

    def getChild(self, child_name: str) -> "ZLoggerInterface":
        # NOTE(review): raises AttributeError when self.up is None — confirm
        # whether a store is ever created without an underlying logger.
        return self.up.getChild(child_name)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/logs/zlogger_store.py
|
zlogger_store.py
|
from .hacks import *
from .col_logging import *
from .zlogger import *
from .zlogger_store import *
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/logs/__init__.py
|
__init__.py
|
from typing import NewType

__all__ = ["MarkdownStr", "MD5Hash", "SHA1Hash", "HTMLString", "XMLString", "JSONString"]

# Plain alias (not a NewType): any str is accepted where MarkdownStr is expected.
MarkdownStr = str
""" A Markdown string """

# Distinct NewTypes: type checkers require an explicit MD5Hash(...)/SHA1Hash(...) cast.
MD5Hash = NewType("MD5Hash", str)
""" A MD5 hash"""

SHA1Hash = NewType("SHA1Hash", str)
""" A SHA-1 hash"""

# Plain aliases, like MarkdownStr.
HTMLString = str
""" A string containing HTML """

XMLString = str
""" A string containing XML """

JSONString = NewType("JSONString", str)
""" A string containing JSON """
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/types.py
|
types.py
|
import re

# Matches ANSI SGR escape sequences, e.g. "\x1b[1;31m" or "\x1b[0m".
# Raw string: in the original non-raw literal, "\[" was an invalid string
# escape (a SyntaxWarning/DeprecationWarning on modern Pythons).
escape = re.compile(r"\x1b\[[\d;]*?m")

__all__ = ["remove_escapes", "get_length_on_screen"]


def remove_escapes(s: str) -> str:
    """Return *s* with all ANSI color/style escape sequences removed."""
    return escape.sub("", s)


def get_length_on_screen(s: str) -> int:
    """ Returns the length of s without the escapes """
    return len(remove_escapes(s))
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/coloring.py
|
coloring.py
|
from typing import Mapping, Optional as O, TypeVar
from .coloring import get_length_on_screen
from .zc_indenting import indent
__all__ = ["pretty_dict", "pretty_msg", "format_error"]
def pretty_msg(head: str, **kwargs: object) -> str:
    """Format *head* plus the keyword arguments as a pretty key/value listing."""
    details = kwargs
    return pretty_dict(head, details)


# Historical alias kept for callers that report errors.
format_error = pretty_msg
def pretty_dict(
    head: O[str],
    d: Mapping[str, object],
    omit_falsy: bool = False,
    sort_keys: bool = False,
    leftmargin: str = "│ ",  # | <-- note box-making
) -> str:
    """
    Render mapping *d* as an indented, box-drawn key/value listing under *head*.

    Keys are right-justified to a common width (measured ignoring ANSI
    escapes); nested dicts are rendered recursively; every line is prefixed
    with *leftmargin*. If *omit_falsy* is true, falsy non-int values are
    skipped (see the condition below). Returns the formatted string.
    """
    if not d:
        return head + ": (empty dict)" if head else "(empty dict)"
    s = []
    # Common width for the key column, ignoring ANSI escapes.
    n = max(get_length_on_screen(str(_)) for _ in d)
    ordered = sorted(d) if sort_keys else list(d)
    # ks = sorted(d)
    for k in ordered:
        v = d[k]

        if k == "__builtins__":
            v = "(hiding __builtins__)"

        # Skip falsy values, but keep ints (0 is meaningful) and any object
        # exposing a "conclusive" attribute.
        # NOTE(review): the "conclusive" special case looks domain-specific —
        # confirm which types rely on it.
        if not hasattr(v, "conclusive") and (not isinstance(v, int)) and (not v) and omit_falsy:
            continue
        prefix = (str(k) + ":").rjust(n + 1) + " "
        if isinstance(v, TypeVar):
            # noinspection PyUnresolvedReferences
            v = f"TypeVar({v.__name__}, bound={v.__bound__})"
        if isinstance(v, dict):
            v = pretty_dict("", v)
        # First line carries the "key:" prefix; continuation lines are indented
        # by the same width.
        s.extend(indent(v, "", prefix).split("\n"))

    # return (head + ':\n' if head else '') + indent("\n".join(s), '| ')
    return (head + "\n" if head else "") + indent("\n".join(s), leftmargin)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/zc_pretty_dicts.py
|
zc_pretty_dicts.py
|
from typing import Optional as O
from .coloring import get_length_on_screen
__all__ = ["indent"]
def indent(s: str, prefix: str, first: O[str] = None, last: O[str] = None) -> str:
    """
    Prefix every line of *s* with *prefix*.

    The first line may get a different prefix *first*; the last line gets
    *last*. If *last* is not given, it is derived from *prefix* by closing the
    box-drawing character ("│" -> "└", "┋" -> "H"). All prefixes are
    right-aligned to a common on-screen width (ignoring ANSI escapes).
    """
    if not isinstance(s, str):
        s = u"{}".format(s)
    assert isinstance(prefix, str), type(prefix)
    try:
        lines = s.split("\n")
    except UnicodeDecodeError:
        lines = [s]
    if not lines:
        return ""

    if first is None:
        first = prefix

    if last is None:
        # Derive the closing prefix from the box-drawing glyph used in prefix.
        couples = [("│", "└"), ("┋", "H")]
        for a, b in couples:
            if a in prefix:
                last = prefix.replace(a, b)
                break
        else:
            last = prefix

    # Right-align all prefixes to the widest one (on-screen width, not len()).
    # Note: these assignments are order-sensitive; prefix is re-measured after
    # being padded only for itself.
    m = max(get_length_on_screen(prefix), get_length_on_screen(first))

    prefix = " " * (m - get_length_on_screen(prefix)) + prefix
    first = " " * (m - get_length_on_screen(first)) + first
    last = " " * (m - get_length_on_screen(last)) + last

    # different first prefix
    res = [u"%s%s" % (prefix, line.rstrip()) for line in lines]
    res[0] = u"%s%s" % (first, lines[0].rstrip())
    return "\n".join(res)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/zc_indenting.py
|
zc_indenting.py
|
import hashlib
from typing import Union
from .types import MD5Hash, SHA1Hash
__all__ = ["get_md5", "get_sha1"]
def get_md5(contents: Union[bytes, str]) -> MD5Hash:
    """ Returns an hexdigest (string).

    If the contents is a string, then it is encoded as utf-8.
    """
    from zuper_commons.types import check_isinstance

    if isinstance(contents, str):
        contents = contents.encode("utf-8")
    check_isinstance(contents, bytes)
    digest = hashlib.md5(contents).hexdigest()
    check_isinstance(digest, str)
    return MD5Hash(digest)
def get_sha1(contents: Union[bytes, str]) -> SHA1Hash:
    """ Returns an hexdigest (string).

    If the contents is a string, it is encoded as utf-8 first
    (consistent with get_md5).
    """
    # NOTE: the original placed its doc text *after* the import statement,
    # so it was never actually a docstring.
    from zuper_commons.types import check_isinstance

    if isinstance(contents, str):
        contents = contents.encode("utf-8")
    check_isinstance(contents, bytes)
    m = hashlib.sha1()
    m.update(contents)
    s = m.hexdigest()
    check_isinstance(s, str)
    return SHA1Hash(s)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/zc_quick_hash.py
|
zc_quick_hash.py
|
from dataclasses import dataclass
from typing import Optional, List, Tuple
import termcolor
from zuper_commons.text.coloring import get_length_on_screen
from zuper_commons.text.text_sidebyside import pad
__all__ = ["box", "text_dimensions"]
@dataclass
class TextDimensions:
    """Dimensions of a block of text: line count and maximum visible width."""

    nlines: int  # number of lines (len(text.split("\n")), so at least 1)
    max_width: int  # widest line, measured ignoring ANSI escapes
def text_dimensions(s: str) -> TextDimensions:
    """Measure *s*: number of lines and widest line (ignoring ANSI escapes)."""
    lines = s.split("\n")
    max_width = max(get_length_on_screen(_) for _ in lines)
    return TextDimensions(nlines=len(lines), max_width=max_width)
#
# U+250x ─ ━ │ ┃ ┄ ┅ ┆ ┇ ┈ ┉ ┊ ┋ ┌ ┍ ┎ ┏
# U+251x ┐ ┑ ┒ ┓ └ ┕ ┖ ┗ ┘ ┙ ┚ ┛ ├ ┝ ┞ ┟
# U+252x ┠ ┡ ┢ ┣ ┤ ┥ ┦ ┧ ┨ ┩ ┪ ┫ ┬ ┭ ┮ ┯
# U+253x ┰ ┱ ┲ ┳ ┴ ┵ ┶ ┷ ┸ ┹ ┺ ┻ ┼ ┽ ┾ ┿
# U+254x ╀ ╁ ╂ ╃ ╄ ╅ ╆ ╇ ╈ ╉ ╊ ╋ ╌ ╍ ╎ ╏
# U+255x ═ ║ ╒ ╓ ╔ ╕ ╖ ╗ ╘ ╙ ╚ ╛ ╜ ╝ ╞ ╟
# U+256x ╠ ╡ ╢ ╣ ╤ ╥ ╦ ╧ ╨ ╩ ╪ ╫ ╬ ╭ ╮ ╯
# U+257x ╰ ╱ ╲ ╳ ╴ ╵ ╶ ╷ ╸ ╹ ╺ ╻ ╼ ╽ ╾ ╿
# Box-drawing glyph sets. Each style lists exactly 22 glyphs; see the tuple
# unpacking at the top of box() for the meaning of each position (corners,
# edges, junctions, plus light variants).
boxes = {
    "pipes": "╔ ═ ╗ ║ ╝ ═ ╚ ║ ╬ ╠ ╣ ╦ ╩ ═ ║ ┼ ╟ ╢ ╤ ╧ ─ │".split(),
    "heavy": "┏ ━ ┓ ┃ ┛ ━ ┗ ┃ ╋ ┣ ┫ ┳ ┻ ━ ┃ ┼ ┠ ┨ ┯ ┷ ─ │".split(),
    "light": "┌ ─ ┐ │ ┘ ─ └ │ ┼ ├ ┤ ┬ ┴ ─ │ ┼ ├ ┤ ┬ ┴ ─ │".split(),
    "circo": "╭ ─ ╮ │ ╯ ─ ╰ │ ┼ ├ ┤ ┬ ┴ ─ │ ┼ ├ ┤ ┬ ┴ ─ │".split(),
}

# An invisible style with the same number of entries.
boxes["spaces"] = [" "] * len(boxes["pipes"])

CORNERS = ["corner"]  # NOTE(review): appears unused in this module — confirm

# Default neighbor matrix for box(): no adjacent boxes on any side
# (the None in the middle is the box itself).
NEIGH = ((0, 0, 0), (0, None, 0), (0, 0, 0))
def box(
    s: str,
    style="pipes",
    neighs=NEIGH,
    draw_borders: Tuple[int, int, int, int] = (1, 1, 1, 1),
    light_inside=True,
    color: Optional[str] = None,
    attrs: Optional[List[str]] = None,
    style_fun=None,
) -> str:
    """
    Draw a box around the (possibly multi-line) string *s*.

    :param style: key into the `boxes` glyph table ("pipes", "heavy", ...).
    :param neighs: 3x3 matrix of 0/1 flags telling which neighboring boxes
        exist around this one; corners/edges are chosen so shared borders
        join correctly when boxes are tiled (as in format_table).
    :param draw_borders: (top, right, bottom, left) flags; 0 suppresses a side.
    :param light_inside: use the light junction variants for inner borders.
    :param color, attrs: termcolor styling applied to the border glyphs.
    :param style_fun: extra styling callable applied to borders and padding.
    """
    dims = text_dimensions(s)
    padded = pad(s, dims.nlines, dims.max_width, style_fun=style_fun)
    # Unpack neighbor flags: (top row)(middle row)(bottom row).
    (tl_n, tc_n, tr_n), (ml_n, _, mr_n), (bl_n, bc_n, br_n) = neighs
    S = boxes[style]
    assert len(S) == 22, len(S)
    # Positional glyph names: t/m/b = top/middle/bottom, l/c/r = left/center/
    # right; P* = junction pieces (c center, r/l/d/u pointing), H/V = plain
    # horizontal/vertical, *_light = light variants.
    (
        tl,
        tc,
        tr,
        mr,
        br,
        bc,
        bl,
        ml,
        Pc,
        Pr,
        Pl,
        Pd,
        Pu,
        H,
        V,
        Pc_light,
        Pr_light,
        Pl_light,
        Pd_light,
        Pu_light,
        H_light,
        V_light,
    ) = S
    if light_inside:
        Pc = Pc_light
        Pu = Pu_light
        Pd = Pd_light
        Pr = Pr_light
        Pl = Pl_light
        H = H_light
        V = V_light
    # Corner selection tables: keyed by the three neighbor flags surrounding
    # each corner; junctions are used when a neighbor shares the border.
    tl_use = {
        (0, 0, 0): tl,
        (0, 0, 1): Pd,
        (0, 1, 0): Pr,
        (0, 1, 1): Pc,  # XXX
        (1, 0, 0): Pc,  # XXX
        (1, 0, 1): Pc,  # XXX
        (1, 1, 0): Pc,
        (1, 1, 1): Pc,
    }[(tl_n, tc_n, ml_n)]
    tr_use = {
        (0, 0, 0): tr,
        (0, 0, 1): Pd,
        (0, 1, 0): Pc,
        (0, 1, 1): Pc,
        (1, 0, 0): Pl,
        (1, 0, 1): Pc,
        (1, 1, 0): Pc,
        (1, 1, 1): Pc,
    }[(tc_n, tr_n, mr_n)]
    br_use = {
        (0, 0, 0): br,
        (0, 0, 1): Pc,
        (0, 1, 0): Pl,
        (0, 1, 1): Pc,
        (1, 0, 0): Pu,
        (1, 0, 1): Pc,
        (1, 1, 0): Pc,
        (1, 1, 1): Pc,
    }[(mr_n, bc_n, br_n)]
    bl_use = {
        (0, 0, 0): bl,
        (0, 0, 1): Pr,
        (0, 1, 0): Pc,
        (0, 1, 1): Pc,
        (1, 0, 0): Pu,
        (1, 0, 1): Pc,
        (1, 1, 0): Pc,
        (1, 1, 1): Pc,
    }[(ml_n, bl_n, bc_n)]
    # Edges become shared (inner) glyphs when a neighbor is present.
    mr_use = {0: mr, 1: V}[mr_n]
    ml_use = {0: ml, 1: V}[ml_n]
    tc_use = {0: tc, 1: H}[tc_n]
    bc_use = {0: bc, 1: H}[bc_n]

    draw_top, draw_right, draw_bottom, draw_left = draw_borders
    if not draw_right:
        tr_use = ""
        mr_use = ""
        br_use = ""
    if not draw_left:
        tl_use = ""
        ml_use = ""
        bl_use = ""

    top = tl_use + tc_use * dims.max_width + tr_use
    bot = bl_use + bc_use * dims.max_width + br_use

    def f(_):
        # Apply style_fun and termcolor styling to a border string.
        if style_fun:
            _ = style_fun(_)
        if color is not None or attrs:
            _ = termcolor.colored(_, color=color, attrs=attrs)
        return _

    top_col = f(top)
    bot_col = f(bot)
    mr_use_col = f(mr_use)
    ml_use_col = f(ml_use)
    new_lines = []
    if draw_top:
        new_lines.append(top_col)
    for l in padded:
        new_lines.append(ml_use_col + l + mr_use_col)
    if draw_bottom:
        new_lines.append(bot_col)
    return "\n".join(new_lines)
# begin = termcolor.colored('║', 'yellow', attrs=['dark'])
# ending = termcolor.colored('║', 'yellow', attrs=['dark']) # ↵┋
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/boxing.py
|
boxing.py
|
from typing import Callable, List, Sequence
from .coloring import get_length_on_screen
__all__ = ["pad", "side_by_side"]
def pad(
    text: str,
    nlines: int,
    linelength: int,
    halign: str = "left",
    valign: str = "top",
    style_fun: Callable[[str], str] = None,
) -> List[str]:
    """
    Pad *text* to exactly *nlines* lines of visible width *linelength*.

    Vertical alignment adds empty lines above/below; horizontal alignment
    adds spaces left/right of each line (width measured ignoring ANSI
    escapes). The padding spaces are styled with *style_fun* if given.
    Returns the list of padded lines.
    """
    rows: List[str] = text.split("\n")

    missing = nlines - len(rows)
    if missing > 0:
        if valign == "top":
            above, below = 0, missing
        elif valign == "bottom":
            above, below = missing, 0
        elif valign == "middle":
            below = missing // 2
            above = missing - below
        else:
            raise ValueError(valign)
        assert missing == above + below
        rows = [""] * above + rows + [""] * below

    out: List[str] = []
    for row in rows:
        gap = max(linelength - get_length_on_screen(row), 0)
        if halign == "left":
            lgap, rgap = 0, gap
        elif halign == "right":
            lgap, rgap = gap, 0
        elif halign == "center":
            rgap = gap // 2
            lgap = gap - rgap
        else:
            raise ValueError(halign)
        assert gap == lgap + rgap
        left_fill = " " * lgap
        right_fill = " " * rgap
        if style_fun:
            left_fill = style_fun(left_fill)
            right_fill = style_fun(right_fill)
        out.append(left_fill + row + right_fill)
    return out
def side_by_side(args: Sequence[str], sep=" ", style_fun=None) -> str:
    """Arrange several multi-line strings horizontally, separated by *sep*."""
    args = list(args)
    columns: List[List[str]] = [block.split("\n") for block in args]
    height: int = max(len(col) for col in columns)
    widths: List[int] = [max(get_length_on_screen(row) for row in col) for col in columns]
    padded = [pad(block, height, width, style_fun=style_fun) for block, width in zip(args, widths)]
    joined = []
    for i in range(height):
        joined.append(sep.join(col[i] for col in padded))
    return "\n".join(joined)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/text_sidebyside.py
|
text_sidebyside.py
|
# -*- coding: utf-8 -*-
import re
from typing import Iterator, List, Sequence, Union
__all__ = ["expand_string", "get_wildcard_matches", "wildcard_to_regexp", 'expand_wildcard']
def flatten(seq: Iterator) -> List:
    """Concatenate an iterable of iterables into a single flat list."""
    return [item for chunk in seq for item in chunk]
def expand_string(x: Union[str, Sequence[str]], options: Sequence[str]) -> List[str]:
    """
    Expand *x* into a list of concrete option names.

    A list is expanded element-wise; a string is split on commas and any
    '*' wildcards are matched against *options*. Raises ValueError for
    other input types (the original used `assert False`, which is stripped
    under `python -O`).
    """
    if isinstance(x, list):
        return flatten(expand_string(y, options) for y in x)
    elif isinstance(x, str):
        x = x.strip()
        if "," in x:
            splat = [_ for _ in x.split(",") if _]  # remove empty
            return flatten(expand_string(y, options) for y in splat)
        elif "*" in x:
            xx = expand_wildcard(x, options)
            expanded = list(xx)
            return expanded
        else:
            return [x]
    else:
        msg = "Expected a string or a list of strings, got: %r" % (x,)
        raise ValueError(msg)
def wildcard_to_regexp(arg: str):
    """ Returns a regular expression from a shell wildcard expression.

    Only '*' is translated (to '.*'); other characters keep their regex
    meaning, as in the original implementation. Raw strings fix the invalid
    string escapes "\\A"/"\\Z" of the original (SyntaxWarning on modern
    Pythons).
    """
    return re.compile(r"\A" + arg.replace("*", ".*") + r"\Z")
def has_wildcard(s: str) -> bool:
    """True if *s* contains a '*' wildcard character."""
    return "*" in s
def expand_wildcard(wildcard: str, universe: Sequence[str]) -> Sequence[str]:
    """
    Expands a wildcard expression against the given list.
    Raises ValueError if none found.

    :param wildcard: string with '*'
    :param universe: a list of strings
    """
    if not has_wildcard(wildcard):
        raise ValueError("No wildcards in %r." % wildcard)
    found = list(get_wildcard_matches(wildcard, universe))
    if not found:
        raise ValueError("Could not find matches for pattern %r in %s." % (wildcard, universe))
    return found
def get_wildcard_matches(wildcard: str, universe: Sequence[str]) -> Iterator[str]:
    """
    Expands a wildcard expression against the given list.
    Yields a sequence of strings.

    :param wildcard: string with '*'
    :param universe: a list of strings
    """
    pattern = wildcard_to_regexp(wildcard)
    yield from (candidate for candidate in universe if pattern.match(candidate))
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/zc_wildcards.py
|
zc_wildcards.py
|
# -*- coding: utf-8 -*-
import re
# ---------------------------------------------------------
# natsort.py: Natural string sorting.
# ---------------------------------------------------------
# By Seo Sanghyeon. Some changes by Connelly Barnes.
from typing import Union, List
__all__ = ["natsorted"]
def try_int(s: str) -> Union[str, int]:
    """Convert *s* to an integer if possible, otherwise return it unchanged."""
    try:
        return int(s)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return s


def natsort_key(s: str):
    """Used internally to get a tuple by which s is sorted."""
    s = str(s)  # convert everything to string
    # Split into alternating runs of digits and non-digits.
    return tuple(map(try_int, re.findall(r"(\d+|\D+)", s)))


def natsorted(seq: List[str]) -> List[str]:
    """Returns a copy of seq, sorted by natural string sort."""
    # list() also accepts sets and other iterables.
    return sorted(list(seq), key=natsort_key)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/natsorting.py
|
natsorting.py
|
from .. import logger
logger = logger.getChild("text")
from .zc_indenting import *
from .zc_pretty_dicts import *
from .zc_quick_hash import *
from .zc_wildcards import *
from .boxing import box
from .text_sidebyside import side_by_side
from .table import *
from .types import *
from .coloring import *
from .natsorting import *
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/__init__.py
|
__init__.py
|
import itertools
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple, TypeVar
from .boxing import box, text_dimensions
from .coloring import get_length_on_screen
from .text_sidebyside import pad, side_by_side
try:
from typing import Literal
HAlign = Literal["left", "center", "right", "inherit"]
VAlign = Literal["top", "middle", "bottom", "inherit"]
except ImportError:
HAlign = VAlign = str
__all__ = ["Style", "format_table", "wrap_lines"]
@dataclass
class Style:
    """Cell styling for format_table; "inherit" defers to an enclosing style."""

    halign: HAlign = "inherit"  # horizontal alignment: left/center/right
    valign: VAlign = "inherit"  # vertical alignment: top/middle/bottom
    # NOTE(review): annotated int but defaulting to the sentinel "inherit";
    # effectively Union[int, str] — confirm before tightening the annotation.
    padding_right: int = "inherit"
    padding_left: int = "inherit"
def format_table(
    cells: Dict[Tuple[int, int], str],
    *,
    draw_grid_v: bool = True,
    draw_grid_h: bool = True,
    style: str = "pipes",
    light_inside: bool = True,
    color: Optional[str] = None,
    attrs: Optional[List[str]] = None,
    col_style: Dict[int, Style] = None,
    row_style: Dict[int, Style] = None,
    cell_style: Dict[Tuple[int, int], Style] = None,
    table_style: Style = None,
    style_fun=None
) -> str:
    """
    Render a sparse {(row, col): text} mapping as a box-drawn text table.

    Styles: "none", "pipes", ...

    Per-cell styling is resolved by layering (later wins, "inherit" skipped):
    defaults < table_style < row_style < col_style < cell_style.
    Missing cells are filled with "".
    """
    table_style = table_style or Style()
    col_styles = col_style or {}
    row_styles = row_style or {}
    cell_styles = cell_style or {}

    def get_row_style(row: int) -> Style:
        return row_styles.get(row, Style())

    def get_col_style(col: int) -> Style:
        return col_styles.get(col, Style())

    def get_cell_style(cell: Tuple[int, int]) -> Style:
        return cell_styles.get(cell, Style())

    X = TypeVar("X")

    def resolve(a: List[X]) -> X:
        # Last non-"inherit" value wins.
        cur = a[0]
        for _ in a:
            if _ == "inherit":
                continue
            else:
                cur = _
        return cur

    def get_style(cell: Tuple[int, int]) -> Style:
        # Layer the four style sources for one cell.
        row, col = cell
        rows = get_row_style(row)
        cols = get_col_style(col)
        cels = get_cell_style(cell)
        halign = resolve(["left", table_style.halign, rows.halign, cols.halign, cels.halign])
        valign = resolve(["top", table_style.valign, rows.valign, cols.valign, cels.valign])
        padding_left = resolve(
            [0, table_style.padding_left, rows.padding_left, cols.padding_left, cels.padding_left]
        )
        padding_right = resolve(
            [0, table_style.padding_right, rows.padding_right, cols.padding_right, cels.padding_right]
        )
        return Style(halign=halign, valign=valign, padding_left=padding_left, padding_right=padding_right)

    cells = dict(cells)

    # find all mentioned cells
    mentioned_js = set()
    mentioned_is = set()
    for i, j in cells:
        mentioned_is.add(i)
        mentioned_js.add(j)

    # add default = '' for missing cells
    nrows = max(mentioned_is) + 1 if mentioned_is else 1
    ncols = max(mentioned_js) + 1 if mentioned_js else 1
    coords = list(itertools.product(range(nrows), range(ncols)))
    for c in coords:
        if c not in cells:
            cells[c] = ""

    # find max size for cells (row heights and column widths)
    row_heights = [0] * nrows
    col_widths = [0] * ncols
    for (i, j), s in list(cells.items()):
        dims = text_dimensions(s)
        col_widths[j] = max(col_widths[j], dims.max_width)
        row_heights[i] = max(row_heights[i], dims.nlines)

    # pad all cells and draw their individual boxes
    for (i, j), s in list(cells.items()):
        linelength = col_widths[j]
        nlines = row_heights[i]
        cell_style = get_style((i, j))

        padded = do_padding(
            s,
            linelength=linelength,
            nlines=nlines,
            halign=cell_style.halign,
            valign=cell_style.valign,
            padding_left=cell_style.padding_left,
            padding_right=cell_style.padding_right,
            style_fun=style_fun,
        )

        # Neighbor flags (0/1): whether a cell exists before/after in each
        # direction; box() uses them to pick junction glyphs.
        ibef = int(i > 0)
        iaft = int(i < nrows - 1)
        jbef = int(j > 0)
        jaft = int(j < ncols - 1)
        neighs = (
            (ibef * jbef, ibef, ibef * jaft),
            (jbef, None, jaft),
            (iaft * jbef, iaft, iaft * jaft),
        )
        # Each cell draws its top/left border; the last row/column also draws
        # bottom/right, so shared borders are drawn exactly once.
        draw_top = 1
        draw_left = 1
        draw_right = jaft == 0
        draw_bottom = iaft == 0
        if not draw_grid_v:
            draw_bottom = draw_top = 0
        if not draw_grid_h:
            draw_left = draw_right = 0

        d = draw_top, draw_right, draw_bottom, draw_left

        if style == "none":
            s = " " + padded
        else:
            s = box(
                padded,
                neighs=neighs,
                style=style,
                draw_borders=d,
                light_inside=light_inside,
                color=color,
                attrs=attrs,
                style_fun=style_fun,
            )
        cells[(i, j)] = s

    # Assemble: join each row's boxed cells horizontally, then stack rows.
    parts = []
    for i in range(nrows):
        ss = []
        for j in range(ncols):
            ss.append(cells[(i, j)])
        s = side_by_side(ss, sep="")
        parts.append(s)

    whole = "\n".join(parts)
    # res = box(whole, style=style)
    # logger.info(f'table {cells!r}')
    return whole
def wrap_lines(s: str, max_width: int):
    """
    Hard-wrap each line of *s* at *max_width* characters; continuation lines
    are prefixed with '$'.

    NOTE(review): the width test uses the on-screen length (ignoring ANSI
    escapes) but the slice is by raw characters, so an escape sequence can be
    split across lines — confirm this is acceptable for styled input. Also,
    max_width == 1 would loop forever since '$' + remainder never shrinks.
    """
    lines = s.split("\n")
    res = []
    while lines:
        l = lines.pop(0)
        n = get_length_on_screen(l)
        if n <= max_width:
            res.append(l)
        else:
            # Emit the first max_width chars; push the rest back with a
            # '$' continuation marker.
            a = l[:max_width]
            b = "$" + l[max_width:]
            res.append(a)
            lines.insert(0, b)
    return "\n".join(res)
def do_padding(
    s: str,
    linelength: int,
    nlines: int,
    halign: HAlign,
    valign: VAlign,
    padding_left: int,
    padding_right: int,
    style_fun: Callable[[str], str] = None,
    pad_char=" ",
) -> str:
    """Pad *s* to the cell size and add left/right padding columns of *pad_char*."""
    rows = pad(
        s, linelength=linelength, nlines=nlines, halign=halign, valign=valign, style_fun=style_fun
    )
    left = pad_char * padding_left
    right = pad_char * padding_right
    if style_fun is not None:
        left = style_fun(left)
        right = style_fun(right)
    return "\n".join(left + row + right for row in rows)
|
zuper-commons-z6
|
/zuper-commons-z6-6.2.4.tar.gz/zuper-commons-z6-6.2.4/src/zuper_commons/text/table.py
|
table.py
|
from setuptools import setup, find_packages
def get_version(filename):
    """
    Extract the value of a `__version__ = '...'` assignment from *filename*.

    Raises ValueError if no such line exists.
    """
    import ast
    version = None
    with open(filename) as f:
        for line in f:
            if line.startswith('__version__'):
                node = ast.parse(line).body[0].value
                # ast.Constant.value instead of the deprecated `.s`
                # (ast.Str and the .s alias are removed in newer Pythons).
                version = node.value
                break
        else:
            raise ValueError('No version found in %r.' % filename)
    if version is None:
        raise ValueError(filename)
    return version
version = get_version('src/zuper_commons/__init__.py')
import os
description = """"""
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory."""
    path = os.path.join(os.path.dirname(__file__), fname)
    # Context manager closes the handle deterministically (the original
    # leaked the file object returned by open()).
    with open(path) as f:
        return f.read()
setup(name='zuper-commons',
      version=version,  # extracted above from src/zuper_commons/__init__.py
      package_dir={'': 'src'},  # sources live under src/
      packages=find_packages('src'),
      zip_safe=True,
      entry_points={
          'console_scripts': [
              # no command-line scripts installed
          ]
      },
      install_requires=[
          'PyContracts',
      ],
      )
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/setup.py
|
setup.py
|
__version__ = '3.0.4'

import logging

# NOTE(review): configuring the root logger at import time is a side effect
# on the embedding application — confirm this is intended for a library.
logging.basicConfig()
logger = logging.getLogger('zc')
logger.setLevel(logging.DEBUG)

# NOTE(review): this goes through the *root* logger, not `logger` — confirm.
logging.info(f'zc {__version__}')
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
import os
__all__ = [
'friendly_path',
]
def friendly_path(path, use_environment=True):
    """
    Gets a friendly representation of the given path,
    using relative paths or environment variables
    (if use_environment = True).
    """
    # TODO: send extra rules
    candidates = [os.path.relpath(path, os.getcwd())]

    substitutions = [
        ('~', os.path.expanduser('~')),
        ('.', os.getcwd()),
        ('.', os.path.realpath(os.getcwd())),
    ]

    if use_environment:
        env = dict(os.environ)
        # drop PWD-like variables
        for name in list(env.keys()):
            if 'PWD' in name:
                del env[name]
        for name, value in env.items():
            if value:
                if value[-1] == '/':
                    value = value[:-1]
                if value and value[0] == '/':
                    substitutions.append(('${%s}' % name, value))

    # longest expansions are applied first
    substitutions.sort(key=lambda rule: -len(rule[1]))

    candidates.append(replace_variables(path, substitutions))

    penalty = 5  # each '..' weighs as this many characters

    def rank(candidate):
        return len(candidate.replace('..', '*' * penalty))

    candidates.sort(key=rank)
    return candidates[0]
def replace_variables(path, rules):
    """
    Apply (replacement, prefix) *rules* to *path*: whenever *path* starts
    with a rule's prefix, that leading occurrence is substituted.
    """
    for k, v in rules:
        if path.startswith(v):
            # Replace only the verified prefix: str.replace() would substitute
            # *every* occurrence of v, corrupting paths that contain it again
            # later in the string.
            path = k + path[len(v):]
    return path
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_friendly_path.py
|
zc_friendly_path.py
|
# -*- coding: utf-8 -*-
import os
def expand_all(x0):
    """Expand '~' and environment variables in *x0*; raise ValueError if any remain unresolved."""
    expanded = os.path.expandvars(os.path.expanduser(x0))
    if '$' in expanded:
        msg = 'Cannot resolve all environment variables in %r.' % x0
        raise ValueError(msg)
    return expanded
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_path_utils.py
|
zc_path_utils.py
|
import os
__all__ = [
'mkdirs_thread_safe',
'make_sure_dir_exists',
]
def mkdirs_thread_safe(dst):
    """Make directories leading to 'dst' if they don't exist yet.

    Safe under concurrent callers: creating an already-existing directory
    is not an error.
    """
    if dst == '' or os.path.exists(dst):
        return
    # exist_ok=True makes the call race-free. The original recursed manually,
    # compared err.errno against the literal 17 (use errno.EEXIST if ever
    # needed), and carried a dead branch for classic-Mac ':' separators.
    os.makedirs(dst, mode=0o777, exist_ok=True)
def make_sure_dir_exists(filename):
    """ Makes sure that the path to file exists, by creating directories. """
    dirname = os.path.dirname(filename)
    # dirname == '' means the current directory, which always exists.
    if dirname != '':
        # exist_ok avoids the check-then-create race of the original
        # (exists() followed by mkdir could fail under concurrency).
        os.makedirs(dirname, exist_ok=True)
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_mkdirs.py
|
zc_mkdirs.py
|
# -*- coding: utf-8 -*-
import os
__all__ = [
'dir_from_package_name',
]
def dir_from_package_name(d: str):
    """ This works for "package.sub" format. If it's only
    package, we look for __init__.py"""
    tokens = d.split('.')
    if len(tokens) == 1:
        package = d
        sub = '__init__'
    else:
        package = '.'.join(tokens[:-1])
        sub = tokens[-1]
    try:
        from pkg_resources import resource_filename  # @UnresolvedImport
        res = resource_filename(package, sub + '.py')

        if len(tokens) == 1:
            # for a bare package, return its directory rather than __init__.py
            res = os.path.dirname(res)

        return res
    except BaseException as e:  # pragma: no cover
        # The original called format_error() here, which is not imported in
        # this module — the error path raised NameError instead of ValueError.
        msg = 'Cannot resolve package name\n d: %r' % (d,)
        raise ValueError(msg) from e
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_dir_from_package_nam.py
|
zc_dir_from_package_nam.py
|
# -*- coding: utf-8 -*-
import pickle
import traceback
from io import BytesIO
from pickle import (Pickler, SETITEM, MARK, SETITEMS, EMPTY_TUPLE, TUPLE, POP,
_tuplesize2code, POP_MARK)
from zuper_commons.types.zc_describe_type import describe_type
from . import logger
__all__ = ['find_pickling_error']
def find_pickling_error(obj, protocol=pickle.HIGHEST_PROTOCOL):
    """
    Diagnose why *obj* fails to pickle.

    Re-runs the failing dump through MyPickler (which tracks a descriptive
    stack) and returns a message describing where pickling broke. Raises if
    the error cannot be pinpointed; logs and returns None if *obj* actually
    pickles fine.
    """
    sio = BytesIO()
    try:
        # Reproduce with the *requested* protocol; the original called
        # pickle.dumps(obj) and silently ignored the `protocol` parameter.
        pickle.dumps(obj, protocol)
    except BaseException:
        se1 = traceback.format_exc()
        pickler = MyPickler(sio, protocol)
        try:
            pickler.dump(obj)
        except Exception:
            se2 = traceback.format_exc()
            msg = pickler.get_stack_description()
            msg += '\n --- Current exception----\n%s' % se1
            msg += '\n --- Old exception----\n%s' % se2
            return msg
        else:
            msg = 'I could not find the exact pickling error.'
            raise Exception(msg)
    else:
        msg = ('Strange! I could not reproduce the pickling error '
               'for the object of class %s' % describe_type(obj))
        logger.info(msg)
class MyPickler(Pickler):
    """
    A Pickler that keeps a human-readable stack of what it is currently
    saving, so a pickling failure can be attributed to a specific element.

    NOTE(review): relies on Pickler internals (save, memoize, _BATCHSIZE),
    i.e. the pure-Python pickler implementation — confirm against the
    target Python version.
    """

    def __init__(self, *args, **kargs):
        Pickler.__init__(self, *args, **kargs)
        self.stack = []  # descriptions of the objects currently being saved

    def save(self, obj):
        desc = 'object of type %s' % (describe_type(obj))
        # , describe_value(obj, 100))
        # self.stack.append(describe_value(obj, 120))
        self.stack.append(desc)
        Pickler.save(self, obj)
        self.stack.pop()

    def get_stack_description(self):
        """Return the current save stack as an indented multi-line string."""
        s = 'Pickling error occurred at:\n'
        for i, context in enumerate(self.stack):
            s += ' ' * i + '- %s\n' % context
        return s

    def save_pair(self, k, v):
        self.stack.append('key %r = object of type %s' % (k, describe_type(v)))
        self.save(k)
        self.save(v)
        self.stack.pop()

    def _batch_setitems(self, items):
        # Helper to batch up SETITEMS sequences; proto >= 1 only
        # save = self.save
        write = self.write

        if not self.bin:
            for k, v in items:
                self.stack.append('entry %s' % str(k))
                self.save_pair(k, v)
                self.stack.pop()
                write(SETITEM)
            return

        r = list(range(self._BATCHSIZE))
        while items is not None:
            tmp = []
            for _ in r:
                try:
                    # FIX: next(items) — `items.next()` is Python-2 syntax and
                    # raised AttributeError on any Python 3 iterator.
                    tmp.append(next(items))
                except StopIteration:
                    items = None
                    break
            n = len(tmp)
            if n > 1:
                write(MARK)
                for k, v in tmp:
                    self.stack.append('entry %s' % str(k))
                    self.save_pair(k, v)
                    self.stack.pop()
                write(SETITEMS)
            elif n:
                k, v = tmp[0]
                self.stack.append('entry %s' % str(k))
                self.save_pair(k, v)
                self.stack.pop()
                write(SETITEM)
            # else tmp is empty, and we're done

    def save_tuple(self, obj):
        write = self.write
        proto = self.proto

        n = len(obj)
        if n == 0:
            if proto:
                write(EMPTY_TUPLE)
            else:
                write(MARK + TUPLE)
            return

        save = self.save
        memo = self.memo
        if n <= 3 and proto >= 2:
            for i, element in enumerate(obj):
                self.stack.append('tuple element %s' % i)
                save(element)
                self.stack.pop()
            # Subtle. Same as in the big comment below.
            if id(obj) in memo:
                get = self.get(memo[id(obj)][0])
                write(POP * n + get)
            else:
                write(_tuplesize2code[n])
                self.memoize(obj)
            return

        # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
        # has more than 3 elements.
        write(MARK)
        for i, element in enumerate(obj):
            self.stack.append('tuple element %s' % i)
            save(element)
            self.stack.pop()

        if id(obj) in memo:
            # Subtle. d was not in memo when we entered save_tuple(), so
            # the process of saving the tuple's elements must have saved
            # the tuple itself: the tuple is recursive. The proper action
            # now is to throw away everything we put on the stack, and
            # simply GET the tuple (it's already constructed). This check
            # could have been done in the "for element" loop instead, but
            # recursive tuples are a rare thing.
            get = self.get(memo[id(obj)][0])
            if proto:
                write(POP_MARK + get)
            else:  # proto 0 -- POP_MARK not available
                write(POP * (n + 1) + get)
            return

        # No recursion.
        self.write(TUPLE)
        self.memoize(obj)
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_debug_pickler.py
|
zc_debug_pickler.py
|
# -*- coding: utf-8 -*-
import fnmatch
import os
import time
from typing import *
from collections import defaultdict
from contracts import contract
from contracts.utils import check_isinstance
from . import logger
__all__ = [
'locate_files',
]
@contract(returns='list(str)', directory='str',
          pattern='str|seq(str)', followlinks='bool')
def locate_files(directory, pattern, followlinks=True,
                 include_directories=False,
                 include_files=True,
                 normalize=True,
                 ignore_patterns: Optional[List[str]] = None):
    """
    Recursively find files (and optionally directories) under *directory*
    matching *pattern*.

    pattern is either a string or a sequence of strings (fnmatch syntax,
    or exact name).
    NOTE: if you do not pass ignore_patterns, it will use MCDPConstants.locate_files_ignore_patterns
    ignore_patterns = ['*.bak']
    normalize = uses realpath (and silently de-duplicates paths that
    resolve to the same file)
    """
    t0 = time.time()

    if ignore_patterns is None:
        ignore_patterns = []

    if isinstance(pattern, str):
        patterns = [pattern]
    else:
        patterns = list(pattern)
    for p in patterns:
        check_isinstance(p, str)

    # directories visited
    # visited = set()
    # visited_basename = set()
    # print('locate_files %r %r' % (directory, pattern))
    filenames = []

    def matches_pattern(x):
        # fnmatch or exact-name match against any requested pattern
        return any(fnmatch.fnmatch(x, _) or (x == _) for _ in patterns)

    def should_ignore_resource(x):
        return any(fnmatch.fnmatch(x, _) or (x == _) for _ in ignore_patterns)

    def accept_dirname_to_go_inside(_root_, d_):
        # Prune ignored directories before walking into them.
        if should_ignore_resource(d_):
            return False
        # XXX
        # dd = os.path.realpath(os.path.join(root_, d_))
        # if dd in visited:
        #     return False
        # visited.add(dd)
        return True

    def accept_dirname_as_match(_):
        return include_directories and \
               not should_ignore_resource(_) and \
               matches_pattern(_)

    def accept_filename_as_match(_):
        return include_files and \
               not should_ignore_resource(_) and \
               matches_pattern(_)

    ntraversed = 0
    for root, dirnames, files in os.walk(directory, followlinks=followlinks):
        ntraversed += 1
        # In-place assignment prunes os.walk's traversal.
        dirnames[:] = [_ for _ in dirnames if accept_dirname_to_go_inside(root, _)]

        for f in files:
            # logger.info('look ' + root + '/' + f)
            if accept_filename_as_match(f):
                filename = os.path.join(root, f)
                filenames.append(filename)
        for d in dirnames:
            if accept_dirname_as_match(d):
                filename = os.path.join(root, d)
                filenames.append(filename)

    if normalize:
        # Collapse distinct paths (e.g. via symlinks) that resolve to the
        # same real file.
        real2norm = defaultdict(lambda: [])
        for norm in filenames:
            real = os.path.realpath(norm)
            real2norm[real].append(norm)
            # print('%s -> %s' % (real, norm))

        for k, v in real2norm.items():
            if len(v) > 1:
                msg = 'In directory:\n\t%s\n' % directory
                msg += 'I found %d paths that refer to the same file:\n' % len(v)
                for n in v:
                    msg += '\t%s\n' % n
                msg += 'refer to the same file:\n\t%s\n' % k
                msg += 'I will silently eliminate redundancies.'
                # logger.warning(msg)  # XXX

        filenames = list(real2norm.keys())

    seconds = time.time() - t0
    if seconds > 5:
        # Only report slow scans.
        n = len(filenames)
        nuniques = len(set(filenames))
        logger.debug('%.4f s for locate_files(%s,%s): %d traversed, found %d filenames (%d uniques)' %
                     (seconds, directory, pattern, ntraversed, n, nuniques))

    return filenames
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_locate_files_imp.py
|
zc_locate_files_imp.py
|
# -*- coding: utf-8 -*-
import gzip
import os
import random
from contextlib import contextmanager
__all__ = [
'safe_write',
'safe_read',
]
def is_gzip_filename(filename):
    """True if the name contains the '.gz' marker anywhere (also matches temp names like 'x.gz.tmp.1')."""
    marker = '.gz'
    return filename.find(marker) != -1
@contextmanager
def safe_write(filename, mode='wb', compresslevel=5):
    """
    Makes atomic writes by writing to a temp filename.
    Also if the filename ends in ".gz", writes to a compressed stream.
    Yields a file descriptor.

    It is thread safe because it renames the file.

    If there is an error, the file will be removed if it exists.
    """
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok=True handles the create/race condition that previously
        # needed a bare `except: pass` (which also hid permission errors).
        os.makedirs(dirname, exist_ok=True)

    # Write to a process/random-unique temporary name, then rename into place.
    n = random.randint(0, 10000)
    tmp_filename = '%s.tmp.%s.%s' % (filename, os.getpid(), n)
    try:
        if is_gzip_filename(filename):
            def fopen(fname, fmode):
                return gzip.open(filename=fname, mode=fmode,
                                 compresslevel=compresslevel)
        else:
            fopen = open
        with fopen(tmp_filename, mode) as f:
            yield f
        # On Unix, if dst exists and is a file, it will be replaced silently
        # if the user has permission.
        os.rename(tmp_filename, filename)
    except BaseException:
        # Clean up the partial temp file and (as documented) the target.
        if os.path.exists(tmp_filename):
            os.unlink(tmp_filename)
        if os.path.exists(filename):
            os.unlink(filename)
        raise
@contextmanager
def safe_read(filename, mode='rb'):
    """
    If the filename ends in ".gz", reads from a compressed stream.
    Yields a file descriptor.
    """
    # The previous version wrapped everything in a `try: ... except: raise`
    # that did nothing, and hand-rolled the close() for the gzip branch;
    # both gzip files and regular files support the context-manager protocol.
    opener = gzip.open if is_gzip_filename(filename) else open
    with opener(filename, mode) as f:
        yield f
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_safe_write.py
|
zc_safe_write.py
|
# -*- coding: utf-8 -*-
import pickle
from contracts import describe_type
from . import logger
from .zc_debug_pickler import find_pickling_error
from .zc_safe_write import safe_read, safe_write
__all__ = [
'safe_pickle_dump',
'safe_pickle_load',
]
# When True, safe_pickle_dump runs find_pickling_error on failure to diagnose
# which sub-object could not be pickled.
debug_pickling = False
def safe_pickle_dump(value, filename, protocol=pickle.HIGHEST_PROTOCOL,
                     **safe_write_options):
    """Atomically pickle ``value`` to ``filename`` via safe_write; logs on failure."""
    with safe_write(filename, **safe_write_options) as f:
        try:
            pickle.dump(value, f, protocol)
        except KeyboardInterrupt:
            raise
        except BaseException:
            logger.error('Cannot pickle object of class %s' % describe_type(value))
            if debug_pickling:
                logger.error(find_pickling_error(value, protocol))
            raise
def safe_pickle_load(filename):
    """Load an object previously written with safe_pickle_dump."""
    # TODO: add debug check
    with safe_read(filename) as f:
        result = pickle.load(f)
    return result
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_safe_pickling.py
|
zc_safe_pickling.py
|
# -*- coding: utf-8 -*-
import codecs
import os
from zuper_commons.types import check_isinstance
from . import logger
from .zc_friendly_path import friendly_path
from .zc_mkdirs import make_sure_dir_exists
from .zc_path_utils import expand_all
__all__ = [
'read_bytes_from_file',
'read_ustring_from_utf8_file',
'read_ustring_from_utf8_file_lenient',
'write_bytes_to_file',
'write_ustring_to_utf8_file',
]
def read_bytes_from_file(filename: str) -> bytes:
    """ Read binary data and returns bytes """
    _check_exists(filename)
    with open(filename, 'rb') as stream:
        contents = stream.read()
    return contents
def read_ustring_from_utf8_file(filename: str) -> str:
    """ Returns a unicode/proper string """
    _check_exists(filename)
    with codecs.open(filename, encoding='utf-8') as stream:
        try:
            return stream.read()
        except UnicodeDecodeError as e:
            raise UnicodeError('Could not successfully decode file %s' % filename) from e
def read_ustring_from_utf8_file_lenient(filename: str) -> str:
    """ Reads the file as UTF-8, silently ignoring undecodable bytes. """
    _check_exists(filename)
    # With errors='ignore', read() cannot raise UnicodeDecodeError, so the
    # previous try/except around it was dead code and has been removed.
    with codecs.open(filename, encoding='utf-8', errors='ignore') as f:
        return f.read()
def _check_exists(filename):
if not os.path.exists(filename):
if os.path.lexists(filename):
msg = 'The link %s does not exist.' % filename
msg += ' it links to %s' % os.readlink(filename)
raise ValueError(msg)
else:
msg = 'Could not find file %r' % filename
msg += ' from directory %s' % os.getcwd()
raise ValueError(msg)
def write_ustring_to_utf8_file(data: str, filename, quiet=False):
    """
    Encode ``data`` as UTF-8 and delegate to write_bytes_to_file
    (which also creates the directory if it does not exist).

    :param data: text to write
    :param filename: destination path
    :param quiet: suppress the debug log line
    """
    check_isinstance(data, str)
    encoded = data.encode('utf-8')  # OK
    return write_bytes_to_file(encoded, filename, quiet=quiet)
def write_bytes_to_file(data: bytes, filename: str, quiet=False):
    """
    Writes the data to the given filename.
    If the data did not change, the file is not touched.

    :param data: bytes to write
    :param filename: destination path (directory created if needed)
    :param quiet: suppress the debug log line
    """
    check_isinstance(data, bytes)
    L = len(filename)
    if L > 1024:
        msg = f'Invalid argument filename: too long at {L}. Did you confuse it with data?\n{filename[:1024]}'
        raise ValueError(msg)
    filename = expand_all(filename)
    make_sure_dir_exists(filename)
    if os.path.exists(filename):
        # use a context manager: the previous open(...).read() leaked the handle
        with open(filename, 'rb') as f:
            current = f.read()
        if current == data:
            if 'assets' not in filename:
                if not quiet:
                    logger.debug('already up to date %s' % friendly_path(filename))
            return
    with open(filename, 'wb') as f:
        f.write(data)
    if filename.startswith('/tmp'):
        quiet = True
    if not quiet:
        size = '%.1fMB' % (len(data) / (1024 * 1024))
        logger.debug('Written %s to: %s' % (size, friendly_path(filename)))
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/zc_fileutils.py
|
zc_fileutils.py
|
from .. import logger
logger = logger.getChild("fs")
from .zc_dir_from_package_nam import *
from .zc_fileutils import *
from .zc_locate_files_imp import *
from .zc_mkdirs import *
from .zc_safe_pickling import *
from .zc_path_utils import *
from .zc_safe_write import *
from .zc_friendly_path import *
from .zc_debug_pickler import *
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/fs/__init__.py
|
__init__.py
|
import termcolor
# Palette (hex RGB) used by the coloring helpers below.
color_orange = '#ffb342'
color_blue = '#42a0ff'
from xtermcolor import colorize
def colorize_rgb(x, rgb):
    """Colorize text ``x`` with a '#rrggbb' hex color via xtermcolor."""
    assert rgb.startswith('#')
    hex_value = int(rgb[1:], 16)
    return colorize(x, hex_value)


def color_ops(x):
    """Render ``x`` in the standard 'operations' blue."""
    return colorize_rgb(x, color_blue)


def color_typename(x):
    """Render ``x`` in the standard 'type name' orange."""
    return colorize_rgb(x, color_orange)


def color_par(x):
    """Dim ``x`` using termcolor's 'dark' attribute."""
    return termcolor.colored(x, attrs=['dark'])
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/ui/colors.py
|
colors.py
|
from .. import logger
logger = logger.getChild("ui")
from .zc_duration_hum import *
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/ui/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
import math
__all__ = [
'duration_compact',
]
def duration_compact(seconds):
    """
    Format a duration (seconds) compactly, e.g. '2d 5h' or '1m 30s'.

    A smaller unit is shown only while the next-larger one is small
    (< 3), mirroring how humans report durations. Returns '' for 0.
    """
    seconds = int(math.ceil(seconds))
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    years, days = divmod(days, 365)
    # divmod of ints yields ints; the old int() re-casts were redundant.

    duration = []
    if years > 0:
        duration.append('%dy' % years)
    else:
        if days > 0:
            duration.append('%dd' % days)
        if (days < 3) and (years == 0):
            if hours > 0:
                duration.append('%dh' % hours)
            if (hours < 3) and (days == 0):
                if minutes > 0:
                    duration.append('%dm' % minutes)
                if (minutes < 3) and (hours == 0):
                    if seconds > 0:
                        duration.append('%ds' % seconds)
    return ' '.join(duration)
#
# def duration_human(seconds):
# ''' Code modified from
# http://darklaunch.com/2009/10/06
# /python-time-duration-human-friendly-timestamp
# '''
# seconds = int(math.ceil(seconds))
# minutes, seconds = divmod(seconds, 60)
# hours, minutes = divmod(minutes, 60)
# days, hours = divmod(hours, 24)
# years, days = divmod(days, 365.242199)
#
# minutes = int(minutes)
# hours = int(hours)
# days = int(days)
# years = int(years)
#
# duration = []
# if years > 0:
# duration.append('%d year' % years + 's' * (years != 1))
# else:
# if days > 0:
# duration.append('%d day' % days + 's' * (days != 1))
# if (days < 3) and (years == 0):
# if hours > 0:
# duration.append('%d hour' % hours + 's' * (hours != 1))
# if (hours < 3) and (days == 0):
# if minutes > 0:
# duration.append('%d min' % minutes +
# 's' * (minutes != 1))
# if (minutes < 3) and (hours == 0):
# if seconds > 0:
# duration.append('%d sec' % seconds +
# 's' * (seconds != 1))
#
# return ' '.join(duration)
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/ui/zc_duration_hum.py
|
zc_duration_hum.py
|
__all__ = [
'describe_type',
]
def describe_type(x):
    """ Returns a friendly description of the type of x. """
    if not hasattr(x, '__class__'):
        # for extension classes (spmatrix)
        return str(type(x))
    c = x.__class__
    if hasattr(x, '__name__'):
        # x is itself a class/function; report its metaclass's name
        return '%s' % c.__name__
    return str(c)
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/types/zc_describe_type.py
|
zc_describe_type.py
|
from zuper_commons.text import pretty_msg
__all__ = [
'check_isinstance',
]
def check_isinstance(ob, expected, **kwargs):
    """Raise (via raise_type_mismatch) unless ``ob`` is an instance of ``expected``."""
    if isinstance(ob, expected):
        return
    kwargs['object'] = ob
    raise_type_mismatch(ob, expected, **kwargs)
def raise_type_mismatch(ob, expected, **kwargs):
    """ Raises an exception concerning ob having the wrong type. """
    header = 'Object not of expected type:'
    header += '\n expected: {}'.format(expected)
    header += '\n obtained: {}'.format(type(ob))
    raise ValueError(pretty_msg(header, **kwargs))
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/types/zc_checks.py
|
zc_checks.py
|
from .. import logger
logger = logger.getChild("types")
from .zc_checks import *
from .zc_describe_type import *
from .zc_describe_values import *
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/types/__init__.py
|
__init__.py
|
def describe_value(x, clip=80):
    """ Describes an object, for use in the error messages.
        Short description, no multiline.
    """
    if hasattr(x, 'shape') and hasattr(x, 'dtype'):
        # numpy-like array: summarize shape and dtype
        shape_desc = 'x'.join(str(i) for i in x.shape)
        desc = 'array[%r](%s) ' % (shape_desc, x.dtype)
    else:
        from .zc_describe_type import describe_type
        desc = 'Instance of %s: ' % describe_type(x)
    final = desc + clipped_repr(x, clip - len(desc))
    return remove_newlines(final)
def clipped_repr(x, clip):
    """repr(x), truncated to at most ``clip`` characters with a '... [clip]' marker."""
    s = repr(x)
    if len(s) > clip:
        clip_tag = '... [clip]'
        s = s[:clip - len(clip_tag)] + clip_tag
    return s
def remove_newlines(s):
    """Collapse a multi-line string onto one line (each newline becomes a space)."""
    return ' '.join(s.split('\n'))
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/types/zc_describe_values.py
|
zc_describe_values.py
|
import io
import logging
import traceback
from logging import currentframe
from os.path import normcase
def monkeypatch_findCaller():
    """Replace logging.Logger.findCaller so that frames from THIS wrapper module
    are skipped when logging computes filename/lineno/funcName.

    NOTE(review): the replacement signature is (self, stack_info=False); newer
    Python versions added a ``stacklevel`` parameter — confirm compatibility.
    """
    # Normalize this module's own source path so frames in it can be recognized.
    if __file__.lower()[-4:] in ['.pyc', '.pyo']:
        _wrapper_srcfile = __file__.lower()[:-4] + '.py'
    else:
        _wrapper_srcfile = __file__
    _wrapper_srcfile = normcase(_wrapper_srcfile)

    def findCaller(self, stack_info=False):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        # On some versions of IronPython, currentframe() returns None if
        # IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)", None
        # Walk up the stack, skipping frames that belong to this wrapper
        # module or to the logging package itself.
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = normcase(co.co_filename)
            if filename == _wrapper_srcfile or filename == logging._srcfile:
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                # Capture a textual traceback, without the trailing newline.
                sio = io.StringIO()
                sio.write('Stack (most recent call last):\n')
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        return rv

    # Global side effect: patches every Logger in the process.
    logging.Logger.findCaller = findCaller
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/logs/hacks.py
|
hacks.py
|
# coding=utf-8
import logging
import termcolor
__all__ = ['setup_logging_color', 'setup_logging_format', 'setup_logging']
def get_FORMAT_datefmt():
    """Return the (format, datefmt) pair for log lines, with a dimmed metadata prefix."""
    prefix = termcolor.colored(
        '%(asctime)s|%(name)s|%(filename)s:%(lineno)s|%(funcName)s(): ',
        attrs=['dark'])
    return prefix + "%(message)s", "%H:%M:%S"
def setup_logging_format():
    """Install the standard line format on the root logger's stream handlers."""
    import logging
    from logging import Formatter, Logger, StreamHandler

    fmt, datefmt = get_FORMAT_datefmt()
    logging.basicConfig(format=fmt, datefmt=datefmt)
    root_handlers = Logger.root.handlers  # @UndefinedVariable
    if root_handlers:
        for handler in root_handlers:
            if isinstance(handler, StreamHandler):
                handler.setFormatter(Formatter(fmt, datefmt=datefmt))
    else:
        # basicConfig above normally installs a handler; this is a fallback.
        logging.basicConfig(format=fmt, datefmt=datefmt)
def add_coloring_to_emit_ansi(fn):
    """Wrap a Handler.emit so each line of the record's message is ANSI-colored by level."""
    RESET = '\x1b[0m'  # normal

    def wrapped(*args):
        record = args[1]
        levelno = record.levelno
        if levelno >= 40:
            color = '\x1b[31m'  # red (ERROR and CRITICAL)
        elif levelno >= 30:
            color = '\x1b[33m'  # yellow
        elif levelno >= 20:
            color = '\x1b[32m'  # green
        elif levelno >= 10:
            color = '\x1b[35m'  # pink
        else:
            color = '\x1b[0m'  # normal
        colored = ['%s%s%s' % (color, line, RESET)
                   for line in str(record.msg).split('\n')]
        record.msg = "\n".join(colored)
        return fn(*args)

    return wrapped
def setup_logging_color():
    """On non-Windows platforms, patch StreamHandler.emit to colorize output by level."""
    import platform
    if platform.system() != 'Windows':
        patched = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
        logging.StreamHandler.emit = patched
def setup_logging():
    """Configure logging: colors first, then the line format."""
    # logging.basicConfig()
    setup_logging_color()
    setup_logging_format()
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/logs/col_logging.py
|
col_logging.py
|
from .hacks import *
from .col_logging import *
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/logs/__init__.py
|
__init__.py
|
import re
# Matches 2- or 3-character ANSI SGR sequences like '\x1b[1m' or '\x1b[32m'.
# Raw string: the original non-raw literal contained the invalid escape '\['
# (DeprecationWarning; an error in future Python versions).
escape = re.compile(r'\x1b\[..?m')
def remove_escapes(s):
    """Strip ANSI escape sequences from ``s``."""
    return escape.sub('', s)


def get_length_on_screen(s):
    """Length of ``s`` as it appears on screen (ANSI escapes excluded)."""
    stripped = remove_escapes(s)
    return len(stripped)
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/text/coloring.py
|
coloring.py
|
from typing import *
from .zc_indenting import indent
__all__ = [
'pretty_dict',
'pretty_msg',
'format_error',
]
def pretty_msg(head: str, **kwargs: Any):
    """Format ``head`` plus keyword details as an indented block (see pretty_dict)."""
    return pretty_dict(head, kwargs)


format_error = pretty_msg
def pretty_dict(head: Optional[str],
                d: Dict[str, Any],
                omit_falsy=False,
                sort_keys=False):
    """Render ``d`` as an aligned, '|'-indented block under ``head``."""
    if not d:
        return head + ': (empty dict)' if head else '(empty dict)'
    width = max(len(str(key)) for key in d)
    keys = sorted(d) if sort_keys else list(d)
    lines = []
    for key in keys:
        value = d[key]
        if key == '__builtins__':
            value = '(hiding __builtins__)'
        # optionally drop falsy values (ints and 'conclusive' objects are kept)
        skip = (omit_falsy
                and not hasattr(value, 'conclusive')
                and not isinstance(value, int)
                and not value)
        if skip:
            continue
        prefix = (str(key) + ':').rjust(width + 1) + ' '
        if isinstance(value, TypeVar):
            # noinspection PyUnresolvedReferences
            value = f'TypeVar({value.__name__}, bound={value.__bound__})'
        if isinstance(value, dict):
            value = pretty_dict('', value)
        lines.extend(indent(value, '', prefix).split('\n'))
    body = indent("\n".join(lines), '| ')
    return (head + '\n' if head else '') + body
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/text/zc_pretty_dicts.py
|
zc_pretty_dicts.py
|
from typing import *
from .coloring import get_length_on_screen
__all__ = ['indent']
def indent(s: str, prefix: str, first: Optional[str] = None) -> str:
    """Prefix every line of ``s``; the first line may get a different,
    right-aligned prefix."""
    if not isinstance(s, str):
        s = u'{}'.format(s)
    assert isinstance(prefix, str), type(prefix)
    try:
        lines = s.split('\n')
    except UnicodeDecodeError:
        print(type(s))  # XXX
        print(s)  # XXX
        lines = [s]
    if not lines:
        return u''
    if first is None:
        first = prefix
    # pad the shorter prefix so both align on screen (escapes excluded)
    width = max(get_length_on_screen(prefix), get_length_on_screen(first))
    prefix = ' ' * (width - get_length_on_screen(prefix)) + prefix
    first = ' ' * (width - get_length_on_screen(first)) + first
    out = [u'%s%s' % (prefix, line.rstrip()) for line in lines]
    out[0] = u'%s%s' % (first, lines[0].rstrip())
    return '\n'.join(out)
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/text/zc_indenting.py
|
zc_indenting.py
|
from typing import Union
__all__ = [
'get_md5',
'get_sha1',
]
def get_md5(contents: Union[bytes, str]) -> str:
    """ Returns an hexdigest (string).

        If the contents is a string, then it is encoded as utf-8.
    """
    if isinstance(contents, str):
        contents = contents.encode('utf-8')
    from zuper_commons.types import check_isinstance
    check_isinstance(contents, bytes)
    import hashlib
    m = hashlib.md5()
    m.update(contents)
    # hexdigest() always returns str; the old check_isinstance(s, str)
    # post-check was dead code and has been removed.
    return m.hexdigest()
def get_sha1(contents: bytes) -> str:
    """ Returns an hexdigest (string) """
    from zuper_commons.types import check_isinstance
    check_isinstance(contents, bytes)
    import hashlib
    # hexdigest() always returns str; the old post-check was dead code.
    return hashlib.sha1(contents).hexdigest()
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/text/zc_quick_hash.py
|
zc_quick_hash.py
|
# -*- coding: utf-8 -*-
import re
from typing import List, Union, Iterator
__all__ = [
'expand_string',
'get_wildcard_matches',
'wildcard_to_regexp',
]
def flatten(seq):
    """Concatenate an iterable of iterables into a single list."""
    return [item for chunk in seq for item in chunk]
def expand_string(x: Union[str, List[str]], options: List[str]) -> List[str]:
    """
    Expand a comma-separated and/or wildcard expression (or a list of
    them) against the list ``options``.
    """
    if isinstance(x, list):
        return flatten(expand_string(y, options) for y in x)
    elif isinstance(x, str):
        x = x.strip()
        if ',' in x:
            splat = [_ for _ in x.split(',') if _]  # remove empty
            return flatten(expand_string(y, options) for y in splat)
        elif '*' in x:
            return list(expand_wildcard(x, options))
        else:
            return [x]
    else:
        # was `assert False`, which disappears under `python -O`
        msg = 'Expected a string or a list of strings, got %r' % (x,)
        raise ValueError(msg)
def wildcard_to_regexp(arg: str):
    """ Returns a regular expression from a shell wildcard expression. """
    # Raw strings: plain '\A'/'\Z' are invalid escape sequences
    # (DeprecationWarning, an error in future Python versions).
    # NOTE: other regex metacharacters in ``arg`` are intentionally NOT
    # escaped, preserving the historical behavior.
    return re.compile(r'\A' + arg.replace('*', '.*') + r'\Z')
def has_wildcard(s: str) -> bool:
    """True if ``s`` contains a '*' wildcard."""
    return '*' in s
def expand_wildcard(wildcard: str, universe: List[str]) -> List[str]:
    """
    Expands a wildcard expression against the given list.
    Raises ValueError if none found.

    :param wildcard: string with '*'
    :param universe: a list of strings
    """
    if not has_wildcard(wildcard):
        raise ValueError(f'No wildcards in {wildcard!r}.')
    matches = list(get_wildcard_matches(wildcard, universe))
    if not matches:
        raise ValueError(f'Could not find matches for pattern {wildcard!r} in {universe}.')
    return matches
def get_wildcard_matches(wildcard: str, universe: List[str]) -> Iterator[str]:
    """
    Expands a wildcard expression against the given list;
    yields each matching string.

    :param wildcard: string with '*'
    :param universe: a list of strings
    """
    regexp = wildcard_to_regexp(wildcard)
    yield from (candidate for candidate in universe if regexp.match(candidate))
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/text/zc_wildcards.py
|
zc_wildcards.py
|
from .. import logger
logger = logger.getChild("text")
from .zc_indenting import *
from .zc_pretty_dicts import *
from .zc_quick_hash import *
from .zc_wildcards import *
|
zuper-commons
|
/zuper-commons-3.0.4.tar.gz/zuper-commons-3.0.4/src/zuper_commons/text/__init__.py
|
__init__.py
|
from .func import my_function
|
zuper-ipce-comp
|
/zuper_ipce_comp-0.1.0-cp37-cp37m-macosx_10_14_x86_64.whl/zuper_ipce_comp/__init__.py
|
__init__.py
|
from setuptools import find_packages, setup
def get_version(filename):
    """Extract __version__ from a Python source file without importing it."""
    import ast
    version = None
    with open(filename) as f:
        for line in f:
            if line.startswith("__version__"):
                # .value instead of the deprecated ast.Str `.s` alias
                # (removed in newer Python versions).
                version = ast.parse(line).body[0].value.value
                break
        else:
            raise ValueError("No version found in %r." % filename)
    if version is None:
        raise ValueError(filename)
    return version
shell_version = get_version(filename="src/zuper_ipce/__init__.py")

line = 'z5'  # release-line suffix for the distribution name

setup(
    name=f"zuper-ipce-{line}",
    package_dir={"": "src"},
    packages=find_packages("src"),
    version=shell_version,
    zip_safe=False,
    entry_points={
        "console_scripts": [
            # 'zj = zuper_ipce.zj:zj_main',
            "json2cbor = zuper_ipce.json2cbor:json2cbor_main",
            "cbor2json = zuper_ipce.json2cbor:cbor2json_main",
            "cbor2yaml = zuper_ipce.json2cbor:cbor2yaml_main",
        ]
    },
    install_requires=[
        "oyaml",
        "pybase64",
        "PyYAML",
        "validate_email",
        "mypy_extensions",
        "typing_extensions",
        "nose",
        "coverage>=1.4.33",
        "dataclasses",
        "jsonschema",
        "cbor2<5",
        "numpy",  # was listed twice; duplicate removed
        "base58",
        "zuper-commons-z5",
        "zuper-typing-z5",
        "frozendict",
        "pytz",
        "termcolor",
    ],
)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/setup.py
|
setup.py
|
import io
import json
import select
import time
import traceback
from io import BufferedReader
from json import JSONDecodeError
from typing import Iterator
import base58
import cbor2
from . import logger
from .json_utils import (
decode_bytes_before_json_deserialization,
encode_bytes_before_json_serialization,
)
from .utils_text import oyaml_dump
__all__ = [
"read_cbor_or_json_objects",
"json2cbor_main",
"cbor2json_main",
"cbor2yaml_main",
"read_next_cbor",
"read_next_either_json_or_cbor",
]
def json2cbor_main() -> None:
    """CLI entry point: read JSON (or CBOR) objects from stdin, write CBOR to stdout."""
    # Unbuffered binary streams via /dev/std* so the tool composes in pipelines.
    fo = open("/dev/stdout", "wb", buffering=0)
    fi = open("/dev/stdin", "rb", buffering=0)
    # noinspection PyTypeChecker
    # BufferedReader provides the peek() the reader relies on;
    # buffer_size=1 presumably minimizes over-reading from the pipe — confirm.
    fi = BufferedReader(fi, buffer_size=1)
    for j in read_cbor_or_json_objects(fi):
        c = cbor2.dumps(j)
        fo.write(c)
        fo.flush()  # flush per object so downstream consumers see it immediately
def cbor2json_main() -> None:
    """CLI entry point: read CBOR objects from stdin, write newline-separated JSON to stdout."""
    fo = open("/dev/stdout", "wb", buffering=0)
    fi = open("/dev/stdin", "rb", buffering=0)
    for j in read_cbor_objects(fi):
        # bytes values are not JSON-serializable; encode them first
        j = encode_bytes_before_json_serialization(j)
        ob = json.dumps(j)
        ob = ob.encode("utf-8")
        fo.write(ob)
        fo.write(b"\n")
        fo.flush()
def cbor2yaml_main() -> None:
    """CLI entry point: read CBOR objects from stdin, write YAML documents to stdout."""
    fo = open("/dev/stdout", "wb")
    fi = open("/dev/stdin", "rb")
    for j in read_cbor_objects(fi):
        ob = oyaml_dump(j)
        ob = ob.encode("utf-8")
        fo.write(ob)
        fo.write(b"\n")
        fo.flush()
def read_cbor_or_json_objects(f, timeout=None) -> Iterator:
    """ Reads cbor or line-separated json objects from the binary file f."""
    while True:
        try:
            yield read_next_either_json_or_cbor(f, timeout=timeout)
        except StopIteration:
            # EOF is signalled by the reader via StopIteration; catching it
            # here keeps the generator PEP 479-safe.
            break
        # The previous `except TimeoutError: raise` was a no-op and was
        # removed; TimeoutError still propagates to the caller unchanged.
def read_cbor_objects(f, timeout=None) -> Iterator:
    """ Reads cbor objects from the binary file f."""
    while True:
        try:
            yield read_next_cbor(f, timeout=timeout)
        except StopIteration:
            # EOF; end the generator cleanly (PEP 479-safe).
            break
        # The previous `except TimeoutError: raise` was a no-op and was
        # removed; TimeoutError still propagates to the caller unchanged.
def read_next_either_json_or_cbor(f, timeout=None, waiting_for: str = None) -> dict:
    """ Raises StopIteration if it is EOF.
        Raises TimeoutError if over timeout"""
    # The select/progress-logging loop here was a verbatim copy of
    # wait_for_data(); call it instead so the logic lives in one place.
    wait_for_data(f, timeout=timeout, waiting_for=waiting_for)

    first = f.peek(1)[:1]
    if len(first) == 0:
        msg = "Detected EOF on %s." % f
        if waiting_for:
            msg += " " + waiting_for
        raise StopIteration(msg)
    # logger.debug(f'first char is {first}')
    # A JSON document on its own line starts with whitespace or '{';
    # anything else is treated as CBOR.
    if first in [b" ", b"\n", b"{"]:
        line = f.readline()
        line = line.strip()
        if not line:
            msg = "Read empty line. Re-trying."
            logger.warning(msg)
            return read_next_either_json_or_cbor(f)
        # logger.debug(f'line is {line!r}')
        try:
            j = json.loads(line)
        except JSONDecodeError:
            msg = f"Could not decode line {line!r}: {traceback.format_exc()}"
            logger.error(msg)
            return read_next_either_json_or_cbor(f)
        j = decode_bytes_before_json_deserialization(j)
        return j
    else:
        j = cbor2.load(f, tag_hook=tag_hook)
        return j
def tag_hook(decoder, tag, shareable_index=None) -> dict:
    """Decode CBOR tag 42 into a {'/': multibase-ish} link dict.

    Tags other than 42 are returned unchanged (despite the -> dict hint).
    """
    if tag.tag != 42:
        return tag
    encoded = base58.b58encode(tag.value).decode("ascii")
    return {"/": "z" + encoded[1:]}
def wait_for_data(f, timeout=None, waiting_for: str = None):
    """ Blocks until data is readable on f (or f is not selectable).

        Raises TimeoutError if ``timeout`` seconds elapse first.
        (The StopIteration/EOF part of the contract is NOT implemented
        here — see the XXX below; callers detect EOF via peek().)
    """
    # XXX: StopIteration not implemented
    fs = [f]
    t0 = time.time()
    # Re-run select every 3 s so we can log progress while waiting.
    intermediate_timeout = 3.0
    while True:
        try:
            readyr, readyw, readyx = select.select(fs, [], fs, intermediate_timeout)
        except io.UnsupportedOperation:
            # f is not selectable (e.g. a regular file / BytesIO): assume ready.
            break
        if readyr:
            break
        elif readyx:
            logger.warning("Exceptional condition on input channel %s" % readyx)
        else:
            # select timed out: either give up (timeout) or log and retry.
            delta = time.time() - t0
            if (timeout is not None) and (delta > timeout):
                msg = "Timeout after %.1f s." % delta
                logger.error(msg)
                raise TimeoutError(msg)
            else:
                msg = "I have been waiting %.1f s." % delta
                if timeout is None:
                    msg += " I will wait indefinitely."
                else:
                    msg += " Timeout will occurr at %.1f s." % timeout
                if waiting_for:
                    msg += " " + waiting_for
                logger.warning(msg)
def read_next_cbor(f, timeout=None, waiting_for: str = None) -> dict:
    """ Raises StopIteration if it is EOF.
        Raises TimeoutError if over timeout"""
    wait_for_data(f, timeout, waiting_for)
    try:
        j = cbor2.load(f, tag_hook=tag_hook)
        return j
    except OSError as e:
        # errno 29 is ESPIPE ("Illegal seek") on Linux/macOS — presumably
        # raised when the pipe is exhausted; treated as EOF. TODO confirm
        # and prefer errno.ESPIPE over the magic number.
        if e.errno == 29:
            raise StopIteration from None
        raise
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/json2cbor.py
|
json2cbor.py
|
from collections import UserString
from typing import Callable, Dict, NewType
from zuper_typing.exceptions import ZValueError
def valid_email(s: str) -> None:
    """Raise ZValueError if ``s`` is not a valid email address (per validate_email)."""
    import validate_email
    if not validate_email.validate_email(s):
        msg = "Invalid email address."
        raise ZValueError(msg, s=s)
# Map from JSON-schema "format" name to a validator callable, or None when
# no validation is performed for that format (the value is accepted as-is).
json_formats: Dict[str, Callable[[str], None]] = {
    "date-time": None,
    "email": valid_email,
    "ipv4": None,
    "ipv6": None,
    "uri": None,
    "uri-reference": None,
    "json-pointer": None,
    "uri-template": None,
    # others:
    "domain": None,
    "multihash": None,
}
def make_special(name: str, sformat: str) -> type:
    """Create a named UserString subclass whose constructor runs the
    validator registered for ``sformat`` in json_formats (if any)."""
    validator = json_formats[sformat]

    class Special(UserString):
        data: str

        def __init__(self, seq: object):
            UserString.__init__(self, seq)
            # validate only when a validator is registered for this format
            if validator is not None:
                validator(self.data)

    # give the class the requested public name
    return type(name, (Special,), {})
__all__ = [
    "URL",
    "DateTimeString",
    "Email",
    "IP4",
    "IP6",
    "URI",
    "URIReference",
    "JSONPointer",
    "URITemplate",
    "Domain",
    "Multihash",
    # 'IPDELink',
]

# Concrete validated-string types, one per JSON format above.
URL = make_special("URL", "uri")
DateTimeString = make_special("DateTimeString", "date-time")
Email = make_special("Email", "email")
IP4 = make_special("IP4", "ipv4")
IP6 = make_special("IP6", "ipv6")
URI = make_special("URI", "uri")
# NOTE(review): URIReference uses format "uri", not "uri-reference" —
# possibly intentional (both are unvalidated), but confirm.
URIReference = make_special("URIReference", "uri")
JSONPointer = make_special("JSONPointer", "json-pointer")
URITemplate = make_special("URITemplate", "uri-template")
Domain = make_special("Domain", "domain")
Multihash = make_special("Multihash", "multihash")
IPDELink = NewType("IPDELink", str)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/special_strings.py
|
special_strings.py
|
import logging

logging.basicConfig()  # ensure a handler exists even if the app configured none
logger = logging.getLogger("zj")  # package-local logger
logger.setLevel(logging.DEBUG)  # verbose by default; callers may raise the level
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/logging.py
|
logging.py
|
from datetime import datetime
from typing import Dict, List, Union
from zuper_typing.annotations_tricks import is_Any
# IPCE: the JSON/CBOR-compatible intermediate representation — primitives,
# datetimes, and (recursively) lists and string-keyed dicts of IPCE values.
IPCE = Union[
    int, str, float, bytes, datetime, List["IPCE"], Dict[str, "IPCE"], type(None)
]

__all__ = ["IPCE", "TypeLike"]

from zuper_typing.aliases import TypeLike

_ = TypeLike  # keep the re-exported name referenced so the import is not "unused"
def is_unconstrained(t: TypeLike):
    """True if ``t`` places no constraint on values (i.e. Any or object)."""
    assert t is not None
    return is_Any(t) or t is object
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/types.py
|
types.py
|
import numpy as np
from zuper_commons.types import check_isinstance
from .types import IPCE
def ipce_from_numpy_array(x: np.ndarray) -> IPCE:
    """Serialize an ndarray as an IPCE dict: shape, dtype name, raw bytes."""
    from .ipce_spec import sorted_dict_cbor_ord
    raw = {"shape": list(x.shape), "dtype": x.dtype.name, "data": x.tobytes()}
    return sorted_dict_cbor_ord(raw)
def numpy_array_from_ipce(d: IPCE) -> np.ndarray:
    """Inverse of ipce_from_numpy_array: rebuild the ndarray from shape/dtype/data."""
    data: bytes = d["data"]
    check_isinstance(data, bytes)
    flat = np.frombuffer(data, dtype=d["dtype"])
    return flat.reshape(tuple(d["shape"]))
#
#
# def bytes_from_numpy(a: np.ndarray) -> bytes:
# import h5py
# io = BytesIO()
# with h5py.File(io) as f:
# # f.setdefault("compression", "lzo")
# f['value'] = a
# uncompressed = io.getvalue()
#
# compressed_data = zlib.compress(uncompressed)
# return compressed_data
#
#
# def numpy_from_bytes(b: bytes) -> np.ndarray:
# b = zlib.decompress(b)
# import h5py
# io = BytesIO(b)
# with h5py.File(io) as f:
# # f.setdefault("compression", "lzw")
# a = f['value']
# res = np.array(a)
# return res
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/numpy_encoding.py
|
numpy_encoding.py
|
import hashlib
from typing import Any
import base58
__all__ = ["get_sha256_base58"]
def get_sha256_base58(contents: bytes) -> bytes:
    """SHA-256 digest of ``contents``, base58-encoded, as bytes."""
    digest = hashlib.sha256(contents).digest()
    return base58.b58encode(digest)
import oyaml
def oyaml_dump(x: object) -> str:
    """Serialize ``x`` to YAML, preserving dict insertion order (oyaml)."""
    return oyaml.dump(x)


def oyaml_load(x: str, **kwargs: Any) -> object:
    """Parse YAML with oyaml.

    NOTE(review): without an explicit Loader, yaml.load can construct
    arbitrary Python objects and is unsafe on untrusted input — callers
    should pass Loader=yaml.SafeLoader unless the source is trusted.
    """
    return oyaml.load(x, **kwargs)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/utils_text.py
|
utils_text.py
|
import datetime
from dataclasses import is_dataclass
from decimal import Decimal
from numbers import Number
from typing import (
Callable,
cast,
ClassVar,
Dict,
List,
NewType,
Optional,
Set,
Tuple,
Type,
)
import numpy as np
from zuper_typing.aliases import TypeLike
from zuper_typing.annotations_tricks import (
get_Callable_info,
get_ClassVar_arg,
get_Dict_args,
get_FixedTupleLike_args,
get_List_arg,
get_NewType_arg,
get_NewType_name,
get_Optional_arg,
get_Set_arg,
get_Type_arg,
get_Union_args,
get_VarTuple_arg,
is_Any,
is_Callable,
is_ClassVar,
is_Dict,
is_FixedTupleLike,
is_ForwardRef,
is_List,
is_NewType,
is_Optional,
is_Set,
is_TupleLike,
is_Type,
is_TypeVar,
is_Union,
is_VarTuple,
make_Tuple,
make_Union,
)
from zuper_typing.monkey_patching_typing import my_dataclass, original_dict_getitem
from zuper_typing.my_dict import (
CustomDict,
CustomList,
CustomSet,
get_CustomDict_args,
get_CustomList_arg,
get_CustomSet_arg,
is_CustomDict,
is_CustomList,
is_CustomSet,
make_dict,
make_list,
make_set,
)
# def resolve_all(T, globals_):
# """
# Returns either a type or a generic alias
#
#
# :return:
# """
# if isinstance(T, type):
# return T
#
# if is_Optional(T):
# t = get_Optional_arg(T)
# t = resolve_all(t, globals_)
# return Optional[t]
#
# # logger.debug(f'no thing to do for {T}')
# return T
def recursive_type_subst(
    T: TypeLike, f: Callable[[TypeLike], TypeLike], ignore: tuple = ()
) -> TypeLike:
    """Recursively rebuild type ``T``, applying ``f`` to its leaf types.

    Composite types (Optional, Union, tuples, dict/list/set variants,
    ClassVar, NewType, dataclasses, Type[...], Callable) are traversed and
    reconstructed only if a subterm actually changed; otherwise the original
    object is returned unchanged.  Leaves (scalars, TypeVars, Any, forward
    refs, Placeholder classes) are passed directly to ``f``.

    ``ignore`` accumulates already-visited types to break cycles.
    """
    if T in ignore:
        # logger.info(f'ignoring {T} in {ignore}')
        return T
    # Recurse with T added to the cycle guard.
    r = lambda _: recursive_type_subst(_, f, ignore + (T,))
    if is_Optional(T):
        a = get_Optional_arg(T)
        a2 = r(a)
        if a == a2:
            return T
        return Optional[a2]
    elif is_ForwardRef(T):
        return f(T)
    elif is_Union(T):
        ts0 = get_Union_args(T)
        ts = tuple(r(_) for _ in ts0)
        if ts0 == ts:
            return T
        return make_Union(*ts)
    elif is_TupleLike(T):
        if is_VarTuple(T):
            X = get_VarTuple_arg(T)
            X2 = r(X)
            if X == X2:
                return T
            return Tuple[X2, ...]
        elif is_FixedTupleLike(T):
            args = get_FixedTupleLike_args(T)
            ts = tuple(r(_) for _ in args)
            if args == ts:
                return T
            return make_Tuple(*ts)
        else:
            assert False
    elif is_Dict(T):
        T = cast(Type[Dict], T)
        K, V = get_Dict_args(T)
        K2, V2 = r(K), r(V)
        if (K, V) == (K2, V2):
            return T
        # FIX: previously returned original_dict_getitem((K, V)) — the
        # *original* arguments — silently discarding the substitution.
        # Use the substituted (K2, V2), consistent with the CustomDict,
        # List and Set branches.
        return original_dict_getitem((K2, V2))
    elif is_CustomDict(T):
        T = cast(Type[CustomDict], T)
        K, V = get_CustomDict_args(T)
        K2, V2 = r(K), r(V)
        if (K, V) == (K2, V2):
            return T
        return make_dict(K2, V2)
    elif is_List(T):
        T = cast(Type[List], T)
        V = get_List_arg(T)
        V2 = r(V)
        if V == V2:
            return T
        return List[V2]
    elif is_ClassVar(T):
        V = get_ClassVar_arg(T)
        V2 = r(V)
        if V == V2:
            return T
        return ClassVar[V2]
    elif is_CustomList(T):
        T = cast(Type[CustomList], T)
        V = get_CustomList_arg(T)
        V2 = r(V)
        if V == V2:
            return T
        return make_list(V2)
    elif is_Set(T):
        T = cast(Type[Set], T)
        V = get_Set_arg(T)
        V2 = r(V)
        if V == V2:
            return T
        return make_set(V2)
    elif is_CustomSet(T):
        T = cast(Type[CustomSet], T)
        V = get_CustomSet_arg(T)
        V2 = r(V)
        if V == V2:
            return T
        return make_set(V2)
    elif is_NewType(T):
        name = get_NewType_name(T)
        a = get_NewType_arg(T)
        a2 = r(a)
        if a == a2:
            return T
        return NewType(name, a2)
    elif is_dataclass(T):
        # Rebuild a dataclass with substituted field annotations.
        annotations = dict(getattr(T, "__annotations__", {}))
        annotations2 = {}
        nothing_changed = True
        for k, v0 in list(annotations.items()):
            v2 = r(v0)
            nothing_changed &= v0 == v2
            annotations2[k] = v2
        if nothing_changed:
            return T
        # NOTE(review): only annotations/module/doc/qualname are copied;
        # field defaults and methods are not carried over — confirm intended.
        T2 = my_dataclass(
            type(
                T.__name__,
                (),
                {
                    "__annotations__": annotations2,
                    "__module__": T.__module__,
                    "__doc__": getattr(T, "__doc__", None),
                    "__qualname__": getattr(T, "__qualname__"),
                },
            )
        )
        return T2
    elif T in (
        int,
        bool,
        float,
        Decimal,
        datetime.datetime,
        bytes,
        str,
        type(None),
        type,
        np.ndarray,
        Number,
        object,
    ):
        return f(T)
    elif is_TypeVar(T):
        return f(T)
    elif is_Type(T):
        V = get_Type_arg(T)
        V2 = r(V)
        if V == V2:
            return T
        return Type[V2]
    elif is_Any(T):
        return f(T)
    elif is_Callable(T):
        info = get_Callable_info(T)
        args = []
        for k, v in info.parameters_by_name.items():
            # TODO: add MyNamedArg
            args.append(f(v))
        fret = f(info.returns)
        args = list(args)
        # noinspection PyTypeHints
        return Callable[args, fret]
    elif isinstance(T, type) and "Placeholder" in T.__name__:
        return f(T)
    else:  # pragma: no cover
        raise NotImplementedError(T)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/assorted_recursive_type_subst.py
|
assorted_recursive_type_subst.py
|
from dataclasses import dataclass
from datetime import datetime
from decimal import Decimal
from typing import cast, Dict, NewType, Tuple
from zuper_typing.my_dict import make_dict
JSONSchema = NewType("JSONSchema", dict)
GlobalsDict = Dict[str, object]
ProcessingDict = Dict[str, str]
EncounteredDict = Dict[str, object]
# _SpecialForm = Any
SCHEMA_ID = "http://json-schema.org/draft-07/schema#"
SCHEMA_ATT = "$schema"
HINTS_ATT = "$hints"
ANY_OF = "anyOf"
ALL_OF = "allOf"
ID_ATT = "$id"
REF_ATT = "$ref"
X_CLASSVARS = "classvars"
X_CLASSATTS = "classatts"
X_ORDER = "order"
JSC_FORMAT = "format"
JSC_REQUIRED = "required"
JSC_TYPE = "type"
JSC_ITEMS = "items"
JSC_DEFAULT = "default"
JSC_TITLE = "title"
JSC_NUMBER = "number"
JSC_INTEGER = "integer"
JSC_ARRAY = "array"
JSC_OBJECT = "object"
JSC_ADDITIONAL_PROPERTIES = "additionalProperties"
JSC_PROPERTY_NAMES = "propertyNames"
JSC_DESCRIPTION = "description"
JSC_STRING = "string"
JSC_NULL = "null"
JSC_BOOL = "boolean"
JSC_PROPERTIES = "properties"
JSC_DEFINITIONS = "definitions"
JSC_ALLOF = "allOf"
JSC_ANYOF = "anyOf"
Z_ATT_LSIZE = "lsize"
Z_ATT_TSIZE = "tsize"
X_PYTHON_MODULE_ATT = "__module__"
ATT_PYTHON_NAME = "__qualname__"
JSC_TITLE_NUMPY = "numpy"
JSC_TITLE_SLICE = "slice"
JSC_TITLE_BYTES = "bytes"
JSC_TITLE_DECIMAL = "decimal"
JSC_TITLE_FLOAT = "float"
JSC_TITLE_DATETIME = "datetime"
JSC_TITLE_CALLABLE = "Callable"
JSC_TITLE_TYPE = "type"
JSC_TITLE_CID = "cid"
# JSC_TITLE_TUPLE = 'Tuple'
# JSC_TITLE_LIST = 'List'
JSC_FORMAT_CID = "cid"
SCHEMA_BYTES = cast(
JSONSchema,
{JSC_TYPE: JSC_STRING, JSC_TITLE: JSC_TITLE_BYTES, SCHEMA_ATT: SCHEMA_ID},
)
SCHEMA_CID = cast(
JSONSchema,
{
JSC_TYPE: JSC_STRING,
JSC_TITLE: JSC_TITLE_CID,
JSC_FORMAT: JSC_FORMAT_CID,
SCHEMA_ATT: SCHEMA_ID,
},
)
IPCE_SCALARS = (int, str, float, bytes, datetime, bool, Decimal, type(None))
# check_types = False
CALLABLE_ORDERING = "ordering"
CALLABLE_RETURN = "return"
@dataclass
class IEDO:
    """Options for IPCE deserialization."""

    # If set, look up previously-registered classes instead of rebuilding them.
    use_remembered_classes: bool
    # If set, register classes reconstructed from schemas for later reuse.
    remember_deserialized_classes: bool
ModuleName = QualName = str
n = 0
@dataclass
class IEDS:
    """Mutable state threaded through an IPCE deserialization run."""

    # Symbols available for resolving type names.
    global_symbols: Dict[str, type]
    # Schema $id -> object already seen during this run.
    encountered: Dict
    # Cache of reconstructed classes keyed by (module, qualname).
    # None is a sentinel replaced in __post_init__ (avoids a shared mutable
    # default).  NOTE(review): the annotation declares tuple keys, but the
    # dict is created as make_dict(str, type) — confirm which is intended.
    klasses: Dict[Tuple[ModuleName, QualName], type] = None
    def __post_init__(self):
        pass
        if self.klasses is None:
            self.klasses = make_dict(str, type)()
        # from .logging import logger
        # logger.info('IEDS new')
        # global n
        # n += 1
        # if n == 5:
        #     raise NotImplementedError()
@dataclass
class IESO:
    """Options for IPCE serialization."""

    # Reuse schemas cached by earlier ipce_from_typelike calls.
    use_ipce_from_typelike_cache: bool = True
    # Embed $schema information alongside serialized values.
    with_schema: bool = True
# Exceptions that IPCE error handlers re-raise as-is instead of wrapping
# into serialization/deserialization errors.
IPCE_PASS_THROUGH = (
    NotImplementedError,
    KeyboardInterrupt,
    MemoryError,
    AttributeError,
    NameError,
)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/constants.py
|
constants.py
|
from dataclasses import is_dataclass
from typing import Dict, List, overload, Tuple, TypeVar
from zuper_ipce.constants import JSONSchema
from zuper_typing.exceptions import ZValueError
from .types import IPCE
D = TypeVar("D")
_V = TypeVar("_V")
# Typing overloads only: sorted_dict_cbor_ord preserves the static type of
# its argument (JSONSchema in -> JSONSchema out).
@overload
def sorted_dict_cbor_ord(x: JSONSchema) -> JSONSchema:
    ...
@overload
def sorted_dict_cbor_ord(x: Dict[str, _V]) -> Dict[str, _V]:
    ...
def sorted_dict_cbor_ord(x):
    """Return a copy of ``x`` with keys in canonical CBOR order.

    Canonical CBOR sorts keys by length first, ties broken lexicographically.
    """
    return dict(sorted(x.items(), key=lambda kv: (len(kv[0]), kv[0])))
def sorted_list_cbor_ord(x: List[str]) -> List[str]:
    """Return the strings of ``x`` in canonical CBOR order: shorter first,
    ties broken lexicographically."""
    return sorted(x, key=lambda s: (len(s), s))
IPCL_LINKS = "$links"
IPCL_SELF = "$self"
def assert_sorted_dict_cbor_ord(x: dict):
    """Raise ZValueError unless the keys of ``x`` are already in canonical
    CBOR order (see sorted_list_cbor_ord)."""
    actual = list(x.keys())
    expected = sorted_list_cbor_ord(actual)
    if actual == expected:
        return
    msg = f"x not sorted"
    raise ZValueError(msg, keys=actual, keys2=expected)
def assert_canonical_ipce(ob_ipce: IPCE, max_rec=2) -> None:
    """Check (up to ``max_rec`` levels deep) that ``ob_ipce`` is canonical IPCE.

    Canonical means: dict keys in CBOR order, no leftover $links/$self
    sections, no raw dataclass values, and no tuples (lists are allowed).
    Raises ZValueError on the first violation.
    """
    if isinstance(ob_ipce, dict):
        # NOTE(review): this is a key-membership test, i.e. it rejects only a
        # key that is exactly "/" — confirm whether keys *containing* "/"
        # were meant to be rejected too.
        if "/" in ob_ipce:
            msg = 'Cannot have "/" in here '
            raise ZValueError(msg, ob_ipce=ob_ipce)
        assert_sorted_dict_cbor_ord(ob_ipce)
        if IPCL_LINKS in ob_ipce:
            msg = f"Should have dropped the {IPCL_LINKS} part."
            raise ZValueError(msg, ob_ipce=ob_ipce)
        if IPCL_SELF in ob_ipce:
            # NOTE(review): the message interpolates IPCL_LINKS although the
            # check is for IPCL_SELF — possibly a copy-paste slip.
            msg = f"Re-processing the {IPCL_LINKS}."
            raise ZValueError(msg, ob_ipce=ob_ipce)
        for k, v in ob_ipce.items():
            assert not is_dataclass(v), ob_ipce
            if max_rec > 0:
                assert_canonical_ipce(v, max_rec=max_rec - 1)
    elif isinstance(ob_ipce, list):
        pass
    elif isinstance(ob_ipce, tuple):
        msg = "Tuple is not valid."
        raise ZValueError(msg, ob_ipce=ob_ipce)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/ipce_spec.py
|
ipce_spec.py
|
from datetime import datetime
from .base64_utils import (
decode_bytes_base64,
encode_bytes_base64,
is_encoded_bytes_base64,
)
def transform_leaf(x, transform):
    """Recursively rebuild nested dicts/lists, applying ``transform`` to
    every non-container leaf value."""

    def recurse(node):
        return transform_leaf(node, transform)

    if isinstance(x, dict):
        return {key: recurse(value) for key, value in x.items()}
    elif isinstance(x, list):
        return [recurse(item) for item in x]
    else:
        return transform(x)
from decimal import Decimal
DECIMAL_PREFIX = "decimal:"
def encode_bytes_before_json_serialization(x0):
    """Return a copy of ``x0`` where leaves JSON cannot represent become
    strings: bytes -> base64 data URL, datetime -> ISO 8601,
    Decimal -> "decimal:<value>"."""

    def to_jsonable(leaf):
        if isinstance(leaf, bytes):
            return encode_bytes_base64(leaf)
        if isinstance(leaf, datetime):
            return leaf.isoformat()
        if isinstance(leaf, Decimal):
            return DECIMAL_PREFIX + str(leaf)
        return leaf

    return transform_leaf(x0, to_jsonable)
def decode_bytes_before_json_deserialization(x0):
    """Inverse of encode_bytes_before_json_serialization: turn the string
    encodings produced there back into bytes / Decimal leaves."""

    def from_jsonable(leaf):
        if not isinstance(leaf, str):
            return leaf
        if is_encoded_bytes_base64(leaf):
            return decode_bytes_base64(leaf)
        if leaf.startswith(DECIMAL_PREFIX):
            return Decimal(leaf.replace(DECIMAL_PREFIX, ""))
        return leaf

    return transform_leaf(x0, from_jsonable)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/json_utils.py
|
json_utils.py
|
from typing import cast
from .constants import JSONSchema, REF_ATT
# def schema_hash(k):
# ob_cbor = cbor2.dumps(k)
# ob_cbor_hash = hashlib.sha256(ob_cbor).digest()
# return ob_cbor_hash
#
# def get_all_refs(schema):
# if isinstance(schema, dict):
# if '$ref' in schema:
# yield schema['$ref']
# for _, v in schema.items():
# yield from get_all_refs(v)
# if isinstance(schema, list):
# for v in schema:
# yield from get_all_refs(v)
def make_url(x: str):
    """Build the (intentionally invalid) $id URL for schema name ``x``."""
    assert isinstance(x, str), x
    return "http://invalid.json-schema.org/%s#" % x
def make_ref(x: str) -> JSONSchema:
    """Wrap schema URL ``x`` in a JSON-Schema reference object ({"$ref": x})."""
    assert isinstance(x, str), x
    assert len(x) > 1, x
    return cast(JSONSchema, {REF_ATT: x})
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/schema_utils.py
|
schema_utils.py
|
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, Tuple
from zuper_typing.exceptions import ZValueError
from .constants import JSONSchema, REF_ATT, SCHEMA_ATT, SCHEMA_ID
from .ipce_attr import make_key
from .ipce_spec import assert_canonical_ipce
def assert_canonical_schema(x: JSONSchema):
    """Check that schema ``x`` is canonical.

    A canonical schema is a dict that either declares the draft-07
    $schema id or is a pure $ref, and whose contents are canonical IPCE
    (sorted keys, no tuples/dataclasses).  Raises ZValueError otherwise.
    """
    assert isinstance(x, dict)
    if SCHEMA_ATT in x:
        assert x[SCHEMA_ATT] in [SCHEMA_ID]
    elif REF_ATT in x:
        pass
    else:
        msg = f"No {SCHEMA_ATT} or {REF_ATT}"
        raise ZValueError(msg, x=x)
    assert_canonical_ipce(x)
    # json.dumps(x) # try no bytes
@dataclass
class TRE:
    """Typelike Result Entry: a schema plus the context bindings it used.

    The schema is validated for canonicity at construction time.
    """

    schema: JSONSchema
    # Context bindings (e.g. typevar name -> value) used to build the schema.
    used: Dict[str, str] = field(default_factory=dict)
    def __post_init__(self) -> None:
        try:
            assert_canonical_schema(self.schema)
        except ValueError as e:  # pragma: no cover
            msg = f"Invalid schema"
            raise ZValueError(msg, schema=self.schema) from e
class IPCETypelikeCache:
    """Process-wide schema cache: type key -> {context tuple -> JSONSchema}.

    Class-level mutable state shared by all callers; no locking here.
    """

    c: Dict[Tuple, Dict[Tuple, JSONSchema]] = defaultdict(dict)
# def get_cached():
# return {k[1]: [x for x, _ in v.items()] for k, v in IPCETypelikeCache.c.items()}
def get_ipce_from_typelike_cache(T, context: Dict[str, str]) -> TRE:
    """Look up a previously computed schema for typelike ``T``.

    Entries are stored per context (a tuple of (name, value) pairs); an
    entry matches when all of its bindings agree with ``context``.
    More specific entries (more bindings) are preferred.

    Raises KeyError when nothing cached matches.
    """
    k = make_key(T)
    if k not in IPCETypelikeCache.c:
        raise KeyError()
    items = list(IPCETypelikeCache.c[k].items())
    # Prefer entries with more context bindings (most specific first).
    # FIX: the previous key was len(x[1]) — the size of the *schema* —
    # which contradicted the stated intent; sort by the context size.
    items.sort(key=lambda item: len(item[0]), reverse=True)
    for context0, schema in items:
        if compatible(context0, context):
            return TRE(schema, dict(context0))
    raise KeyError()
def compatible(c0: Tuple[Tuple[str, str]], context: Dict[str, str]) -> bool:
    """True iff every (key, value) binding in ``c0`` also holds in ``context``."""
    return all(key in context and context[key] == value for key, value in c0)
def set_ipce_from_typelike_cache(T, context: Dict[str, str], schema: JSONSchema):
    """Store ``schema`` for ``T`` under its (sorted, hashable) context bindings."""
    context_key = tuple(sorted(context.items()))
    IPCETypelikeCache.c[make_key(T)][context_key] = schema
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/schema_caching.py
|
schema_caching.py
|
import base64
def encode_bytes_base64(data: bytes, mime=None) -> str:
    """Encode ``data`` as a ``data:`` URL with a base64 payload.

    ``mime`` defaults to "binary/octet-stream".
    """
    payload = base64.b64encode(data).decode("ascii")
    mime_type = mime if mime is not None else "binary/octet-stream"
    return "data:%s;base64,%s" % (mime_type, payload)
def is_encoded_bytes_base64(s: str):
    """True iff ``s`` looks like a data: URL carrying a base64 payload."""
    if not s.startswith("data:"):
        return False
    return "base64," in s
def decode_bytes_base64(s: str) -> bytes:
    """Decode a base64 ``data:`` URL (as produced by encode_bytes_base64).

    Raises ValueError if ``s`` is not a base64 data URL.  (Explicit check
    instead of ``assert``, which disappears under ``python -O``.)
    """
    if not (s.startswith("data:") and "base64," in s):
        raise ValueError(f"Not a base64 data URL: {s!r}")
    # Everything after the first "base64," marker is the payload.
    _, _, payload = s.partition("base64,")
    return base64.b64decode(payload)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/base64_utils.py
|
base64_utils.py
|
import dataclasses
import datetime
from dataclasses import dataclass, field, make_dataclass
from decimal import Decimal
from numbers import Number
from typing import (
Any,
Callable,
cast,
ClassVar,
Dict,
List,
NewType,
Optional,
Tuple,
Type,
TypeVar,
)
from zuper_commons.types.exceptions import ZException
from zuper_typing.logging_util import ztinfo
from zuper_typing.zeneric2 import MyABC
_X = TypeVar("_X")
import numpy as np
from zuper_commons.types import check_isinstance
from zuper_typing.annotations_tricks import (
is_ForwardRef,
make_Tuple,
make_Union,
make_VarTuple,
is_ClassVar,
)
from zuper_typing.constants import PYTHON_36
from zuper_typing.exceptions import ZTypeError, ZValueError
from zuper_typing.monkey_patching_typing import (
get_remembered_class,
MyNamedArg,
remember_created_class,
)
from zuper_typing.my_dict import make_dict, make_list, make_set
from zuper_typing.my_intersection import make_Intersection
from .assorted_recursive_type_subst import recursive_type_subst
from .constants import (
ATT_PYTHON_NAME,
CALLABLE_ORDERING,
CALLABLE_RETURN,
IEDO,
ID_ATT,
IEDS,
JSC_ADDITIONAL_PROPERTIES,
JSC_ALLOF,
JSC_ANYOF,
JSC_ARRAY,
JSC_BOOL,
JSC_DEFAULT,
JSC_DEFINITIONS,
JSC_DESCRIPTION,
JSC_INTEGER,
JSC_NULL,
JSC_NUMBER,
JSC_OBJECT,
JSC_PROPERTIES,
JSC_REQUIRED,
JSC_STRING,
JSC_TITLE,
JSC_TITLE_BYTES,
JSC_TITLE_CALLABLE,
JSC_TITLE_DATETIME,
JSC_TITLE_DECIMAL,
JSC_TITLE_FLOAT,
JSC_TITLE_NUMPY,
JSC_TITLE_SLICE,
JSC_TYPE,
JSONSchema,
REF_ATT,
SCHEMA_ATT,
SCHEMA_ID,
X_CLASSATTS,
X_CLASSVARS,
X_ORDER,
X_PYTHON_MODULE_ATT,
)
from .structures import CannotFindSchemaReference
from .types import TypeLike, IPCE, is_unconstrained
@dataclass
class SRE:
    """Schema Resolution result: the reconstructed typelike plus the schema
    $id references resolved while building it."""

    res: TypeLike
    # Map: schema $id -> object it resolved to during this conversion.
    used: Dict[str, object] = dataclasses.field(default_factory=dict)
@dataclass
class SRO:
    """Like SRE, but for a reconstructed *object* rather than a type."""

    res: object
    # Map: schema $id -> object it resolved to during this conversion.
    used: Dict[str, object] = dataclasses.field(default_factory=dict)
def typelike_from_ipce(schema0: JSONSchema, *, iedo: Optional[IEDO] = None) -> TypeLike:
    """Public entry point: reconstruct a type from its IPCE/JSON schema.

    ``iedo`` defaults to options that neither reuse nor remember classes.
    """
    if iedo is None:
        iedo = IEDO(use_remembered_classes=False, remember_deserialized_classes=False)
    ieds = IEDS({}, {})
    sre = typelike_from_ipce_sr(schema0, ieds=ieds, iedo=iedo)
    return sre.res
def typelike_from_ipce_sr(schema0: JSONSchema, *, ieds: IEDS, iedo: IEDO) -> SRE:
    """Wrapper around typelike_from_ipce_sr_: converts low-level
    TypeError/ValueError into ZTypeError and records the result under the
    schema's $id (if any) for later $ref resolution."""
    try:
        sre = typelike_from_ipce_sr_(schema0, ieds=ieds, iedo=iedo)
        assert isinstance(sre, SRE), (schema0, sre)
        res = sre.res
    except (TypeError, ValueError) as e:  # pragma: no cover
        msg = "Cannot interpret schema as a type."
        raise ZTypeError(msg, schema0=schema0) from e
    if ID_ATT in schema0:
        schema_id = schema0[ID_ATT]
        ieds.encountered[schema_id] = res
    return sre
def typelike_from_ipce_sr_(schema0: JSONSchema, *, ieds: IEDS, iedo: IEDO) -> SRE:
    """Core dispatcher: map a JSON schema to the Python type it describes.

    Handles, in order: special markers (Any/object), $ref resolution via
    ``ieds.encountered``, anyOf/allOf, numpy arrays, NewType, then the
    JSON primitive types, Type[...] ("subtype"), objects (Callable, Dict,
    Set, slice, dataclasses) and arrays (tuples/lists).
    """
    check_isinstance(schema0, dict)
    # Work on a copy: $schema and $id are consumed here.
    schema = cast(JSONSchema, dict(schema0))
    # noinspection PyUnusedLocal
    metaschema = schema.pop(SCHEMA_ATT, None)
    schema_id = schema.pop(ID_ATT, None)
    if schema_id:
        if not JSC_TITLE in schema:
            pass
        else:
            # Pre-register the title so self-references can resolve early.
            cls_name = schema[JSC_TITLE]
            ieds.encountered[schema_id] = cls_name
    if schema == {JSC_TITLE: "Any"}:
        return SRE(Any)
    if schema == {}:
        return SRE(object)
    if schema == {JSC_TITLE: "object"}:
        return SRE(object)
    if REF_ATT in schema:
        r = schema[REF_ATT]
        if r == SCHEMA_ID:
            # A reference to the metaschema itself denotes `type`.
            if schema.get(JSC_TITLE, "") == "type":
                return SRE(type)
            else:  # pragma: no cover
                raise NotImplementedError(schema)
        if r in ieds.encountered:
            res = ieds.encountered[r]
            # Record the reference as "used" so callers know this result
            # depends on an in-flight definition.
            return SRE(res, {r: res})
        else:
            msg = f"Cannot evaluate reference {r!r}"
            raise CannotFindSchemaReference(msg, ieds=ieds)
    if JSC_ANYOF in schema:
        return typelike_from_ipce_Union(schema, ieds=ieds, iedo=iedo)
    if JSC_ALLOF in schema:
        return typelike_from_ipce_Intersection(schema, ieds=ieds, iedo=iedo)
    jsc_type = schema.get(JSC_TYPE, None)
    jsc_title = schema.get(JSC_TITLE, "-not-provided-")
    if jsc_title == JSC_TITLE_NUMPY:
        res = np.ndarray
        return SRE(res)
    if jsc_type == "NewType":
        kt = KeepTrackDes(ieds, iedo)
        if "newtype" not in schema:
            original = object
        else:
            nt = schema["newtype"]
            tre = typelike_from_ipce_sr(nt, ieds=ieds, iedo=iedo)
            original = tre.res
        res = NewType(jsc_title, original)
        return kt.sre(res)
    if jsc_type == JSC_STRING:
        # Several Python types serialize as JSON strings; the title
        # disambiguates.
        if jsc_title == JSC_TITLE_BYTES:
            return SRE(bytes)
        elif jsc_title == JSC_TITLE_DATETIME:
            return SRE(datetime.datetime)
        elif jsc_title == JSC_TITLE_DECIMAL:
            return SRE(Decimal)
        else:
            return SRE(str)
    elif jsc_type == JSC_NULL:
        return SRE(type(None))
    elif jsc_type == JSC_BOOL:
        return SRE(bool)
    elif jsc_type == JSC_NUMBER:
        if jsc_title == JSC_TITLE_FLOAT:
            return SRE(float)
        else:
            return SRE(Number)
    elif jsc_type == JSC_INTEGER:
        return SRE(int)
    elif jsc_type == "subtype":
        # Type[X]: the inner schema is under "subtype".
        s = schema["subtype"]
        r = typelike_from_ipce_sr(s, ieds=ieds, iedo=iedo)
        T = Type[r.res]
        return SRE(T, r.used)
    elif jsc_type == JSC_OBJECT:
        if jsc_title == JSC_TITLE_CALLABLE:
            return typelike_from_ipce_Callable(schema, ieds=ieds, iedo=iedo)
        elif jsc_title.startswith("Dict["):
            return typelike_from_ipce_DictType(schema, ieds=ieds, iedo=iedo)
        elif jsc_title.startswith("Set["):
            return typelike_from_ipce_SetType(schema, ieds=ieds, iedo=iedo)
        elif jsc_title == JSC_TITLE_SLICE:
            return SRE(slice)
        else:
            return typelike_from_ipce_dataclass(
                schema, schema_id=schema_id, ieds=ieds, iedo=iedo
            )
    elif jsc_type == JSC_ARRAY:
        return typelike_from_ipce_array(schema, ieds=ieds, iedo=iedo)
    msg = "Cannot recover schema"
    raise ZValueError(msg, schema=schema)
def typelike_from_ipce_Union(schema, *, ieds: IEDS, iedo: IEDO) -> SRE:
    """Reconstruct a Union type from an ``anyOf`` schema.

    A trailing NoneType member is rendered as Optional[...].
    """
    options = schema[JSC_ANYOF]
    kt = KeepTrackDes(ieds, iedo)
    args = [kt.typelike_from_ipce(_) for _ in options]
    if args and args[-1] is type(None):
        rest = args[:-1]
        # FIX: the previous code used only args[0] here, silently dropping
        # the remaining members when anyOf had more than two options.
        if len(rest) == 1:
            res = Optional[rest[0]]
        else:
            res = Optional[make_Union(*rest)]
    else:
        res = make_Union(*args)
    return kt.sre(res)
def typelike_from_ipce_Intersection(schema, *, ieds: IEDS, iedo: IEDO) -> SRE:
    """Reconstruct an intersection type from an ``allOf`` schema."""
    kt = KeepTrackDes(ieds, iedo)
    members = tuple(kt.typelike_from_ipce(option) for option in schema[JSC_ALLOF])
    return kt.sre(make_Intersection(members))
class KeepTrackDes:
    """Helper that accumulates the ``used`` $id references across several
    sub-conversions, so the final SRE reports all of them."""

    def __init__(self, ieds: IEDS, iedo: IEDO):
        self.ieds = ieds
        self.iedo = iedo
        # $id -> resolved object, merged from every sub-conversion.
        self.used = {}
    def typelike_from_ipce(self, x: IPCE):
        """Convert a sub-schema to a type, merging its used references."""
        sre = typelike_from_ipce_sr(x, ieds=self.ieds, iedo=self.iedo)
        self.used.update(sre.used)
        return sre.res
    def object_from_ipce(self, x: IPCE, st: Type[_X] = object) -> _X:
        """Deserialize a value (e.g. a field default) with the shared state."""
        # Imported lazily to avoid a circular import with conv_object_from_ipce.
        from zuper_ipce.conv_object_from_ipce import object_from_ipce_
        res = object_from_ipce_(x, st, ieds=self.ieds, iedo=self.iedo)
        return res
    def sre(self, x: IPCE) -> SRE:
        """Wrap a result together with all references used so far."""
        return SRE(x, self.used)
def typelike_from_ipce_array(schema, *, ieds: IEDS, iedo: IEDO) -> SRE:
    """Reconstruct a tuple or list type from an ``array`` schema.

    A list under "items" means a fixed tuple (one schema per position);
    a single schema means a homogeneous sequence — a variable-length
    Tuple[X, ...] when the title starts with "Tuple[", else a list.
    """
    assert schema[JSC_TYPE] == JSC_ARRAY
    items = schema["items"]
    kt = KeepTrackDes(ieds, iedo)
    if isinstance(items, list):
        args = tuple([kt.typelike_from_ipce(_) for _ in items])
        res = make_Tuple(*args)
    else:
        if schema[JSC_TITLE].startswith("Tuple["):
            V = kt.typelike_from_ipce(items)
            res = make_VarTuple(V)
        else:
            V = kt.typelike_from_ipce(items)
            res = make_list(V)
    return kt.sre(res)
def typelike_from_ipce_DictType(schema, *, ieds: IEDS, iedo: IEDO) -> SRE:
    """Reconstruct a Dict[K, V] type from an object schema.

    JSON object keys are strings, so K defaults to str; non-string keys are
    serialized via a synthetic "FakeValues" wrapper carrying the real key
    and value types in its annotations.
    """
    K = str
    kt = KeepTrackDes(ieds, iedo)
    V = kt.typelike_from_ipce(schema[JSC_ADDITIONAL_PROPERTIES])
    # Unwrap the FakeValues wrapper (detected by name) to recover K and V.
    if isinstance(V, type) and V.__name__.startswith("FakeValues"):
        K = V.__annotations__["real_key"]
        V = V.__annotations__["value"]
    try:
        D = make_dict(K, V)
    except (TypeError, ValueError) as e:  # pragma: no cover
        msg = f"Cannot reconstruct dict type."
        raise ZTypeError(msg, K=K, V=V, ieds=ieds) from e
    return kt.sre(D)
def typelike_from_ipce_SetType(schema, *, ieds: IEDS, iedo: IEDO):
    """Reconstruct a Set[...] type; the element schema lives under
    additionalProperties."""
    if JSC_ADDITIONAL_PROPERTIES not in schema:  # pragma: no cover
        msg = f"Expected {JSC_ADDITIONAL_PROPERTIES!r} in @schema."
        raise ZValueError(msg, schema=schema)
    kt = KeepTrackDes(ieds, iedo)
    element = kt.typelike_from_ipce(schema[JSC_ADDITIONAL_PROPERTIES])
    return kt.sre(make_set(element))
def typelike_from_ipce_Callable(schema: JSONSchema, *, ieds: IEDS, iedo: IEDO):
    """Reconstruct a Callable[...] type.

    Parameter schemas live in "definitions" (keyed by name or position
    index), their order in CALLABLE_ORDERING, and the return type under
    CALLABLE_RETURN.  Non-positional keys become named arguments.
    """
    kt = KeepTrackDes(ieds, iedo)
    schema = dict(schema)
    definitions = dict(schema[JSC_DEFINITIONS])
    ret = kt.typelike_from_ipce(definitions.pop(CALLABLE_RETURN))
    others = []
    for k in schema[CALLABLE_ORDERING]:
        d = kt.typelike_from_ipce(definitions[k])
        # Numeric keys are positional parameters; others carry a name.
        if not looks_like_int(k):
            d = MyNamedArg(d, k)
        others.append(d)
    # noinspection PyTypeHints
    res = Callable[others, ret]
    return kt.sre(res)
def looks_like_int(k: str) -> bool:
    """True iff ``k`` parses as a base-10 integer literal."""
    try:
        int(k)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return False
    else:
        return True
def typelike_from_ipce_dataclass(
    res: JSONSchema, schema_id: Optional[str], *, ieds: IEDS, iedo: IEDO
) -> SRE:
    """Reconstruct a dataclass from its object schema.

    Steps: consult caches; rebuild TypeVars from "definitions" (making the
    class Generic if any exist); register a Placeholder under ``schema_id``
    so self-references can resolve while the class is being built; rebuild
    the fields (with defaults), create the dataclass, patch Placeholder
    references back to the real class, set class attributes and metadata,
    and finally remember the class if it did not depend on any still-open
    reference.
    """
    kt = KeepTrackDes(ieds, iedo)
    assert res[JSC_TYPE] == JSC_OBJECT
    cls_name = res[JSC_TITLE]
    definitions = res.get(JSC_DEFINITIONS, {})
    required = res.get(JSC_REQUIRED, [])
    properties = res.get(JSC_PROPERTIES, {})
    classvars = res.get(X_CLASSVARS, {})
    classatts = res.get(X_CLASSATTS, {})
    if (
        not X_PYTHON_MODULE_ATT in res
    ) or not ATT_PYTHON_NAME in res:  # pragma: no cover
        msg = f"Cannot find attributes for {cls_name!r}."
        raise ZValueError(msg, res=res)
    module_name = res[X_PYTHON_MODULE_ATT]
    qual_name = res[ATT_PYTHON_NAME]
    key = (module_name, qual_name)
    # Fast path 1: a class remembered from a previous serialization.
    if iedo.use_remembered_classes:
        try:
            res = get_remembered_class(module_name, qual_name)
            return SRE(res)
        except KeyError:
            pass
    # Fast path 2: already reconstructed during this run.
    if key in ieds.klasses:
        return SRE(ieds.klasses[key], {})
    typevars: List[TypeVar] = []
    for tname, t in definitions.items():
        bound = kt.typelike_from_ipce(t)
        # noinspection PyTypeHints
        if is_unconstrained(bound):
            bound = None
        # noinspection PyTypeHints
        tv = TypeVar(tname, bound=bound)
        typevars.append(tv)
        if ID_ATT in t:
            ieds.encountered[t[ID_ATT]] = tv
    if typevars:
        typevars2: Tuple[TypeVar, ...] = tuple(typevars)
        from zuper_typing import Generic
        # TODO: typevars
        if PYTHON_36:  # pragma: no cover
            # noinspection PyUnresolvedReferences
            base = Generic.__class_getitem__(typevars2)
        else:
            # noinspection PyUnresolvedReferences
            base = Generic.__class_getitem__(typevars2)
        bases = (base,)
    else:
        # No typevars: still use the custom metaclass via a trivial base.
        class B(metaclass=MyABC):
            pass
        bases = (B,)
    # Placeholder stands in for the class while it is being defined, so
    # self-referencing fields can resolve; patched out below.
    Placeholder = type(f"PlaceholderFor{cls_name}", (), {})
    ieds.encountered[schema_id] = Placeholder
    fields_triples: List[Tuple[str, TypeLike, Field]] = []  # (name, type, Field)
    if X_ORDER in res:
        ordered = res[X_ORDER]
    else:
        ordered = list(properties) + list(classvars) + list(classatts)
    for pname in ordered:
        if pname in properties:
            v = properties[pname]
            ptype = kt.typelike_from_ipce(v)
            _Field = field()
            _Field.name = pname
            has_default = JSC_DEFAULT in v
            if has_default:
                default_value = kt.object_from_ipce(v[JSC_DEFAULT], ptype)
                # Mutable defaults must go through a factory, not a shared value.
                if isinstance(default_value, (list, dict, set)):
                    _Field.default_factory = MyDefaultFactory(default_value)
                else:
                    _Field.default = default_value
                    assert not isinstance(default_value, dataclasses.Field)
            else:
                if not pname in required:
                    msg = (
                        f"Field {pname!r} is not required but I did not find a default"
                    )
                    raise ZException(msg, res=res)
            fields_triples.append((pname, ptype, _Field))
        elif pname in classvars:
            v = classvars[pname]
            ptype = kt.typelike_from_ipce(v)
            f = field()
            if pname in classatts:
                f.default = kt.object_from_ipce(classatts[pname], object)
            fields_triples.append((pname, ClassVar[ptype], f))
        elif pname in classatts:  # pragma: no cover
            msg = f"Found {pname!r} in @classatts but not in @classvars"
            raise ZValueError(msg, res=res, classatts=classatts, classvars=classvars)
        else:  # pragma: no cover
            msg = f"Cannot find {pname!r} either in @properties or @classvars or @classatts."
            raise ZValueError(
                msg, properties=properties, classvars=classvars, classatts=classatts
            )
    check_fields_order(fields_triples)
    unsafe_hash = True
    try:
        T = make_dataclass(
            cls_name,
            fields_triples,
            bases=bases,
            namespace=None,
            init=True,
            repr=True,
            eq=True,
            order=True,
            unsafe_hash=unsafe_hash,
            frozen=False,
        )
    except TypeError:  # pragma: no cover
        raise
    # Swap the Placeholder for the real class in all annotations.
    fix_annotations_with_self_reference(T, cls_name, Placeholder)
    for pname, v in classatts.items():
        # A class attribute that is itself a schema becomes a type.
        if isinstance(v, dict) and SCHEMA_ATT in v and v[SCHEMA_ATT] == SCHEMA_ID:
            interpreted = kt.typelike_from_ipce(cast(JSONSchema, v))
        else:
            interpreted = kt.object_from_ipce(v, object)
        assert not isinstance(interpreted, dataclasses.Field)
        ztinfo("setting class att", pname=pname, interpreted=interpreted)
        setattr(T, pname, interpreted)
    if JSC_DESCRIPTION in res:
        setattr(T, "__doc__", res[JSC_DESCRIPTION])
    else:
        # the original one did not have it
        setattr(T, "__doc__", None)
    setattr(T, "__module__", module_name)
    setattr(T, "__qualname__", qual_name)
    used = kt.used
    if schema_id in used:
        used.pop(schema_id)
    if not used:
        # Fully resolved: safe to cache/remember.
        if iedo.remember_deserialized_classes:
            remember_created_class(T, "typelike_from_ipce")
        ieds.klasses[key] = T
    else:
        msg = f"Cannot remember {key} because used = {used}"
        logger.warning(msg)
    return SRE(T, used)
from .logging import logger
from dataclasses import Field, MISSING
def field_has_default(f: Field) -> bool:
    """True iff dataclass field ``f`` has a default value or default factory.

    Uses identity comparison against the MISSING sentinel: ``!=`` would
    invoke the default value's ``__eq__``, which for some values (e.g.
    numpy arrays) does not return a plain bool.
    """
    return f.default is not MISSING or f.default_factory is not MISSING
def check_fields_order(fields_triples: List[Tuple[str, TypeLike, Field]]):
    """Verify dataclass field ordering: after the first field with a
    default, every later non-ClassVar field must also have a default.

    Raises ZValueError on the first violation.
    """
    found_default = None
    for name, type_, f in fields_triples:
        if is_ClassVar(type_):
            continue
        if field_has_default(f):
            found_default = name
        elif found_default:
            msg = f"Found out of order fields. Field {name!r} without default found after {found_default!r}."
            raise ZValueError(msg, fields_triples=fields_triples)
def fix_annotations_with_self_reference(
    T: Type[dataclass], cls_name: str, Placeholder: type
) -> None:
    """Replace occurrences of ``Placeholder`` in T's annotations with T.

    While a self-referential dataclass is being reconstructed, a placeholder
    class stands in for it; once T exists, rewrite every annotation (and the
    corresponding dataclass Field.type) to point at T itself.
    """

    def replace_placeholder(M: TypeLike) -> TypeLike:
        assert not is_ForwardRef(M)
        if M is Placeholder:
            return T
        else:
            return M

    replace_placeholder.__name__ = f"replacer_for_{cls_name}"
    anns: dict = T.__annotations__
    anns2 = {}
    for k, v0 in anns.items():
        anns2[k] = recursive_type_subst(v0, replace_placeholder)
    T.__annotations__ = anns2
    # Keep each dataclass Field's .type in sync with the rewritten
    # annotations.  (The original code reused the name `f` for this loop
    # variable, shadowing the replacer function above.)
    for fld in dataclasses.fields(T):
        fld.type = T.__annotations__[fld.name]
class MyDefaultFactory:
    """Callable default factory returning a fresh shallow copy of a value.

    Used for mutable dataclass defaults (list/dict/set) so that instances
    do not share the same object.
    """

    def __init__(self, value: object):
        self.value = value

    def __call__(self) -> object:
        original = self.value
        return type(original)(original)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/conv_typelike_from_ipce.py
|
conv_typelike_from_ipce.py
|
class SchemaCache:
    """Process-wide cache of computed schemas, keyed by make_key(...) tuples.

    Class-level mutable state; no locking here.
    """

    key2schema = {}
def make_key(x: object) -> tuple:
    """Build a hashable cache key identifying ``x``.

    Combines the object's identity, its type's identity, and several
    name/parametrization attributes (absent attributes become None).
    """
    name_attrs = tuple(
        getattr(x, attr, None)
        for attr in (
            "__qualname__",
            "__name__",
            "__dict_type__",
            "__set_type__",
            "__list_type__",
        )
    )
    return (id(x), id(type(x))) + name_attrs
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/ipce_attr.py
|
ipce_attr.py
|
from typing import Dict, Optional
from zuper_commons.text import indent
def pprint(msg: Optional[str] = None, **kwargs: object) -> None:
    """Print ``kwargs`` rendered via pretty_dict, with optional heading ``msg``."""
    print(pretty_dict(msg, kwargs))
def pretty_dict(
    head: Optional[str], d: Dict[str, object], omit_falsy=False, sort_keys=False
):
    """Render dict ``d`` as an indented, box-drawn multi-line string.

    Nested dicts are rendered recursively.  ``head``, if given, becomes a
    heading line.  NOTE(review): ``omit_falsy`` is accepted but never used
    in this body.
    """
    if not d:
        return head + ": (empty dict)" if head else "(empty dict)"
    s = []
    # Width of the widest key, for right-aligned "key:" prefixes.
    n = max(len(str(_)) for _ in d)
    ordered = sorted(d) if sort_keys else list(d)
    for k in ordered:
        v = d[k]
        prefix = (str(k) + ":").rjust(n + 1) + " "
        if isinstance(v, dict):
            v = pretty_dict("", v)
        s.append(indent(v, "", prefix))
    return (head + ":\n" if head else "") + indent("\n".join(s), "│ ")
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/pretty.py
|
pretty.py
|
from typing import cast, Dict, List, Set, Tuple, Type, TypeVar
from zuper_ipce.types import is_unconstrained
from zuper_typing.aliases import TypeLike
from zuper_typing.annotations_tricks import (
get_FixedTupleLike_args,
get_Optional_arg,
get_Union_args,
get_VarTuple_arg,
is_FixedTupleLike,
is_Optional,
is_Union,
is_VarTuple,
)
from zuper_typing.exceptions import ZTypeError, ZValueError
from zuper_typing.my_dict import (
CustomDict,
CustomList,
CustomTuple,
get_CustomDict_args,
get_CustomList_arg,
get_CustomSet_arg,
get_CustomTuple_args,
get_DictLike_args,
get_ListLike_arg,
get_SetLike_arg,
is_CustomDict,
is_CustomList,
is_CustomSet,
is_CustomTuple,
is_DictLike,
is_ListLike,
is_SetLike,
)
X_ = TypeVar('X_')
def get_set_type_suggestion(x: set, st: TypeLike) -> TypeLike:
    """Choose the element type to use when serializing set ``x``.

    Priority: the set's own CustomSet parametrization, then a Set-like
    suggested type ``st``, then ``object`` if ``st`` is unconstrained.
    Raises ZTypeError for any other suggestion.
    """
    T = type(x)
    if is_CustomSet(T):
        return get_CustomSet_arg(T)
    if is_SetLike(st):
        st = cast(Type[Set], st)
        V = get_SetLike_arg(st)
        return V
    elif is_unconstrained(st):
        return object
    else:
        # FIX: the message said "for a list" — copy-pasted from the list
        # variant; this function handles sets.
        msg = "suggest_type does not make sense for a set"
        raise ZTypeError(msg, suggest_type=st)
def get_list_type_suggestion(x: list, st: TypeLike) -> TypeLike:
    """Choose the element type to use when serializing list ``x``.

    Priority: the list's own CustomList parametrization, then ``object``
    if ``st`` is unconstrained, then a List-like suggested type ``st``.
    Raises ZTypeError for any other suggestion.
    """
    T = type(x)
    if is_CustomList(T):
        T = cast(Type[CustomList], T)
        return get_CustomList_arg(T)
    # TODO: if it is custom dict
    if is_unconstrained(st):
        return object
    elif is_ListLike(st):
        T = cast(Type[List], st)
        V = get_ListLike_arg(T)
        return V
    else:
        msg = "suggest_type does not make sense for a list"
        raise ZTypeError(msg, suggest_type=st, x=type(st))
def get_dict_type_suggestion(ob: dict, st: TypeLike) -> Tuple[TypeLike, TypeLike]:
    """ Gets the key/value types to use to serialize a dict.

    Returns the pair (K, V).
    """
    T = type(ob)
    if is_CustomDict(T):
        # if it has the type information, then go for it
        T = cast(Type[CustomDict], T)
        K, V = get_CustomDict_args(T)
        return K, V
    if is_DictLike(st):
        # There was a suggestion of Dict-like
        st = cast(Type[Dict], st)
        K, V = get_DictLike_args(st)
        return K, V
    elif is_unconstrained(st):
        # Guess from the dictionary itself
        K, V = guess_type_for_naked_dict(ob)
        return K, V
    else:  # pragma: no cover
        msg = f"@suggest_type does not make sense for a dict"
        raise ZValueError(msg, ob=ob, suggest_type=st)
def is_UnionLike(x: TypeLike) -> bool:
    """True iff ``x`` is a Union or an Optional."""
    if is_Union(x):
        return True
    return is_Optional(x)
def get_UnionLike_args(x: TypeLike) -> Tuple[TypeLike, ...]:
    """Flatten a Union/Optional into the tuple of its member types.

    Optional[X] is treated as Union[X, None]; unions nested inside an
    Optional are flattened, with NoneType appended last.  Callers are
    expected to check is_UnionLike(x) first.
    """
    if is_Union(x):
        return get_Union_args(x)
    elif is_Optional(x):
        y = get_Optional_arg(x)
        if is_UnionLike(y):
            return get_UnionLike_args(y) + (type(None),)
        # FIX: Optional[X] with a non-union X previously fell into
        # `assert False`; it is simply Union[X, None].
        return (y, type(None))
def get_tuple_type_suggestion(x: tuple, st: TypeLike) -> Tuple[TypeLike, ...]:
    """Choose per-position element types for serializing tuple ``x``.

    Priority: the tuple's own CustomTuple parametrization, a CustomTuple
    suggestion, then any tuple-like member of ``st`` (a VarTuple yields its
    element type repeated len(x) times), then ``object`` per position if
    ``st`` is unconstrained.  Raises ZValueError otherwise.
    """
    if isinstance(x, CustomTuple):
        return type(x).__tuple_types__
    if is_CustomTuple(st):
        st = cast(Type[CustomTuple], st)
        return get_CustomTuple_args(st)
    n = len(x)
    # A Union/Optional suggestion is searched member by member.
    if is_UnionLike(st):
        options = get_UnionLike_args(st)
    else:
        options = (st,)
    # first look for any tuple-like
    for op in options:
        if is_VarTuple(op):
            op = cast(Type[Tuple[X_, ...]], op)
            V = get_VarTuple_arg(op)
            return tuple([V] * n)
        if is_FixedTupleLike(op):
            ts = get_FixedTupleLike_args(op)
            return ts
    for op in options:
        if is_unconstrained(op):
            return tuple([object] * n)
    msg = f"@suggest_type does not make sense for a tuple"
    raise ZValueError(msg, suggest_type=st)
def guess_type_for_naked_dict(ob: dict) -> Tuple[type, type]:
    """Infer (K, V) for a dict carrying no type information.

    If all keys (resp. values) share a single concrete type, use it;
    otherwise fall back to ``object``.  Empty dict -> (object, object).
    """
    if not ob:
        return object, object

    def common_type(values) -> type:
        distinct = {type(v) for v in values}
        return next(iter(distinct)) if len(distinct) == 1 else object

    return common_type(ob.keys()), common_type(ob.values())
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/guesses.py
|
guesses.py
|
import datetime
import inspect
import traceback
from dataclasses import Field, fields, is_dataclass, MISSING, replace
from decimal import Decimal
from typing import cast, Dict, Optional, Set, Type, TypeVar
import numpy as np
import yaml
from zuper_commons.fs import write_ustring_to_utf8_file
from zuper_ipce.constants import IPCE_PASS_THROUGH, REF_ATT
from zuper_ipce.conv_typelike_from_ipce import typelike_from_ipce_sr
from zuper_ipce.exceptions import ZDeserializationErrorSchema
from zuper_ipce.types import is_unconstrained
from zuper_typing.annotations_tricks import (
get_FixedTupleLike_args,
get_Optional_arg,
get_Union_args,
get_VarTuple_arg,
is_ClassVar,
is_FixedTupleLike,
is_Optional,
is_TupleLike,
is_TypeVar,
is_Union,
is_VarTuple,
)
from zuper_typing.exceptions import ZTypeError, ZValueError
from zuper_typing.my_dict import (
get_DictLike_args,
get_ListLike_arg,
get_SetLike_arg,
is_DictLike,
is_ListLike,
is_SetLike,
make_CustomTuple,
make_dict,
make_list,
make_set,
)
from zuper_typing.my_intersection import get_Intersection_args, is_Intersection
from .constants import (
HINTS_ATT,
IEDO,
IEDS,
JSC_TITLE,
JSC_TITLE_TYPE,
JSONSchema,
SCHEMA_ATT,
SCHEMA_ID,
)
from .numpy_encoding import numpy_array_from_ipce
from .structures import FakeValues
from .types import IPCE, TypeLike
DEBUGGING = False
_X = TypeVar("_X")
def object_from_ipce(
    mj: IPCE, expect_type: Type[_X] = object, *, iedo: Optional[IEDO] = None
) -> _X:
    """Deserialize the IPCE structure mj into an object of expect_type.

    Public entry point: builds default options/state and delegates to
    object_from_ipce_, wrapping ZValueError failures with extra context.
    """
    assert expect_type is not None
    if iedo is None:
        iedo = IEDO(use_remembered_classes=False, remember_deserialized_classes=False)
    ieds = IEDS({}, {})
    try:
        res = object_from_ipce_(mj, expect_type, ieds=ieds, iedo=iedo)
        return res
    except IPCE_PASS_THROUGH:
        raise
    except ZValueError as e:
        msg = f"Cannot deserialize object"
        if isinstance(mj, dict) and "$schema" in mj:
            schema = mj["$schema"]
        else:
            schema = None
        if DEBUGGING:
            # dump the offending payload (and its schema) to files for inspection
            prefix = f"object_{id(mj)}"
            fn = write_out_yaml(prefix + "_data", mj)
            msg += f"\n object data in {fn}"
            if schema:
                fn = write_out_yaml(prefix + "_schema", schema)
                msg += f"\n object schema in {fn}"
        raise ZValueError(msg, expect_type=expect_type) from e
def object_from_ipce_(mj: IPCE, st: Type[_X] = object, *, ieds: IEDS, iedo: IEDO) -> _X:
    """Recursive worker: deserialize mj guided by the type suggestion st.

    Dispatch order: union-like suggestions first, then trivial scalars,
    lists, None, and finally dict payloads — which may embed their own
    schema under SCHEMA_ATT, overriding the suggestion.
    """
    # ztinfo('object_from_ipce_', mj=mj, st=st)
    # if mj == {'ob': []}:
    #     raise ZException(mj=mj, st=st)
    if is_Optional(st):
        return object_from_ipce_optional(mj, st, ieds=ieds, iedo=iedo)
    if is_Union(st):
        return object_from_ipce_union(mj, st, ieds=ieds, iedo=iedo)
    if is_Intersection(st):
        return object_from_ipce_intersection(mj, st, ieds=ieds, iedo=iedo)
    trivial = (int, float, bool, bytes, str, datetime.datetime, Decimal)
    if st in trivial:
        if not isinstance(mj, st):
            msg = "Type mismatch for a simple type."
            raise ZValueError(msg, expected=st, given_object=mj)
        else:
            return mj
    if isinstance(mj, trivial):
        T = type(mj)
        # a scalar payload is only acceptable under an unconstrained/TypeVar suggestion
        if not is_unconstrained(st) and not is_TypeVar(st):
            msg = f"Type mismatch"
            raise ZValueError(msg, expected=st, given_object=mj)
        return mj
    if isinstance(mj, list):
        return object_from_ipce_list(mj, st, ieds=ieds, iedo=iedo)
    if mj is None:
        if st is type(None):
            return None
        elif is_unconstrained(st):
            return None
        else:
            msg = f"The value is None but the expected type is @expect_type."
            raise ZValueError(msg, st=st)
    assert isinstance(mj, dict), type(mj)
    from .conv_typelike_from_ipce import typelike_from_ipce_sr

    # a dict that *is* a schema deserializes to a type, not an instance
    if mj.get(SCHEMA_ATT, "") == SCHEMA_ID or REF_ATT in mj:
        schema = cast(JSONSchema, mj)
        sr = typelike_from_ipce_sr(schema, ieds=ieds, iedo=iedo)
        return sr.res
    if mj.get(JSC_TITLE, None) == JSC_TITLE_TYPE:
        schema = cast(JSONSchema, mj)
        sr = typelike_from_ipce_sr(schema, ieds=ieds, iedo=iedo)
        return sr.res
    # an embedded schema (if any) overrides the suggestion st
    if SCHEMA_ATT in mj:
        sa = mj[SCHEMA_ATT]
        R = typelike_from_ipce_sr(sa, ieds=ieds, iedo=iedo)
        K = R.res
        # logger.debug(f' loaded K = {K} from {mj}')
    else:
        K = st
    if K is np.ndarray:
        return numpy_array_from_ipce(mj)
    if is_DictLike(K):
        K = cast(Type[Dict], K)
        return object_from_ipce_dict(mj, K, ieds=ieds, iedo=iedo)
    if is_SetLike(K):
        K = cast(Type[Set], K)
        res = object_from_ipce_SetLike(mj, K, ieds=ieds, iedo=iedo)
        return res
    if is_dataclass(K):
        return object_from_ipce_dataclass_instance(mj, K, ieds=ieds, iedo=iedo)
    if K is slice:
        return object_from_ipce_slice(mj)
    if is_unconstrained(K):
        # last resort: a dict whose keys all start with "set:" is a set encoding
        if looks_like_set(mj):
            st = Set[object]
            res = object_from_ipce_SetLike(mj, st, ieds=ieds, iedo=iedo)
            return res
        else:
            msg = "No schema found and very ambiguous."
            raise ZDeserializationErrorSchema(msg=msg, mj=mj, ieds=ieds)
            # st = Dict[str, object]
            #
            # return object_from_ipce_dict(mj, st, ieds=ieds, opt=opt)
    msg = f"Invalid type or type suggestion."
    raise ZValueError(msg, K=K)
def looks_like_set(d: dict):
    """Heuristic: a non-empty dict whose every key carries the "set:" prefix."""
    return bool(d) and all(key.startswith("set:") for key in d)
def object_from_ipce_slice(mj) -> slice:
    """Rebuild a builtin slice from its IPCE dict form (start/stop/step keys)."""
    parts = (mj["start"], mj["stop"], mj["step"])
    return slice(*parts)
def object_from_ipce_list(mj: IPCE, expect_type, *, ieds: IEDS, iedo: IEDO) -> IPCE:
    """Deserialize a JSON list into a list- or tuple-like value per expect_type."""

    def rec(x, TT: TypeLike) -> object:
        # recurse with a refined per-element suggestion
        return object_from_ipce_(x, TT, ieds=ieds, iedo=iedo)

    # logger.info(f'expect_type for list is {expect_type}')
    from zuper_ipce.conv_ipce_from_object import is_unconstrained

    if is_unconstrained(expect_type):
        # no constraint: elements become `object`, wrapped in a typed list
        suggest = object
        seq = [rec(_, suggest) for _ in mj]
        T = make_list(object)
        return T(seq)
    elif is_TupleLike(expect_type):
        return object_from_ipce_tuple(mj, expect_type, ieds=ieds, iedo=iedo)
    elif is_ListLike(expect_type):
        suggest = get_ListLike_arg(expect_type)
        seq = [rec(_, suggest) for _ in mj]
        T = make_list(suggest)
        return T(seq)
    else:
        msg = f"The object is a list, but expected different"
        raise ZValueError(msg, expect_type=expect_type, mj=mj)
def object_from_ipce_optional(
    mj: IPCE, expect_type: TypeLike, *, ieds: IEDS, iedo: IEDO
) -> IPCE:
    """Deserialize under Optional[K]: None passes through, otherwise use K."""
    if mj is None:
        return mj
    K = get_Optional_arg(expect_type)
    return object_from_ipce_(mj, K, ieds=ieds, iedo=iedo)
def object_from_ipce_union(
    mj: IPCE, expect_type: TypeLike, *, ieds: IEDS, iedo: IEDO
) -> IPCE:
    """Try each Union member in declaration order; the first successful
    deserialization wins. On total failure, dump the payload to a YAML
    file and raise with all per-member errors attached."""
    errors = []
    ts = get_Union_args(expect_type)
    for T in ts:
        try:
            return object_from_ipce_(mj, T, ieds=ieds, iedo=iedo)
        except IPCE_PASS_THROUGH:  # pragma: no cover
            raise
        except BaseException:
            # record the failure and keep trying the next member
            errors.append(dict(T=T, e=traceback.format_exc()))
    msg = f"Cannot deserialize with any type."
    fn = write_out_yaml(f"object{id(mj)}", mj)
    msg += f"\n ipce in {fn}"
    raise ZValueError(msg, ts=ts, errors=errors)
def object_from_ipce_intersection(
    mj: IPCE, expect_type: TypeLike, *, ieds: IEDS, iedo: IEDO
) -> IPCE:
    """Try each Intersection member in order; the first successful
    deserialization wins (mirrors object_from_ipce_union)."""
    errors = {}
    ts = get_Intersection_args(expect_type)
    for T in ts:
        try:
            return object_from_ipce_(mj, T, ieds=ieds, iedo=iedo)
        except IPCE_PASS_THROUGH:  # pragma: no cover
            raise
        except BaseException:
            errors[str(T)] = traceback.format_exc()
    msg = f"Cannot deserialize with any of @ts"
    fn = write_out_yaml(f"object{id(mj)}", mj)
    msg += f"\n ipce in {fn}"
    raise ZValueError(msg, errors=errors, ts=ts)
def object_from_ipce_tuple(mj: IPCE, st: TypeLike, *, ieds: IEDS, iedo: IEDO):
    """Deserialize a JSON list into a tuple: fixed-arity tuples use the
    per-position types; variable-arity tuples use the single element type.

    Precondition: st is tuple-like (is_FixedTupleLike or is_VarTuple).
    """
    if is_FixedTupleLike(st):
        seq = []
        ts = get_FixedTupleLike_args(st)
        for st_i, ob in zip(ts, mj):
            st_i = cast(Type[_X], st_i)  # XXX should not be necessary
            r = object_from_ipce_(ob, st_i, ieds=ieds, iedo=iedo)
            seq.append(r)
        # wrap in a typed tuple that remembers its element types
        T = make_CustomTuple(ts)
        return T(seq)
    elif is_VarTuple(st):
        T = get_VarTuple_arg(st)
        seq = []
        for i, ob in enumerate(mj):
            r = object_from_ipce_(ob, T, ieds=ieds, iedo=iedo)
            seq.append(r)
        return tuple(seq)
    else:
        assert False
def get_class_fields(K) -> Dict[str, Field]:
    """Map field name -> dataclasses.Field for the dataclass K."""
    return {f.name: f for f in fields(K)}
def add_to_globals(ieds: IEDS, name: str, val: object) -> IEDS:
    """Return a copy of ieds with name -> val added to its global symbols.

    The original ieds is not mutated (a new dict and a new IEDS are made).
    """
    g = dict(ieds.global_symbols)
    g[name] = val
    return replace(ieds, global_symbols=g)
def object_from_ipce_dataclass_instance(
    mj: IPCE, K: TypeLike, *, ieds: IEDS, iedo: IEDO
):
    """Instantiate dataclass K from the dict mj.

    Per-field hints under HINTS_ATT may override the annotated field type;
    fields missing from mj fall back to the field default / default_factory,
    or fail if neither exists.
    """
    ieds = add_to_globals(ieds, K.__name__, K)
    anns = getattr(K, "__annotations__", {})
    attrs = {}
    hints = mj.get(HINTS_ATT, {})
    # ztinfo('hints', mj=mj, h=hints)
    # logger.info(f'hints for {K.__name__} = {hints}')
    for k, v in mj.items():
        if k not in anns:
            # skip keys that are not fields (e.g. $schema, hints)
            continue
        et_k = anns[k]
        if inspect.isabstract(et_k):  # pragma: no cover
            msg = f"Trying to instantiate abstract class for field {k!r} of class {K.__name__}."
            raise ZValueError(msg, K=K, expect_type=et_k, mj=mj, annotation=anns[k])
        if k in hints:
            # a hint overrides the static annotation for this field
            R = typelike_from_ipce_sr(hints[k], ieds=ieds, iedo=iedo)
            hint = R.res
            et_k = hint
        else:
            hint = None
        try:
            attrs[k] = object_from_ipce_(v, et_k, ieds=ieds, iedo=iedo)
        except IPCE_PASS_THROUGH:  # pragma: no cover
            raise
        except ZValueError as e:  # pragma: no cover
            msg = f"Cannot deserialize attribute {k!r} of {K.__name__}."
            raise ZValueError(
                msg,
                K_annotations=K.__annotations__,
                expect_type=et_k,
                ann_K=anns[k],
                K_name=K.__name__,
            ) from e
        # ztinfo(f'result for {k}', raw=v, hint = hint, et_k=et_k, attrs_k=attrs[k])
    class_fields = get_class_fields(K)
    for k, T in anns.items():
        if is_ClassVar(T):
            continue
        if not k in mj:
            f = class_fields[k]
            # NOTE(review): comparing to the MISSING sentinel with `!=` invokes
            # the default value's __eq__; `is not MISSING` would be safer — confirm.
            if f.default != MISSING:
                attrs[k] = f.default
            elif f.default_factory != MISSING:
                attrs[k] = f.default_factory()
            else:
                msg = (
                    f"Cannot find field {k!r} in data for class {K.__name__} "
                    f"and no default available"
                )
                raise ZValueError(msg, anns=anns, T=T, known=sorted(mj), f=f)
    for k, v in attrs.items():
        # sanity: no Field placeholders may leak into constructor arguments
        assert not isinstance(v, Field), (k, v)
    try:
        return K(**attrs)
    except TypeError as e:  # pragma: no cover
        msg = f"Cannot instantiate type {K.__name__}."
        raise ZTypeError(msg, K=K, attrs=attrs, bases=K.__bases__, fields=anns) from e
def ignore_aliases(self, data) -> bool:
    """Decide whether the YAML dumper should skip anchors/aliases for data.

    Returns True for values never worth aliasing: None, empty tuple/list,
    numbers/bools, short strings, and a few well-known schema keys.
    Falls through (returning None, i.e. falsy) for everything else.
    """
    _ = self
    if data is None:
        return True
    if isinstance(data, tuple) and data == ():
        return True
    if isinstance(data, list) and not data:
        return True
    if isinstance(data, (bool, int, float)):
        return True
    if isinstance(data, str):
        if len(data) < 10:
            return True
        # well-known schema keys: aliasing them only hurts readability
        if data in ("additionalProperties", "properties", "__module__"):
            return True
def write_out_yaml(prefix: str, v: object, no_aliases: bool = False) -> str:
    """Dump v as YAML to errors/<prefix>.yaml and return that path.

    Used to leave diagnostic payloads on disk when deserialization fails.
    NOTE(review): this monkey-patches yaml.Dumper.ignore_aliases globally,
    which affects every later yaml.dump call in the process.
    """
    if no_aliases:
        yaml.Dumper.ignore_aliases = lambda _, data: True
    else:
        yaml.Dumper.ignore_aliases = ignore_aliases
    # d = oyaml_dump(v)
    d = yaml.dump(v)
    fn = f"errors/{prefix}.yaml"
    write_ustring_to_utf8_file(d, fn)
    return fn
def object_from_ipce_dict(mj: IPCE, D: Type[Dict], *, ieds: IEDS, iedo: IEDO):
    """Deserialize a JSON object into a Dict[K, V]-like value.

    str/int keys are stored directly (int keys arrive as strings in JSON
    and are converted back); any other key type was serialized through
    FakeValues entries carrying (real_key, value).
    """
    assert is_DictLike(D), D
    K, V = get_DictLike_args(D)
    D = make_dict(K, V)
    ob = D()
    attrs = {}
    FV = FakeValues[K, V]
    if isinstance(K, type) and (issubclass(K, str) or issubclass(K, int)):
        et_V = V
    else:
        et_V = FV
    for k, v in mj.items():
        if k == SCHEMA_ATT:
            # the embedded schema is metadata, not an entry
            continue
        try:
            attrs[k] = object_from_ipce_(v, et_V, ieds=ieds, iedo=iedo)
        except (TypeError, NotImplementedError) as e:  # pragma: no cover
            msg = f'Cannot deserialize element at index "{k}".'
            raise ZTypeError(msg, expect_type_V=et_V, v=v, D=D, mj_yaml=mj) from e
    if isinstance(K, type) and issubclass(K, str):
        ob.update(attrs)
        return ob
    elif isinstance(K, type) and issubclass(K, int):
        # JSON object keys are strings; restore the int keys
        attrs = {int(k): v for k, v in attrs.items()}
        ob.update(attrs)
        return ob
    else:
        for k, v in attrs.items():
            # noinspection PyUnresolvedReferences
            ob[v.real_key] = v.value
        return ob
def object_from_ipce_SetLike(mj: IPCE, D: Type[Set], *, ieds: IEDS, iedo: IEDO):
    """Deserialize a JSON object into a set of V.

    Only the values are used; keys (other than the schema entry) are ignored.
    """
    V = get_SetLike_arg(D)
    res = set()
    # logger.info(f'loading SetLike wiht V = {V}')
    for k, v in mj.items():
        if k == SCHEMA_ATT:
            continue
        vob = object_from_ipce_(v, V, ieds=ieds, iedo=iedo)
        # logger.info(f'loaded k = {k} vob = {vob}')
        res.add(vob)
    T = make_set(V)
    return T(res)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/conv_object_from_ipce.py
|
conv_object_from_ipce.py
|
import copy
import dataclasses
import datetime
import warnings
from dataclasses import Field, is_dataclass, replace
from decimal import Decimal
from numbers import Number
from typing import (
cast,
Dict,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
)
import numpy as np
from zuper_ipce import IPCE
from zuper_typing import dataclass
from zuper_typing.aliases import TypeLike
from zuper_typing.annotations_tricks import (
get_Callable_info,
get_ClassVar_arg,
get_Dict_name_K_V,
get_fields_including_static,
get_FixedTupleLike_args,
get_FixedTupleLike_name,
get_ForwardRef_arg,
get_NewType_arg,
get_NewType_name,
get_Optional_arg,
get_Sequence_arg,
get_Set_name_V,
get_Tuple_name,
get_Type_arg,
get_TypeVar_bound,
get_TypeVar_name,
get_Union_args,
get_VarTuple_arg,
is_Any,
is_Callable,
is_ClassVar,
is_FixedTupleLike,
is_ForwardRef,
is_NewType,
is_Optional,
is_Sequence,
is_TupleLike,
is_Type,
is_TypeLike,
is_TypeVar,
is_Union,
is_VarTuple,
)
from zuper_typing.constants import BINDINGS_ATT, GENERIC_ATT2
from zuper_typing.exceptions import (
ZAssertionError,
ZNotImplementedError,
ZTypeError,
ZValueError,
)
from zuper_typing.my_dict import (
get_DictLike_args,
get_ListLike_arg,
get_ListLike_name,
get_SetLike_arg,
is_DictLike,
is_ListLike,
is_SetLike,
)
from zuper_typing.my_intersection import get_Intersection_args, is_Intersection
from zuper_typing.recursive_tricks import get_name_without_brackets
from .constants import (
ALL_OF,
ANY_OF,
ATT_PYTHON_NAME,
CALLABLE_ORDERING,
CALLABLE_RETURN,
ID_ATT,
IESO,
IPCE_PASS_THROUGH,
JSC_ADDITIONAL_PROPERTIES,
JSC_ARRAY,
JSC_BOOL,
JSC_DEFINITIONS,
JSC_DESCRIPTION,
JSC_INTEGER,
JSC_ITEMS,
JSC_NULL,
JSC_NUMBER,
JSC_OBJECT,
JSC_PROPERTIES,
JSC_PROPERTY_NAMES,
JSC_REQUIRED,
JSC_STRING,
JSC_TITLE,
JSC_TITLE_CALLABLE,
JSC_TITLE_DATETIME,
JSC_TITLE_DECIMAL,
JSC_TITLE_FLOAT,
JSC_TITLE_NUMPY,
JSC_TITLE_SLICE,
JSC_TITLE_TYPE,
JSC_TYPE,
JSONSchema,
ProcessingDict,
REF_ATT,
SCHEMA_ATT,
SCHEMA_BYTES,
SCHEMA_CID,
SCHEMA_ID,
X_CLASSATTS,
X_CLASSVARS,
X_ORDER,
X_PYTHON_MODULE_ATT,
)
from .ipce_spec import assert_canonical_ipce, sorted_dict_cbor_ord
from .schema_caching import (
get_ipce_from_typelike_cache,
set_ipce_from_typelike_cache,
TRE,
)
from .schema_utils import make_ref, make_url
from .structures import FakeValues
def ipce_from_typelike(
    T: TypeLike,
    *,
    globals0: Optional[dict] = None,
    processing: Optional[ProcessingDict] = None,
    ieso: Optional[IESO] = None,
) -> JSONSchema:
    """Serialize a type to its IPCE JSON-schema representation.

    Public entry point: fills in default options, builds the context, and
    checks the resulting schema is canonical before returning it.
    """
    if ieso is None:
        ieso = IESO(with_schema=True)
    if processing is None:
        processing = {}
    if globals0 is None:
        globals0 = {}
    c = IFTContext(globals0, processing, ())
    tr = ipce_from_typelike_tr(T, c, ieso=ieso)
    schema = tr.schema
    assert_canonical_ipce(schema)
    return schema
@dataclass
class IFTContext:
    """State threaded through the ipce_from_typelike_* recursion."""

    # symbols available for resolving type names
    globals_: dict
    # type name -> $ref URL for types currently being serialized
    processing: ProcessingDict
    # stack of enclosing dataclass names (used to build unique $id URLs)
    context: Tuple[str, ...]
def ipce_from_typelike_tr(T: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Serialize typelike T to a schema, handling recursion and caching.

    A type already being processed is emitted as a $ref to break cycles;
    results can be cached; generic bindings found along T's MRO are added
    to the context's globals before dispatching to ipce_from_typelike_tr_.
    """
    if not is_TypeLike(T):
        raise ValueError(T)
    if hasattr(T, "__name__"):
        # already in flight -> emit a reference instead of recursing forever
        if T.__name__ in c.processing:
            ref = c.processing[T.__name__]
            res = make_ref(ref)
            return TRE(res, {T.__name__: ref})
        if ieso.use_ipce_from_typelike_cache:
            try:
                return get_ipce_from_typelike_cache(T, c.processing)
            except KeyError:
                pass
    try:
        if T is type:
            res = cast(
                JSONSchema,
                {
                    REF_ATT: SCHEMA_ID,
                    JSC_TITLE: JSC_TITLE_TYPE
                    # JSC_DESCRIPTION: T.__doc__
                },
            )
            res = sorted_dict_cbor_ord(res)
            return TRE(res)
        if T is type(None):
            res = cast(JSONSchema, {SCHEMA_ATT: SCHEMA_ID, JSC_TYPE: JSC_NULL})
            res = sorted_dict_cbor_ord(res)
            return TRE(res)
        if isinstance(T, type):
            # expose classes and generic bindings from the MRO as symbols
            for klass in T.mro():
                if klass.__name__.startswith("Generic"):
                    continue
                if klass is object:
                    continue
                globals2 = dict(c.globals_)
                globals2[get_name_without_brackets(klass.__name__)] = klass
                bindings = getattr(klass, BINDINGS_ATT, {})
                for k, v in bindings.items():
                    if hasattr(v, "__name__") and v.__name__ not in globals2:
                        globals2[v.__name__] = v
                    globals2[k.__name__] = v
                c = dataclasses.replace(c, globals_=globals2)
        tr: TRE = ipce_from_typelike_tr_(T, c=c, ieso=ieso)
        if ieso.use_ipce_from_typelike_cache:
            set_ipce_from_typelike_cache(T, tr.used, tr.schema)
        return tr
    except IPCE_PASS_THROUGH:  # pragma: no cover
        raise
    except ValueError as e:
        msg = "Cannot get schema for type @T"
        raise ZValueError(msg, T=T, T_type=type(T), c=c) from e
    except AssertionError as e:
        msg = "Cannot get schema for type @T"
        raise ZAssertionError(msg, T=T, T_type=type(T), c=c) from e
    except BaseException as e:
        msg = "Cannot get schema for @T"
        raise ZTypeError(msg, T=T, c=c) from e
def ipce_from_typelike_DictLike(T: Type[Dict], c: IFTContext, ieso: IESO) -> TRE:
    """Schema for a Dict[K, V]-like type.

    String keys map directly to a JSON object whose additionalProperties is
    V's schema; any other key type is encoded through FakeValues[K, V]
    wrapper entries instead.
    """
    assert is_DictLike(T), T
    K, V = get_DictLike_args(T)
    res = cast(JSONSchema, {JSC_TYPE: JSC_OBJECT})
    res[JSC_TITLE] = get_Dict_name_K_V(K, V)
    # the two original branches differed only in the value type serialized;
    # pick it once and share the rest of the construction
    if isinstance(K, type) and issubclass(K, str):
        vt = V
    else:
        vt = FakeValues[K, V]
    res[JSC_PROPERTIES] = {SCHEMA_ATT: {}}  # XXX
    tr = ipce_from_typelike_tr(vt, c=c, ieso=ieso)
    res[JSC_ADDITIONAL_PROPERTIES] = tr.schema
    res[SCHEMA_ATT] = SCHEMA_ID
    res = sorted_dict_cbor_ord(res)
    return TRE(res, tr.used)
def ipce_from_typelike_SetLike(T: Type[Set], c: IFTContext, ieso: IESO) -> TRE:
    """Schema for a Set[V]-like: JSON object keyed by content-id strings,
    with V's schema as additionalProperties."""
    assert is_SetLike(T), T
    V = get_SetLike_arg(T)
    res = cast(JSONSchema, {JSC_TYPE: JSC_OBJECT})
    res[JSC_TITLE] = get_Set_name_V(V)
    res[JSC_PROPERTY_NAMES] = SCHEMA_CID
    tr = ipce_from_typelike_tr(V, c=c, ieso=ieso)
    res[JSC_ADDITIONAL_PROPERTIES] = tr.schema
    res[SCHEMA_ATT] = SCHEMA_ID
    res = sorted_dict_cbor_ord(res)
    return TRE(res, tr.used)
def ipce_from_typelike_TupleLike(T: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Schema for tuples: a homogeneous Tuple[X, ...] gets one items schema;
    a fixed tuple gets a list of per-position schemas."""
    assert is_TupleLike(T), T
    used = {}

    def f(x: TypeLike) -> JSONSchema:
        # serialize a member type, accumulating its $ref usages
        tr = ipce_from_typelike_tr(x, c=c, ieso=ieso)
        used.update(tr.used)
        return tr.schema

    if is_VarTuple(T):
        T = cast(Type[Tuple], T)
        items = get_VarTuple_arg(T)
        res = cast(JSONSchema, {})
        res[SCHEMA_ATT] = SCHEMA_ID
        res[JSC_TYPE] = JSC_ARRAY
        res[JSC_ITEMS] = f(items)
        res[JSC_TITLE] = get_Tuple_name(T)
        res = sorted_dict_cbor_ord(res)
        return TRE(res, used)
    elif is_FixedTupleLike(T):
        T = cast(Type[Tuple], T)
        args = get_FixedTupleLike_args(T)
        res = cast(JSONSchema, {})
        res[SCHEMA_ATT] = SCHEMA_ID
        res[JSC_TYPE] = JSC_ARRAY
        res[JSC_ITEMS] = []
        res[JSC_TITLE] = get_FixedTupleLike_name(T)
        for a in args:
            res[JSC_ITEMS].append(f(a))
        res = sorted_dict_cbor_ord(res)
        return TRE(res, used)
    else:
        assert False
class KeepTrackSer:
    """Serializes several types while accumulating the $ref usages,
    so callers can report them all in a single TRE."""

    def __init__(self, c: IFTContext, ieso: IESO):
        self.c = c
        self.ieso = ieso
        # accumulated name -> $ref URL of all references used so far
        self.used = {}

    def ipce_from_typelike(self, T: TypeLike) -> JSONSchema:
        """Serialize T, recording the references its schema used."""
        tre = ipce_from_typelike_tr(T, c=self.c, ieso=self.ieso)
        self.used.update(tre.used)
        return tre.schema

    # def ipce_from_object(self, x: IPCE, st: TypeLike) -> IPCE:
    #     from zuper_ipce.conv_ipce_from_object import ipce_from_object_
    #     res = object_from_ipce_(x, st, ieds=self.ieds, iedo=self.iedo)
    #     return res

    def tre(self, x: IPCE) -> TRE:
        """Wrap a finished schema together with the accumulated usages."""
        return TRE(x, self.used)
def ipce_from_typelike_NewType(T: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Schema for a NewType: records its name and the underlying type."""
    # NOTE(review): marked unused here, but both are passed to KeepTrackSer below
    _ = c, ieso
    name = get_NewType_name(T)
    T0 = get_NewType_arg(T)
    kt = KeepTrackSer(c, ieso)
    res = cast(JSONSchema, {})
    res[SCHEMA_ATT] = SCHEMA_ID
    res[JSC_TYPE] = "NewType"
    res["newtype"] = kt.ipce_from_typelike(T0)
    res[JSC_TITLE] = name
    res = sorted_dict_cbor_ord(res)
    return kt.tre(res)
def ipce_from_typelike_ListLike(T: Type[List], c: IFTContext, ieso: IESO) -> TRE:
    """Schema for a List[V]-like: JSON array with V's schema as items."""
    assert is_ListLike(T), T
    items = get_ListLike_arg(T)
    res = cast(JSONSchema, {})
    kt = KeepTrackSer(c, ieso)
    res[SCHEMA_ATT] = SCHEMA_ID
    res[JSC_TYPE] = JSC_ARRAY
    res[JSC_ITEMS] = kt.ipce_from_typelike(items)
    res[JSC_TITLE] = get_ListLike_name(T)
    res = sorted_dict_cbor_ord(res)
    return kt.tre(res)
def ipce_from_typelike_Callable(T: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Schema for a Callable: each parameter schema plus the return schema
    go under definitions; the parameter ordering is recorded separately."""
    assert is_Callable(T), T
    cinfo = get_Callable_info(T)
    kt = KeepTrackSer(c, ieso)
    res = cast(
        JSONSchema,
        {
            JSC_TYPE: JSC_OBJECT,
            SCHEMA_ATT: SCHEMA_ID,
            JSC_TITLE: JSC_TITLE_CALLABLE,
            "special": "callable",
        },
    )
    p = res[JSC_DEFINITIONS] = {}
    for k, v in cinfo.parameters_by_name.items():
        p[k] = kt.ipce_from_typelike(v)
    p[CALLABLE_RETURN] = kt.ipce_from_typelike(cinfo.returns)
    res[CALLABLE_ORDERING] = list(cinfo.ordering)
    # print(res)
    res = sorted_dict_cbor_ord(res)
    return kt.tre(res)
def ipce_from_typelike_tr_(T: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Dispatch on the kind of typelike T and build its JSON schema.

    Order matters: guards for invalid inputs, then builtin scalars, then
    typing constructs (Any/Union/Optional/containers/Callable/NewType/
    Sequence/Type), and finally dataclasses and numpy arrays.
    """
    if T is None:
        msg = "None is not a type!"
        raise ZValueError(msg)
    # This can actually happen inside a Tuple (or Dict, etc.) even though
    # we have a special case for dataclass
    if is_ForwardRef(T):  # pragma: no cover
        msg = "It is not supported to have an ForwardRef here yet."
        raise ZValueError(msg, T=T)
    if isinstance(T, str):  # pragma: no cover
        msg = "It is not supported to have a string here."
        raise ZValueError(msg, T=T)
    if T is str:
        res = cast(JSONSchema, {JSC_TYPE: JSC_STRING, SCHEMA_ATT: SCHEMA_ID})
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is bool:
        res = cast(JSONSchema, {JSC_TYPE: JSC_BOOL, SCHEMA_ATT: SCHEMA_ID})
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is Number:
        res = cast(JSONSchema, {JSC_TYPE: JSC_NUMBER, SCHEMA_ATT: SCHEMA_ID})
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is float:
        # floats share the JSON "number" type but are tagged with a title
        res = cast(
            JSONSchema,
            {JSC_TYPE: JSC_NUMBER, SCHEMA_ATT: SCHEMA_ID, JSC_TITLE: JSC_TITLE_FLOAT},
        )
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is int:
        res = cast(JSONSchema, {JSC_TYPE: JSC_INTEGER, SCHEMA_ATT: SCHEMA_ID})
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is slice:
        return ipce_from_typelike_slice(ieso=ieso)
    if T is Decimal:
        # Decimal serializes as a string, distinguished by its title
        res = cast(
            JSONSchema,
            {JSC_TYPE: JSC_STRING, JSC_TITLE: JSC_TITLE_DECIMAL, SCHEMA_ATT: SCHEMA_ID},
        )
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is datetime.datetime:
        res = cast(
            JSONSchema,
            {
                JSC_TYPE: JSC_STRING,
                JSC_TITLE: JSC_TITLE_DATETIME,
                SCHEMA_ATT: SCHEMA_ID,
            },
        )
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is bytes:
        res = SCHEMA_BYTES
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if T is object:
        res = cast(JSONSchema, {SCHEMA_ATT: SCHEMA_ID, JSC_TITLE: "object"})
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    # we cannot use isinstance on typing.Any
    if is_Any(T):  # XXX not possible...
        res = cast(JSONSchema, {SCHEMA_ATT: SCHEMA_ID, JSC_TITLE: "Any"})
        res = sorted_dict_cbor_ord(res)
        return TRE(res)
    if is_Union(T):
        return ipce_from_typelike_Union(T, c=c, ieso=ieso)
    if is_Optional(T):
        return ipce_from_typelike_Optional(T, c=c, ieso=ieso)
    if is_DictLike(T):
        T = cast(Type[Dict], T)
        return ipce_from_typelike_DictLike(T, c=c, ieso=ieso)
    if is_SetLike(T):
        T = cast(Type[Set], T)
        return ipce_from_typelike_SetLike(T, c=c, ieso=ieso)
    if is_Intersection(T):
        return ipce_from_typelike_Intersection(T, c=c, ieso=ieso)
    if is_Callable(T):
        return ipce_from_typelike_Callable(T, c=c, ieso=ieso)
    if is_NewType(T):
        return ipce_from_typelike_NewType(T, c=c, ieso=ieso)
    if is_Sequence(T):
        # Sequence is approximated by List of the same element type
        msg = "Translating Sequence into List"
        warnings.warn(msg)
        T = cast(Type[Sequence], T)
        # raise ValueError(msg)
        V = get_Sequence_arg(T)
        T = List[V]
        return ipce_from_typelike_ListLike(T, c=c, ieso=ieso)
    if is_ListLike(T):
        T = cast(Type[List], T)
        return ipce_from_typelike_ListLike(T, c=c, ieso=ieso)
    if is_TupleLike(T):
        # noinspection PyTypeChecker
        return ipce_from_typelike_TupleLike(T, c=c, ieso=ieso)
    if is_Type(T):
        # Type[X]: wrap X's schema under a "subtype" entry
        TT = get_Type_arg(T)
        r = ipce_from_typelike_tr(TT, c, ieso=ieso)
        res = cast(
            JSONSchema,
            {SCHEMA_ATT: SCHEMA_ID, JSC_TYPE: "subtype", "subtype": r.schema},
        )
        res = sorted_dict_cbor_ord(res)
        return TRE(res, r.used)
    # raise NotImplementedError(T)
    assert isinstance(T, type), (T, type(T), is_Optional(T), is_Union(T))
    if is_dataclass(T):
        return ipce_from_typelike_dataclass(T, c=c, ieso=ieso)
    if T is np.ndarray:
        return ipce_from_typelike_ndarray()
    msg = "Cannot interpret the type @T"
    raise ZValueError(msg, T=T, c=c)
def ipce_from_typelike_ndarray() -> TRE:
    """Fixed JSON schema used for numpy.ndarray values."""
    properties = sorted_dict_cbor_ord(
        {"shape": {}, "dtype": {}, "data": SCHEMA_BYTES}  # TODO # TODO
    )
    schema = cast(
        JSONSchema,
        {
            SCHEMA_ATT: SCHEMA_ID,
            JSC_TYPE: JSC_OBJECT,
            JSC_TITLE: JSC_TITLE_NUMPY,
            JSC_PROPERTIES: properties,
        },
    )
    return TRE(sorted_dict_cbor_ord(schema))
def ipce_from_typelike_slice(ieso: IESO) -> TRE:
    """Schema for the builtin slice: object with start/stop/step, each
    described by the Optional[int] schema."""
    res = cast(JSONSchema, {SCHEMA_ATT: SCHEMA_ID})
    res[JSC_TYPE] = JSC_OBJECT
    res[JSC_TITLE] = JSC_TITLE_SLICE
    c = IFTContext({}, {}, ())
    tr = ipce_from_typelike_tr(Optional[int], c=c, ieso=ieso)
    properties = {
        "start": tr.schema,  # TODO
        "stop": tr.schema,  # TODO
        "step": tr.schema,
    }
    res[JSC_PROPERTIES] = sorted_dict_cbor_ord(properties)
    res = sorted_dict_cbor_ord(res)
    return TRE(res, tr.used)
def ipce_from_typelike_Intersection(T: TypeLike, c: IFTContext, ieso: IESO):
    """Schema for an Intersection: allOf over the member schemas."""
    args = get_Intersection_args(T)
    kt = KeepTrackSer(c, ieso)
    options = [kt.ipce_from_typelike(t) for t in args]
    res = cast(JSONSchema, {SCHEMA_ATT: SCHEMA_ID, ALL_OF: options})
    res = sorted_dict_cbor_ord(res)
    return kt.tre(res)
def get_mentioned_names(T: TypeLike, context=()) -> Iterator[str]:
    """Yield the names of types mentioned (recursively) inside T.

    context carries the types already visited, to cut cycles.
    Unknown typelikes yield nothing.
    """
    if T in context:
        return
    c2 = context + (T,)
    if is_dataclass(T):
        if context:
            # only nested dataclasses count as "mentioned", not the root
            yield T.__name__
        annotations = getattr(T, "__annotations__", {})
        for v in annotations.values():
            yield from get_mentioned_names(v, c2)
    elif is_Type(T):
        v = get_Type_arg(T)
        yield from get_mentioned_names(v, c2)
    elif is_TypeVar(T):
        yield get_TypeVar_name(T)
    elif is_FixedTupleLike(T):
        for t in get_FixedTupleLike_args(T):
            yield from get_mentioned_names(t, c2)
    elif is_VarTuple(T):
        t = get_VarTuple_arg(T)
        yield from get_mentioned_names(t, c2)
    elif is_ListLike(T):
        T = cast(Type[List], T)
        t = get_ListLike_arg(T)
        yield from get_mentioned_names(t, c2)
    elif is_DictLike(T):
        T = cast(Type[Dict], T)
        K, V = get_DictLike_args(T)
        yield from get_mentioned_names(K, c2)
        yield from get_mentioned_names(V, c2)
    elif is_SetLike(T):
        T = cast(Type[Set], T)
        t = get_SetLike_arg(T)
        yield from get_mentioned_names(t, c2)
    elif is_ForwardRef(T):
        # bug fix: `return get_ForwardRef_arg(T)` in a generator discards the
        # value (it only sets StopIteration.value); the name must be yielded
        yield get_ForwardRef_arg(T)
    elif is_Optional(T):
        t = get_Optional_arg(T)
        yield from get_mentioned_names(t, c2)
    elif is_Union(T):
        for t in get_Union_args(T):
            yield from get_mentioned_names(t, c2)
    else:
        pass
def ipce_from_typelike_dataclass(T: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Schema for a dataclass: per-field properties, classvars/classatts,
    required list, and definitions for its generic type variables."""
    assert is_dataclass(T), T
    # noinspection PyDataclass
    # work on copies so recursion does not leak into the caller's context
    c = replace(
        c,
        globals_=dict(c.globals_),
        processing=dict(c.processing),
        context=c.context + (T.__name__,),
    )
    used = {}

    def ftl(x: TypeLike) -> JSONSchema:
        # serialize a type, accumulating its $ref usages
        if not is_TypeLike(x):
            raise ValueError(x)
        tr = ipce_from_typelike_tr(x, c=c, ieso=ieso)
        used.update(tr.used)
        return tr.schema

    def fob(x: object) -> IPCE:
        # serialize a value (a class attribute)
        return ipce_from_object(x, globals_=c.globals_, ieso=ieso)

    def f(x: object) -> IPCE:
        # types go through ftl, plain values through fob
        if is_TypeLike(x):
            x = cast(TypeLike, x)
            return ftl(x)
        else:
            return fob(x)

    res = cast(JSONSchema, {})
    # build a unique $id from the enclosing dataclasses that mention T
    mentioned = set(get_mentioned_names(T, ()))
    relevant = [x for x in c.context if x in mentioned and x != T.__name__]
    relevant.append(T.__qualname__)
    url_name = "_".join(relevant)
    my_ref = make_url(url_name)
    res[ID_ATT] = my_ref
    res[JSC_TITLE] = T.__name__
    # register so recursive references to T become $ref
    c.processing[T.__name__] = my_ref
    res[ATT_PYTHON_NAME] = T.__qualname__
    res[X_PYTHON_MODULE_ATT] = T.__module__
    res[SCHEMA_ATT] = SCHEMA_ID
    res[JSC_TYPE] = JSC_OBJECT
    if hasattr(T, "__doc__") and T.__doc__:
        res[JSC_DESCRIPTION] = T.__doc__
    if hasattr(T, GENERIC_ATT2):
        # one definition (the bound's schema) per generic TypeVar
        definitions = {}
        types2 = getattr(T, GENERIC_ATT2)
        for t2 in types2:
            if not isinstance(t2, TypeVar):
                continue
            url = make_url(f"{T.__qualname__}/{t2.__name__}")
            c.processing[f"{t2.__name__}"] = url
            # noinspection PyTypeHints
            t2_name = get_TypeVar_name(t2)
            c.globals_[t2_name] = t2
            bound = get_TypeVar_bound(t2)
            # bound = t2.__bound__ or object
            schema = ftl(bound)
            schema = copy.copy(schema)
            schema[ID_ATT] = url
            schema = sorted_dict_cbor_ord(schema)
            definitions[t2.__name__] = schema
            c.globals_[t2.__name__] = t2
        if definitions:
            res[JSC_DEFINITIONS] = sorted_dict_cbor_ord(definitions)
    properties = {}
    classvars = {}
    classatts = {}
    required = []
    all_fields: Dict[str, Field] = get_fields_including_static(T)
    from .conv_ipce_from_object import ipce_from_object

    # remember declaration order; fields are processed sorted by name
    original_order = list(all_fields)
    ordered = sorted(all_fields)
    for name in ordered:
        afield = all_fields[name]
        t = afield.type
        try:
            if isinstance(t, str):  # pragma: no cover
                # t = eval_just_string(t, c.globals_)
                msg = "Before serialization, need to have all text references substituted."
                msg += f"\n found reference {t!r} in class {T}."
                raise Exception(msg)
            if is_ClassVar(t):
                tt = get_ClassVar_arg(t)
                # logger.info(f'ClassVar found : {tt}')
                # NOTE(review): dead branch — deliberately disabled via `False and`
                if False and is_Type(tt):
                    u = get_Type_arg(tt)
                    if is_TypeVar(u):
                        tn = get_TypeVar_name(u)
                        if tn in c.processing:
                            ref = c.processing[tn]
                            schema = make_ref(ref)
                            classvars[name] = schema
                            used.update({tn: ref})
                            classatts[name] = ftl(type)
                        else:  # pragma: no cover
                            msg = "Unknown typevar @tn in class @T"
                            raise ZNotImplementedError(msg, tn=tn, T=T, c=c)
                    else:
                        classvars[name] = ftl(u)
                        try:
                            the_att = get_T_attribute(T, name)
                        except AttributeError:
                            pass
                        else:
                            classatts[name] = f(the_att)
                else:
                    classvars[name] = ftl(tt)
                    try:
                        the_att = get_T_attribute(T, name)
                    except AttributeError:
                        # classvar declared but no actual value set
                        pass
                    else:
                        classatts[name] = f(the_att)
            else:  # not classvar
                schema = ftl(t)
                try:
                    default = get_field_default(afield)
                except KeyError:
                    # no default -> the field is required
                    required.append(name)
                else:
                    schema = make_schema_with_default(schema, default, c, ieso)
                properties[name] = schema
        except IPCE_PASS_THROUGH:  # pragma: no cover
            raise
        except BaseException as e:
            msg = "Cannot write schema for attribute @name -> @t of type @T."
            raise ZTypeError(msg, name=name, t=t, T=T) from e
    if required:  # empty is error
        res[JSC_REQUIRED] = sorted(required)
    if classvars:
        res[X_CLASSVARS] = classvars
    if classatts:
        res[X_CLASSATTS] = classatts
    assert len(classvars) >= len(classatts), (classvars, classatts)
    if properties:
        res[JSC_PROPERTIES] = sorted_dict_cbor_ord(properties)
    res[X_ORDER] = original_order
    # NOTE(review): truthiness of a function object — always True; the sort
    # was presumably meant to run unconditionally. Confirm and simplify.
    if sorted_dict_cbor_ord:
        res = sorted_dict_cbor_ord(res)
    if T.__name__ in used:
        # a self-reference is not an external usage
        used.pop(T.__name__)
    return TRE(res, used)
def get_T_attribute(T: TypeLike, n: str) -> object:
    """Return the class attribute n of T.

    Raises AttributeError both when the attribute is absent and when it is
    a dataclasses.Field placeholder (meaning no real value was set).
    """
    if not hasattr(T, n):
        raise AttributeError()
    value = getattr(T, n)
    if isinstance(value, Field):
        # the dataclass machinery left a Field object; treat as "not set"
        raise AttributeError()
    return value
def make_schema_with_default(
    schema: JSONSchema, default: object, c: IFTContext, ieso: IESO
) -> JSONSchema:
    """Wrap schema in an anyOf carrying the serialized default value
    under the "default" key."""
    from zuper_ipce import ipce_from_object

    options = [schema]
    s_u_one = cast(JSONSchema, {SCHEMA_ATT: SCHEMA_ID, ANY_OF: options})
    ipce_default = ipce_from_object(default, globals_=c.globals_, ieso=ieso)
    s_u_one["default"] = ipce_default
    s_u_one = sorted_dict_cbor_ord(s_u_one)
    return s_u_one
from dataclasses import MISSING
def get_field_default(f: Field) -> object:
    """Return the default value of a dataclass field.

    Calls the default_factory if that is how the default is provided.
    Raises KeyError when the field has no default at all.
    """
    # compare to the MISSING sentinel by identity: `!=` would invoke the
    # default value's __eq__, which may be expensive or ill-defined
    # (e.g. array-like defaults whose comparison is elementwise)
    if f.default is not MISSING:
        return f.default
    elif f.default_factory is not MISSING:
        return f.default_factory()
    else:
        raise KeyError("no default")
def ipce_from_typelike_Union(t: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Schema for a Union: anyOf over the member schemas."""
    types = get_Union_args(t)
    used = {}

    def f(x: TypeLike) -> JSONSchema:
        # serialize a member, accumulating its $ref usages
        tr = ipce_from_typelike_tr(x, c=c, ieso=ieso)
        used.update(tr.used)
        return tr.schema

    options = [f(t) for t in types]
    res = cast(JSONSchema, {SCHEMA_ATT: SCHEMA_ID, ANY_OF: options})
    res = sorted_dict_cbor_ord(res)
    return TRE(res, used)
def ipce_from_typelike_Optional(t: TypeLike, c: IFTContext, ieso: IESO) -> TRE:
    """Serialize ``Optional[X]`` as ``anyOf [X, NoneType]``."""
    kt = KeepTrackSer(c, ieso)
    variants = [get_Optional_arg(t), type(None)]
    schema = cast(
        JSONSchema,
        {SCHEMA_ATT: SCHEMA_ID, ANY_OF: [kt.ipce_from_typelike(v) for v in variants]},
    )
    return kt.tre(sorted_dict_cbor_ord(schema))
#
# def ipce_from_typelike_generic(T: Type, globals_: GlobalsDict, processing_: ProcessingDict) -> JSONSchema:
# assert hasattr(T, GENERIC_ATT2)
#
# types2 = getattr(T, GENERIC_ATT2)
# processing2 = dict(processing_)
# globals2 = dict(globals_)
#
# res = cast(JSONSchema, {})
# res[SCHEMA_ATT] = SCHEMA_ID
#
# res[JSC_TITLE] = T.__name__
# # res[ATT_PYTHON_NAME] = T.__qualname__
# res[X_PYTHON_MODULE_ATT] = T.__module__
#
# res[ID_ATT] = make_url(T.__name__)
#
# res[JSC_TYPE] = JSC_OBJECT
#
# processing2[f'{T.__name__}'] = make_ref(res[ID_ATT])
#
# # print(f'T: {T.__name__} ')
# definitions = {}
#
# if hasattr(T, '__doc__') and T.__doc__:
# res[JSC_DESCRIPTION] = T.__doc__
# globals_ = dict(globals_)
# for t2 in types2:
# if not isinstance(t2, TypeVar):
# continue
#
# url = make_url(f'{T.__name__}/{t2.__name__}')
#
# # processing2[f'~{name}'] = {'$ref': url}
# processing2[f'{t2.__name__}'] = make_ref(url)
# # noinspection PyTypeHints
# globals2[t2.__name__] = t2
#
# bound = t2.__bound__ or Any
# schema = ipce_from_typelike(bound, globals2, processing2)
# schema = copy.copy(schema)
# schema[ID_ATT] = url
#
# definitions[t2.__name__] = schema
#
# globals_[t2.__name__] = t2
#
# if definitions:
# res[JSC_DEFINITIONS] = definitions
# properties = {}
# required = []
#
# # names = list(T.__annotations__)
# # ordered = sorted(names)
# original_order = []
# for name, t in T.__annotations__.items():
# t = replace_typevars(t, bindings={}, symbols=globals_, rl=None)
# if is_ClassVar(t):
# continue
# try:
# result = eval_field(t, globals2, processing2)
# except PASS_THROUGH:
# raise
# except BaseException as e:
# msg = f'Cannot evaluate field "{name}" of class {T} annotated as {t}'
# raise Exception(msg) from e
# assert isinstance(result, Result), result
# properties[name] = result.schema
# original_order.append(name)
# if not result.optional:
# required.append(name)
# if required:
# res[JSC_REQUIRED] = sorted(required)
#
# sorted_vars = sorted(original_order)
# res[JSC_PROPERTIES] = {k: properties[k] for k in sorted_vars}
# res['order'] = original_order
# res = sorted_dict_with_cbor_ordering(res)
# return res
# @dataclasses.dataclass
# class Result:
# tre: TRE
# optional: Optional[bool] = False
#
# def __post_init__(self):
# assert isinstance(self.tre, TRE), self
# #
# # def __init__(self, tr: TRE, optional: bool = None):
# # self.schema = schema
# # self.optional = optional
#
# def eval_field(t, globals_: GlobalsDict, processing: ProcessingDict) -> Result:
# debug_info2 = lambda: dict(globals_=globals_, processing=processing)
#
# c = IFTContext(globals_=globals_, processing=processing, context=())
# if isinstance(t, str):
# te = eval_type_string(t, globals_, processing)
# return te
#
# if is_Type(t):
# res = cast(JSONSchema, make_ref(SCHEMA_ID))
# return Result(TRE(res))
#
# if is_TupleLike(t):
# tr = ipce_from_typelike_TupleLike(t, c)
# return Result(tr)
#
# if is_ListLike(t):
# tr = ipce_from_typelike_ListLike(t, c)
# return Result(tr)
#
# if is_DictLike(t):
# tr = ipce_from_typelike_dict(t, c)
# return Result(tr)
#
# if is_SetLike(t):
# tr = ipce_from_typelike_SetLike(t, c)
# return Result(tr)
#
# if is_ForwardRef(t):
# tn = get_ForwardRef_arg(t)
# return eval_type_string(tn, globals_, processing)
#
# if is_Optional(t):
# tt = get_Optional_arg(t)
# result = eval_field(tt, globals_, processing)
# return Result(result.tre, optional=True)
#
# if is_Union(t):
# return Result(ipce_from_typelike_Union(t, c))
#
# if is_Any(t):
# res = cast(JSONSchema, {'$schema': 'http://json-schema.org/draft-07/schema#'})
# return Result(TRE(res))
#
# if isinstance(t, TypeVar):
# l = t.__name__
# if l in processing:
# ref = processing[l]
# schema = make_ref(ref)
# return Result(TRE(schema, {l: ref}))
# # I am not sure why this is different in Python 3.6
# if PYTHON_36 and (l in globals_): # pragma: no cover
# T = globals_[l]
# tr = ipce_from_typelike_tr(T, c)
# return Result(tr)
#
# m = f'Could not resolve the TypeVar {t}'
# msg = pretty_dict(m, debug_info2())
# raise CannotResolveTypeVar(msg)
#
# if isinstance(t, type):
# # catch recursion here
# if t.__name__ in processing:
# return eval_field(t.__name__, globals_, processing)
# else:
# tr = ipce_from_typelike_tr(t, c)
# return Result(tr)
#
# msg = f'Could not deal with {t}'
# msg += f'\nglobals: {globals_}'
# msg += f'\nprocessing: {processing}'
# raise NotImplementedError(msg)
#
# def eval_type_string(t: str, globals_: GlobalsDict, processing: ProcessingDict) -> Result:
# check_isinstance(t, str)
# globals2 = dict(globals_)
# debug_info = lambda: dict(t=t, globals2=pretty_dict("", globals2), processing=pretty_dict("", processing))
#
# if t in processing:
# url = make_url(t)
# schema: JSONSchema = make_ref(url)
# return Result(TRE(schema, {t: url})) # XXX not sure
#
# elif t in globals2:
# return eval_field(globals2[t], globals2, processing)
# else:
# try:
# res = eval_just_string(t, globals2)
# return eval_field(res, globals2, processing)
# except NotImplementedError as e: # pragma: no cover
# m = 'While evaluating string'
# msg = pretty_dict(m, debug_info())
# raise NotImplementedError(msg) from e
# except PASS_THROUGH:
# raise
# except BaseException as e: # pragma: no cover
# m = 'Could not evaluate type string'
# msg = pretty_dict(m, debug_info())
# raise ValueError(msg) from e
#
#
# def eval_just_string(t: str, globals_):
# from typing import Optional
# eval_locals = {
# 'Optional': Optional, 'List': List,
# 'Dict': Dict, 'Union': Union, 'Set': typing.Set, 'Any': Any
# }
# # TODO: put more above?
# # do not pollute environment
# if t in globals_:
# return globals_[t]
# eval_globals = dict(globals_)
# try:
# res = eval(t, eval_globals, eval_locals)
# return res
# except PASS_THROUGH:
# raise
# except BaseException as e:
# m = f'Error while evaluating the string {t!r} using eval().'
# msg = pretty_dict(m, dict(eval_locals=eval_locals, eval_globals=eval_globals))
# raise type(e)(msg) from e
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/conv_ipce_from_typelike.py
|
conv_ipce_from_typelike.py
|
from typing import TypeVar
from zuper_commons.types.exceptions import ZException
from zuper_typing.monkey_patching_typing import my_dataclass
from zuper_typing.zeneric2 import ZenericFix
class CannotFindSchemaReference(ZException):
    """Raised when a schema reference ($ref/$id) cannot be resolved."""

    pass
class CannotResolveTypeVar(ZException):
    """Raised when a TypeVar cannot be bound during schema conversion."""

    pass
KK = TypeVar("KK")
VV = TypeVar("VV")
@my_dataclass
class FakeValues(ZenericFix[KK, VV]):
    """Key/value wrapper used to encode dict entries whose keys are not plain
    str or int: the original key is carried alongside the value."""

    real_key: KK  # the original dictionary key
    value: VV  # the associated value
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/structures.py
|
structures.py
|
__version__ = "5.3.0"
from .logging import logger
logger.info(f"zuper-ipce {__version__}")
from .types import IPCE, TypeLike
from .constants import IEDO, IESO
from .conv_ipce_from_object import ipce_from_object
from .conv_ipce_from_typelike import ipce_from_typelike
from .conv_object_from_ipce import object_from_ipce
from .conv_typelike_from_ipce import typelike_from_ipce
_ = (
ipce_from_object,
object_from_ipce,
typelike_from_ipce,
ipce_from_typelike,
TypeLike,
IPCE,
IEDO,
IESO,
)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/__init__.py
|
__init__.py
|
import datetime
import traceback
from dataclasses import dataclass, Field, fields, is_dataclass, MISSING
from decimal import Decimal
from typing import cast, Dict, Iterator, Optional, Set, TypeVar
import numpy as np
from frozendict import frozendict
from zuper_ipce.guesses import (
get_dict_type_suggestion,
get_list_type_suggestion,
get_set_type_suggestion,
get_tuple_type_suggestion,
)
from zuper_ipce.types import is_unconstrained
from zuper_typing.my_dict import make_dict
X = TypeVar("X")
from zuper_typing.annotations_tricks import (
get_Optional_arg,
get_Union_args,
is_Optional,
is_SpecialForm,
is_Union,
)
from zuper_typing.exceptions import ZNotImplementedError, ZTypeError, ZValueError
from .constants import GlobalsDict, HINTS_ATT, SCHEMA_ATT, IESO, IPCE_PASS_THROUGH
from .conv_ipce_from_typelike import ipce_from_typelike, ipce_from_typelike_ndarray
from .ipce_spec import assert_canonical_ipce, sorted_dict_cbor_ord
from .structures import FakeValues
from .types import IPCE, TypeLike
def ipce_from_object(
    ob: object,
    suggest_type: TypeLike = object,
    *,
    globals_: GlobalsDict = None,
    ieso: Optional[IESO] = None,
    # with_schema: bool = True,
) -> IPCE:
    """Public entry point: convert *ob* to its IPCE representation.

    Wraps the internal dispatcher with defaulting of the options, a
    uniform TypeError wrapper, and a canonicality check on the result.
    """
    effective_ieso = IESO(with_schema=True) if ieso is None else ieso
    effective_globals = {} if globals_ is None else globals_
    try:
        res = ipce_from_object_(
            ob, suggest_type, globals_=effective_globals, ieso=effective_ieso
        )
    except TypeError as e:
        msg = "ipce_from_object() for type @t failed."
        raise ZTypeError(msg, ob=ob, T=type(ob)) from e
    assert_canonical_ipce(res)
    return res
def ipce_from_object_(
    ob: object, st: TypeLike, *, globals_: GlobalsDict, ieso: IESO
) -> IPCE:
    """Dispatch on the runtime type of *ob* and the suggested type *st*.

    NOTE: the order of checks matters. Optional/Union suggestions are
    unwrapped before any value check; the `trivial` scalar check runs before
    the container checks; `type` is handled before `is_dataclass`.
    """
    unconstrained = is_unconstrained(st)
    # None is acceptable only if the suggestion allows it.
    if ob is None:
        if unconstrained or (st is type(None)) or is_Optional(st):
            return ob
        else:
            msg = f"ob is None but suggest_type is @suggest_type"
            raise ZTypeError(msg, suggest_type=st)
    if is_Optional(st):
        assert ob is not None  # from before
        T = get_Optional_arg(st)
        return ipce_from_object_(ob, T, globals_=globals_, ieso=ieso)
    if is_Union(st):
        return ipce_from_object_union(ob, st, globals_=globals_, ieso=ieso)
    # Naive datetimes cannot be serialized unambiguously.
    if isinstance(ob, datetime.datetime):
        if not ob.tzinfo:
            msg = "Cannot serialize dates without a timezone."
            raise ZValueError(msg, ob=ob)
    # Scalars that map directly to IPCE primitives.
    trivial = (bool, int, str, float, bytes, Decimal, datetime.datetime)
    if st in trivial:
        if not isinstance(ob, st):
            msg = "Expected this to be @suggest_type."
            raise ZTypeError(msg, st=st, ob=ob, T=type(ob))
        return ob
    if isinstance(ob, trivial):
        return ob
    # Containers, each with its own encoder.
    if isinstance(ob, list):
        return ipce_from_object_list(ob, st, globals_=globals_, ieso=ieso)
    if isinstance(ob, tuple):
        return ipce_from_object_tuple(ob, st, globals_=globals_, ieso=ieso)
    if isinstance(ob, slice):
        return ipce_from_object_slice(ob, ieso=ieso)
    if isinstance(ob, set):
        return ipce_from_object_set(ob, st, globals_=globals_, ieso=ieso)
    if isinstance(ob, (dict, frozendict)):
        return ipce_from_object_dict(ob, st, globals_=globals_, ieso=ieso)
    # Types and typing special forms are serialized as schemas.
    if isinstance(ob, type):
        return ipce_from_typelike(ob, globals0=globals_, processing={}, ieso=ieso)
    if is_SpecialForm(ob):
        ob = cast(TypeLike, ob)
        return ipce_from_typelike(ob, globals0=globals_, processing={}, ieso=ieso)
    if isinstance(ob, np.ndarray):
        return ipce_from_object_numpy(ob, ieso=ieso)
    assert not isinstance(ob, type), ob
    if is_dataclass(ob):
        return ipce_from_object_dataclass_instance(ob, globals_=globals_, ieso=ieso)
    msg = "I do not know a way to convert object @ob of type @T."
    raise ZNotImplementedError(msg, ob=ob, T=type(ob))
def ipce_from_object_numpy(ob, *, ieso: IESO) -> IPCE:
    """Encode a numpy array, attaching the ndarray schema when requested."""
    from .numpy_encoding import ipce_from_numpy_array

    encoded = ipce_from_numpy_array(ob)
    if ieso.with_schema:
        encoded[SCHEMA_ATT] = ipce_from_typelike_ndarray().schema
    return encoded
def ipce_from_object_slice(ob, *, ieso: IESO):
    """Encode a slice as a {start, step, stop} mapping in canonical order,
    optionally with the slice schema attached."""
    from .conv_ipce_from_typelike import ipce_from_typelike_slice

    encoded = {"start": ob.start, "step": ob.step, "stop": ob.stop}
    if ieso.with_schema:
        encoded[SCHEMA_ATT] = ipce_from_typelike_slice(ieso=ieso).schema
    return sorted_dict_cbor_ord(encoded)
def ipce_from_object_union(ob: object, st: TypeLike, *, globals_, ieso: IESO) -> IPCE:
    """Try each variant of the Union in order; return the first encoding that
    succeeds, collecting per-variant tracebacks for the error report."""
    errors = []
    for variant in get_Union_args(st):
        try:
            return ipce_from_object(ob, variant, globals_=globals_, ieso=ieso)
        except IPCE_PASS_THROUGH:
            raise
        except BaseException:
            errors.append((variant, traceback.format_exc()))
    msg = "Cannot save union."
    raise ZTypeError(msg, suggest_type=st, value=ob, errors=errors)
def ipce_from_object_list(ob, st: TypeLike, *, globals_: dict, ieso: IESO) -> IPCE:
    """Encode each element of a list using the element type suggested by *st*."""
    assert st is not None
    element_type = get_list_type_suggestion(ob, st)
    return [
        ipce_from_object(item, element_type, globals_=globals_, ieso=ieso)
        for item in ob
    ]
def ipce_from_object_tuple(ob: tuple, st: TypeLike, *, globals_, ieso: IESO) -> IPCE:
    """Encode a tuple element-wise, pairing each item with its suggested type."""
    element_types = get_tuple_type_suggestion(ob, st)
    return [
        ipce_from_object(item, T, globals_=globals_, ieso=ieso)
        for item, T in zip(ob, element_types)
    ]
@dataclass
class IterAtt:
    """One resolved attribute of a dataclass instance being serialized."""

    attr: str  # attribute name
    T: TypeLike  # declared (annotated) type of the attribute
    value: object  # current value on the instance
def has_default(f: Field) -> bool:
    """Return True if dataclass field *f* declares a default value or a
    default factory."""
    # MISSING is a sentinel: compare by identity, not equality, so that
    # defaults with exotic __eq__ (e.g. numpy arrays) do not break the check.
    return f.default is not MISSING or f.default_factory is not MISSING
def get_default(f: Field) -> object:
    """Return the default value of dataclass field *f*.

    Uses ``f.default`` when set, otherwise calls ``f.default_factory``.
    Raises AssertionError if the field has no default at all (preserving the
    original contract, but with an explicit raise that survives ``python -O``).
    """
    # Identity check against the MISSING sentinel, per the dataclasses docs.
    if f.default is not MISSING:
        return f.default
    if f.default_factory is not MISSING:
        return f.default_factory()
    raise AssertionError(f"field {f.name!r} has no default")
def same_as_default(f: Field, value: object) -> bool:
    """Return True if *value* equals the field's default (or the product of
    its default factory); False if the field has no default."""
    # Identity check against the MISSING sentinel; equality would invoke the
    # default's __eq__, which may raise or return a non-bool for some types.
    if f.default is not MISSING:
        return f.default == value
    if f.default_factory is not MISSING:
        return f.default_factory() == value
    return False
def iterate_resolved_type_values_without_default(x: dataclass) -> Iterator[IterAtt]:
    """Yield an IterAtt (name, declared type, value) for each field of *x*
    whose current value differs from its default.

    Fields equal to their default are skipped: they need not be serialized.
    """
    for f in fields(type(x)):
        current = getattr(x, f.name)
        if same_as_default(f, current):
            continue
        yield IterAtt(f.name, f.type, current)
def get_fields_values(x: dataclass) -> Dict[str, object]:
    """Return ``{field name: current value}`` for the dataclass instance *x*.

    Raises ZValueError (chained to the original TypeError) if ``type(x)``
    is not a dataclass.
    """
    T = type(x)
    try:
        fields_ = fields(T)
    except TypeError as e:
        # fields() raises TypeError on non-dataclasses. The previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit and dropped
        # the cause; narrow it and chain the original exception.
        raise ZValueError(T=T) from e
    return {f.name: getattr(x, f.name) for f in fields_}
def ipce_from_object_dataclass_instance(ob: dataclass, *, globals_, ieso: IESO) -> IPCE:
    """Serialize a dataclass instance as a dict of its non-default fields.

    Fields equal to their default are omitted. Untyped (unconstrained)
    list/tuple fields get their concrete runtime type recorded under
    HINTS_ATT so the decoder can reconstruct them.
    """
    globals_ = dict(globals_)
    res = {}
    T = type(ob)
    from .conv_ipce_from_typelike import ipce_from_typelike

    if ieso.with_schema:
        res[SCHEMA_ATT] = ipce_from_typelike(T, globals0=globals_, ieso=ieso)
    # Make the class visible to nested serializations (recursive types).
    globals_[T.__name__] = T
    H = make_dict(str, type)
    hints = H()
    for ia in iterate_resolved_type_values_without_default(ob):
        k = ia.attr
        v = ia.value
        T = ia.T
        try:
            res[k] = ipce_from_object(v, T, globals_=globals_, ieso=ieso)
            # Lists/tuples under an unconstrained annotation lose their
            # concrete type in the encoding; remember it as a hint.
            needs_schema = isinstance(v, (list, tuple))
            if ieso.with_schema and needs_schema and is_unconstrained(T):
                # hints[k] = ipce_from_typelike(type(v), globals0=globals_, ieso=ieso)
                hints[k] = type(v)
        except IPCE_PASS_THROUGH:
            raise
        except BaseException as e:
            msg = (
                f"Could not serialize an object. Problem "
                f"occurred with the attribute {k!r}. It is supposed to be of type @expected."
            )
            raise ZValueError(msg, expected=T, ob=ob) from e
    if hints:
        res[HINTS_ATT] = ipce_from_object(hints, ieso=ieso)
    res = sorted_dict_cbor_ord(res)
    return res
def ipce_from_object_dict(ob: dict, st: TypeLike, *, globals_: GlobalsDict, ieso: IESO):
    """Serialize a dict (or frozendict).

    Three key regimes:
    - str keys map directly to JSON object keys;
    - int keys are stringified;
    - any other key type is wrapped in FakeValues entries stored under
      canonical "set:NN" keys, ordered by (key type name, key) so that the
      encoding is deterministic.
    """
    K, V = get_dict_type_suggestion(ob, st)
    DT = Dict[K, V]
    res = {}
    from .conv_ipce_from_typelike import ipce_from_typelike

    if ieso.with_schema:
        res[SCHEMA_ATT] = ipce_from_typelike(DT, globals0=globals_, ieso=ieso)
    if isinstance(K, type) and issubclass(K, str):
        for k, v in ob.items():
            res[k] = ipce_from_object(v, V, globals_=globals_, ieso=ieso)
    elif isinstance(K, type) and issubclass(K, int):
        for k, v in ob.items():
            res[str(k)] = ipce_from_object(v, V, globals_=globals_, ieso=ieso)
    else:
        FV = FakeValues[K, V]
        # group first by the type name, then sort by key
        items = [(type(k).__name__, k, v) for k, v in ob.items()]
        items = sorted(items)
        for i, (_, k, v) in enumerate(items):
            h = get_key_for_set_entry(i, len(ob))
            fv = FV(k, v)
            res[h] = ipce_from_object(fv, globals_=globals_, ieso=ieso)
    res = sorted_dict_cbor_ord(res)
    return res
def ipce_from_object_set(ob: set, st: TypeLike, *, globals_: GlobalsDict, ieso: IESO):
    """Serialize a set as a mapping with canonical "set:NN" keys.

    Elements are ordered by (type name, value) so that the encoding is
    deterministic even though sets are unordered.
    """
    from .conv_ipce_from_typelike import ipce_from_typelike

    V = get_set_type_suggestion(ob, st)
    ST = Set[V]
    res = {}
    if ieso.with_schema:
        res[SCHEMA_ATT] = ipce_from_typelike(ST, globals0=globals_, ieso=ieso)
    # group first by the type name, then sort by key
    items = [(type(v).__name__, v) for v in ob]
    items = sorted(items)
    for i, (_, v) in enumerate(items):
        vj = ipce_from_object(v, V, globals_=globals_, ieso=ieso)
        h = get_key_for_set_entry(i, len(ob))
        res[h] = vj
    res = sorted_dict_cbor_ord(res)
    return res
def get_key_for_set_entry(i: int, n: int) -> str:
    """Return the canonical key ("set:NNN") for entry *i* of a container of
    size *n*.

    Indices are zero-padded to the width of ``str(n)`` so that lexicographic
    order of the keys matches numeric order of the indices.
    """
    # Use an f-string width spec; the old local named `format` shadowed the builtin.
    width = len(str(n))
    return f"set:{i:0{width}d}"
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/conv_ipce_from_object.py
|
conv_ipce_from_object.py
|
from zuper_typing.exceptions import ZValueError
class ZDeserializationError(ZValueError):
    """Base error raised when an IPCE value cannot be deserialized."""

    pass
class ZDeserializationErrorSchema(ZDeserializationError):
    """Deserialization error caused by a problem in the embedded schema."""

    pass
class ZSerializationError(ZValueError):
    """Raised when an object cannot be serialized to IPCE."""

    pass
class ZInvalidSchema(ZValueError):
    """Raised when a schema is malformed or not canonical."""

    pass
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce/exceptions.py
|
exceptions.py
|
from typing import List, Tuple
from zuper_typing import dataclass
from zuper_typing.my_dict import make_list, make_CustomTuple
from .test_utils import assert_object_roundtrip, assert_type_roundtrip
symbols = {}
def test_tuples1():
    """Round-trip a dataclass with a heterogeneous Tuple field."""
    @dataclass
    class M:
        a: Tuple[int, str]

    a = M((1, "32"))
    assert_object_roundtrip(a)
    assert_type_roundtrip(M)
def test_tuples3():
    """Round-trip a fixed-arity Tuple type."""
    T = Tuple[str, int]
    assert_type_roundtrip(T, use_globals=symbols)
def test_tuples2():
    """Round-trip a variadic Tuple[..., ...] type."""
    T = Tuple[str, ...]
    assert_type_roundtrip(T, use_globals=symbols)
def test_tuples4():
    """Round-trip a custom tuple type built via make_CustomTuple."""
    T = make_CustomTuple((str, int))
    assert_type_roundtrip(T, use_globals=symbols)
def test_tuples5():
    """Round-trip the empty custom tuple type."""
    T = make_CustomTuple(())
    assert_type_roundtrip(T, use_globals=symbols)
def test_tuples6():
    """Round-trip a fixed-arity Tuple type."""
    # NOTE(review): identical to test_tuples3 — possibly a leftover duplicate.
    T = Tuple[str, int]
    assert_type_roundtrip(T, use_globals=symbols)
def test_list1():
    """Round-trip a custom list type built via make_list."""
    T = make_list(str)
    assert_type_roundtrip(T, use_globals=symbols)
def test_list2():
    """Round-trip a dataclass instance with a List[str] field."""
    @dataclass
    class M:
        a: List[str]

    a = M(["a", "b"])
    assert_object_roundtrip(a, use_globals=symbols)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_tuples.py
|
test_tuples.py
|
from typing import (
Callable,
cast,
ClassVar,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
)
from nose.tools import raises
from zuper_ipce.assorted_recursive_type_subst import recursive_type_subst
from zuper_ipce.constants import JSONSchema
from zuper_ipce.schema_caching import assert_canonical_schema
from zuper_ipce_tests.test_utils import assert_type_roundtrip
from zuper_typing.get_patches_ import NotEquivalentException, assert_equivalent_types
from zuper_typing import dataclass
from zuper_typing.annotations_tricks import (
get_ClassVar_arg,
is_ClassVar,
is_Dict,
is_Type,
make_ForwardRef,
)
from zuper_typing.monkey_patching_typing import MyNamedArg, original_dict_getitem
from zuper_typing.my_dict import make_dict, make_list, make_set
def test_rec1():
    """recursive_type_subst applied twice with `swap` must be an involution.

    Class A exercises the full zoo of type constructors; substituting
    int<->str twice must give back a type equivalent to A.
    """
    @dataclass
    class A:
        a: Dict[int, bool]
        a2: Dict[bool, bool]
        b: Union[float, int]
        b2: Dict[bool, float]
        c: Set[int]
        c2: Set[bool]
        d: List[int]
        d2: List[bool]
        e: Tuple[int, bool]
        e2: Tuple[float, bool]
        f: make_dict(int, int)
        g: make_set(int)
        h: make_list(int)
        h2: make_list(bool)
        i: Optional[int]
        l: Tuple[int, ...]
        m: original_dict_getitem((int, float))
        n: original_dict_getitem((bool, float))
        q: ClassVar[int]
        r: ClassVar[bool]
        s: Callable[[int], int]
        s2: Callable[[bool], int]
        # noinspection PyUnresolvedReferences
        t: Callable[[MyNamedArg(int, "varname")], int]
        # noinspection PyUnresolvedReferences
        t2: Callable[[MyNamedArg(int, "varname")], int]

    T2 = recursive_type_subst(A, swap)
    T3 = recursive_type_subst(T2, swap)
    # logger.info(pretty_dict("A", A.__annotations__))
    # logger.info(pretty_dict("T2", T2.__annotations__))
    # logger.info(pretty_dict("T3", T3.__annotations__))
    assert_equivalent_types(A, T3, set())
    assert_type_roundtrip(A)
def test_recursive_fwd():
    """recursive_type_subst must accept a bare ForwardRef without crashing."""
    T = make_ForwardRef("n")
    recursive_type_subst(T, identity)
def test_recursive_fwd2():
    """recursive_type_subst must accept a plain typing Dict."""
    T = original_dict_getitem((str, str))
    assert is_Dict(T)
    recursive_type_subst(T, identity)
def test_Type_1():
    """recursive_type_subst must accept bare Type."""
    T = Type
    assert is_Type(T)
    recursive_type_subst(T, identity)
def identity(x):
    """No-op substitution: return x unchanged."""
    return x
def test_Type_2():
    """recursive_type_subst must accept a parameterized Type[int]."""
    T = Type[int]
    assert is_Type(T)
    recursive_type_subst(T, swap)
def test_Type_3():
    """recursive_type_subst must accept Type[bool] (bool is not swapped)."""
    T = Type[bool]
    assert is_Type(T)
    recursive_type_subst(T, swap)
@raises(ValueError)
def test_schema1():
    """An empty dict is not a canonical schema: expect ValueError."""
    schema = cast(JSONSchema, {})
    assert_canonical_schema(schema)
def swap(x):
    """Substitution used by these tests: exchange int and str, leave
    everything else unchanged."""
    if x is int:
        return str
    elif x is str:
        return int
    else:
        return x
def test_classvar():
    """Substitution must reach inside ClassVar[...] arguments."""
    T = ClassVar[int]
    assert is_ClassVar(T)
    assert get_ClassVar_arg(T) is int, T
    T2 = recursive_type_subst(T, swap)
    # print(T)
    # print(T2)
    assert get_ClassVar_arg(T2) is str, T2
    # The swapped type must NOT compare equivalent to the original.
    try:
        assert_equivalent_types(T, T2, set())
    except NotEquivalentException:
        pass
    else:  # pragma: no cover
        raise Exception()
    # bool is untouched by swap, so ClassVar[bool] survives unchanged.
    U = ClassVar[bool]
    assert is_ClassVar(U)
    assert get_ClassVar_arg(U) is bool, U
    U2 = recursive_type_subst(U, swap)
    # print(U)
    # print(U2)
    assert get_ClassVar_arg(U2) is bool, U
#
# def test_list_swap():
# def swap(x):
# if x is int:
# return str
# if x is str:
# return int
# return x
#
# U = make_list(bool)
# assert is_CustomList(U)
# assert get_CustomList_arg(U) is bool, U
# U2 = recursive_type_subst(U, swap)
# print(U)
# print(U2)
#
# assert get_CustomList_arg(U2) is bool, U
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_recursive_subst.py
|
test_recursive_subst.py
|
from zuper_ipce_tests.test_utils import assert_object_roundtrip, assert_type_roundtrip
# TODO:
# if not USE_REMEMBERED_CLASSES: # pragma: no cover
#
# def test_default_arguments():
# @dataclass
# class A1b:
# a: List[int] = field(default_factory=list)
#
# F = assert_type_roundtrip(A1b, expect_type_equal=False)
# F(a=[])
# F()
def test_object():
    """The bare `object` type must round-trip."""
    T = object
    assert_type_roundtrip(T)
def test_slice():
    """The `slice` type must round-trip."""
    T = slice
    assert_type_roundtrip(T)
def test_slice1():
    """A slice with only a start must round-trip."""
    T = slice(1, None, None)
    assert_object_roundtrip(T)
def test_slice2():
    """A slice with start and stop must round-trip."""
    T = slice(1, 2, None)
    assert_object_roundtrip(T)
def test_slice3():
    """A fully specified slice must round-trip."""
    T = slice(1, 2, 3)
    assert_object_roundtrip(T)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_not_implemented.py
|
test_not_implemented.py
|
from typing import TypeVar
from zuper_typing import dataclass, Generic
from .test_utils import assert_object_roundtrip, assert_type_roundtrip
symbols = {}
def test_subclass1():
    """Plain dataclass inheritance must round-trip (type and instance)."""
    @dataclass
    class A:
        a: int

    @dataclass
    class B(A):
        b: bool

    # print(type(B))
    # print(f'bases for B: {B.__bases__}')
    # print(f'mro for B: {B.mro()}')
    assert A in B.__bases__
    b = B(1, True)
    assert_type_roundtrip(B)
    assert_object_roundtrip(b)
def test_subclass2_generic():
    """Subclassing a concretized generic (A[int]) must round-trip."""
    X = TypeVar("X")

    @dataclass
    class A(Generic[X]):
        a: X

    @dataclass
    class B(A[int]):
        b: bool

    b = B(1, True)
    assert_type_roundtrip(B)
    assert_object_roundtrip(b)
def test_subclass3_generic():
    """Concretizing a still-generic subclass (S3B0[int]) must round-trip."""
    X = TypeVar("X")

    @dataclass
    class S3A(Generic[X]):
        a: X

    @dataclass
    class S3B0(S3A):
        b: bool

    S3B = S3B0[int]
    b = S3B(1, True)
    assert S3B0.__name__ == "S3B0[X]", S3B0.__name__
    assert_type_roundtrip(S3B0)
    assert_type_roundtrip(S3A)
    assert_type_roundtrip(S3B)
    assert_object_roundtrip(b)
if __name__ == "__main__":
test_subclass3_generic()
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_subclass.py
|
test_subclass.py
|
from typing import Optional
from jsonschema import validate
from zuper_ipce import ipce_from_object
from zuper_typing import dataclass
@dataclass
class AName:
    """ Describes a Name with optional middle name"""

    first: str
    last: str
    middle: Optional[str] = None  # omitted when not known
symbols = {"AName": AName}
def test_schema1():
    """The generated IPCE must validate against its own embedded $schema."""
    n1 = AName("one", "two")
    y1 = ipce_from_object(n1, globals_=symbols)
    # print(json.dumps(y1, indent=2))
    validate(y1, y1["$schema"])
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_json_schema.py
|
test_json_schema.py
|
from dataclasses import dataclass
from zuper_ipce_tests.test_utils import assert_object_roundtrip, assert_type_roundtrip
from zuper_typing.my_intersection import Intersection, make_Intersection
from zuper_typing.type_algebra import type_inf
def test_ipce_intersection1():
    """An explicit Intersection of two dataclasses must round-trip as a type."""
    @dataclass
    class A:
        a: bool

    @dataclass
    class B:
        a: int

    I = make_Intersection((A, B))
    assert_type_roundtrip(I)
def test_intersection1():
    """Intersection[...] subscript syntax must round-trip (not type-equal)."""
    @dataclass
    class A1:
        a: int

    @dataclass
    class B1:
        b: str

    AB = Intersection[A1, B1]
    assert_type_roundtrip(AB, expect_type_equal=False)
def test_intersection2():
    """An instance of the type-infimum of two dataclasses must round-trip."""
    @dataclass
    class A:
        a: int

    @dataclass
    class B:
        b: str

    # AB = Intersection[A, B]
    AB = type_inf(A, B)
    # print(AB.__annotations__)
    e = AB(a=1, b="2")
    assert_object_roundtrip(e)  # raise here
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_ipce_intersection.py
|
test_ipce_intersection.py
|
from zuper_ipce import ipce_from_object
from zuper_typing import dataclass
from zuper_typing.my_dict import make_dict
from .test_utils import assert_object_roundtrip, assert_type_roundtrip
def test_serialize_klasses0():
    """Serializing the `type` metaclass and a class object must work."""
    assert_type_roundtrip(type)

    @dataclass
    class A:
        a: int

    Aj = ipce_from_object(A)
    # pprint(Aj=Aj)
    assert_object_roundtrip(A, expect_equality=False)  # because of classes
def test_serialize_klasses1():
    """ Note: equality does not come because the two As do not compare equal """
    D = make_dict(str, type)

    @dataclass
    class MyLanguage:
        # my_types: Dict[str, type]
        my_types: D

    @dataclass
    class A:
        a: int

        pass

    x = MyLanguage(D({"A": A}))
    assert_type_roundtrip(MyLanguage)
    #
    # x2: MyLanguage = object_from_ipce(ipce_from_object(x), {}, {})
    # print(f' x: {x}')
    # print(f'x2: {x2}')
    # assert_equal(x.my_types['A'], x2.my_types['A'])
    # assert_equal(x.my_types, x2.my_types)
    # assert x == x2
    expect_equality = False
    assert_object_roundtrip(x, expect_equality=expect_equality)  # because of classes
def test_serialize_klasses2():
    """A single class stored in a `type`-annotated field must round-trip
    (without equality, since the deserialized class is a distinct object)."""
    @dataclass
    class MyLanguage:
        my_type: type

    @dataclass
    class A:
        a: int

    a = MyLanguage(A)
    assert_type_roundtrip(MyLanguage)
    expect_equality = False
    assert_object_roundtrip(a, expect_equality=expect_equality)  # because of classes
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_serialize_klasses.py
|
test_serialize_klasses.py
|
from typing import List, Optional
from zuper_commons.logs import setup_logging
from zuper_ipce import IPCE
from zuper_ipce import ipce_from_object
from zuper_ipce_tests.test_utils import assert_object_roundtrip
from zuper_typing import dataclass
@dataclass
class Tree:
    """Recursive tree node: an integer payload plus child subtrees."""

    data: int
    branches: "List[Tree]"
def create_tree(nlevels, branching, x) -> Tree:
    """Build a complete tree of depth *nlevels* with *branching* children per
    node; node labels are derived from *x* so every node gets a distinct value."""
    if nlevels == 0:
        children = []
    else:
        children = [
            create_tree(nlevels - 1, branching, x * (branching + 1) + i + 1)
            for i in range(branching)
        ]
    return Tree(x, children)
#
# @dataclass
# class TreeDict:
# data: int
# branches: 'Dict[str, TreeDict]'
#
#
# def create_tree_dict(nlevels, branching, x) -> TreeDict:
# if nlevels == 0:
# branches = {}
# else:
# branches = {}
# for i in range(branching):
# branches[str(i)] = create_tree_dict(nlevels - 1, branching, x * (branching + 1) + i + 1)
#
# return TreeDict(x, branches)
@dataclass
class Chain:
    """Singly linked list node used to stress recursive serialization."""

    data: object
    next_link: "Optional[Chain]" = None
def create_chain(nlevels, x):
    """Build a Chain of nlevels+1 links carrying values x, x+1, ..."""
    tail = None if nlevels == 0 else create_chain(nlevels - 1, x + 1)
    return Chain(x, tail)
def test_recursive_chain_ipce():
    """Serializing a 100-link chain must not blow the recursion limit."""
    t = create_chain(100, 0)
    ipce: IPCE = ipce_from_object(t)
    # print(ipce)
def test_recursive_ipce():
    """A depth-6 binary tree must round-trip through IPCE."""
    n = 6
    t = create_tree(n, 2, 0)
    # print(debug_print(t))
    # ipce: IPCE = ipce_from_object(t, {})
    assert_object_roundtrip(t)
#
# async def test_recursive_chain_2():
# n = 1000
# t = None
# for i in range(n):
# t = Chain(i, t)
#
# cid: CID = await CID_from_object(t)
#
# print(f'cid: {cid}')
# ipcl: IPCL = await ipcl_from_object(t)
# print(yaml.dump(ipcl))
# @with_private_register
# async def test_recursive_chain_ok():
# n = 1000
# # XXX: you should try with 1000 or more
# n = 100
# t = None
# for i in range(n):
# t = Chain(i, t)
# cid: CID = await CID_from_object(t)
# # print(f'iteration {i} -> {cid}')
#
# cid: CID = await CID_from_object(t)
#
# print(f'cid: {cid}')
# ipcl: IPCL = await ipcl_from_object(t)
# print(yaml.dump(ipcl))
if __name__ == "__main__":
setup_logging()
test_recursive_chain_ipce()
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_performance_recursive.py
|
test_performance_recursive.py
|
from dataclasses import field
import numpy as np
from numpy.testing import assert_allclose
from zuper_ipce.json_utils import encode_bytes_before_json_serialization
from zuper_ipce.numpy_encoding import ipce_from_numpy_array, numpy_array_from_ipce
from zuper_typing import dataclass
from .test_utils import assert_object_roundtrip, assert_type_roundtrip
# def array_eq(arr1, arr2):
# return (isinstance(arr1, np.ndarray) and
# isinstance(arr2, np.ndarray) and
# arr1.shape == arr2.shape and
# (arr1 == arr2).all())
def test_numpy_01():
    """A dataclass with an ndarray field must round-trip as a type."""
    @dataclass
    class C:
        data: np.ndarray = field(metadata=dict(contract="array[HxWx3](uint8)"))

    assert_type_roundtrip(C)
def test_numpy_02():
    """An instance holding a 0-d ndarray must round-trip."""
    @dataclass
    class C:
        data: np.ndarray = field(metadata=dict(contract="array[HxWx3](uint8)"))

    #
    # def __eq__(self, other):
    #     if not isinstance(other, C):
    #         return NotImplemented
    #     return array_eq(self.data, other.data)

    x = np.array(0.23)
    c = C(x)
    assert_object_roundtrip(c)
#
# def test_numpy_03():
# x = np.random.rand(2, 3)
# b = bytes_from_numpy(x)
# y = numpy_from_bytes(b)
# assert_allclose(x, y)
def test_numpy_04():
    """Array -> IPCE -> (json-safe bytes) -> array must preserve the values."""
    x = np.random.rand(2, 3)
    d = ipce_from_numpy_array(x)
    d1 = encode_bytes_before_json_serialization(d)
    # print(json.dumps(d1, indent=3))
    y = numpy_array_from_ipce(d)
    assert_allclose(x, y)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_numpy.py
|
test_numpy.py
|
from typing import List, Optional, Tuple, cast
from nose.tools import raises
from zuper_commons.logs import setup_logging
from zuper_ipce import typelike_from_ipce
from zuper_ipce.constants import JSONSchema
from zuper_ipce.utils_text import oyaml_load
from zuper_typing import dataclass
from .test_utils import assert_object_roundtrip, assert_type_roundtrip
def test_list_1():
    """An instance with a List[int] field must round-trip."""
    @dataclass
    class MyClass:
        f: List[int]

    e = MyClass([1, 2, 3])
    assert_object_roundtrip(e)
def test_tuple1a():
    """A dataclass with a variadic tuple field must round-trip as a type."""
    @dataclass
    class MyClass:
        f: Tuple[int, ...]

    assert_type_roundtrip(MyClass)
def test_tuple1():
    """An instance with a variadic tuple field must round-trip."""
    @dataclass
    class MyClass:
        f: Tuple[int, ...]

    e = MyClass((1, 2, 3))
    assert_object_roundtrip(e)
def test_tuple2a():
    """A dataclass with a fixed-arity tuple field must round-trip as a type."""
    @dataclass
    class MyClass:
        f: Tuple[int, str]

    assert_type_roundtrip(MyClass)
def test_tuple2():
    """An instance with a fixed-arity tuple field must round-trip."""
    @dataclass
    class MyClass:
        f: Tuple[int, str]

    e = MyClass((1, "a"))
    assert_object_roundtrip(e)
def test_tuple_inside_class():
    """ tuple inside needs a schema hint"""
    @dataclass
    class MyClass:
        f: object

    e = MyClass((1, 2))
    assert_object_roundtrip(e, works_without_schema=False)
@raises(AssertionError)
def test_tuple_inside_class_withoutschema():
    """ tuple inside needs a schema hint"""
    # Without a schema, the tuple cannot be reconstructed -> AssertionError.
    @dataclass
    class MyClass:
        f: object

    e = MyClass((1, 2))
    assert_object_roundtrip(e, works_without_schema=True)
def test_Optional_fields():
    """An Optional field left at its default must round-trip without schema."""
    @dataclass
    class MyClass:
        f: int
        g: Optional[int] = None

    e = MyClass(1)
    assert_object_roundtrip(e, works_without_schema=True)
def test_another():
    """A hand-written YAML schema must deserialize to a type that round-trips."""
    a = """\
$schema:
  $id: http://invalid.json-schema.org/A#
  $schema: http://json-schema.org/draft-07/schema#
__module__: zuper_lang.compile_utils
__qualname__: my_make_dataclass.<locals>.C
order: [a]
properties:
  a:
    $schema: http://json-schema.org/draft-07/schema#
    items: {$schema: 'http://json-schema.org/draft-07/schema#', type: string}
    title: List[str]
    type: array
required: [a]
title: A
type: object
"""
    ipce = cast(JSONSchema, oyaml_load(a))
    r = typelike_from_ipce(ipce)
    # print(r)
    assert_type_roundtrip(r)
if __name__ == "__main__":
setup_logging()
test_tuple_inside_class()
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_list.py
|
test_list.py
|
from typing import Any, Optional, TypeVar
from nose.tools import assert_equal
from zuper_ipce.pretty import pprint
from zuper_ipce_tests.test_utils import assert_type_roundtrip
from zuper_typing import dataclass, Generic
def test_type():
    """Substituting a type variable must propagate into nested generic fields."""
    X = TypeVar("X")
    Y = TypeVar("Y")

    class A:
        pass

    @dataclass
    class Another(Generic[Y]):
        data0: Y

    assert_equal("Another[Y]", Another.__name__)

    @dataclass
    class MyClass(Generic[X]):
        another: Another[X]

    # print(MyClass.__annotations__['another'].__annotations__['data0'])
    assert_equal(MyClass.__annotations__["another"].__annotations__["data0"], X)
    C = MyClass[A]
    # print(C.__annotations__["another"])
    # print(C.__annotations__["another"].__annotations__["data0"])
    assert_equal(C.__annotations__["another"].__annotations__["data0"], A)
    # print(C.__annotations__["another"])
    assert_equal(C.__annotations__["another"].__name__, "Another[A]")
    # cannot serialize because A is not a dataclass
    #
    # assert_type_roundtrip(Another, {})
    # assert_type_roundtrip(MyClass, {})
    # assert_type_roundtrip(C, {})
def test_type02():
    """Re-parameterizing a generic dataclass rewrites its field annotation."""
    X = TypeVar("X")
    V = TypeVar("V")

    @dataclass
    class MyClass(Generic[X]):
        data0: X

    unparametrized = MyClass
    reparametrized = MyClass[V]
    # The original keeps X; the subscripted variant carries V instead.
    assert unparametrized.__annotations__["data0"] == X
    assert reparametrized.__annotations__["data0"] == V
    assert_type_roundtrip(unparametrized)
    assert_type_roundtrip(reparametrized)
def test_type05():
    """Forward-referenced generic fields must specialize under subscription."""

    @dataclass
    class A:
        pass

    X = TypeVar("X")

    @dataclass
    class MyEntity(Generic[X]):
        guid: str
        # Three spellings of a self-referential annotation: a plain string,
        # Optional[...] wrapping a string, and Optional inside the string.
        forked1: "MyEntity[X]"
        forked2: Optional["MyEntity[X]"]
        forked3: "Optional[MyEntity[X]]"

    # print("%s" % MyEntity)
    # print("name: %s" % MyEntity.__name__)
    # resolve_types(MyEntity, locals())
    forked1_X = MyEntity.__annotations__["forked1"]
    # print(f"forked1_X: {forked1_X!r}")
    forked2_X = MyEntity.__annotations__["forked2"]
    # print(f"forked2_X: {forked2_X!r}")
    forked3_X = MyEntity.__annotations__["forked3"]
    # print(f"forked3_X: {forked3_X!r}")
    E = MyEntity[A]
    forked1_A = E.__annotations__["forked1"]
    # print(f"forked1_A: {forked1_A!r}")
    forked2_A = E.__annotations__["forked2"]
    # print(f"forked2_A: {forked2_A!r}")
    forked3_A = E.__annotations__["forked3"]
    # print(f"forked3_A: {forked3_A!r}")
    # Subscripting with A must rename the class and specialize every
    # self-referential annotation, regardless of its spelling above.
    assert_equal(E.__name__, "MyEntity[A]")
    # assert_equal(E.__annotations__['parent'].__args__[0].__name__, Entity[Any].__name__)
    # print(E.__annotations__["forked1"])
    assert_equal(E.__annotations__["forked1"].__name__, MyEntity[A].__name__)
    # print(E.__annotations__["forked2"])
    assert_equal(
        E.__annotations__["forked2"].__args__[0].__name__, MyEntity[A].__name__
    )
    assert_type_roundtrip(MyEntity)
def test_type06():
    """Substitution must reach through two nested generic layers."""

    @dataclass
    class Values:
        a: int

    Z = TypeVar("Z")
    U = TypeVar("U")
    M = TypeVar("M")

    @dataclass
    class EntityUpdateProposal(Generic[M]):
        proposal: M

    # One level: subscripting with a TypeVar renames and re-annotates.
    A = EntityUpdateProposal[Z]
    assert_equal(A.__name__, "EntityUpdateProposal[Z]")
    assert_equal(A.__annotations__["proposal"], Z)

    @dataclass
    class Signed(Generic[U]):
        value: U

    # Two levels: the inner generic appears inside the outer one's name.
    B = Signed[EntityUpdateProposal[Z]]
    assert_equal(B.__name__, "Signed[EntityUpdateProposal[Z]]")
    assert_equal(B.__annotations__["value"].__name__, "EntityUpdateProposal[Z]")

    @dataclass
    class VersionChainWithAuthors(Generic[Z]):
        # signed_proposals: List[Signed[EntityUpdateProposal[Z]]]
        signed_proposal: Signed[EntityUpdateProposal[Z]]
        # previous: 'Optional[VersionChainWithAuthors[Z]]' = None

    # print("**********\n\n\n")
    # Substituting Z := Values must propagate through both nested generics.
    C = VersionChainWithAuthors[Values]
    pprint("C annotations", C=C, **C.__annotations__)
    assert_equal(C.__name__, "VersionChainWithAuthors[Values]")
    assert_equal(
        C.__annotations__["signed_proposal"].__name__,
        "Signed[EntityUpdateProposal[Values]]",
    )
    # print(oyaml_dump(ipce_from_typelike(C)))
    #
    # assert_equal(E.__name__, 'Entity[A]')
    # assert_equal(E.__annotations__['parent'].__args__[0].__name__, Entity[Any].__name__)
    # pprint('Annotations of E', **E.__annotations__)
    # assert_equal(E.__annotations__['forked'].__args__[0].__name__, Entity[A].__name__)
    assert_type_roundtrip(A)
    assert_type_roundtrip(B)
def test_another():
    """A generic dataclass with optional self-referential fields round-trips."""
    X = TypeVar("X")

    @dataclass
    class Entity4(Generic[X]):
        guid: str
        # Self-references as forward-reference strings: one keeping the type
        # variable X, one fixed to Any; both default to None.
        forked: "Optional[Entity4[X]]" = None
        parent: "Optional[Entity4[Any]]" = None

    assert_type_roundtrip(Entity4)
if __name__ == "__main__":
    # Run a single test directly for quick interactive debugging.
    test_another()
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_extra.py
|
test_extra.py
|
from typing import Optional
from zuper_ipce_tests.test_utils import assert_type_roundtrip
from zuper_typing import dataclass
def test_recursive01():
    """Self-reference via a plain string annotation must round-trip."""

    @dataclass
    class Rec1:
        a: int
        parent: "Rec1"

    assert_type_roundtrip(Rec1)
def test_recursive02():
    """Self-reference with Optional inside the string must round-trip."""

    @dataclass
    class Rec2:
        a: int
        parent: "Optional[Rec2]"

    assert_type_roundtrip(Rec2)
def test_recursive03():
    """Self-reference with Optional wrapping the string must round-trip."""

    @dataclass
    class Rec3:
        a: int
        parent: Optional["Rec3"]

    assert_type_roundtrip(Rec3)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_recursive.py
|
test_recursive.py
|
from nose.tools import assert_equal
from zuper_ipce import ipce_from_typelike
from zuper_ipce.utils_text import oyaml_load
from zuper_typing import dataclass
def test_u_of_one():
    """The generated schema for a defaulted int field uses a one-element anyOf."""

    @dataclass
    class A:
        v: int = 2

    ipce = ipce_from_typelike(A)
    # print(oyaml_dump(ipce))
    # NOTE(review): the expected-YAML literal below appears to have lost its
    # original indentation in this copy (the keys under "properties"/"v" were
    # presumably nested); confirm against the upstream file before relying on
    # the comparison.
    expect = """\
$id: http://invalid.json-schema.org/test_u_of_one.<locals>.A#
$schema: http://json-schema.org/draft-07/schema#
__module__: zuper_ipce_tests.test_union_of_one
__qualname__: test_u_of_one.<locals>.A
order: [v]
properties:
v:
$schema: http://json-schema.org/draft-07/schema#
anyOf:
- {$schema: 'http://json-schema.org/draft-07/schema#', type: integer}
default: 2
title: A
type: object
"""
    expect_ipce = oyaml_load(expect)
    assert_equal(expect_ipce, ipce)
|
zuper-ipce-z5
|
/zuper-ipce-z5-5.3.0.tar.gz/zuper-ipce-z5-5.3.0/src/zuper_ipce_tests/test_union_of_one.py
|
test_union_of_one.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.