import os
import re
import itertools
from pathlib import Path
from schema_enforcer.utils import find_files, load_file
SCHEMA_TAG = "jsonschema"
class InstanceFileManager: # pylint: disable=too-few-public-methods
"""InstanceFileManager."""
def __init__(self, config):
"""Initialize the interface File manager.
The file manager will locate all potential instance files in the search directories.
Args:
            config (pydantic.BaseSettings): The Pydantic settings object.
"""
self.instances = []
self.config = config
# Find all instance files
# TODO need to load file extensions from the config
instance_files = find_files(
file_extensions=config.data_file_extensions,
search_directories=config.data_file_search_directories,
excluded_filenames=config.data_file_exclude_filenames,
excluded_directories=[config.main_directory],
return_dir=True,
)
# For each instance file, check if there is a static mapping defined in the config
# Create the InstanceFile object and save it
for root, filename in instance_files:
matches = set()
if filename in config.schema_mapping:
matches.update(config.schema_mapping[filename])
instance = InstanceFile(root=root, filename=filename, matches=matches)
self.instances.append(instance)
def add_matches_by_property_automap(self, schema_manager):
"""Adds schema_ids to matches by automapping top level schema properties to top level keys in instance data.
Args:
            schema_manager (schema_enforcer.schemas.manager.SchemaManager): Schema manager object
"""
for instance in self.instances:
instance.add_matches_by_property_automap(schema_manager)
def print_schema_mapping(self):
"""Print in CLI the matches for all instance files."""
print("{:50} Schema ID".format("Structured Data File")) # pylint: disable=consider-using-f-string
print("-" * 80)
print_strings = []
for instance in self.instances:
filepath = f"{instance.path}/{instance.filename}"
print_strings.append(f"{filepath:50} {sorted(instance.matches)}")
print("\n".join(sorted(print_strings)))
class InstanceFile:
"""Class to manage an instance file."""
def __init__(self, root, filename, matches=None):
"""Initializes InstanceFile object.
Args:
            root (string): Absolute path to the directory where the instance file is located.
filename (string): Name of the file.
            matches (set, optional): Set of schema IDs that match this instance file. Defaults to None.
"""
self.data = None
self.path = root
self.full_path = os.path.realpath(root)
self.filename = filename
# Internal vars for caching data
self._top_level_properties = set()
if matches:
self.matches = matches
else:
self.matches = set()
self._add_matches_by_decorator()
@property
def top_level_properties(self):
"""Return a list of top level properties in the structured data defined by the data pulled from _get_content.
Returns:
set: Set of the strings of top level properties defined by the data file
"""
if not self._top_level_properties:
content = self._get_content()
self._top_level_properties = set(content.keys())
return self._top_level_properties
def _add_matches_by_decorator(self, content=None):
"""Add matches which declare schema IDs they should adhere to using a decorator comment.
If a line of the form # jsonschema: <schema_id>,<schema_id> is defined in the data file, the
schema IDs will be added to the list of schema IDs the data will be checked for adherence to.
Args:
            content (string, optional): Content of the file to analyze. Defaults to None.
Returns:
set(string): Set of matches (strings of schema_ids) found in the file.
"""
if not content:
content = self._get_content(structured=False)
matches = set()
if SCHEMA_TAG in content:
line_regexp = r"^#.*{0}:\s*(.*)$".format(SCHEMA_TAG) # pylint: disable=consider-using-f-string
match = re.match(line_regexp, content, re.MULTILINE)
if match:
matches = {x.strip() for x in match.group(1).split(",")}
self.matches.update(matches)
def _get_content(self, structured=True):
"""Returns the content of the instance file.
Args:
structured (bool): Return structured data if true. If false returns the string representation of the data
stored in the instance file. Defaults to True.
Returns:
dict, list, or str: File Contents. Dict or list if structured is set to True. Otherwise returns a string.
"""
file_location = os.path.join(self.full_path, self.filename)
if not structured:
return Path(file_location).read_text(encoding="utf-8")
return load_file(file_location)
def add_matches_by_property_automap(self, schema_manager):
"""Adds schema_ids to self.matches by automapping top level schema properties to top level keys in instance data.
Args:
            schema_manager (schema_enforcer.schemas.manager.SchemaManager): Schema manager object
"""
matches = set()
for schema_id, schema_obj in schema_manager.iter_schemas():
if schema_obj.top_level_properties.intersection(self.top_level_properties):
matches.add(schema_id)
self.matches.update(matches)
def validate(self, schema_manager, strict=False):
"""Validate this instance file with all matching schema in the schema manager.
Args:
schema_manager (SchemaManager): A SchemaManager object.
            strict (bool, optional): True if the validation should automatically flag unsupported elements. Defaults to False.
Returns:
iterator: Iterator of ValidationErrors returned by schema.validate.
"""
# TODO need to add something to check if a schema is missing
# Create new iterator chain to be able to aggregate multiple iterators
errs = itertools.chain()
# Go over all schemas and skip any schema not present in the matches
for schema_id, schema in schema_manager.iter_schemas():
if schema_id not in self.matches:
continue
schema.validate(self._get_content(), strict)
results = schema.get_results()
errs = itertools.chain(errs, results)
schema.clear_results()
return errs
Source: schema-enforcer 1.2.2 (schema_enforcer/instances/file.py)
# Introduction
A toolkit for building program entry points.
The base class is designed so that entry points can be defined declaratively: by subclassing it and overriding specific fields and methods you control how the entry point's parameters are read.
The current implementation reads the required data, in order, from JSON files under specified paths, from environment variables, and from command line arguments.
It then checks the result against the configured JSON Schema and, once the data conforms, runs the registered callback function.
The entry tree may contain intermediate nodes, used to break up complex command lines; intermediate nodes are never executed themselves.
They pass the arguments down to the next level until an executable leaf is reached.
# Features
+ The command name is built from the lowercased subclass name
+ The command line help text is built automatically from the subclass docstring and its `epilog` and `description` fields
+ Environment variable parsing rules are built from the subclass's `schema` and `env_prefix` fields
+ Configuration parameters are read, in order, from JSON or YAML configuration files at the paths listed in the subclass's `default_config_file_paths` field
+ Command line arguments and configuration validation are constructed from the `schema` field
+ The function to execute once the configuration has been collected is registered with the `@as_main` decorator
+ Command line argument parsing can be customized by overriding the `parse_commandline_args` method
+ Entry nodes can register child nodes through the `regist_sub` method
# Installation
```bash
pip install schema_entry
```
# Usage
## Motivation
The `schema_entry` module provides a base class, `EntryPoint`, for building complex program entry points. Entry point parameters usually come from three sources:
1. configuration files
2. environment variables
3. command line arguments
Before docker became widespread, command line arguments were probably the most used of the three. Now that docker is everywhere, configuration files (docker config) and environment variables (the environment field) have become much more important.
With them comes the problem of validation: the standard library's `argparse` has decent built-in constraint checking, but parameters coming from configuration files and environment variables need to be validated separately.
The goal of this project is to simplify the very common task of defining an entry point and to make the code as declarative as possible.
## How to use
First, let's look at what an entry point consists of.
A program entry point may be simple or complex, but it is always one of two kinds:
1. An intermediate node, such as `docker stack`: it performs no operation itself, it only indicates that the operation concerns a submodule. Run on its own it does nothing; only subcommands underneath it, such as `git submodule add`, are actually executable nodes. In this design, running an intermediate node on its own simply prints its help text.
2. An executable node, such as `docker run`: this is a node that can actually be run.
The basic workflow of this module is:
1. Define the different nodes by subclassing `EntryPoint` and overriding its fields
2. Instantiate the `EntryPoint` subclasses and use the instance methods `regist_subcmd` or `regist_sub` to define the type of each node and the order in which nodes are invoked
3. Use the instance method `as_main` (a decorator) of each `executable node` to register its entry function.
4. On the command line, type the command from the `root node` down to the `executable node`; the parameters collected from configuration files, environment variables and command line arguments are then passed to the registered entry function when it is called (see the sketch below).
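A minimal sketch of that workflow, based on the API shown in this README (the `Docker` and `Run` class names and the `image` field are made up for illustration; the root node is assumed to be invoked with `sys.argv[1:]`, matching `EntryPoint.__call__`):
```python
import sys
from schema_entry import EntryPoint

class Docker(EntryPoint):
    """An intermediate node: `docker ...`."""

class Run(EntryPoint):
    """An executable node: `docker run`."""
    schema = {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "properties": {
            "image": {"type": "string"}
        },
        "required": ["image"]
    }

root = Docker()
run_cmd = root.regist_sub(Run)

@run_cmd.as_main
def main(image):
    # called with the parsed configuration once validation has passed
    print(f"running {image}")

if __name__ == "__main__":
    root(sys.argv[1:])  # e.g. `docker run --image python:3.10`
```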
### Node name
A node can be named by defining the `_name` field; if it is not set, the node name defaults to the lowercased name of the subclass.
### Node help text
The usage string can be set via the `usage` field; if it is not defined, one is constructed automatically: for intermediate nodes it is `root subcmd ... [subcmd]`,
while for executable nodes it is `root subcmd ... entry [options]`.
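A hypothetical node overriding both fields might look like this (the names are illustrative):
```python
from schema_entry import EntryPoint

class Status(EntryPoint):
    """Show status information."""
    _name = "st"                  # the command is now `st` instead of `status`
    usage = "myapp st [options]"  # replaces the auto-generated usage string
```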
### Executable nodes
As mentioned above, an executable node has three jobs:
1. collect configuration parameters from configuration files, environment variables and command line arguments
2. [optional] validate that the configuration parameters meet the requirements
3. [optional] pass the configuration into the program as arguments.
#### Validating parameters by defining the `schema` field
Defining the `schema` field activates validation:
```python
class Test_A(EntryPoint):
default_config_file_paths = [
"/test_config.json",
str(Path.home().joinpath(".test_config.json")),
"./test_config.json"
]
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"a": {
"type": "integer"
}
},
"required": ["a"]
}
```
After parsing, an `EntryPoint` subclass checks whether the resulting parameter dictionary conforms to the schema.
The `schema` field cannot be written arbitrarily, though; it must follow a subset of JSON Schema:
```json
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"properties": {
"type": "object",
"minProperties": 1,
"additionalProperties": False,
"patternProperties": {
"^\\w+$": {
"oneOf": [
{
"type": "object",
"additionalProperties": False,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "boolean"
},
"default": {
"type": "boolean",
},
"const": {
"type": "string"
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": "^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": false,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "string"
},
"default": {
"type": "string",
},
"const": {
"type": "string"
},
"enum": {
"type": "array",
"items": {
"type": "string"
}
},
"maxLength": {
"type": "integer",
"minimum": 0
},
"minLength": {
"type": "integer",
"minimum": 0
},
"pattern": {
"type": "string"
},
"format": {
"type": "string"
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": r"^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": false,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "number"
},
"default": {
"type": "number",
},
"const": {
"type": "number"
},
"enum": {
"type": "array",
"items": {
"type": "number"
}
},
"maximum": {
"type": "number",
},
"exclusiveMaximum": {
"type": "number",
},
"minimum": {
"type": "number",
},
"exclusiveMinimum": {
"type": "number",
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": "^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": false,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "integer"
},
"default": {
"type": "integer",
},
"const": {
"type": "integer"
},
"enum": {
"type": "array",
"items": {
"type": "integer"
}
},
"maximum": {
"type": "integer",
},
"exclusiveMaximum": {
"type": "integer",
},
"minimum": {
"type": "integer",
},
"exclusiveMinimum": {
"type": "integer",
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": "^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": false,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "array"
},
"default": {
"type": "array",
"items": {
"type": ["string", "number", "integer"]
}
},
"items": {
"type": "object",
"required": ["type"],
"additionalProperties": false,
"properties": {
"type": {
"type": "string",
"enum": ["string", "number", "integer"]
},
"enum":{
"type": "array"
}
}
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": "^[a-b]|[d-z]$"
}
}
}
]
}
}
},
"type": {
"type": "string",
"const": "object"
},
"required": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": ["properties", "type"]
}
```
In short:
1. The top level must contain the `properties` and `type` fields, `type` must be `object`, and a `required` field is allowed
2. The field names inside the top level `properties` may only consist of digits, letters and `_`
3. A field's type must be one of `string`, `boolean`, `number`, `integer`, `array`
4. If a field's type is `array`, it must contain `items`, and `items` must have a `type` field whose value is one of `string`, `number`, `integer` (see the sketch below)
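For example, the following schema stays within that subset (the field names are illustrative): two scalar fields plus an array field whose `items` declares one of the allowed element types:
```python
schema = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "retries": {"type": "integer", "default": 3},
        "tags": {
            "type": "array",
            "items": {"type": "string"}
        }
    },
    "required": ["name"]
}
```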
If you do not want validation at all, set `verify_schema` to `False` to switch the feature off.
#### Default values from the schema
When defining the schema, each field described under `properties` may declare a default value via its `default` field:
```python
class Test_A(EntryPoint):
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"a_a": {
"type": "number"
"default": 10.1
}
},
"required": ["a_a"]
}
```
This way, even if no other source supplies the parameter, the default value acts as a fallback.
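A short sketch of that fallback, reusing the `Test_A` class above and assuming no configuration file, environment variable or command line flag supplies `a_a`:
```python
root = Test_A()

@root.as_main
def main(a_a):
    print(a_a)  # expected to fall back to the schema default, 10.1

root([])
```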
#### Reading configuration from specified files
The `default_config_file_paths` field lists a fixed set of paths to look for configuration files; both `json` and `yaml` formats are supported.
The field `config_file_only_get_need` controls how configuration files are read (it defaults to `True`):
when it is `True`, only the fields defined in the schema are read from the configuration file; otherwise all fields are loaded.
You can also set `load_all_config_file = True` to read every preset configuration file location in the configured order.
The default configuration file paths form a list that is searched in order; the first configuration file found that meets the requirements is the one that gets read.
```python
from pathlib import Path
from schema_entry import EntryPoint
class Test_A(EntryPoint):
default_config_file_paths = [
"/test_config.json",
str(Path.home().joinpath(".test_config.json")),
"./test_config.json",
"./test_config_other.json"
]
```
##### Custom parsers for specific configuration file names
`@regist_config_file_parser(config_file_name)` registers how a configuration file with a specific name should be parsed. This makes configuration file loading much easier to customize:
```python
import json
from pathlib import Path
from typing import Any, Dict

from schema_entry import EntryPoint

class Test_AC(EntryPoint):
load_all_config_file = True
default_config_file_paths = [
"./test_config.json",
"./test_config1.json",
"./test_other_config2.json"
]
root = Test_AC()
@root.regist_config_file_parser("test_other_config2.json")
def _1(p: Path) -> Dict[str, Any]:
with open(p) as f:
temp = json.load(f)
return {k.lower(): v for k, v in temp.items()}
```
If you prefer to fix this when defining the subclass, you can also define `_config_file_parser_map: Dict[str, Callable[[Path], Dict[str, Any]]]`:
```python
import json
from pathlib import Path
from typing import Any, Dict

from schema_entry import EntryPoint

def test_other_config2_parser(p: Path) -> Dict[str, Any]:
with open(p) as f:
temp = json.load(f)
return {k.lower(): v for k, v in temp.items()}
class Test_AC(EntryPoint):
load_all_config_file = True
default_config_file_paths = [
"./test_config.json",
"./test_config1.json",
"./test_other_config2.json"
]
_config_file_parser_map = {
"test_other_config2.json": test_other_config2_parser
}
root = Test_AC()
```
#### Reading configuration parameters from environment variables
To read configuration from environment variables, the `schema` field must be set; `EntryPoint` parses environment variables according to the field names and field types defined under `properties`.
The environment variable key follows the rule `<prefix>_<field name in upper case>`. The default prefix is the upper-cased command path, i.e. `...<GRANDPARENT COMMAND>_<PARENT COMMAND>_<SUBCOMMAND>`.
The default prefix can be replaced by setting the `env_prefix` field; the replacement prefix is still converted to upper case.
```python
class Test_A(EntryPoint):
env_prefix = "app"
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"a_a": {
"type": "number"
}
},
"required": ["a_a"]
}
```
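A sketch of how the environment variable is picked up for the class above: with `env_prefix = "app"` and the field `a_a`, the key becomes `APP_A_A`, and the value is parsed as a number because that is the type the schema declares (the programmatic `root([])` call is an assumption based on `EntryPoint.__call__`):
```python
import os

os.environ["APP_A_A"] = "10.1"

root = Test_A()

@root.as_main
def main(a_a):
    print(a_a)  # the value parsed from APP_A_A

root([])
```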
If you do not want configuration to be parsed from environment variables, set `parse_env` to `False`.
#### Reading configuration parameters from the command line
Once the `schema` is defined, every parameter declared in it can be read from the command line in the form `--xxxx`; note that `_` in schema field names is replaced by `-`.
If a field's schema contains a `title`, that title is used as the short command line flag, i.e. the `-x` form.
Command line parsing uses the standard library `argparse`; the constructed parser's `usage`, `epilog` and `description` are taken from the `usage` and `epilog` fields and the docstring defined on the class, and `argv` is whatever command line arguments remain when the node is reached (each additional node strips one argument from the left).
Normally every constructed command line option is optional. If you want one schema field to be the positional form without `--`, set `argparse_noflag` to that field when defining the class; if you want required fields to be enforced on the command line, set `argparse_check_required=True`. Note that a field designated as `noflag` automatically becomes required.
```python
class Test_A(EntryPoint):
argparse_noflag = "a"
argparse_check_required=True
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"a": {
"type": "number"
},
"b": {
"type": "number"
}
},
"required": ["a","b"]
}
```
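Invoking the node above, `a` is positional while `b` keeps its flag; a programmatic call equivalent to the command line is sketched here (assuming the node is invoked with an argv list, per `EntryPoint.__call__`):
```python
root = Test_A()

@root.as_main
def main(a, b):
    print(a, b)

# equivalent to running: test_a 1 --b 2
root(["1", "--b", "2"])
```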
On the command line, `-c`/`--config` is available by default to point at a configuration file to read; how it is read follows the configuration file settings described above.
#### Configuration load order
Configuration is read in the order `default values defined in the schema` -> `configured configuration file paths` -> `configuration file given on the command line` -> `environment variables` -> `command line arguments`; the override order is the reverse, so sources read later take precedence.
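A sketch of that precedence with a made-up class that combines a schema default and an environment prefix (the `Test_B` name, the values and the flags are illustrative):
```python
import os
from schema_entry import EntryPoint

class Test_B(EntryPoint):
    """Illustrates the override order."""
    env_prefix = "app"
    schema = {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "properties": {
            "a_a": {"type": "number", "default": 10.1}
        },
        "required": ["a_a"]
    }

root = Test_B()

@root.as_main
def main(a_a):
    print(a_a)

root([])                    # nothing else is set, so the schema default applies
os.environ["APP_A_A"] = "20"
root([])                    # the environment variable overrides the default
root(["--a-a", "30"])       # the command line flag overrides everything else
```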
#### Registering the entry function
The instance decorator method `as_main` registers the entry function of an executable node; the registered function runs once the parameters have been parsed and receives the parsed configuration as `**config`:
```python
root = Test_A()
@root.as_main
def main(a,b):
print(a)
print(b)
```
Another way to provide the entry function is to override the subclass's `do_main(self) -> None` method:
```python
class Test_A(EntryPoint):
argparse_noflag = "a"
argparse_check_required=True
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"a": {
"type": "number"
},
"b": {
"type": "number"
}
},
"required": ["a","b"]
}
def do_main(self)->None:
print(self.config)
```
#### Reading the configuration directly from the node
The node's `config` property returns a copy of the current configuration each time it is accessed; the configuration itself is not writable.
```python
print(root.config)
```
### Intermediate nodes
An intermediate node cannot execute a program; it only describes a group of related commands, so in effect it serves as a `help` command. An intermediate node, by definition, cannot be executed, and a node only counts as an intermediate node once it has at least one child. So even if a node defines all of the configuration above, having children means it will not follow the execution flow described above.
With intermediate nodes you can build very complex launch command trees, as sketched below.
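A sketch of a deeper tree (all class names here are made up); only the leaf node is executable:
```python
import sys
from schema_entry import EntryPoint

class Mycli(EntryPoint):
    """Root of the tree (intermediate node)."""

class Stack(EntryPoint):
    """`mycli stack ...` (intermediate node)."""

class Deploy(EntryPoint):
    """`mycli stack deploy` (executable leaf node)."""
    schema = {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "properties": {
            "name": {"type": "string"}
        },
        "required": ["name"]
    }

root = Mycli()
stack = root.regist_sub(Stack)
deploy = stack.regist_sub(Deploy)

@deploy.as_main
def main(name):
    print(f"deploying stack {name}")

if __name__ == "__main__":
    root(sys.argv[1:])  # e.g. `mycli stack deploy --name web`
```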
#### Registering child nodes
Intermediate nodes offer two interfaces for registration:
+ `regist_subcmd` registers an already instantiated child node
```python
class A(EntryPoint):
pass
class B(EntryPoint):
pass
a = A()
b = B()
a.regist_subcmd(b)
```
+ `regist_sub` registers a child node class and returns an instance of the registered node
```python
class A(EntryPoint):
pass
class B(EntryPoint):
pass
a = A()
b = a.regist_sub(B)
```
Source: schema-entry 0.1.5 (README.md)
import abc
import argparse
from pathlib import Path
from typing import Callable, Sequence, Dict, Any, Optional, Tuple, List, Union
from mypy_extensions import TypedDict
class ItemType(TypedDict):
type: str
enum: List[Union[int, float, str]]
class PropertyType(TypedDict):
type: str
title: str
description: str
enum: List[Union[int, float, str]]
default: Union[int, float, str, bool]
items: ItemType
class SchemaType(TypedDict):
required: List[str]
type: str
properties: Dict[str, PropertyType]
class EntryPointABC(abc.ABC):
"""程序入口类.
Attributes:
epilog (str): 命令行展示介绍时的epilog部分
usage (str): 命令行展示介绍时的使用方法介绍
parent (Optional["EntryPointABC"]): 入口节点的父节点.Default None
schema (Optional[Dict[str, Any]]): 入口节点的设置需要满足的json schema对应字典.Default None
verify_schema (bool): 获得设置后节点是否校验设置是否满足定义的json schema模式
default_config_file_paths (Sequence[str]): 设置默认的配置文件位置.
config_file_only_get_need (bool): 设置是否只从配置文件中获取schema中定义的配置项
load_all_config_file (bool): 设置的默认配置文件全部加载.
env_prefix (str): 设置环境变量的前缀
parse_env (bool): 展示是否解析环境变量
argparse_check_required (bool): 命令行参数是否解析必填项为必填项
argparse_noflag (Optional[str]): 命令行参数解析哪个字段为无`--`的参数
"""
epilog: str
usage: str
_name: str
parent: Optional["EntryPointABC"]
schema: Optional[SchemaType] # Optional[Dict[str, Union[str, List[str], Dict[str, Dict[str, Any]]]]]
verify_schema: bool
default_config_file_paths: Sequence[str]
config_file_only_get_need: bool
load_all_config_file: bool
env_prefix: Optional[str]
parse_env: bool
argparse_check_required: bool
argparse_noflag: Optional[str]
_subcmds: Dict[str, "EntryPointABC"]
_main: Optional[Callable[..., None]]
_config_file_parser_map: Dict[str, Callable[[Path], Dict[str, Any]]]
_config: Dict[str, Any]
@abc.abstractproperty
def name(self) -> str:
"""实例的名字.
实例名字就是它的构造类名.
"""
@abc.abstractproperty
def prog(self) -> str:
"""命令路径."""
@abc.abstractproperty
def config(self) -> Dict[str, Any]:
"""执行配置.
配置为只读数据.
"""
@abc.abstractmethod
def regist_subcmd(self, subcmd: "EntryPointABC") -> None:
"""注册子命令.
Args:
subcmd (EntryPointABC): 子命令的实例
"""
@abc.abstractmethod
def regist_sub(self, subcmdclz: type, **kwargs: Any) -> "EntryPointABC":
        '''Register a subcommand.
        Args:
            subcmdclz (EntryPointABC): The class that defines the subcommand.
        Returns:
            [EntryPointABC]: An instance of the registered class.
        '''
@abc.abstractmethod
def regist_config_file_parser(self, file_name: str) -> Callable[[Callable[[Path], Dict[str, Any]]], Callable[[Path], Dict[str, Any]]]:
        '''Register how a configuration file with a specific name is parsed.
        Args:
            file_name (str): The file name to match.
        Returns:
            Callable[[Callable[[Path], None]], Callable[[Path], None]]: The registered parser function.
        '''
@abc.abstractmethod
def as_main(self, func: Callable[..., None]) -> Callable[..., None]:
"""注册函数在解析参数成功后执行.
执行顺序按被注册的顺序来.
Args:
func (Callable[[Dict[str,Any]],None]): 待执行的参数.
"""
@abc.abstractmethod
def __call__(self, argv: Sequence[str]) -> None:
"""执行命令.
如果当前的命令节点不是终点(也就是下面还有子命令)则传递参数到下一级;
如果当前节点已经是终点则解析命令行参数,环境变量,指定路径后获取参数,然后构造成配置,并检验是否符合定义的json schema模式.
然后如果通过验证并有注册执行函数的话则执行注册的函数.
Args:
argv (Sequence[str]): [description]
"""
@abc.abstractmethod
def pass_args_to_sub(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> None:
"""解析复杂命令行参数并将参数传递至下一级."""
@abc.abstractmethod
def parse_commandline_args(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        '''By default an endpoint performs no further command line parsing; override this method in a subclass if it should.
        Args:
            parser (argparse.ArgumentParser): The command line parser object.
            argv (Sequence[str]): The argument list to parse.
        Returns:
            Tuple[Dict[str, Any], Dict[str, Any]]: The configuration read from the config file given on the command line, and the configuration gathered from the other command line flags.
        '''
@abc.abstractmethod
def parse_env_args(self) -> Dict[str, Any]:
"""从环境变量中读取配置.
必须设定json schema,且parse_env为True才能从环境变量中读取配置.
程序会读取schema结构,并解析其中的`properties`字段.如果没有定义schema则不会解析环境变量.
如果是列表型的数据,那么使用`,`分隔,如果是object型的数据,那么使用`key:value;key:value`的形式分隔
Returns:
Dict[str,Any]: 环境变量中解析出来的参数.
"""
@abc.abstractmethod
def parse_configfile_args(self) -> Dict[str, Any]:
"""从指定的配置文件队列中构造配置参数.
目前只支持json格式的配置文件.
指定的配置文件路径队列中第一个json格式且存在的配置文件将被读取解析.
一旦读取到了配置后面的路径将被忽略.
Args:
argv (Sequence[str]): 配置的可能路径
Returns:
Dict[str,Any]: 从配置文件中读取到的配置
"""
@abc.abstractmethod
def validat_config(self) -> bool:
"""校验配置.
在定义好schema,解析到config并且verify_schema为True后才会进行校验.
Returns:
bool: 是否通过校验
"""
@abc.abstractmethod
def do_main(self) -> None:
"""执行入口函数."""
@abc.abstractmethod
def parse_args(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> None:
"""解析获取配置
配置的加载顺序为: 指定路径的配置文件->环境变量->命令行参数
在加载完配置后校验是否满足schema的要求.
Args:
parser (argparse.ArgumentParser): 命令行参数解析器
argv (Sequence[str]): 命令行参数序列
"""
Source: schema-entry 0.1.5 (schema_entry/entrypoint_base.py)
import os
import sys
import json
import warnings
import argparse
import functools
from copy import deepcopy
from pathlib import Path
from typing import Callable, Sequence, Dict, List, Any, Tuple, Optional
from jsonschema import validate
from yaml import load as yaml_load
from .protocol import SUPPORT_SCHEMA
from .utils import get_parent_tree, parse_value_string_by_schema, parse_schema_as_cmd
from .entrypoint_base import SchemaType, PropertyType, EntryPointABC
class EntryPoint(EntryPointABC):
epilog = ""
usage = ""
_name = ""
parent = None
schema = None
verify_schema = True
default_config_file_paths: List[str] = []
config_file_only_get_need = True
load_all_config_file = False
env_prefix = None
parse_env = True
argparse_check_required = False
argparse_noflag: Optional[str] = None
_config_file_parser_map: Dict[str, Callable[[Path], Dict[str, Any]]] = {}
def _check_schema(self) -> None:
if self.schema is not None:
try:
validate(instance=self.schema, schema=SUPPORT_SCHEMA)
except Exception as e:
warnings.warn(str(e))
raise e
# sys.exit(1)
def __init__(self, *,
description: Optional[str] = None,
epilog: Optional[str] = None,
usage: Optional[str] = None,
name: Optional[str] = None,
schema: Optional[SchemaType] = None,
verify_schema: Optional[bool] = None,
default_config_file_paths: Optional[List[str]] = None,
config_file_only_get_need: Optional[bool] = None,
load_all_config_file: Optional[bool] = None,
env_prefix: Optional[str] = None,
parse_env: Optional[bool] = None,
argparse_check_required: Optional[bool] = None,
argparse_noflag: Optional[str] = None,
config_file_parser_map: Optional[Dict[str, Callable[[Path], Dict[str, Any]]]] = None,
main: Optional[Callable[..., None]] = None
) -> None:
"""初始化时定义配置.
使用这一特性我们就可以不用继承也可以定义节点了.这一特性比较适合用于那些非叶子节点.
Args:
description (Optional[str], optional): 节点命令行的描述信息. Defaults to None.
epilog (Optional[str], optional): 节点命令行的epilog信息. Defaults to None.
usage (Optional[str], optional): 节点命令行的usage信息. Defaults to None.
name (Optional[str], optional): 节点的name属性. Defaults to None.
schema (Optional[Dict[str, Union[str, List[str], Dict[str, Dict[str, Any]]]]], optional): 节点的校验json schema. Defaults to None.
verify_schema (Optional[bool], optional): 配置是否校验schema. Defaults to None.
default_config_file_paths (Optional[List[str]], optional): 默认配置文件路径列表. Defaults to None.
config_file_only_get_need (Optional[bool], optional): 设置是否在加载配置文件时只获取schema中定义的内容. Defaults to None.
load_all_config_file (Optional[bool], optional): 是否尝试加载全部指定的配置文件路径下的配置文件. Defaults to None.
env_prefix (Optional[str], optional): 设置环境变量的前缀. Defaults to None.
parse_env (Optional[bool], optional): 设置是否加载环境变量. Defaults to None.
argparse_check_required (Optional[bool], optional): 设置是否构造叶子节点命令行时指定schema中定义为必须的参数项为必填项. Defaults to None.
argparse_noflag (Optional[str], optional): 指定命令行中noflag的参数. Defaults to None.
config_file_parser_map (Optional[Dict[str, Callable[[Path], Dict[str, Any]]]], optional): 设置自定义配置文件名的解析映射. Defaults to None.
main (Optional[Callable[..., None]], optional): 设置作为入口的执行函数. Defaults to None.
"""
if description is not None:
self.__doc__ = description
if epilog is not None:
self.epilog = epilog
if usage is not None:
self.usage = usage
if name is not None:
self._name = name
if schema is not None:
self.schema = schema
if verify_schema is not None:
self.verify_schema = verify_schema
if default_config_file_paths is not None:
self.default_config_file_paths = default_config_file_paths
if config_file_only_get_need is not None:
self.config_file_only_get_need = config_file_only_get_need
if load_all_config_file is not None:
self.load_all_config_file = load_all_config_file
if env_prefix is not None:
self.env_prefix = env_prefix
if parse_env is not None:
self.parse_env = parse_env
if argparse_check_required is not None:
self.argparse_check_required = argparse_check_required
if argparse_noflag is not None:
self.argparse_noflag = argparse_noflag
if config_file_parser_map is not None:
self._config_file_parser_map = config_file_parser_map
if main is not None:
self._main = main
else:
self._main = None
self._check_schema()
self._subcmds = {}
self._config = {}
@ property
def name(self) -> str:
return self._name if self._name else self.__class__.__name__.lower()
@ property
def prog(self) -> str:
parent_list = get_parent_tree(self)
parent_list.append(self.name)
return " ".join(parent_list)
@ property
def config(self) -> Dict[str, Any]:
return deepcopy(self._config)
def regist_subcmd(self, subcmd: EntryPointABC) -> None:
subcmd.parent = self
self._subcmds[subcmd.name] = subcmd
def regist_sub(self, subcmdclz: type, **kwargs: Any) -> EntryPointABC:
instance = subcmdclz(**kwargs)
self.regist_subcmd(instance)
return instance
def as_main(self, func: Callable[..., None]) -> Callable[..., None]:
@ functools.wraps(func)
def warp(*args: Any, **kwargs: Any) -> None:
return func(*args, **kwargs)
self._main = warp
return warp
def __call__(self, argv: Sequence[str]) -> None:
if not self.usage:
if len(self._subcmds) == 0:
self.usage = f"{self.prog} [options]"
else:
self.usage = f"{self.prog} [subcmd]"
parser = argparse.ArgumentParser(
prog=self.prog,
epilog=self.epilog,
description=self.__doc__,
usage=self.usage)
if len(self._subcmds) != 0:
if self.epilog:
epilog = self.epilog
else:
epilog = "子命令描述:\n"
rows = []
for subcmd, ins in self._subcmds.items():
if ins.__doc__ and isinstance(ins.__doc__, str):
desc = ins.__doc__.splitlines()[0]
rows.append(f"{subcmd}\t{desc}")
else:
rows.append(f"{subcmd}")
epilog += "\n".join(rows)
parser = argparse.ArgumentParser(
prog=self.prog,
epilog=epilog,
description=self.__doc__,
usage=self.usage,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.pass_args_to_sub(parser, argv)
else:
parser = argparse.ArgumentParser(
prog=self.prog,
epilog=self.epilog,
description=self.__doc__,
usage=self.usage)
self.parse_args(parser, argv)
def pass_args_to_sub(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> None:
scmds = list(self._subcmds.keys())
scmdss = ",".join(scmds)
parser.add_argument('subcmd', help=f'执行子命令,可选的子命有{scmdss}')
args = parser.parse_args(argv[0:1])
if self._subcmds.get(args.subcmd):
self._subcmds[args.subcmd](argv[1:])
else:
print(f'未知的子命令 `{argv[0]}`')
parser.print_help()
sys.exit(1)
def _make_commandline_parse_by_schema(self, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
if self.schema is None:
raise AttributeError("此处不该被执行")
else:
properties: Dict[str, PropertyType] = self.schema.get("properties", {})
requireds: List[str] = self.schema.get("required", [])
for key, prop in properties.items():
required = False
noflag = False
if self.argparse_noflag == key:
noflag = True
else:
if self.argparse_check_required and key in requireds:
required = True
parser = parse_schema_as_cmd(key, prop, parser, required=required, noflag=noflag)
return parser
def _parse_commandline_args_by_schema(self,
parser: argparse.ArgumentParser,
argv: Sequence[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
if self.schema:
parser = self._make_commandline_parse_by_schema(parser)
args = parser.parse_args(argv)
config_file_res: Dict[str, Any] = {}
cmd_res: Dict[str, Any] = {}
for key, value in vars(args).items():
if key == "config":
if value:
p = Path(value)
if not p.is_file():
warnings.warn(f"{str(p)}不是文件")
continue
if p.suffix == ".json":
config_file_res = self.parse_json_configfile_args(p)
elif p.suffix == ".yml":
config_file_res = self.parse_yaml_configfile_args(p)
else:
warnings.warn(f"跳过不支持的配置格式的文件{str(p)}")
continue
else:
continue
else:
try:
if self.schema is not None and self.schema.get("properties") is not None:
if self.schema["properties"].get(key) is not None and self.schema["properties"][key]["type"] == "boolean":
if value is False:
if self.schema.get("required") is None:
value = None
else:
if key not in self.schema["required"]:
value = None
except Exception as e:
warnings.warn(str(e))
finally:
if value is not None:
cmd_res.update({
key: value
})
return config_file_res, cmd_res
def parse_commandline_args(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""解析命令行获得参数
Args:
parser (argparse.ArgumentParser): 命令行解析器
argv (Sequence[str]): 命令行参数序列
Returns:
Tuple[Dict[str, Any], Dict[str, Any]]: 命令行指定配置文件获得的参数,其他命令行参数获得的参数
"""
parser.add_argument("-c", "--config", type=str, help='指定配置文件位置')
return self._parse_commandline_args_by_schema(parser, argv)
def _parse_env_args(self, key: str, info: PropertyType) -> Any:
if self.env_prefix:
env_prefix = self.env_prefix.upper()
else:
env_prefix = self.prog.replace(" ", "_").upper()
key = key.replace("-", "_")
env = os.environ.get(f"{env_prefix}_{key.upper()}")
if not env:
env = None
else:
env = parse_value_string_by_schema(info, env)
return env
def parse_env_args(self) -> Dict[str, Any]:
properties: Dict[str, Any]
if self.schema and self.parse_env:
properties = self.schema.get("properties", {})
result = {}
for key, info in properties.items():
value = self._parse_env_args(key, info)
if value is not None:
result.update({
key: value
})
return result
else:
return {}
def file_config_filter(self, file_param: Dict[str, Any]) -> Dict[str, Any]:
"""根据条件筛选从文件中获得的参数.
Args:
file_param (Dict[str, Any]): 文件中获得的全量参数
Returns:
Dict[str, Any]: 筛选过后的参数
"""
if self.config_file_only_get_need and self.schema is not None and self.schema.get("properties") is not None:
needs = list(self.schema["properties"].keys())
res = {}
for key in needs:
if file_param.get(key) is not None:
res[key] = file_param.get(key)
return res
return file_param
def parse_json_configfile_args(self, p: Path) -> Dict[str, Any]:
with open(p, "r", encoding="utf-8") as f:
result = json.load(f)
return result
def parse_yaml_configfile_args(self, p: Path) -> Dict[str, Any]:
with open(p, "r", encoding="utf-8") as f:
result = yaml_load(f)
return result
def regist_config_file_parser(self, file_name: str) -> Callable[[Callable[[Path], Dict[str, Any]]], Callable[[Path], Dict[str, Any]]]:
def decorate(func: Callable[[Path], Dict[str, Any]]) -> Callable[[Path], Dict[str, Any]]:
@functools.wraps(func)
def wrap(p: Path) -> Dict[str, Any]:
return func(p)
self._config_file_parser_map[file_name] = func
return wrap
return decorate
def parse_configfile_args(self) -> Dict[str, Any]:
if not self.default_config_file_paths:
return {}
if not self.load_all_config_file:
for p_str in self.default_config_file_paths:
p = Path(p_str)
if p.is_file():
parfunc = self._config_file_parser_map.get(p.name)
if parfunc:
return self.file_config_filter(parfunc(p))
if p.suffix == ".json":
return self.file_config_filter(self.parse_json_configfile_args(p))
elif p.suffix == ".yml":
return self.file_config_filter(self.parse_yaml_configfile_args(p))
else:
warnings.warn(f"跳过不支持的配置格式的文件{str(p)}")
else:
warnings.warn("配置文件的指定路径都不可用.")
return {}
else:
result = {}
for p_str in self.default_config_file_paths:
p = Path(p_str)
if p.is_file():
parfunc = self._config_file_parser_map.get(p.name)
if parfunc:
result.update(self.file_config_filter(parfunc(p)))
else:
if p.suffix == ".json":
result.update(self.file_config_filter(self.parse_json_configfile_args(p)))
elif p.suffix == ".yml":
result.update(self.file_config_filter(self.parse_yaml_configfile_args(p)))
else:
warnings.warn(f"跳过不支持的配置格式的文件{str(p)}")
return result
def validat_config(self) -> bool:
if self.verify_schema:
if self.schema and self.config:
try:
validate(instance=self.config, schema=self.schema)
except Exception as e:
warnings.warn(str(e))
return False
else:
return True
else:
warnings.warn("必须有schema和config才能校验.")
return True
else:
return True
def do_main(self) -> None:
if self._main is None:
print("未注册main函数")
sys.exit(1)
else:
config = self.config
self._main(**config)
def parse_default(self) -> Dict[str, Any]:
if self.schema:
prop = self.schema.get("properties")
if prop:
return {key: sch.get("default") for key, sch in prop.items() if sch.get("default")}
return {}
return {}
def parse_args(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> None:
"""解析获取配置
配置的加载顺序为: 指定路径的配置文件->环境变量->命令行参数
在加载完配置后校验是否满足schema的要求.
Args:
parser (argparse.ArgumentParser): 命令行参数解析器
argv (Sequence[str]): 命令行参数序列
"""
        # defaults defined in the schema
default_config = self.parse_default()
self._config.update(default_config)
        # configuration from the default configuration files
file_config = self.parse_configfile_args()
self._config.update(file_config)
        # configuration from the configuration file given on the command line
cmd_config_file_config, cmd_config = self.parse_commandline_args(parser, argv)
self._config.update(cmd_config_file_config)
        # configuration from environment variables
env_config = self.parse_env_args()
self._config.update(env_config)
        # configuration given on the command line
self._config.update(cmd_config)
if self.validat_config():
self.do_main()
else:
sys.exit(1)
Source: schema-entry 0.1.5 (schema_entry/entrypoint.py)
import os
import sys
import json
import warnings
import argparse
import functools
from copy import deepcopy
from pathlib import Path
from typing import Callable, Sequence, Dict, List, Any, Tuple, Optional
from jsonschema import validate
from yaml import load as yaml_load
from .protocol import SUPPORT_SCHEMA
from .utils import get_parent_tree, parse_value_string_by_schema, parse_schema_as_cmd
from .entrypoint_base import SchemaType, PropertyType, EntryPointABC
class EntryPoint(EntryPointABC):
epilog = ""
usage = ""
_name = ""
parent = None
schema = None
verify_schema = True
default_config_file_paths: List[str] = []
config_file_only_get_need = True
load_all_config_file = False
env_prefix = None
parse_env = True
argparse_check_required = False
argparse_noflag: Optional[str] = None
_config_file_parser_map: Dict[str, Callable[[Path], Dict[str, Any]]] = {}
def _check_schema(self) -> None:
if self.schema is not None:
try:
validate(instance=self.schema, schema=SUPPORT_SCHEMA)
except Exception as e:
warnings.warn(str(e))
raise e
# sys.exit(1)
def __init__(self, *,
description: Optional[str] = None,
epilog: Optional[str] = None,
usage: Optional[str] = None,
name: Optional[str] = None,
schema: Optional[SchemaType] = None,
verify_schema: Optional[bool] = None,
default_config_file_paths: Optional[List[str]] = None,
config_file_only_get_need: Optional[bool] = None,
load_all_config_file: Optional[bool] = None,
env_prefix: Optional[str] = None,
parse_env: Optional[bool] = None,
argparse_check_required: Optional[bool] = None,
argparse_noflag: Optional[str] = None,
config_file_parser_map: Optional[Dict[str, Callable[[Path], Dict[str, Any]]]] = None,
main: Optional[Callable[..., None]] = None
) -> None:
"""初始化时定义配置.
使用这一特性我们就可以不用继承也可以定义节点了.这一特性比较适合用于那些非叶子节点.
Args:
description (Optional[str], optional): 节点命令行的描述信息. Defaults to None.
epilog (Optional[str], optional): 节点命令行的epilog信息. Defaults to None.
usage (Optional[str], optional): 节点命令行的usage信息. Defaults to None.
name (Optional[str], optional): 节点的name属性. Defaults to None.
schema (Optional[Dict[str, Union[str, List[str], Dict[str, Dict[str, Any]]]]], optional): 节点的校验json schema. Defaults to None.
verify_schema (Optional[bool], optional): 配置是否校验schema. Defaults to None.
default_config_file_paths (Optional[List[str]], optional): 默认配置文件路径列表. Defaults to None.
config_file_only_get_need (Optional[bool], optional): 设置是否在加载配置文件时只获取schema中定义的内容. Defaults to None.
load_all_config_file (Optional[bool], optional): 是否尝试加载全部指定的配置文件路径下的配置文件. Defaults to None.
env_prefix (Optional[str], optional): 设置环境变量的前缀. Defaults to None.
parse_env (Optional[bool], optional): 设置是否加载环境变量. Defaults to None.
argparse_check_required (Optional[bool], optional): 设置是否构造叶子节点命令行时指定schema中定义为必须的参数项为必填项. Defaults to None.
argparse_noflag (Optional[str], optional): 指定命令行中noflag的参数. Defaults to None.
config_file_parser_map (Optional[Dict[str, Callable[[Path], Dict[str, Any]]]], optional): 设置自定义配置文件名的解析映射. Defaults to None.
main (Optional[Callable[..., None]], optional): 设置作为入口的执行函数. Defaults to None.
"""
if description is not None:
self.__doc__ = description
if epilog is not None:
self.epilog = epilog
if usage is not None:
self.usage = usage
if name is not None:
self._name = name
if schema is not None:
self.schema = schema
if verify_schema is not None:
self.verify_schema = verify_schema
if default_config_file_paths is not None:
self.default_config_file_paths = default_config_file_paths
if config_file_only_get_need is not None:
self.config_file_only_get_need = config_file_only_get_need
if load_all_config_file is not None:
self.load_all_config_file = load_all_config_file
if env_prefix is not None:
self.env_prefix = env_prefix
if parse_env is not None:
self.parse_env = parse_env
if argparse_check_required is not None:
self.argparse_check_required = argparse_check_required
if argparse_noflag is not None:
self.argparse_noflag = argparse_noflag
if config_file_parser_map is not None:
self._config_file_parser_map = config_file_parser_map
if config_file_parser_map is not None:
self._config_file_parser_map = config_file_parser_map
if main is not None:
self._main = main
else:
self._main = None
self._check_schema()
self._subcmds = {}
self._config = {}
@ property
def name(self) -> str:
return self._name if self._name else self.__class__.__name__.lower()
@ property
def prog(self) -> str:
parent_list = get_parent_tree(self)
parent_list.append(self.name)
return " ".join(parent_list)
@ property
def config(self) -> Dict[str, Any]:
return deepcopy(self._config)
def regist_subcmd(self, subcmd: EntryPointABC) -> None:
subcmd.parent = self
self._subcmds[subcmd.name] = subcmd
def regist_sub(self, subcmdclz: type, **kwargs: Any) -> EntryPointABC:
instance = subcmdclz(**kwargs)
self.regist_subcmd(instance)
return instance
def as_main(self, func: Callable[..., None]) -> Callable[..., None]:
@ functools.wraps(func)
def warp(*args: Any, **kwargs: Any) -> None:
return func(*args, **kwargs)
self._main = warp
return warp
def __call__(self, argv: Sequence[str]) -> None:
if not self.usage:
if len(self._subcmds) == 0:
self.usage = f"{self.prog} [options]"
else:
self.usage = f"{self.prog} [subcmd]"
parser = argparse.ArgumentParser(
prog=self.prog,
epilog=self.epilog,
description=self.__doc__,
usage=self.usage)
if len(self._subcmds) != 0:
if self.epilog:
epilog = self.epilog
else:
epilog = "子命令描述:\n"
rows = []
for subcmd, ins in self._subcmds.items():
if ins.__doc__ and isinstance(ins.__doc__, str):
desc = ins.__doc__.splitlines()[0]
rows.append(f"{subcmd}\t{desc}")
else:
rows.append(f"{subcmd}")
epilog += "\n".join(rows)
parser = argparse.ArgumentParser(
prog=self.prog,
epilog=epilog,
description=self.__doc__,
usage=self.usage,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.pass_args_to_sub(parser, argv)
else:
parser = argparse.ArgumentParser(
prog=self.prog,
epilog=self.epilog,
description=self.__doc__,
usage=self.usage)
self.parse_args(parser, argv)
def pass_args_to_sub(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> None:
scmds = list(self._subcmds.keys())
scmdss = ",".join(scmds)
parser.add_argument('subcmd', help=f'执行子命令,可选的子命有{scmdss}')
args = parser.parse_args(argv[0:1])
if self._subcmds.get(args.subcmd):
self._subcmds[args.subcmd](argv[1:])
else:
print(f'未知的子命令 `{argv[0]}`')
parser.print_help()
sys.exit(1)
def _make_commandline_parse_by_schema(self, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
if self.schema is None:
raise AttributeError("此处不该被执行")
else:
properties: Dict[str, PropertyType] = self.schema.get("properties", {})
requireds: List[str] = self.schema.get("required", [])
for key, prop in properties.items():
required = False
noflag = False
if self.argparse_noflag == key:
noflag = True
else:
if self.argparse_check_required and key in requireds:
required = True
parser = parse_schema_as_cmd(key, prop, parser, required=required, noflag=noflag)
return parser
def _parse_commandline_args_by_schema(self,
parser: argparse.ArgumentParser,
argv: Sequence[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
if self.schema:
parser = self._make_commandline_parse_by_schema(parser)
args = parser.parse_args(argv)
config_file_res: Dict[str, Any] = {}
cmd_res: Dict[str, Any] = {}
for key, value in vars(args).items():
if key == "config":
if value:
p = Path(value)
if not p.is_file():
warnings.warn(f"{str(p)}不是文件")
continue
if p.suffix == ".json":
config_file_res = self.parse_json_configfile_args(p)
elif p.suffix == ".yml":
config_file_res = self.parse_yaml_configfile_args(p)
else:
warnings.warn(f"跳过不支持的配置格式的文件{str(p)}")
continue
else:
continue
else:
try:
if self.schema is not None and self.schema.get("properties") is not None:
if self.schema["properties"].get(key) is not None and self.schema["properties"][key]["type"] == "boolean":
if value is False:
if self.schema.get("required") is None:
value = None
else:
if key not in self.schema["required"]:
value = None
except Exception as e:
warnings.warn(str(e))
finally:
if value is not None:
cmd_res.update({
key: value
})
return config_file_res, cmd_res
def parse_commandline_args(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""解析命令行获得参数
Args:
parser (argparse.ArgumentParser): 命令行解析器
argv (Sequence[str]): 命令行参数序列
Returns:
Tuple[Dict[str, Any], Dict[str, Any]]: 命令行指定配置文件获得的参数,其他命令行参数获得的参数
"""
parser.add_argument("-c", "--config", type=str, help='指定配置文件位置')
return self._parse_commandline_args_by_schema(parser, argv)
def _parse_env_args(self, key: str, info: PropertyType) -> Any:
if self.env_prefix:
env_prefix = self.env_prefix.upper()
else:
env_prefix = self.prog.replace(" ", "_").upper()
key = key.replace("-", "_")
env = os.environ.get(f"{env_prefix}_{key.upper()}")
if not env:
env = None
else:
env = parse_value_string_by_schema(info, env)
return env
def parse_env_args(self) -> Dict[str, Any]:
properties: Dict[str, Any]
if self.schema and self.parse_env:
properties = self.schema.get("properties", {})
result = {}
for key, info in properties.items():
value = self._parse_env_args(key, info)
if value is not None:
result.update({
key: value
})
return result
else:
return {}
def file_config_filter(self, file_param: Dict[str, Any]) -> Dict[str, Any]:
"""根据条件筛选从文件中获得的参数.
Args:
file_param (Dict[str, Any]): 文件中获得的全量参数
Returns:
Dict[str, Any]: 筛选过后的参数
"""
if self.config_file_only_get_need and self.schema is not None and self.schema.get("properties") is not None:
needs = list(self.schema["properties"].keys())
res = {}
for key in needs:
if file_param.get(key) is not None:
res[key] = file_param.get(key)
return res
return file_param
def parse_json_configfile_args(self, p: Path) -> Dict[str, Any]:
with open(p, "r", encoding="utf-8") as f:
result = json.load(f)
return result
def parse_yaml_configfile_args(self, p: Path) -> Dict[str, Any]:
with open(p, "r", encoding="utf-8") as f:
result = yaml_load(f)
return result
def regist_config_file_parser(self, file_name: str) -> Callable[[Callable[[Path], Dict[str, Any]]], Callable[[Path], Dict[str, Any]]]:
def decorate(func: Callable[[Path], Dict[str, Any]]) -> Callable[[Path], Dict[str, Any]]:
@functools.wraps(func)
def wrap(p: Path) -> Dict[str, Any]:
return func(p)
self._config_file_parser_map[file_name] = func
return wrap
return decorate
def parse_configfile_args(self) -> Dict[str, Any]:
if not self.default_config_file_paths:
return {}
if not self.load_all_config_file:
for p_str in self.default_config_file_paths:
p = Path(p_str)
if p.is_file():
parfunc = self._config_file_parser_map.get(p.name)
if parfunc:
return self.file_config_filter(parfunc(p))
if p.suffix == ".json":
return self.file_config_filter(self.parse_json_configfile_args(p))
elif p.suffix == ".yml":
return self.file_config_filter(self.parse_yaml_configfile_args(p))
else:
warnings.warn(f"跳过不支持的配置格式的文件{str(p)}")
else:
warnings.warn("配置文件的指定路径都不可用.")
return {}
else:
result = {}
for p_str in self.default_config_file_paths:
p = Path(p_str)
if p.is_file():
parfunc = self._config_file_parser_map.get(p.name)
if parfunc:
result.update(self.file_config_filter(parfunc(p)))
else:
if p.suffix == ".json":
result.update(self.file_config_filter(self.parse_json_configfile_args(p)))
elif p.suffix == ".yml":
result.update(self.file_config_filter(self.parse_yaml_configfile_args(p)))
else:
warnings.warn(f"跳过不支持的配置格式的文件{str(p)}")
return result
def validat_config(self) -> bool:
if self.verify_schema:
if self.schema and self.config:
try:
validate(instance=self.config, schema=self.schema)
except Exception as e:
warnings.warn(str(e))
return False
else:
return True
else:
warnings.warn("必须有schema和config才能校验.")
return True
else:
return True
def do_main(self) -> None:
if self._main is None:
print("未注册main函数")
sys.exit(1)
else:
config = self.config
self._main(**config)
def parse_default(self) -> Dict[str, Any]:
if self.schema:
prop = self.schema.get("properties")
if prop:
                return {key: sch.get("default") for key, sch in prop.items() if sch.get("default") is not None}
return {}
return {}
def parse_args(self, parser: argparse.ArgumentParser, argv: Sequence[str]) -> None:
"""解析获取配置
配置的加载顺序为: 指定路径的配置文件->环境变量->命令行参数
在加载完配置后校验是否满足schema的要求.
Args:
parser (argparse.ArgumentParser): 命令行参数解析器
argv (Sequence[str]): 命令行参数序列
"""
        # schema defaults
default_config = self.parse_default()
self._config.update(default_config)
        # config files at the default paths
file_config = self.parse_configfile_args()
self._config.update(file_config)
        # config file specified on the command line
cmd_config_file_config, cmd_config = self.parse_commandline_args(parser, argv)
self._config.update(cmd_config_file_config)
        # environment variables
env_config = self.parse_env_args()
self._config.update(env_config)
        # command-line arguments
self._config.update(cmd_config)
if self.validat_config():
self.do_main()
else:
sys.exit(1)
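

# --- Added usage sketch; illustration only, not part of the original file. ---
# A minimal example of how the parse flow above might be driven. It assumes the
# surrounding class is schema_entry's EntryPoint base class and that it can be
# instantiated without constructor arguments; the subclass name, schema fields
# and CLI values below are hypothetical.
if __name__ == "__main__":

    class DemoEntry(EntryPoint):
        """Demo entry point."""

        schema = {
            "$schema": "http://json-schema.org/draft-07/schema#",
            "type": "object",
            "properties": {
                "retries": {"type": "integer", "default": 3},
                "verbose": {"type": "boolean"},
            },
            "required": ["retries"],
        }

    demo = DemoEntry()

    @demo.as_main
    def run(retries: int, verbose: bool = False) -> None:
        print(f"retries={retries} verbose={verbose}")

    # Sources are merged as: defaults -> config files -> env vars -> CLI flags,
    # after which run(**config) is invoked.
    demo(sys.argv[1:])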
| 0.511229 | 0.127707 |
import warnings
import argparse
from typing import List, Dict, Any, Optional
from .entrypoint_base import EntryPointABC, PropertyType, ItemType
def _get_parent_tree(c: EntryPointABC, result: List[str]) -> None:
if c.parent:
result.append(c.parent.name)
_get_parent_tree(c.parent, result)
else:
return
def get_parent_tree(c: EntryPointABC) -> List[str]:
"""获取父节点树.
Args:
c (EntryPoint): 节点类
Returns:
List[str]: 父节点树
"""
result_list: List[str] = []
_get_parent_tree(c, result_list)
return list(reversed(result_list))
def parse_value_string_by_schema(schema: Any, value_str: str) -> Any:
"""根据schema的定义解析字符串的值.
Args:
schema (Dict[str, Any]): 描述字符串值的json schema字典.
value_str (str): 待解析的字符串.
Returns:
Any: 字段的值
"""
t = schema.get("type")
if not t:
return value_str
elif t == "string":
return value_str
elif t == "number":
return float(value_str)
elif t == "integer":
return int(value_str)
elif t == "boolean":
value_u = value_str.upper()
        return value_u == "TRUE"
elif t == "array":
item_info = schema.get("items")
if not item_info:
return value_str.split(",")
else:
return [parse_value_string_by_schema(item_info, i) for i in value_str.split(",")]
else:
warnings.warn(f"不支持的数据类型{t}")
return value_str
def _argparse_base_handdler(_type: Any, key: str, schema: PropertyType, parser: argparse.ArgumentParser, *,
required: bool = False, noflag: bool = False) -> argparse.ArgumentParser:
kwargs: Dict[str, Any] = {}
kwargs.update({
"type": _type
})
_enum = schema.get("enum")
if _enum:
kwargs.update({
"choices": _enum
})
_description = schema.get("description")
if _description:
kwargs.update({
"help": _description
})
if required:
kwargs.update({
"required": required
})
if noflag:
parser.add_argument(f"{key}", **kwargs)
else:
if schema.get("title"):
short = schema["title"][0]
parser.add_argument(f"-{short}", f"--{key}", **kwargs)
else:
parser.add_argument(f"--{key}", **kwargs)
return parser
def _argparse_number_handdler(key: str, schema: PropertyType, parser: argparse.ArgumentParser, *,
required: bool = False, noflag: bool = False) -> argparse.ArgumentParser:
return _argparse_base_handdler(float, key, schema, parser, required=required, noflag=noflag)
def _argparse_string_handdler(key: str, schema: PropertyType, parser: argparse.ArgumentParser, *,
required: bool = False, noflag: bool = False) -> argparse.ArgumentParser:
return _argparse_base_handdler(str, key, schema, parser, required=required, noflag=noflag)
def _argparse_integer_handdler(key: str, schema: PropertyType, parser: argparse.ArgumentParser, *,
required: bool = False, noflag: bool = False) -> argparse.ArgumentParser:
return _argparse_base_handdler(int, key, schema, parser, required=required, noflag=noflag)
def _argparse_boolean_handdler(key: str, schema: PropertyType, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
kwargs: Dict[str, Any] = {}
kwargs.update({
"action": "store_true"
})
_description = schema.get("description")
if _description:
kwargs.update({
"help": _description
})
if schema.get("title"):
short = schema["title"][0]
parser.add_argument(f"-{short}", f"--{key}", **kwargs)
else:
parser.add_argument(f"--{key}", **kwargs)
return parser
def _argparse_array_handdler(key: str, schema: PropertyType, parser: argparse.ArgumentParser, *,
noflag: bool = False) -> argparse.ArgumentParser:
sub_schema: Optional[ItemType] = schema.get("items")
if sub_schema is None:
print("array params must have sub schema items")
return parser
sub_type = sub_schema.get("type")
if sub_type not in ("number", "string", "integer"):
print("array params item type must in number,string,integer")
return parser
kwargs: Dict[str, Any] = {}
if sub_type == "number":
kwargs.update({
"type": float
})
elif sub_type == "string":
kwargs.update({
"type": str
})
elif sub_type == "integer":
kwargs.update({
"type": int
})
_default = schema.get("default")
if _default:
kwargs.update({
"default": _default
})
_description = schema.get("description")
if _description:
kwargs.update({
"help": _description
})
_enum = sub_schema.get("enum")
if _enum:
kwargs.update({
"choices": _enum
})
if noflag:
kwargs.update({
"nargs": "+"
})
parser.add_argument(f"{key}", **kwargs)
else:
kwargs.update({
"action": "append"
})
if schema.get("title"):
short = schema["title"][0]
parser.add_argument(f"-{short}", f"--{key}", **kwargs)
else:
parser.add_argument(f"--{key}", **kwargs)
return parser
def parse_schema_as_cmd(key: str, schema: PropertyType, parser: argparse.ArgumentParser, *,
required: bool = False, noflag: bool = False) -> argparse.ArgumentParser:
"""根据字段的模式解析命令行行为
Args:
key (str): 字段名
schema (PropertyType): 字段的模式
parser (argparse.ArgumentParser): 添加命令行解析的解析器
Returns:
argparse.ArgumentParser: 命令行的解析器
"""
_type = schema.get("type")
if not _type:
return parser
if not noflag:
key = key.replace("_", "-")
if _type == "number":
return _argparse_number_handdler(key, schema, parser, required=required, noflag=noflag)
elif _type == "string":
return _argparse_string_handdler(key, schema, parser, required=required, noflag=noflag)
elif _type == "integer":
return _argparse_integer_handdler(key, schema, parser, required=required, noflag=noflag)
elif _type == "boolean":
return _argparse_boolean_handdler(key, schema, parser)
elif _type == "array":
return _argparse_array_handdler(key, schema, parser, noflag=noflag)
else:
print(f"未支持的类型{_type}")
return parser
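

# --- Added usage sketch; illustration only, not part of the original file. ---
# Combines the helpers above: build an argparse parser from a single property
# schema, then coerce a comma-separated string with an array schema. The
# "log_level" field and its enum values are made-up examples.
if __name__ == "__main__":
    demo_parser = argparse.ArgumentParser(prog="demo")
    demo_parser = parse_schema_as_cmd(
        "log_level",
        {"type": "string", "enum": ["debug", "info", "warn"], "title": "l"},
        demo_parser,
    )
    # the key is exposed as -l/--log-level because of the `title` short flag
    print(vars(demo_parser.parse_args(["--log-level", "info"])))
    # environment-variable style strings are coerced according to the schema
    print(parse_value_string_by_schema({"type": "array", "items": {"type": "integer"}}, "1,2,3"))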
|
schema-entry
|
/schema_entry-0.1.5.tar.gz/schema_entry-0.1.5/schema_entry/utils.py
|
utils.py
|
| 0.707304 | 0.324597 |
SUPPORT_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"properties": {
"type": "object",
"minProperties": 1,
"additionalProperties": False,
"patternProperties": {
r"^\w+$": {
"oneOf": [
{
"type": "object",
"additionalProperties": False,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "boolean"
},
"default": {
"type": "boolean",
},
"const": {
"type": "boolean"
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": r"^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": False,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "string"
},
"default": {
"type": "string",
},
"const": {
"type": "string"
},
"enum": {
"type": "array",
"items": {
"type": "string"
}
},
"maxLength": {
"type": "integer",
"minimum": 0
},
"minLength": {
"type": "integer",
"minimum": 0
},
"pattern": {
"type": "string"
},
"format": {
"type": "string"
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": r"^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": False,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "number"
},
"default": {
"type": "number",
},
"const": {
"type": "number"
},
"enum": {
"type": "array",
"items": {
"type": "number"
}
},
"maximum": {
"type": "number",
},
"exclusiveMaximum": {
"type": "number",
},
"minimum": {
"type": "number",
},
"exclusiveMinimum": {
"type": "number",
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": r"^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": False,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "integer"
},
"default": {
"type": "integer",
},
"const": {
"type": "integer"
},
"enum": {
"type": "array",
"items": {
"type": "integer"
}
},
"maximum": {
"type": "integer",
},
"exclusiveMaximum": {
"type": "integer",
},
"minimum": {
"type": "integer",
},
"exclusiveMinimum": {
"type": "integer",
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": r"^[a-b]|[d-z]$"
}
}
},
{
"type": "object",
"additionalProperties": False,
"required": ["type"],
"properties": {
"type": {
"type": "string",
"const": "array"
},
"default": {
"type": "array",
"items": {
"type": ["string", "number", "integer"]
}
},
"items": {
"type": "object",
"required": ["type"],
"additionalProperties":False,
"properties": {
"type": {
"type": "string",
"enum": ["string", "number", "integer"]
},
"enum":{
"type": "array",
"items": {
"type": ["string", "number", "integer"]
}
}
}
},
"description": {
"type": "string"
},
"$comment": {
"type": "string"
},
"title": {
"type": "string",
"pattern": r"^[a-b]|[d-z]$"
}
}
}
]
}
}
},
"type": {
"type": "string",
"const": "object"
},
"required": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": ["properties", "type"]
}
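

# --- Added usage sketch; illustration only, not part of the original file. ---
# SUPPORT_SCHEMA is itself a JSON Schema, so a candidate entry-point schema can
# be checked against it with the jsonschema package; the candidate below is a
# made-up example.
if __name__ == "__main__":
    from jsonschema import validate

    candidate = {
        "type": "object",
        "properties": {
            "retries": {"type": "integer", "default": 3},
        },
        "required": ["retries"],
    }
    validate(instance=candidate, schema=SUPPORT_SCHEMA)  # raises ValidationError if unsupported
    print("candidate schema is supported")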
|
schema-entry
|
/schema_entry-0.1.5.tar.gz/schema_entry-0.1.5/schema_entry/protocol.py
|
protocol.py
|
| 0.683947 | 0.381018 |
import io
import unicodecsv as csv
from schema_induction.type import Type
from schema_induction.union_type import UnionType
def dump_csv(array, delimiter=','):
f = io.BytesIO()
writer = csv.writer(f, delimiter=delimiter, quoting=csv.QUOTE_ALL)
writer.writerow(array)
return f.getvalue()[:-2].decode('utf-8')
class PrimitiveType(Type):
"""
Represent primitive type such as string, float, int
"""
MAX_N_KEEP_VALUE = 7
def __init__(self, type=None):
if type is None:
self.type = None
else:
self.set_type(type)
# implement using dictionary so we can keep the order
self.possible_values = {}
def set_type(self, type):
assert type in {'float', 'int', 'str', 'bool'}, type
self.type = type
return self
def add_value(self, value):
if len(self.possible_values) > PrimitiveType.MAX_N_KEEP_VALUE:
return self
if value not in self.possible_values:
self.possible_values[value] = 1
return self
def is_mergeable(self, another):
"""
test if two PRIMITIVE TYPEs can be merged
:param another: PrimitiveType
:return: bool
"""
if not isinstance(another, PrimitiveType):
return False
return self.type == another.type or {self.type, another.type} == {'float', 'int'}
def optimize(self):
"""
@inherit
"""
return self
def merge(self, another):
"""
@inherit
"""
if isinstance(another, PrimitiveType):
if self.type == another.type:
for value in another.possible_values.keys():
self.add_value(value)
return self
if {self.type, another.type} == {'float', 'int'}:
self.type = 'float'
return self
return UnionType().add(self).add(another)
def to_string(self, shift=0, indent=0):
"""
@inherit
"""
if len(self.possible_values) < PrimitiveType.MAX_N_KEEP_VALUE:
string = '%s{%s}' % (self.type, dump_csv(list(self.possible_values.keys())))
return string
else:
return self.type
class NoneType(Type):
"""
Represent null
"""
def optimize(self):
"""
@inherit
"""
return self
def merge(self, another):
"""
@inherit
"""
if isinstance(another, NoneType):
return self
return another.merge(self)
def to_string(self, shift=0, indent=0):
"""
@inherit
"""
return "null"
|
schema-induction
|
/schema_induction-1.1.7-py3-none-any.whl/schema_induction/primitive_type.py
|
primitive_type.py
|
| 0.605333 | 0.376279 |
from pyspark.sql.types import (
    BooleanType,
    DoubleType,
    IntegerType,
    StringType,
    StructField,
    StructType,
)
def bronze_machine_raw():
"""
fill in
"""
schema = StructType(
[
StructField("N8j2", DoubleType(), True),
StructField("42mj", DoubleType(), True),
StructField("6tk3", BooleanType(), True),
]
)
return schema
def silver_machine_raw():
"""
fill in
"""
schema = StructType(
[
StructField("N8j2", DoubleType(), True),
StructField("42mj", DoubleType(), True),
StructField("6tk3", BooleanType(), True),
StructField("engine_type", StringType(), True),
]
)
return schema
def bronze_sap_bseg():
"""
fill in
"""
schema = StructType(
[
StructField("MANDT", StringType(), True),
StructField("BUKRS", StringType(), True),
StructField("BELNR", StringType(), True),
StructField("GJAHR", DoubleType(), True),
StructField("BUZEI", DoubleType(), True),
]
)
return schema
def bronze_sales():
"""
fill in
"""
schema = StructType(
[
StructField("ORDERNUMBER", IntegerType(), True),
StructField("SALE", DoubleType(), True),
StructField("ORDERDATE", StringType(), True),
StructField("STATUS", BooleanType(), True),
StructField("CUSTOMERNAME", StringType(), True),
StructField("ADDRESSLINE", StringType(), True),
StructField("CITY", StringType(), True),
StructField("STATE", StringType(), True),
StructField("STORE", StringType(), True),
]
)
return schema
def gold_sales():
"""
fill in
"""
schema = StructType(
[
StructField("CUSTOMERNAME", StringType(), True),
StructField("AVG", DoubleType(), True),
StructField("TOTAL", DoubleType(), True),
]
)
return schema
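

# --- Added usage sketch; illustration only, not part of the original file. ---
# Building a DataFrame with one of the schemas above enforces column names and
# types instead of relying on inference; the Spark session setup is generic
# boilerplate, not part of this package.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("schema-demo").getOrCreate()
    empty_sales = spark.createDataFrame([], schema=bronze_sales())
    empty_sales.printSchema()  # shows the enforced bronze sales schema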
|
schema-jobs
|
/schema_jobs-0.1.15-py3-none-any.whl/schema_jobs/jobs/utility/schema/schemas.py
|
schemas.py
|
| 0.794505 | 0.413714 |
|PyPI| |Docs|
.. |PyPI| image:: https://img.shields.io/pypi/v/schema_learn.svg
:target: https://pypi.org/project/schema_learn
.. |Docs| image:: https://readthedocs.org/projects/schema-multimodal/badge/?version=latest
:target: https://schema-multimodal.readthedocs.io/en/latest/?badge=latest
Schema - Analyze and Visualize Multimodal Single-Cell Data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Schema is a Python library for the synthesis and integration of heterogeneous single-cell modalities.
**It is designed for the case where the modalities have all been assayed for the same cells simultaneously.**
Here are some of the analyses that you can do with Schema:
- infer cell types jointly across modalities.
- perform spatial transcriptomic analyses to identify differentially-expressed genes in cells that display a specific spatial characteristic.
- create informative t-SNE & UMAP visualizations of multimodal data by infusing information from other modalities into scRNA-seq data.
Schema offers support for the incorporation of more than two modalities and can also simultaneously handle batch effects and metadata (e.g., cell age).
Schema is based on a metric learning approach and formulates the modality-synthesis problem as a quadratic programming problem. Its Python-based implementation can efficiently process large datasets without the need of a GPU.
Read the documentation_.
We encourage you to report issues at our `Github page`_; you can also create pull requests there to contribute your enhancements.
If Schema is useful in your research, please consider citing our papers: `Genome Biology (2021)`_, with preprint in `bioRxiv (2019)`_.
.. _documentation: https://schema-multimodal.readthedocs.io/en/latest/overview.html
.. _bioRxiv (2019): http://doi.org/10.1101/834549
.. _Github page: https://github.com/rs239/schema
.. _Genome Biology (2021): https://genomebiology.biomedcentral.com/articles/10.1186/s13059-021-02313-2
|
schema-learn
|
/schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/README.rst
|
README.rst
|
| 0.842313 | 0.715362 |
Installation
============
We recommend Python v3.6 or higher.
PyPI, Virtualenv, or Anaconda
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can use ``pip`` (or ``pip3``):
.. code-block:: bash
pip install schema_learn
Docker
~~~~~~
Schema has been designed to be compatible with the popular and excellent single-cell Python package, Scanpy_.
We recommend installing the Docker image recommended_ by Scanpy maintainers and then using ``pip``, as described above, to install Schema in it.
.. _Scanpy: http://scanpy.readthedocs.io
.. _recommended: https://scanpy.readthedocs.io/en/1.4.4.post1/installation.html#docker
|
schema-learn
|
/schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/docs/source/installation.rst
|
installation.rst
|
| 0.70028 | 0.461138 |
Overview
========
Schema is a general algorithm for integrating heterogeneous data
modalities. While it has been specially designed for multi-modal
single-cell biological datasets, it should work in other multi-modal
contexts too.
.. image:: ../_static/Schema-Overview-v2.png
:width: 648
:alt: 'Overview of Schema'
Schema is designed for single-cell assays where multiple modalities have
been *simultaneously* measured for each cell. For example, this could be
simultaneously-assayed ("paired") scRNA-seq and scATAC-seq data, or a
spatial-transcriptomics dataset (e.g. 10x Visium, Slideseq or
STARmap). Schema can also be used with just a scRNA-seq dataset where some
per-cell metadata is available (e.g., cell age, donor information, batch
ID etc.). With this data, Schema can help perform analyses like:
* Characterize cells that look similar transcriptionally but differ
epigenetically.
* Improve cell-type inference by combining RNA-seq and ATAC-seq data.
* In spatially-resolved single-cell data, identify differentially
expressed genes (DEGs) specific to a spatial pattern.
* **Improved visualizations**: tune t-SNE or UMAP plots to more clearly
arrange cells along a desired manifold.
* Simultaneously account for batch effects while also integrating
other modalities.
Intuition
~~~~~~~~~
To integrate multi-modal data, Schema takes a `metric learning`_
approach. Each modality is interpreted as a multi-dimensional space, with
observations mapped to points in it (**B** in figure above). We associate
a distance metric with each modality: the metric reflects what it means
for cells to be similar under that modality. For example, Euclidean
distances between L2-normalized expression vectors are a proxy for
coexpression. Across the three graphs in the figure (**B**), the dashed and
dotted lines indicate distances between the same pairs of
observations.
Schema learns a new distance metric between points, informed
jointly by all the modalities. In Schema, we start by designating one
high-confidence modality as the *primary* (i.e., reference) and the
remaining modalities as *secondary*--- we've found scRNA-seq to typically
be a good choice for the primary modality. Schema transforms the
primary-modality space by scaling each of its dimensions so that the
distances in the transformed space have a higher (or lower, if desired!)
correlation with corresponding distances in the secondary modalities
(**C,D** in the figure above). You can choose any distance metric for the
secondary modalities, though the primary modality's metric needs to be Euclidean.
The primary modality can be pre-transformed by
a `PCA`_ or `NMF`_ transformation so that the scaling occurs in this latter
space; this can often be more powerful because the major directions of variance are
now axis-aligned and hence can be scaled independently.
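To make the scaling intuition concrete, here is a small conceptual sketch (it is not the Schema implementation and does not use the Schema API): up-weighting a primary-modality dimension that carries the secondary signal increases the correlation between the two modalities' pairwise distances. The arrays and the weight vector below are synthetic, made-up examples.
.. code-block:: Python
    import numpy as np
    from scipy.spatial.distance import pdist
    from scipy.stats import pearsonr
    rng = np.random.default_rng(0)
    primary = rng.normal(size=(100, 5))                           # e.g. 5 NMF factors
    secondary = primary[:, :1] + 0.1 * rng.normal(size=(100, 1))  # tracks factor 0
    w = np.array([3.0, 1.0, 1.0, 1.0, 1.0])                       # up-weight the informative factor
    before = pearsonr(pdist(primary), pdist(secondary))[0]
    after = pearsonr(pdist(primary * w), pdist(secondary))[0]
    print(before, after)                                          # `after` should be the larger value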
Advantages
~~~~~~~~~~
In generating a shared-space representation, Schema is similar to
statistical approaches like CCA (canonical correlation analysis) and
deep-learning methods like autoencoders (which map multiple
representations into a shared latent space). Each of these approaches offers a
different set of trade-offs. Schema, for instance, requires the output
space to be a linear transformation of the primary modality. Doing so
allows it to offer the following advantages:
* **Interpretability**: Schema identifies which features of the primary
modality were important in maximizing its agreement with the secondary
modalities. If the features corresponded to genes (or principal components),
this can directly be interpreted in terms of gene importances.
* **Regularization**: single-cell data can be sparse and noisy. As we
discuss in our `paper`_, unconstrained approaches like CCA and
autoencoders seek to maximize the alignment between modalities without
any other considerations. In doing so, they can pick up on artifacts
rather than true biology. A key feature of Schema is its
  regularization: it enforces a limit on the distortion of the primary
modality, making sure that the final result remains biologically
informative.
* **Speed and flexibility**: Schema is based on a fast quadratic
programming approach that allows for substantial flexibility in the
number of secondary modalities supported and their relative weights. Also, arbitrary
distance metrics (i.e., kernels) are supported for the secondary modalities.
Quick Start
~~~~~~~~~~~
Install via pip
.. code-block:: bash
pip install schema_learn
**Example**: correlate gene expression with developmental stage. We demonstrate use with Anndata objects here.
.. code-block:: Python
import schema
adata = schema.datasets.fly_brain() # adata has scRNA-seq data & cell age
sqp = schema.SchemaQP( min_desired_corr=0.99, # require 99% agreement with original scRNA-seq distances
params= {'decomposition_model': 'nmf', 'num_top_components': 20} )
#correlate the gene expression with the 'age' parameter
mod_X = sqp.fit_transform( adata.X, # primary modality
[ adata.obs['age'] ], # list of secondary modalities
[ 'numeric' ] ) # datatypes of secondary modalities
gene_wts = sqp.feature_weights() # get a ranking of gene wts important to the alignment
Paper & Code
~~~~~~~~~~~~
Schema is described in the paper *Schema: metric learning enables
interpretable synthesis of heterogeneous single-cell modalities*
(http://doi.org/10.1101/834549)
Source code available at: https://github.com/rs239/schema
.. _metric learning: https://en.wikipedia.org/wiki/Similarity_learning#Metric_learning
.. _paper: https://doi.org/10.1101/834549
.. _PCA: https://en.wikipedia.org/wiki/Principal_component_analysis
.. _NMF: https://en.wikipedia.org/wiki/Non-negative_matrix_factorization
|
schema-learn
|
/schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/docs/source/overview.rst
|
overview.rst
|
| 0.969389 | 0.838217 |
Datasets
=========
Ageing *Drosophila* brain
~~~~~~~~~~~~~~~~~~~~~~~~~
This is sourced from `Davie et al.`_ (*Cell* 2018, `GSE 107451`_) and contains scRNA-seq data from a collection of fly brain cells along with each cell's age (in days). It is a useful dataset for exploring a common scenario in multi-modal integration: scRNA-seq data aligned to a 1-dimensional secondary modality. Please see the `example in Visualization`_ where this dataset is used.
.. code-block:: Python
import schema
adata = schema.datasets.fly_brain()
Paired RNA-seq and ATAC-seq from mouse kidney cells
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is sourced from `Cao et al.`_ (*Science* 2018, `GSE 117089`_) and contains paired RNA-seq and ATAC-seq data from a collection of mouse kidney cells. The AnnData object provided here has some additional processing done to remove very low count genes and peaks. This is a useful dataset for the case where one of the modalities is very sparse (here, ATAC-seq). Please see the example in `Paired RNA-seq and ATAC-seq`_ where this dataset is used.
.. code-block:: Python
import schema
adata = schema.datasets.scicar_mouse_kidney()
.. _Davie et al.: https://doi.org/10.1016/j.cell.2018.05.057
.. _GSE 107451: https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE107451
.. _example in Visualization: https://schema-multimodal.readthedocs.io/en/latest/visualization/index.html#ageing-fly-brain
.. _Cao et al.: https://doi.org/10.1126/science.aau0730
.. _GSE 117089: https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE117089
.. _Paired RNA-seq and ATAC-seq: https://schema-multimodal.readthedocs.io/en/latest/recipes/index.html#paired-rna-seq-and-atac-seq
|
schema-learn
|
/schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/docs/source/datasets.rst
|
datasets.rst
|
| 0.840259 | 0.695861 |
Data Integration Examples
=========================
API-usage Examples
~~~~~~~~~~~~~~~~~~
*Note*: The code snippets below show how Schema could be used for hypothetical datasets and illustrates the API usage. In the next sections (`Paired RNA-seq and ATAC-seq`_, `Paired-Tag`_) and in `Visualization`_, we describe worked examples where we also provide the dataset to try things on. We are working to add more datasets.
**Example** Correlate gene expression 1) positively with ATAC-Seq data and 2) negatively with Batch information.
.. code-block:: Python
atac_50d = sklearn.decomposition.TruncatedSVD(50).fit_transform( atac_cnts_sp_matrix)
sqp = SchemaQP(min_corr=0.9)
# df is a pd.DataFrame, srs is a pd.Series, -1 means try to disagree
mod_X = sqp.fit_transform( df_gene_exp, # gene expression dataframe: rows=cells, cols=genes
[ atac_50d, batch_id], # batch_info can be a pd.Series or np.array. rows=cells
[ 'feature_vector', 'categorical'],
[ 1, -1]) # maximize combination of (agreement with ATAC-seq + disagreement with batch_id)
gene_wts = sqp.feature_weights() # get gene importances
**Example** Correlate gene expression with three secondary modalities.
.. code-block:: Python
sqp = SchemaQP(min_corr = 0.9) # lower than the default, allowing greater distortion of the primary modality
sqp.fit( adata.X,
[ adata.obs['col1'], adata.obs['col2'], adata.obsm['Matrix1'] ],
[ "categorical", "numeric", "feature_vector"]) # data types of the three modalities
mod_X = sqp.transform( adata.X) # transform
gene_wts = sqp.feature_weights() # get gene importances
Paired RNA-seq and ATAC-seq
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here, we integrate simultaneously assayed RNA- and ATAC-seq data from `Cao et al.'s`_ sci-CAR study of mouse kidney cells. Specifically, we'll try to do better cell-type inference by considering both RNA-seq and ATAC-seq data simultaneously. The original study has ground-truth labels for most of the cell types, allowing us to benchmark automatically-computed clusters (generated by Leiden clustering here). As we'll show, a key challenge here is that the ATAC-seq data is very sparse and noisy. Naively incorporating it with RNA-seq can actually be counter-productive--- the joint clustering from a naive approach can actually have a *lower* overlap with the ground truth labels than if we were to just use RNA-seq-based clustering.
*Note*: This example involves generating Leiden clusters; you will need to install the *igraph* and *leidenalg* Python packages if you want to use them:
.. code-block:: bash
pip install igraph
pip install leidenalg
Let's start by getting the data. We have preprocessed the original dataset, done some basic cleanup, and put it into an AnnData object that you can download. Please remember to also cite the original study if you use this dataset.
.. code-block:: Python
import schema
adata = schema.datasets.scicar_mouse_kidney()
print(adata.shape, adata.uns['atac.X'].shape)
print(adata.uns.keys())
As you see, we have stored the ATAC data (as a sparse numpy matrix) in the .uns slots of the anndata object. Also look at the *adata.obs* dataframe which has t-SNE coordinates, ground-truth cell type names (as assigned by Cao et al.) and cluster colors etc. You'll notice that some cells don't have ground truth assignments. When evaluating, we'll skip those.
To use the ATAC-seq data, we reduce its dimensionality to 50. Instead of PCA, we apply *TruncatedSVD* since the ATAC counts matrix is sparse.
.. code-block:: Python
svd2 = sklearn.decomposition.TruncatedSVD(n_components= 50, random_state = 17)
H2 = svd2.fit_transform(adata.uns["atac.X"])
Next, we run Schema. We choose RNA-seq as the primary modality because 1) it has lower noise than ATAC-seq, and 2) we want to investigate which of its features (i.e., genes) are important during the integration. We will first perform an NMF transformation on the RNA-seq data. For the secondary modality, we'll use the dimensionality-reduced ATAC-seq. We require a positive correlation between the two (`secondary_data_wt_list = [1]` below). **Importantly, we force Schema to generate a low-distortion transformation**: the correlation of distances between the original RNA-seq space and the transformed space, `min_desired_corr`, is required to be >99%. This low-distortion capability of Schema is crucial here, as we'll demonstrate.
In the `params` settings below, the number of randomly sampled point-pairs has been bumped up to 5M (from default=2M). It helps with the accuracy and doesn't cost too much computationally. We also turned off `do_whiten` (default=1, i.e., true). When `do_whiten=1`, Schema first rescales the PCA/NMF transformation so that each axis has unit variance; typically, doing so is "nice" from a theoretical/statistical perspective. But it can interfere with downstream analyses (e.g., Leiden clustering here).
.. code-block:: Python
sqp99 = schema.SchemaQP(0.99, mode='affine', params= {"decomposition_model":"nmf",
"num_top_components":50,
"do_whiten": 0,
"dist_npairs": 5000000})
dz99 = sqp99.fit_transform(adata.X, [H2], ['feature_vector'], [1])
Let's look at the feature weights. Since we ran the code in 'affine' mode, the raw weights from the quadratic program will correspond to the 50 NMF factors. Three of these factors seem to stand out; most other weights are quite low.
.. code-block:: Python
plt.plot(sqp99._wts)
.. image:: ../_static/schema_atacrna_demo_wts1.png
:width: 300
Schema offers a helper function to convert these NMF (or PCA) feature weights to gene weights. The function offers a few ways of doing so, but the default is to simply average the loadings across the top-k factors:
.. code-block:: Python
v99 = sqp99.feature_weights("top-k-loading", 3)
Let's do a dotplot to visualize how the expression of these genes varies by cell name. We plot the top 10 genes by importance here.
.. code-block:: Python
dfv99 = pd.DataFrame({"gene": adata.var_names, "v":v99}).sort_values("v", ascending=False).reset_index(drop=True)
sc.pl.dotplot(adata, dfv99.gene.head(10).tolist(),'cell_name_short', figsize=(8,6))
As you'll notice, these genes seem to be differentially expressed in PT cells, PBA and Ki-67+ cells. Essentially, these are cell types where ATAC-seq data was most informative. As we'll see shortly, it is precisely in these cells where Schema is able to offer the biggest improvement.
.. image:: ../_static/schema_atacrna_demo_dotplot1.png
:width: 500
For a comparison later, let's also do a Schema run without a strong distortion control. Below, we set the `min_desired_corr` parameter to 0.10 (i.e., 10%). Thus, the ATAC-seq data will get to influence the transformation a lot more.
.. code-block:: Python
sqp10 = schema.SchemaQP(0.10, mode='affine', params= {"decomposition_model":"nmf",
"num_top_components":50,
"do_whiten": 0,
"dist_npairs": 5000000})
dz10 = sqp10.fit_transform(adata.X, [H2], ['feature_vector'], [1])
Finally, let's do Leiden clustering of the RNA-seq, ATAC-seq, and the two Schema runs. We'll compare the cluster assignments to the ground truth cell labels. Intuitively, by combining RNA-seq and ATAC-seq, one should be able to get a more biologically accurate clustering. We visually evaluate the clusterings below; in the paper, we've supplemented this with more quantitative estimates.
.. code-block:: Python
import schema.utils
fcluster = schema.utils.get_leiden_clustering #feel free to try your own clustering algo
ld_cluster_rna = fcluster(sqp99._decomp_mdl.transform(adata.X.todense()))
ld_cluster_atac = fcluster(H2)
ld_cluster_sqp99 = fcluster(dz99)
ld_cluster_sqp10 = fcluster(dz10)
.. code-block:: Python
x = adata.obs.tsne_1
y = adata.obs.tsne_2
idx = adata.obs.rgb.apply(lambda s: isinstance(s,str) and '#' in s).values.tolist() #skip nan cells
fig, axs = plt.subplots(3,2, figsize=(10,15))
axs[0][0].scatter(x[idx], y[idx], c=adata.obs.rgb.values[idx], s=1)
axs[0][0].set_title('Ground Truth')
axs[0][1].scatter(x[idx], y[idx], c=adata.obs.rgb.values[idx], s=1, alpha=0.1)
axs[0][1].set_title('Ground Truth Labels')
for c in np.unique(adata.obs.cell_name_short[idx]):
if c=='nan': continue
cx,cy = x[adata.obs.cell_name_short==c].mean(), y[adata.obs.cell_name_short==c].mean()
axs[0][1].text(cx,cy,c,fontsize=10)
axs[1][0].scatter(x[idx], y[idx], c=ld_cluster_rna[idx], cmap='tab20b', s=1)
axs[1][0].set_title('RNA-seq')
axs[1][1].scatter(x[idx], y[idx], c=ld_cluster_atac[idx], cmap='tab20b', s=1)
axs[1][1].set_title('ATAC-seq')
axs[2][0].scatter(x[idx], y[idx], c=ld_cluster_sqp99[idx], cmap='tab20b', s=1)
axs[2][0].set_title('Schema-99%')
axs[2][1].scatter(x[idx], y[idx], c=ld_cluster_sqp10[idx], cmap='tab20b', s=1)
axs[2][1].set_title('Schema-10%')
for ax in np.ravel(axs): ax.axis('off')
Below, we show the figures in a 3x2 panel of t-SNE plots. In the first row, the left panel shows the cells colored by ground-truth cell types; the right panel is basically the same but lists the cell types explicitly. The next row shows cells colored by RNA- or ATAC-only clustering. Notice how noisy the ATAC-only clustering is! This is not a bug in our analysis-- less than 0.3% of ATAC count matrix entries are non-zero and the sparsity of the ATAC data makes it difficult to produce high-quality cell type estimates.
The third row shows cells colored by Schema-based clustering at 99% (left) and 10% (right) `min_desired_corr` thresholds. With Schema at a low-distortion setting (i.e., `min_desired_corr = 99%`), notice that PT cells and Ki-67+ cells, circled in red, are getting more correctly classified now. This improvement of the Schema-implied clustering over the RNA-seq-only clustering can be quantified by measuring the overlap with ground truth cell grouping, as we do in the paper.
**This is a key strength of Schema** --- even with a modality that is sparse and noisy (like ATAC-seq here), it can nonetheless extract something of value because the constraint on distortion of the primary modality acts as a regularizer. This is also why we recommend that your highest-confidence modality be set as the primary. Lastly, as a demonstration, if we relax the distortion constraint by setting `min_desired_corr = 10%`, you'll notice that the noise in the ATAC-seq data does swamp out the RNA-seq signal. With an unconstrained approach (e.g., CCA or some deep learning approaches), this ends up being a major challenge.
.. image:: ../_static/schema_atacrna_demo_tsne1.png
:width: 600
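If you prefer a quantitative check over a visual one, one simple option is to compute the Adjusted Rand Index of each clustering against the ground-truth labels, restricted to cells that have a ground-truth assignment. This is just a sketch along those lines (the paper uses additional metrics):
.. code-block:: Python
import numpy as np
import sklearn.metrics
labels = adata.obs.cell_name_short.astype(str).values
keep = np.array(idx) & (labels != 'nan')   # reuse the idx mask from the plotting code above
for name, clust in [('RNA-seq', ld_cluster_rna), ('ATAC-seq', ld_cluster_atac),
                    ('Schema-99%', ld_cluster_sqp99), ('Schema-10%', ld_cluster_sqp10)]:
    print(name, sklearn.metrics.adjusted_rand_score(labels[keep], np.asarray(clust)[keep]))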
Paired-Tag
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here we synthesize simultaneously assayed RNA-seq, ATAC-seq and histone-modification data at a single-cell resolution, from the Paired-Tag protocol described in `Zhu et al.’s study`_ of adult mouse frontal cortex and hippocampus (Nature Methods, 2021). This is a fascinating dataset with five different histone modifications assayed separately (3 repressors and 2 activators), in addition to RNA-seq and ATAC-seq. As in the original study, we consider each of the histone modifications as a separate modality, implying a hepta-modal assay!
Interestingly, though, the modalities are available only in pairwise combinations with RNA-seq: some cells were assayed for H3K4me1 & RNA-seq while another set of cells provided ATAC-seq & RNA-seq data, and so on. Here’s the overall distribution of non-RNA-seq modalities across 64,849 cells.
.. image:: ../_static/schema_paired-tag_data-dist.png
:width: 300
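For reference, once you have the preprocessed AnnData object introduced below, this tally can be recomputed directly from the row counts of the stored SVD matrices; a quick sketch:
.. code-block:: Python
counts = {c: adata.uns['SVD_' + c].shape[0] for c in adata.uns['sec_modalities']}
print(counts)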
This organization of data might be tricky to integrate with a method that expects *each* modality to be available for *all* cells and has difficulty accommodating partial coverage of some modalities. Of course, you could always fall back to an integrative approach that treats each modality’s cell population as independent, but then you miss out on the simultaneously-multimodal aspect of this data.
With Schema, you can have your cake and eat it too! We do 6 two-way integrations (RNA-seq as the primary modality against each of the other modalities) using the subsets of cells available in each case. Schema’s interpretable and linear framework makes it easy to combine these. Once Schema computes the optimal transformation of RNA-seq that aligns it with, say, ATAC-seq, we apply that transformation to the entire RNA-seq dataset, including cells that do *not* have ATAC-seq data.
Such full-dataset extensions of the pairwise syntheses can then be stacked together. Doing Leiden clustering on the result lets us infer cell types by integrating information from all modalities. As we will show below, Schema's synthesis helps improve the quality of cell type inference over what you could get from RNA-seq alone. Similarly, for feature selection, Schema's computed feature weights from each two-way synthesis can be averaged to get the genes important to the overall synthesis. In a completely automated fashion, and without any knowledge of the tissue’s source or biology, we’ll find that the genes Schema identifies as important turn out to be very relevant to neuronal function and disease. Ready for more?
First, you will need the data. The original is available on GEO (`GSE152020`_) but the individual modalities are huge (e.g., the ATAC-seq peak-counts are in a 14,095 x 2,443,832 sparse matrix!). This is not unusual--- epigenetic modalities are typically very sparse (we discuss why this matters in `Paired RNA-seq and ATAC-seq`_). As a preprocessing step, we performed singular value decompositions (SVD) of these modalities and also reduced the RNA-seq data to its 4,000 highly variable genes. An AnnData object with this preprocessing is available here (please remember to also cite the original study if you use this dataset):
.. code-block:: bash
wget http://cb.csail.mit.edu/cb/schema/adata_dimreduced_paired-tag.pkl
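For context, the preprocessing was along the following lines. This is only a sketch of the kind of dimensionality reduction involved; the variable names are placeholders, and the exact parameters behind the released object may differ.
.. code-block:: Python
import scanpy as sc
import sklearn.decomposition
# keep the 4,000 most highly variable genes of the RNA-seq data (adata_rna: hypothetical RNA-seq AnnData)
sc.pp.highly_variable_genes(adata_rna, n_top_genes=4000, subset=True)
# 50-dimensional SVD of a sparse epigenetic peak-count matrix (peak_counts_sparse: hypothetical cells x peaks matrix)
svd = sklearn.decomposition.TruncatedSVD(n_components=50)
svd_coords = svd.fit_transform(peak_counts_sparse)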
Let's load it in:
.. code-block:: Python
import schema, pickle, anndata, sklearn.metrics
import scanpy as sc
# you may need to change the file location as appropriate to your setup
adata = pickle.load(open("adata_dimreduced_paired-tag.pkl", "rb"))
print (adata.shape,
[(c, adata.uns['SVD_'+c].shape) for c in adata.uns['sec_modalities']])
As you see, we have stored the 50-dimensional SVDs of the secondary modalities in the :code:`.uns` slots of the anndata object. Also look at the :code:`adata.obs` dataframe which has UMAP coordinates, ground-truth cell type names (as assigned by Zhu et al.) etc.
We now do Schema runs for the 6 two-way modality combinations, with RNA-seq as the primary in each run. Each run will also store the transformation on the entire 64,849-cell RNA-seq dataset and also store the gene importances.
.. code-block:: Python
d_rna = adata.X.todense()
desc2transforms = {}
for desc in adata.uns['sec_modalities']:
print(desc)
# we mostly stick with the default settings, explicitly listed here for clarity
sqp = schema.SchemaQP(0.99, mode='affine', params= {"decomposition_model": 'pca',
"num_top_components":50,
"do_whiten": 0, # this is different from default
"dist_npairs": 5000000})
# extract the relevant subset
idx1 = adata.obs['rowidx'][adata.uns["SVD_"+desc].index]
prim_d = d_rna[idx1,:]
sec_d = adata.uns["SVD_"+desc].values
print(len(idx1), prim_d.shape, sec_d.shape)
sqp.fit(prim_d, [sec_d], ['feature_vector'], [1]) # fit on the idx1 subset...
dz = sqp.transform(d_rna) # ...then transform the full RNA-seq dataset
desc2transforms[desc] = (sqp, dz, idx1, sqp.feature_weights(k=3))
**Cell type inference:** In each of the 6 runs above, :code:`dz` is a 64,849 x 50 matrix. We can horizontally stack these matrices into a 64,849 x 300 matrix that represents the transformation of RNA-seq data informed simultaneously by all 6 secondary modalities.
.. code-block:: Python
a6Xpca = np.hstack([dz for _,dz,_,_ in desc2transforms.values()])
adata_schema = anndata.AnnData(X=a6Xpca, obs=adata.obs)
print (adata_schema.shape)
We then perform Leiden clustering on the original and transformed data, computing the overlap with expert marker-gene-based annotation by Zhu et al.
.. code-block:: Python
# original
sc.pp.pca(adata)
sc.pp.neighbors(adata)
sc.tl.leiden(adata)
# Schema-transformed
# since Schema had already done PCA before it transformed, let's stick with its raw output
sc.pp.neighbors(adata_schema, use_rep='X')
sc.tl.leiden(adata_schema)
# we'll do plots etc. with the original AnnData object
adata.obs['leiden_schema'] = adata_schema.obs['leiden'].values
# compute overlap with manual cell type annotations
ari_orig = sklearn.metrics.adjusted_rand_score(adata.obs.Annotation, adata.obs.leiden)
ari_schema= sklearn.metrics.adjusted_rand_score(adata.obs.Annotation, adata.obs.leiden_schema)
print ("ARI: Orig: {} With Schema: {}".format( ari_orig, ari_schema))
As you can see, the ARI with Schema improved from 0.437 (using only RNA-seq) to 0.446 (using all modalities). Single-cell epigenetic modalities are very sparse, making it difficult to distinguish signal from noise. However, Schema's constrained approach allows it to extract signal from these secondary modalities nonetheless, a task which has otherwise been challenging (see the related discussion in our `paper`_ or in `Paired RNA-seq and ATAC-seq`_).
Before we plot these clusters, we'll relabel the Schema-based Leiden clusters to match the labeling of RNA-seq only Leiden clusters; this will make their color schemes consistent. You will need to install the Python package *munkres* (:code:`pip install munkres`) for the related computation.
.. code-block:: Python
import munkres
list1 = adata.obs['leiden'].astype(int).tolist()
list2 = adata.obs['leiden_schema'].astype(int).tolist()
contmat = sklearn.metrics.cluster.contingency_matrix(list1, list2)
map21 = dict(munkres.Munkres().compute(contmat.max() - contmat))
adata.obs['leiden_schema_relabeled'] = [str(map21[a]) for a in list2]
adata.obs['Schema_reassign'] = [('Same' if (map21[a]==a) else 'Different') for a in list2]
for c in ['Annotation','Annot2', 'leiden', 'leiden_schema_relabeled', 'Schema_reassign']:
sc.pl.umap(adata, color=c)
.. image:: ../_static/schema_paired-tag_umap-row1.png
:width: 800
.. image:: ../_static/schema_paired-tag_umap-row2.png
:width: 650
It's also interesting to identify cells where the cluster assignments changed after multi-modal synthesis. As you can see, it's only in certain cell types where the epigenetic data suggests a different clustering than the primary RNA-seq modality.
.. image:: ../_static/schema_paired-tag_umap-row3.png
:width: 300
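To put numbers on this, you could tabulate the fraction of reassigned cells within each annotated cell type; a sketch:
.. code-block:: Python
frac_changed = (adata.obs.assign(changed=(adata.obs['Schema_reassign'] == 'Different'))
                         .groupby('Annotation')['changed'].mean()
                         .sort_values(ascending=False))
print(frac_changed.head(10))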
**Gene set identification:** The feature importances output by Schema here identify the genes whose expression variations best agree with epigenetic variations in these tissues. We first aggregate the feature importances across the 6 two-way runs:
.. code-block:: Python
df_genes = pd.DataFrame({'gene': adata.var.symbol})
for desc, (_,_,_,wts) in desc2transforms.items():
df_genes[desc] = wts
df_genes['avg_wt'] = df_genes.iloc[:,1:].mean(axis=1)
df_genes = df_genes.sort_values('avg_wt', ascending=False).reset_index(drop=True)
gene_list = df_genes.gene.values
sc.pl.umap(adata, color= gene_list[:6], gene_symbols='symbol', color_map='plasma', frameon=False, ncols=3)
.. image:: ../_static/schema_paired-tag_gene_plots.png
:width: 800
Many of the top genes identified by Schema (e.g., `Erbb4`_, `Npas3`_, `Zbtb20`_, `Luzp2`_) are known to be relevant to neuronal function or disease. Note that all of this fell out of the synthesis directly--- we didn't do any differential expression analysis against an external background or provide the method some other indication that the data is from brain tissue.
We also did a GO enrichment analysis (via `Gorilla`_) of the top 100 genes by Schema weight. Here are the significant hits (FDR q-val < 0.1). Again, most GO terms relate to neuronal development, activity, and communication:
.. csv-table:: GO Enrichment of Top Schema-identified genes
:file: ../_static/schema_paired-tag_go-annot.csv
:widths: 20, 80
:header-rows: 0
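If you'd like to redo this kind of analysis, one option is to write out the top 100 genes (and the full ranked list, e.g., as a background set) and paste them into GOrilla; a sketch:
.. code-block:: Python
with open('schema_top100_genes.txt', 'w') as f:
    f.write('\n'.join(map(str, df_genes.gene.head(100))))
with open('schema_all_genes_ranked.txt', 'w') as f:
    f.write('\n'.join(map(str, df_genes.gene)))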
.. _Visualization: https://schema-multimodal.readthedocs.io/en/latest/visualization/index.html#ageing-fly-brain
.. _Cao et al.'s: https://science.sciencemag.org/content/361/6409/1380/
.. _paper: https://genomebiology.biomedcentral.com/articles/10.1186/s13059-021-02313-2
.. _Erbb4: https://www.ncbi.nlm.nih.gov/gene/2066
.. _Npas3: https://www.ncbi.nlm.nih.gov/gene/64067
.. _Zbtb20: https://www.ncbi.nlm.nih.gov/gene/26137
.. _Luzp2: https://www.ncbi.nlm.nih.gov/gene/338645
.. _Gorilla: http://cbl-gorilla.cs.technion.ac.il/
.. _Zhu et al.’s study: https://www.nature.com/articles/s41592-021-01060-3
.. _GSE152020: https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE152020
[Source: schema-learn, /schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/docs/source/recipes/index.rst]
Visualization Examples
======================
Popular tools like `t-SNE`_ and `UMAP`_ can produce intuitive and appealing
visualizations. However, since they perform opaque non-linear transformations of
the input data, it can be unclear how to "tweak" the visualization to
accentuate a specific aspect of the input. Also, it can sometimes
be difficult to understand which features (e.g., genes) of the input mattered most
in producing the plot.
Schema can help with both of these issues. With scRNA-seq data as the primary
modality, Schema can transform it by infusing additional information into it
while preserving a high level of similarity with the original data. When
t-SNE/UMAP are applied on the transformed data, we have found that the
broad contours of the original plot are preserved while the new
information is also reflected. Furthermore, the relative weight of the new data
can be calibrated using the `min_desired_corr` parameter of Schema.
Ageing fly brain
~~~~~~~~~~~~~~~~
Here, we tweak the UMAP plot of `Davie et al.'s`_ ageing fly brain data to
accentuate cell age.
First, let's get the data and do a regular UMAP plot.
.. code-block:: Python
import schema
import scanpy as sc
import anndata
def sc_umap_pipeline(bdata, fig_suffix):
sc.pp.pca(bdata)
sc.pp.neighbors(bdata, n_neighbors=15)
sc.tl.umap(bdata)
sc.pl.umap(bdata, color='age', color_map='coolwarm', save='_{}.png'.format(fig_suffix) )
.. code-block:: Python
adata = schema.datasets.fly_brain() # adata has scRNA-seq data & cell age
sc_umap_pipeline(adata, 'regular')
This should produce a plot like this, where cells are colored by age.
.. image:: ../_static/umap_flybrain_regular_r3.png
:width: 300
Next, we apply Schema to infuse cell age into the scRNA-seq data, while
preserving a high level of correlation with the original scRNA-seq
distances. We start by requiring a minimum of 99.9% correlation with the
original scRNA-seq distances:
.. code-block:: Python
sqp = schema.SchemaQP( min_desired_corr=0.999, # require 99.9% agreement with original scRNA-seq distances
params= {'decomposition_model': 'nmf', 'num_top_components': 20} )
mod999_X = sqp.fit_transform( adata.X, [ adata.obs['age'] ], ['numeric']) # correlate gene expression with the age
sc_umap_pipeline( anndata.AnnData( mod999_X, obs=adata.obs), '0.999' )
We then loosen the `min_desired_corr` constraint a tiny bit, to 99%:
.. code-block:: Python
sqp.reset_mincorr_param(0.99) # we can re-use the NMF transform (which takes more time than the quadratic program)
mod990_X = sqp.fit_transform( adata.X, [ adata.obs['age'] ], ['numeric'])
sc_umap_pipeline( anndata.AnnData( mod990_X, obs=adata.obs), '0.990' )
diffexp_gene_wts = sqp.feature_weights() # get a ranking of genes important to the alignment
These runs should produce a pair of plots like the ones shown below. Note
how cell age progressively stands out as a characteristic feature. We also
encourage you to try out other choices of `min_desired_corr` (e.g., 0.90
or 0.7); these will show the effect of allowing greater distortions of the
primary modality.
.. image:: ../_static/umap_flybrain_schema0.999-0.99_r3.png
:width: 620
This example also illustrates Schema's interpretability. The variable
`diffexp_gene_wts` identifies the genes most important to aligning
scRNA-seq with cell age. As we describe in our `paper`_, these genes turn
out to be differentially expressed between young cells and old cells.
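For example, to see which genes are driving the alignment, you can rank them by weight; a short sketch (this assumes `adata.var_names` holds the gene names, in the same order as the columns of `adata.X`):
.. code-block:: Python
import pandas as pd
gene_ranking = (pd.DataFrame({'gene': adata.var_names, 'wt': diffexp_gene_wts})
                  .sort_values('wt', ascending=False))
print(gene_ranking.head(20))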
.. _Davie et al.'s: https://doi.org/10.1016/j.cell.2018.05.057
.. _paper: https://doi.org/10.1101/834549
.. _t-SNE: https://lvdmaaten.github.io/tsne/
.. _UMAP: https://umap-learn.readthedocs.io/en/latest/
[Source: schema-learn, /schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/docs/source/visualization/index.rst]
### Preamble
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import scanpy as sc
## local paths etc. You'll want to change these
DATASET_DIR = "/scratch1/rsingh/work/schema/data/tasic-nature"
import sys; sys.path.extend(['/scratch1/rsingh/tools','/afs/csail.mit.edu/u/r/rsingh/work/schema/'])
```
#### Import Schema and tSNE
We use fast-tsne here, but use whatever you like
```
from fast_tsne import fast_tsne
from schema import SchemaQP
```
### Get example data
* This data is from Tasic et al. (Nature 2018, DOI: 10.1038/s41586-018-0654-5 )
* Shell commands to get our copy of the data:
* wget http://schema.csail.mit.edu/datasets/Schema_demo_Tasic2018.h5ad.gz
* gunzip Schema_demo_Tasic2018.h5ad.gz
* The processing of raw data here broadly followed the steps in Kobak & Berens, https://www.biorxiv.org/content/10.1101/453449v1
* The gene expression data has been count-normalized and log-transformed.
```
adata = sc.read(DATASET_DIR + "/" + "Schema_demo_Tasic2018.h5ad")
```
### Schema examples
* In all of what follows, the primary dataset is gene expression. The secondary datasets are 1) cluster IDs; and 2) cell-type "class" variables which correspond to superclusters (i.e. higher-level clusters) in the Tasic et al. paper.
#### Recommendations for parameter settings
* min_desired_corr and w_max_to_avg are the names for the hyperparameters $s_1$ and $\bar{w}$ from our paper
* *min_desired_corr*: at first, you should try a range of values for min_desired_corr (e.g., 0.99, 0.90, 0.50). This will give you a sense of what might work well for your data; after this, you can progressively narrow down your range. In typical use-cases, high min_desired_corr values (> 0.80) work best.
* *w_max_to_avg*: start by keeping this constraint very loose. This ensures that min_desired_corr remains the binding constraint. Later, as you get a better sense for min_desired_corr values, you can experiment with this too. A value of 100 is pretty high and should work well in the beginning (see the sketch below).
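As a quick sketch of how these two knobs fit together (the specific values below are just placeholders to experiment with, not recommendations for this dataset):
```
afx_loose = SchemaQP(min_desired_corr=0.90,  # typically the binding constraint
                     w_max_to_avg=100)       # loose; consider tightening (e.g., 2-5) only once min_desired_corr is dialed in
dx_loose = afx_loose.fit_transform(adata.X, [adata.obs["class"].values], ['categorical'])
```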
#### With PCA as change-of-basis, min_desired_corr=0.75, positive correlation with secondary datasets
```
afx = SchemaQP(0.75) # min_desired_corr is the only required argument.
dx_pca = afx.fit_transform(adata.X, # primary dataset
[adata.obs["class"].values], # one secondary dataset
['categorical'] #it has labels, i.e., is a categorical datatype
)
```
#### Similar to above, with NMF as change-of-basis and a different min_desired_corr
```
afx = SchemaQP(0.6, params= {"decomposition_model": "nmf", "num_top_components": 50})
dx_nmf = afx.fit_transform(adata.X,
[adata.obs["class"].values, adata.obs.cluster_id.values], # two secondary datasets
['categorical', 'categorical'], # both are labels
[10, 1] # relative wts
)
```
#### Now let's do something unusual. Perturb the data so it *disagrees* with cluster ids
```
afx = SchemaQP(0.97, # Notice that we bumped up the min_desired_corr so the perturbation is limited
params = {"decomposition_model": "nmf", "num_top_components": 50})
dx_perturb = afx.fit_transform(adata.X,
[adata.obs.cluster_id.values], # could have used both secondary datasets, but one's fine here
['categorical'],
[-1] # This is key: we are putting a negative wt on the correlation
)
```
### tSNE plots of the baseline and Schema transforms
```
fig = plt.figure(constrained_layout=True, figsize=(8,2), dpi=300)
tmps = {}
for i,p in enumerate([("Original", adata.X),
("PCA1 (pos corr)", dx_pca),
("NMF (pos corr)", dx_nmf),
("Perturb (neg corr)", dx_perturb)
]):
titlestr, dx1 = p
ax = fig.add_subplot(1,4,i+1, frameon=False)
tmps[titlestr] = dy = fast_tsne(dx1, seed=42)
ax = plt.gca()
ax.set_aspect('equal', adjustable='datalim')
ax.scatter(dy[:,0], dy[:,1], s=1, color=adata.obs['cluster_color'])
ax.set_title(titlestr)
ax.axis("off")
```
[Source: schema-learn, /schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/examples/Schema_demo.ipynb]
# Schema
Schema is a general algorithm for integrating heterogeneous data
modalities. It has been specially designed for multi-modal
single-cell biological datasets, but should work in other contexts too.
This version is based on a Quadratic Programming framework.
It is described in the paper
["*Schema: A general framework for integrating heterogeneous single-cell modalities*"](https://www.biorxiv.org/content/10.1101/834549v1).
The module provides a class SchemaQP that offers a sklearn-style fit+transform API for affine
transformations of input datasets, such that the transformed data is in agreement
with all the input datasets.
## Getting Started
The examples provided here are also available in the examples/Schema_demo.ipynb notebook
### Installation
```
pip install schema_learn
```
### Schema: A simple example
For the examples below, you'll also need scanpy (`pip install scanpy`).
We use `fast_tsne` below for visualization, but feel free to use your favorite tool.
#### Sample data
The data in the examples below is from the paper below; we thank the authors for making it available:
* Tasic et al. [*Shared and distinct transcriptomic cell types across neocortical areas*.](https://www.nature.com/articles/s41586-018-0654-5) Nature. 2018 Nov;563(7729):72-78. doi:10.1038/s41586-018-0654-5
We make available a processed subset of the data for demonstration and analysis.
Linux shell commands to get this data:
```
wget http://schema.csail.mit.edu/datasets/Schema_demo_Tasic2018.h5ad.gz
gunzip Schema_demo_Tasic2018.h5ad.gz
```
In Python, set the `DATASET_DIR` variable to the folder containing this file.
The processing of raw data here broadly followed the steps in Kobak & Berens
* https://www.biorxiv.org/content/10.1101/453449v1
The gene expression data has been count-normalized and log-transformed. Load with the commands
```python
import scanpy as sc
adata = sc.read(DATASET_DIR + "/" + "Schema_demo_Tasic2018.h5ad")
```
#### Sample Schema usage
Import Schema as:
```python
from schema import SchemaQP
afx = SchemaQP(0.75) # min_desired_corr is the only required argument.
dx_pca = afx.fit_transform(adata.X, # primary dataset
[adata.obs["class"].values], # just one secondary dataset
['categorical'] # has labels, i.e., is a categorical datatype
)
```
This uses PCA as the change-of-basis transform; requires a minimum correlation of 0.75 between the
primary dataset (gene expression) and the transformed dataset; and maximizes the
correlation of the transformed dataset with the secondary dataset: the supercluster
(i.e., higher-level cluster) labels produced during Tasic et al.'s hierarchical clustering.
### More Schema examples
* In all of what follows, the primary dataset is gene expression. The secondary datasets are 1) cluster IDs; and/or 2) cell-type "class" variables which correspond to superclusters (i.e. higher-level clusters) in the Tasic et al. paper.
#### With NMF (Non-negative Matrix Factorization) as change-of-basis, a different min_desired_corr, and two secondary datasets
```python
afx = SchemaQP(0.6, params= {"decomposition_model": "nmf", "num_top_components": 50})
dx_nmf = afx.fit_transform(adata.X,
[adata.obs["class"].values, adata.obs.cluster_id.values], # two secondary datasets
['categorical', 'categorical'], # both are labels
[10, 1] # relative wts
)
```
#### Now let's do something unusual. Perturb the data so it *disagrees* with cluster ids
```python
afx = SchemaQP(0.97, # Notice that we bumped up the min_desired_corr so the perturbation is limited
params = {"decomposition_model": "nmf", "num_top_components": 50})
dx_perturb = afx.fit_transform(adata.X,
[adata.obs.cluster_id.values], # could have used both secondary datasets, but one's fine here
['categorical'],
[-1] # This is key: we are putting a negative wt on the correlation
)
```
#### Recommendations for parameter settings
* `min_desired_corr` and `w_max_to_avg` are the names for the hyperparameters $s_1$ and $\bar{w}$ from our paper
* *min_desired_corr*: at first, you should try a range of values for `min_desired_corr` (e.g., 0.99, 0.90, 0.50). This will give you a sense of what might work well for your data; after this, you can progressively narrow down your range. In typical use-cases, high `min_desired_corr` values (> 0.80) work best.
* *w_max_to_avg*: start by keeping this constraint very loose. This ensures that `min_desired_corr` remains the binding constraint. Later, as you get a better sense for `min_desired_corr` values, you can experiment with this too. A value of 100 is pretty high and should work well in the beginning.
#### tSNE plots of the baseline and Schema transforms
```python
import matplotlib.pyplot as plt
from fast_tsne import fast_tsne  # or substitute your favorite t-SNE/UMAP implementation
fig = plt.figure(constrained_layout=True, figsize=(8,2), dpi=300)
tmps = {}
for i,p in enumerate([("Original", adata.X),
("PCA1 (pos corr)", dx_pca),
("NMF (pos corr)", dx_nmf),
("Perturb (neg corr)", dx_perturb)
]):
titlestr, dx1 = p
ax = fig.add_subplot(1,4,i+1, frameon=False)
tmps[titlestr] = dy = fast_tsne(dx1, seed=42)
ax = plt.gca()
ax.set_aspect('equal', adjustable='datalim')
ax.scatter(dy[:,0], dy[:,1], s=1, color=adata.obs['cluster_color'])
ax.set_title(titlestr)
ax.axis("off")
```
## API
### Constructor
Initializes the `SchemaQP` object
#### Parameters
`min_desired_corr`: `float` in [0,1)
The minimum desired correlation between squared L2 distances in the transformed space
and distances in the original space.
RECOMMENDED VALUES: At first, you should try a range of values (e.g., 0.99, 0.90, 0.50).
This will give you a sense of what might work well for your data.
After this, you can progressively narrow down your range.
In typical use-cases of large biological datasets,
high values (> 0.80) will probably work best.
`w_max_to_avg`: `float` >1, optional (default: 100)
Sets the upper-bound on the ratio of w's largest element to w's avg element.
Making it large will allow for more severe transformations.
RECOMMENDED VALUES: Start by keeping this constraint very loose; the default value (100) does
this, ensuring that min_desired_corr remains the binding constraint.
Later, as you get a better sense for the right min_desired_corr values
for your data, you can experiment with this too.
To really constrain this, set it in the (1-5] range, depending on
how many features you have.
`params`: `dict` of key-value pairs, optional (see defaults below)
Additional configuration parameters.
Here are the important ones:
* decomposition_model: "pca" or "nmf" (default=pca)
* num_top_components: (default=50) number of PCA (or NMF) components to use
when mode=="affine".
You can ignore the rest on your first pass; the default values are pretty reasonable:
* dist_npairs: (default=2000000). How many pt-pairs to use for computing pairwise distances.
value=None means compute exhaustively over all n*(n-1)/2 pt-pairs; not recommended for n>5000.
Otherwise, the given number of pt-pairs is sampled randomly. The sampling is done
in a way in which each point will be represented roughly equally.
* scale_mode_uses_standard_scaler: 1 or 0 (default=0), apply the standard scaler
in the scaling mode
* do_whiten: 1 or 0 (default=1). When mode=="affine", should the change-of-basis loadings
be made 1-variance?
`mode`: {`'affine'`, `'scale'`}, optional (default: `'affine'`)
Whether to perform a general affine transformation or just a scaling transformation
* 'scale' does scaling transformations only.
* 'affine' first does a mapping to PCA or NMF space (you can specify n_components)
It then does a scaling transform in that space and then maps everything back to the
regular space, the final space being an affine transformation
RECOMMENDED VALUES: 'affine' is the default, which uses PCA or NMF to do the change-of-basis.
You'll want 'scale' only in one of two cases:
1) You have some features on which you directly want Schema to compute
feature-weights.
2) You want to do a change-of-basis transform other than PCA or NMF. If so, you will
need to do that yourself and then call SchemaQP with the transformed
primary dataset with mode='scale'.
#### Returns
A SchemaQP object on which you can call fit(...), transform(...) or fit_transform(....).
### Fit
Given the primary dataset 'd' and a list of secondary datasets, fit a linear transformation (d*) of
'd' such that the correlation between squared pairwise distances in d* and those in secondary datasets
is maximized while the correlation between the primary dataset d and d* remains above
min_desired_corr
#### Parameters
`d`: A numpy 2-d `array`
The primary dataset (e.g. scanpy/anndata's .X).
The rows are observations (e.g., cells) and the cols are variables (e.g., gene expression).
The default distance measure computed is L2: sum((point1-point2)**2). See d0_dist_transform.
`secondary_data_val_list`: `list` of 1-d or 2-d numpy `array`s, each with same number of rows as `d`
The secondary datasets you want to align the primary data towards.
Columns in scanpy's .obs variables work well (just remember to use .values)
`secondary_data_type_list`: `list` of `string`s, each value in {'numeric','feature_vector','categorical'}
The list's length should match the length of secondary_data_val_list
* 'numeric' means you're giving one floating-pt value for each obs.
The default distance measure is L2: (point1-point2)**2
* 'feature_vector' means you're giving some multi-dimensional representation for each obs.
The default distance measure is L2: sum((point1-point2)**2)
* 'categorical' means that you are providing label information that should be compared for equality.
The default distance measure is: 1*(val1!=val2)
`secondary_data_wt_list`: `list` of `float`s, optional (default: `None`)
User-specified wts for each dataset. If 'None', the wts are 1.
If specified, the list's length should match the length of secondary_data_val_list.
NOTE: you can try to get a mapping that *disagrees* with a secondary dataset instead of *agreeing* with it.
To do so, pass in a negative number (e.g., -1) here. This works even if you have just one secondary
dataset
`d0`: A 1-d or 2-d numpy array, same number of rows as 'd', optional (default: `None`)
An alternative representation of the primary dataset.
HANDLE WITH CARE! Most likely, you don't need this parameter.
This is useful if you want to provide the primary dataset in two forms: one for transforming and
another one for computing pairwise distances to use in the QP constraint; if so, 'd' is used for the
former, while 'd0' is used for the latter
`d0_dist_transform`: a function that takes a non-negative float as input and
returns a non-negative float, optional (default: `None`)
HANDLE WITH CARE! Most likely, you don't need this parameter.
The transformation to apply on d or d0's L2 distances before using them for correlations.
`secondary_data_dist_transform`: `list` of functions, each taking a non-negative float and
returning a non-negative float, optional (default: `None`)
HANDLE WITH CARE! Most likely, you don't need this parameter.
The transformations to apply on secondary dataset's L2 distances before using them for correlations.
If specified, the length of the list should match that of secondary_data_val_list
#### Returns
None
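As an illustration of the call signature above, here is a minimal sketch with synthetic data; the
shapes, names, and weights are made up, and it assumes fit(...) takes its arguments positionally in
the order listed above (as the fit_transform examples below do).
import numpy as np
from schema import SchemaQP
d = np.random.rand(1000, 500)               # primary dataset: 1000 obs x 500 features
batch = np.random.randint(0, 3, size=1000)  # a categorical secondary dataset
score = np.random.rand(1000)                # a numeric secondary dataset
sqp = SchemaQP(0.9)
sqp.fit(d,
        [batch, score],                     # secondary_data_val_list
        ['categorical', 'numeric'],         # secondary_data_type_list
        [1.0, -0.5])                        # secondary_data_wt_list (negative weight = disagree)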
### Transform
Given a dataset `d`, apply the fitted transform to it.
#### Parameters
`d`: a numpy 2-d `array` with the same number of columns as the primary dataset `d` passed to fit(...)
The rows are observations (e.g., cells) and the cols are variables (e.g., gene expression).
#### Returns
a 2-d numpy array with the same shape as `d`
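Continuing the sketch above (illustrative only), the fitted transform can be applied directly, or
fit and transform can be combined via fit_transform, as in the examples below.
d_star = sqp.transform(d)
d_star2 = sqp.fit_transform(d, [batch, score], ['categorical', 'numeric'], [1.0, -0.5])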
|
schema-learn
|
/schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/deprecated/old_readme.md
|
old_readme.md
|
pip install schema_learn
wget http://schema.csail.mit.edu/datasets/Schema_demo_Tasic2018.h5ad.gz
gunzip Schema_demo_Tasic2018.h5ad.gz
import scanpy as sc
DATASET_DIR = "."  # directory containing the downloaded Schema_demo_Tasic2018.h5ad
adata = sc.read(DATASET_DIR + "/" + "Schema_demo_Tasic2018.h5ad")
from schema import SchemaQP
afx = SchemaQP(0.75) # min_desired_corr is the only required argument.
dx_pca = afx.fit_transform(adata.X, # primary dataset
[adata.obs["class"].values], # just one secondary dataset
['categorical'] # has labels, i.e., is a categorical datatype
)
afx = SchemaQP(0.6, params= {"decomposition_model": "nmf", "num_top_components": 50})
dx_nmf = afx.fit_transform(adata.X,
[adata.obs["class"].values, adata.obs.cluster_id.values], # two secondary datasets
['categorical', 'categorical'], # both are labels
[10, 1] # relative wts
)
afx = SchemaQP(0.97, # Notice that we bumped up the min_desired_corr so the perturbation is limited
params = {"decomposition_model": "nmf", "num_top_components": 50})
dx_perturb = afx.fit_transform(adata.X,
[adata.obs.cluster_id.values], # could have used both secondary datasets, but one's fine here
['categorical'],
[-1] # This is key: we are putting a negative wt on the correlation
)
import matplotlib.pyplot as plt  # needed for the plots below; fast_tsne is an external t-SNE helper that must be available
fig = plt.figure(constrained_layout=True, figsize=(8,2), dpi=300)
tmps = {}
for i,p in enumerate([("Original", adata.X),
("PCA1 (pos corr)", dx_pca),
("NMF (pos corr)", dx_nmf),
("Perturb (neg corr)", dx_perturb)
]):
titlestr, dx1 = p
ax = fig.add_subplot(1,4,i+1, frameon=False)
tmps[titlestr] = dy = fast_tsne(dx1, seed=42)
ax = plt.gca()
ax.set_aspect('equal', adjustable='datalim')
ax.scatter(dy[:,0], dy[:,1], s=1, color=adata.obs['cluster_color'])
ax.set_title(titlestr)
ax.axis("off")
| 0.780244 | 0.990505 |
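# fly_brain.py: compares t-SNE/UMAP embeddings of the fly brain data (GSE107451) before and
# after Schema transformations that incorporate cell age as a numeric secondary dataset,
# sweeping over several min_desired_corr values. Note the relative import of process.py:
# the script is meant to be run as a module from the repository root.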
from schema import SchemaQP
from anndata import AnnData
import numpy as np
import scanpy as sc
from .process import load_names
def load_meta(fname):
age, strain = [], []
with open(fname) as f:
f.readline() # Consume header.
for line in f:
fields = line.rstrip().split()
age.append(int(fields[4]))
strain.append(fields[3])
return np.array(age), np.array(strain)
if __name__ == '__main__':
[ X ], [ genes ], _ = load_names([ 'data/fly_brain/GSE107451' ], norm=False)
age, strain = load_meta('data/fly_brain/GSE107451/annotation.tsv')
# Only analyze wild-type strain.
adata = AnnData(X[strain == 'DGRP-551'])
adata.var['gene_symbols'] = genes
adata.obs['age'] = age[strain == 'DGRP-551']
# No Schema transformation.
sc.pp.pca(adata)
sc.tl.tsne(adata, n_pcs=50)
sc.pl.tsne(adata, color='age', color_map='coolwarm',
save='_flybrain_regular.png')
sc.pp.neighbors(adata, n_neighbors=15)
sc.tl.umap(adata)
sc.pl.umap(adata, color='age', color_map='coolwarm',
save='_flybrain_regular.png')
# Schema transformation to include age.
schema_corrs = [ 0.9999, 0.999, 0.99, 0.9, 0.7, 0.5 ]
for schema_corr in schema_corrs:
sqp = SchemaQP(
min_desired_corr=schema_corr,
w_max_to_avg=100,
params={
'decomposition_model': 'nmf',
'num_top_components': 20,
},
)
X = sqp.fit_transform(
adata.X,
[ adata.obs['age'].values, ],
[ 'numeric', ],
[ 1, ]
)
sdata = AnnData(X)
sdata.obs['age'] = age[strain == 'DGRP-551']
sc.tl.tsne(sdata)
sc.pl.tsne(sdata, color='age', color_map='coolwarm',
save='_flybrain_schema_corr{}_w100.png'.format(schema_corr))
sc.pp.neighbors(sdata, n_neighbors=15)
sc.tl.umap(sdata)
sc.pl.umap(sdata, color='age', color_map='coolwarm',
save='_flybrain_schema{}_w100.png'.format(schema_corr))
|
schema-learn
|
/schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/deprecated/old_examples/fly_brain/fly_brain.py
|
fly_brain.py
|
| 0.634204 | 0.388444 |
import gzip
import numpy as np
import os.path
import scipy.sparse
from scipy.sparse import csr_matrix, csc_matrix
from sklearn.preprocessing import normalize
import sys
MIN_TRANSCRIPTS = 0
def load_tab(fname, delim='\t'):
if fname.endswith('.gz'):
opener = gzip.open
else:
opener = open
with opener(fname, 'r') as f:
if fname.endswith('.gz'):
header = f.readline().decode('utf-8').rstrip().replace('"', '').split(delim)
else:
header = f.readline().rstrip().replace('"', '').split(delim)
X = []
genes = []
for i, line in enumerate(f):
if fname.endswith('.gz'):
line = line.decode('utf-8')
fields = line.rstrip().replace('"', '').split(delim)
genes.append(fields[0])
X.append([ float(f) for f in fields[1:] ])
if i == 0:
if len(header) == (len(fields) - 1):
cells = header
elif len(header) == len(fields):
cells = header[1:]
else:
raise ValueError('Incompatible header/value dimensions {} and {}'
.format(len(header), len(fields)))
return np.array(X).T, np.array(cells), np.array(genes)
def load_mtx(dname):
with open(dname + '/matrix.mtx', 'r') as f:
while True:
header = f.readline()
if not header.startswith('%'):
break
header = header.rstrip().split()
n_genes, n_cells = int(header[0]), int(header[1])
data, i, j = [], [], []
for line in f:
fields = line.rstrip().split()
data.append(float(fields[2]))
i.append(int(fields[1])-1)
j.append(int(fields[0])-1)
X = csr_matrix((data, (i, j)), shape=(n_cells, n_genes))
genes = []
with open(dname + '/genes.tsv', 'r') as f:
for line in f:
fields = line.rstrip().split()
genes.append(fields[1])
assert(len(genes) == n_genes)
return X, np.array(genes)
def load_h5(fname, genome='GRCh38'):
try:
import tables
except ImportError:
sys.stderr.write('Please install PyTables to read .h5 files: '
'https://www.pytables.org/usersguide/installation.html\n')
exit(1)
# Adapted from scanpy's read_10x_h5() method.
with tables.open_file(str(fname), 'r') as f:
try:
dsets = {}
for node in f.walk_nodes('/' + genome, 'Array'):
dsets[node.name] = node.read()
n_genes, n_cells = dsets['shape']
data = dsets['data']
if dsets['data'].dtype == np.dtype('int32'):
data = dsets['data'].view('float32')
data[:] = dsets['data']
X = csr_matrix((data, dsets['indices'], dsets['indptr']),
shape=(n_cells, n_genes))
genes = [ gene for gene in dsets['gene_names'].astype(str) ]
assert(len(genes) == n_genes)
assert(len(genes) == X.shape[1])
except tables.NoSuchNodeError:
raise Exception('Genome %s does not exist in this file.' % genome)
except KeyError:
raise Exception('File is missing one or more required datasets.')
return X, np.array(genes)
def process_tab(fname, min_trans=MIN_TRANSCRIPTS, delim='\t'):
X, cells, genes = load_tab(fname, delim=delim)
gt_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
if s >= min_trans ]
X = csr_matrix(X[gt_idx, :])
cells = cells[gt_idx]
if len(gt_idx) == 0:
print('Warning: 0 cells passed QC in {}'.format(fname))
if fname.endswith('.txt'):
cache_prefix = '.'.join(fname.split('.')[:-1])
elif fname.endswith('.txt.gz'):
cache_prefix = '.'.join(fname.split('.')[:-2])
elif fname.endswith('.tsv'):
cache_prefix = '.'.join(fname.split('.')[:-1])
elif fname.endswith('.tsv.gz'):
cache_prefix = '.'.join(fname.split('.')[:-2])
elif fname.endswith('.csv'):
cache_prefix = '.'.join(fname.split('.')[:-1])
elif fname.endswith('.csv.gz'):
cache_prefix = '.'.join(fname.split('.')[:-2])
else:
cache_prefix = fname
cache_fname = cache_prefix + '_tab.npz'
scipy.sparse.save_npz(cache_fname, X, compressed=False)
with open(cache_prefix + '_tab.genes.txt', 'w') as of:
of.write('\n'.join(genes) + '\n')
return X, cells, genes
def process_mtx(dname, min_trans=MIN_TRANSCRIPTS):
X, genes = load_mtx(dname)
gt_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
if s >= min_trans ]
X = X[gt_idx, :]
if len(gt_idx) == 0:
print('Warning: 0 cells passed QC in {}'.format(dname))
cache_fname = dname + '/tab.npz'
scipy.sparse.save_npz(cache_fname, X, compressed=False)
with open(dname + '/tab.genes.txt', 'w') as of:
of.write('\n'.join(genes) + '\n')
return X, genes
def process_h5(fname, min_trans=MIN_TRANSCRIPTS):
X, genes = load_h5(fname)
gt_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
if s >= min_trans ]
X = X[gt_idx, :]
if len(gt_idx) == 0:
print('Warning: 0 cells passed QC in {}'.format(fname))
if fname.endswith('.h5'):
cache_prefix = '.'.join(fname.split('.')[:-1])
cache_fname = cache_prefix + '.h5.npz'
scipy.sparse.save_npz(cache_fname, X, compressed=False)
with open(cache_prefix + '.h5.genes.txt', 'w') as of:
of.write('\n'.join(genes) + '\n')
return X, genes
def load_data(name):
if os.path.isfile(name + '.h5.npz'):
X = scipy.sparse.load_npz(name + '.h5.npz')
with open(name + '.h5.genes.txt') as f:
genes = np.array(f.read().rstrip().split('\n'))
elif os.path.isfile(name + '_tab.npz'):
X = scipy.sparse.load_npz(name + '_tab.npz')
with open(name + '_tab.genes.txt') as f:
genes = np.array(f.read().rstrip().split('\n'))
elif os.path.isfile(name + '/tab.npz'):
X = scipy.sparse.load_npz(name + '/tab.npz')
with open(name + '/tab.genes.txt') as f:
genes = np.array(f.read().rstrip().split('\n'))
else:
sys.stderr.write('Could not find: {}\n'.format(name))
exit(1)
genes = np.array([ gene.upper() for gene in genes ])
return X, genes
def load_names(data_names, norm=False, log1p=False, verbose=True):
# Load datasets.
datasets = []
genes_list = []
n_cells = 0
for name in data_names:
X_i, genes_i = load_data(name)
if norm:
X_i = normalize(X_i, axis=1)
if log1p:
X_i = np.log1p(X_i)
X_i = csr_matrix(X_i)
datasets.append(X_i)
genes_list.append(genes_i)
n_cells += X_i.shape[0]
if verbose:
print('Loaded {} with {} genes and {} cells'.
format(name, X_i.shape[1], X_i.shape[0]))
if verbose:
print('Found {} cells among all datasets'
.format(n_cells))
return datasets, genes_list, n_cells
def save_datasets(datasets, genes, data_names, verbose=True,
truncate_neg=False):
for i in range(len(datasets)):
dataset = datasets[i].toarray()
name = data_names[i]
if truncate_neg:
dataset[dataset < 0] = 0
with open(name + '.scanorama_corrected.txt', 'w') as of:
# Save header.
of.write('Genes\t')
of.write('\t'.join(
[ 'cell' + str(cell) for cell in range(dataset.shape[0]) ]
) + '\n')
for g in range(dataset.shape[1]):
of.write(genes[g] + '\t')
of.write('\t'.join(
[ str(expr) for expr in dataset[:, g] ]
) + '\n')
def merge_datasets(datasets, genes, ds_names=None, verbose=True,
union=False, keep_genes=None):
if keep_genes is None:
# Find genes in common.
keep_genes = set()
for idx, gene_list in enumerate(genes):
gene_list = [ g for gene in gene_list for g in gene.split(';') ]
if len(keep_genes) == 0:
keep_genes = set(gene_list)
elif union:
keep_genes |= set(gene_list)
else:
keep_genes &= set(gene_list)
if not union and not ds_names is None and verbose:
print('After {}: {} genes'.format(ds_names[idx], len(keep_genes)))
if len(keep_genes) == 0:
print('Error: No genes found in all datasets, exiting...')
exit(1)
else:
union = True
if verbose:
print('Found {} genes among all datasets'
.format(len(keep_genes)))
if union:
union_genes = sorted(keep_genes)
for i in range(len(datasets)):
if verbose:
print('Processing dataset {}'.format(i))
X_new = np.zeros((datasets[i].shape[0], len(union_genes)))
X_old = csc_matrix(datasets[i])
gene_to_idx = { g: idx for idx, gene in enumerate(genes[i])
for g in gene.split(';') }
for j, gene in enumerate(union_genes):
if gene in gene_to_idx:
X_new[:, j] = X_old[:, gene_to_idx[gene]].toarray().flatten()
datasets[i] = csr_matrix(X_new)
ret_genes = np.array(union_genes)
else:
# Only keep genes in common.
ret_genes = np.array(sorted(keep_genes))
for i in range(len(datasets)):
if len(genes[i]) != datasets[i].shape[1]:
raise ValueError('Mismatch along gene dimension for dataset {}, '
'{} genes vs {} matrix shape'
.format(ds_names[i] if ds_names is not None
else i, len(genes[i]), datasets[i].shape[1]))
# Remove duplicate genes.
uniq_genes, uniq_idx = np.unique(genes[i], return_index=True)
datasets[i] = datasets[i][:, uniq_idx]
# Do gene filtering.
gene_sort_idx = np.argsort(uniq_genes)
gene_idx = [
idx
for idx in gene_sort_idx
for g in uniq_genes[idx].split(';') if g in keep_genes
]
datasets[i] = datasets[i][:, gene_idx]
assert(len(uniq_genes[gene_idx]) == len(ret_genes))
return datasets, ret_genes
def process(data_names, min_trans=MIN_TRANSCRIPTS):
for name in data_names:
if os.path.isdir(name):
process_mtx(name, min_trans=min_trans)
elif os.path.isfile(name) and name.endswith('.h5'):
process_h5(name, min_trans=min_trans)
elif os.path.isfile(name + '.h5'):
process_h5(name + '.h5', min_trans=min_trans)
elif os.path.isfile(name):
process_tab(name, min_trans=min_trans)
elif os.path.isfile(name + '.txt'):
process_tab(name + '.txt', min_trans=min_trans)
elif os.path.isfile(name + '.txt.gz'):
process_tab(name + '.txt.gz', min_trans=min_trans)
elif os.path.isfile(name + '.tsv'):
process_tab(name + '.tsv', min_trans=min_trans)
elif os.path.isfile(name + '.tsv.gz'):
process_tab(name + '.tsv.gz', min_trans=min_trans)
elif os.path.isfile(name + '.csv'):
process_tab(name + '.csv', min_trans=min_trans, delim=',')
elif os.path.isfile(name + '.csv.gz'):
process_tab(name + '.csv.gz', min_trans=min_trans, delim=',')
else:
sys.stderr.write('Warning: Could not find {}\n'.format(name))
continue
print('Successfully processed {}'.format(name))
if __name__ == '__main__':
from config import data_names
process(data_names)
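# Usage sketch (illustrative): once process(...) has cached a dataset, it can be reloaded with
# load_names(), e.g. as fly_brain.py does above:
# [X], [genes], n_cells = load_names(['data/fly_brain/GSE107451'], norm=False)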
|
schema-learn
|
/schema_learn-0.1.5.5.tar.gz/schema_learn-0.1.5.5/deprecated/old_examples/fly_brain/process.py
|
process.py
|
| 0.264453 | 0.316 |
def validate_type_model_errors(types):
"""
Validate a user type model's types
:param dict types: The map of user type name to user type model
:returns: The list of type name, member name, and error message tuples
"""
errors = []
# Check each user type
for type_name, user_type in types.items():
# Struct?
if 'struct' in user_type:
struct = user_type['struct']
# Inconsistent type name?
if type_name != struct['name']:
errors.append((type_name, None, f'Inconsistent type name {struct["name"]!r} for {type_name!r}'))
# Check base types
if 'bases' in struct:
is_union = struct.get('union', False)
for base_name in struct['bases']:
invalid_base = True
base_user_type = _get_effective_user_type(types, base_name)
if base_user_type is not None and 'struct' in base_user_type:
if is_union == base_user_type['struct'].get('union', False):
invalid_base = False
if invalid_base:
errors.append((type_name, None, f'Invalid struct base type {base_name!r}'))
# Iterate the members
try:
members = set()
for member in _get_struct_members(types, struct, set()):
member_name = member['name']
# Duplicate member?
if member_name not in members:
members.add(member_name)
else:
errors.append((type_name, member_name, f'Redefinition of {type_name!r} member {member_name!r}'))
# Check member type and attributes
_validate_type_model_type(errors, types, member['type'], member.get('attr'), struct['name'], member['name'])
except ValueError:
errors.append((type_name, None, f'Circular base type detected for type {type_name!r}'))
# Enum?
elif 'enum' in user_type:
enum = user_type['enum']
# Inconsistent type name?
if type_name != enum['name']:
errors.append((type_name, None, f'Inconsistent type name {enum["name"]!r} for {type_name!r}'))
# Check base types
if 'bases' in enum:
for base_name in enum['bases']:
base_user_type = _get_effective_user_type(types, base_name)
if base_user_type is None or 'enum' not in base_user_type:
errors.append((type_name, None, f'Invalid enum base type {base_name!r}'))
# Get the enumeration values
try:
values = set()
for value in _get_enum_values(types, enum, set()):
value_name = value['name']
# Duplicate value?
if value_name not in values:
values.add(value_name)
else:
errors.append((type_name, value_name, f'Redefinition of {type_name!r} value {value_name!r}'))
except ValueError:
errors.append((type_name, None, f'Circular base type detected for type {type_name!r}'))
# Typedef?
elif 'typedef' in user_type:
typedef = user_type['typedef']
# Inconsistent type name?
if type_name != typedef['name']:
errors.append((type_name, None, f'Inconsistent type name {typedef["name"]!r} for {type_name!r}'))
# Check the type and its attributes
_validate_type_model_type(errors, types, typedef['type'], typedef.get('attr'), type_name, None)
# Action?
elif 'action' in user_type: # pragma: no branch
action = user_type['action']
# Inconsistent type name?
if type_name != action['name']:
errors.append((type_name, None, f'Inconsistent type name {action["name"]!r} for {type_name!r}'))
# Check action section types
for section in ('path', 'query', 'input', 'output', 'errors'):
if section in action:
section_type_name = action[section]
# Check the section type
_validate_type_model_type(errors, types, {'user': section_type_name}, None, type_name, None)
# Compute effective input member counts
member_sections = {}
for section in ('path', 'query', 'input'):
if section in action:
section_type_name = action[section]
if section_type_name in types:
section_user_type = _get_effective_user_type(types, section_type_name)
if section_user_type is not None and 'struct' in section_user_type:
section_struct = section_user_type['struct']
# Get the section struct's members and count member occurrences
try:
for member in _get_struct_members(types, section_struct, set()):
member_name = member['name']
if member_name not in member_sections:
member_sections[member_name] = []
member_sections[member_name].append(section_struct['name'])
except ValueError:
pass
# Check for duplicate input members
for member_name, member_section_names in member_sections.items():
if len(member_section_names) > 1:
for section_type in member_section_names:
errors.append((section_type, member_name, f'Redefinition of {section_type!r} member {member_name!r}'))
return errors
def _get_effective_type(types, type_):
if 'user' in type_ and type_['user'] in types:
user_type = types[type_['user']]
if 'typedef' in user_type:
return _get_effective_type(types, user_type['typedef']['type'])
return type_
def _get_effective_user_type(types, user_type_name):
user_type = types.get(user_type_name)
if user_type is not None and 'typedef' in user_type:
type_effective = _get_effective_type(types, user_type['typedef']['type'])
if 'user' not in type_effective:
return None
return types.get(type_effective['user'])
return user_type
def _get_struct_members(types, struct, visited=None):
yield from _get_type_items(types, struct, visited, 'struct', 'members')
def _get_enum_values(types, enum, visited=None):
yield from _get_type_items(types, enum, visited, 'enum', 'values')
def _get_type_items(types, type_, visited, def_name, member_name):
if 'bases' in type_:
for base in type_['bases']:
user_type = _get_effective_user_type(types, base)
if user_type is not None and def_name in user_type:
user_type_name = user_type[def_name]['name']
if user_type_name not in visited:
visited.add(user_type_name)
yield from _get_type_items(types, user_type[def_name], visited, def_name, member_name)
else:
raise ValueError()
if member_name in type_:
yield from type_[member_name]
# Map of attribute struct member name to attribute description
_ATTR_TO_TEXT = {
'eq': '==',
'lt': '<',
'lte': '<=',
'gt': '>',
'gte': '>=',
'lenEq': 'len ==',
'lenLT': 'len <',
'lenLTE': 'len <=',
'lenGT': 'len >',
'lenGTE': 'len >='
}
# Map of type name to valid attribute set
_TYPE_TO_ALLOWED_ATTR = {
'float': set(['eq', 'lt', 'lte', 'gt', 'gte']),
'int': set(['eq', 'lt', 'lte', 'gt', 'gte']),
'string': set(['lenEq', 'lenLT', 'lenLTE', 'lenGT', 'lenGTE']),
'array': set(['lenEq', 'lenLT', 'lenLTE', 'lenGT', 'lenGTE']),
'dict': set(['lenEq', 'lenLT', 'lenLTE', 'lenGT', 'lenGTE'])
}
def _validate_type_model_type(errors, types, type_, attr, type_name, member_name):
# Helper function to push an error tuple
def error(message):
if member_name is not None:
errors.append((type_name, member_name, f'{message} from {type_name!r} member {member_name!r}'))
else:
errors.append((type_name, None, f'{message} from {type_name!r}'))
# Array?
if 'array' in type_:
array = type_['array']
# Check the type and its attributes
array_type = _get_effective_type(types, array['type'])
_validate_type_model_type(errors, types, array_type, array.get('attr'), type_name, member_name)
# Dict?
elif 'dict' in type_:
dict_ = type_['dict']
# Check the type and its attributes
dict_type = _get_effective_type(types, dict_['type'])
_validate_type_model_type(errors, types, dict_type, dict_.get('attr'), type_name, member_name)
# Check the dict key type and its attributes
if 'keyType' in dict_:
dict_key_type = _get_effective_type(types, dict_['keyType'])
_validate_type_model_type(errors, types, dict_key_type, dict_.get('keyAttr'), type_name, member_name)
# Valid dict key type (string or enum)
if not ('builtin' in dict_key_type and dict_key_type['builtin'] == 'string') and \
not ('user' in dict_key_type and dict_key_type['user'] in types and 'enum' in types[dict_key_type['user']]):
error('Invalid dictionary key type')
# User type?
elif 'user' in type_:
user_type_name = type_['user']
# Unknown user type?
if user_type_name not in types:
error(f'Unknown type {user_type_name!r}')
else:
user_type = types[user_type_name]
# Action type references not allowed
if 'action' in user_type:
error(f'Invalid reference to action {user_type_name!r}')
# Any not-allowed attributes?
if attr is not None:
type_effective = _get_effective_type(types, type_)
type_key = next(iter(type_effective.keys()), None)
allowed_attr = _TYPE_TO_ALLOWED_ATTR.get(type_effective[type_key] if type_key == 'builtin' else type_key)
disallowed_attr = set(attr)
disallowed_attr.discard('nullable')
if allowed_attr is not None:
disallowed_attr -= allowed_attr
if disallowed_attr:
for attr_key in disallowed_attr:
attr_value = f'{attr[attr_key]:.6f}'.rstrip('0').rstrip('.')
attr_text = f'{_ATTR_TO_TEXT[attr_key]} {attr_value}'
error(f'Invalid attribute {attr_text!r}')
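# Usage sketch (illustrative; the type model below is a hypothetical example, not part of the package):
if __name__ == '__main__':  # pragma: no cover
    _example_types = {
        'MyStruct': {'struct': {'name': 'OtherName'}}  # struct name does not match its key
    }
    # Prints [('MyStruct', None, "Inconsistent type name 'OtherName' for 'MyStruct'")]
    print(validate_type_model_errors(_example_types))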
|
schema-markdown
|
/schema_markdown-1.2.6-py3-none-any.whl/schema_markdown/schema_util.py
|
schema_util.py
|
| 0.625209 | 0.155495 |
from datetime import date, datetime, timezone
from decimal import Decimal
from math import isnan, isinf
from uuid import UUID
from .schema_util import validate_type_model_errors
from .type_model import TYPE_MODEL
def get_referenced_types(types, type_name, referenced_types=None):
"""
Get a type's referenced type model
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param str type_name: The type name
:param dict referenced_types: An optional map of referenced user type name to user type
:returns: The referenced `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
"""
return _get_referenced_types(types, {'user': type_name}, referenced_types)
def _get_referenced_types(types, type_, referenced_types=None):
# Create the referenced types dict, if necessary
if referenced_types is None:
referenced_types = {}
# Array?
if 'array' in type_:
array = type_['array']
_get_referenced_types(types, array['type'], referenced_types)
# Dict?
elif 'dict' in type_:
dict_ = type_['dict']
_get_referenced_types(types, dict_['type'], referenced_types)
if 'keyType' in dict_:
_get_referenced_types(types, dict_['keyType'], referenced_types)
# User type?
elif 'user' in type_:
type_name = type_['user']
# Already encountered?
if type_name not in referenced_types:
user_type = types[type_name]
referenced_types[type_name] = user_type
# Struct?
if 'struct' in user_type:
struct = user_type['struct']
if 'bases' in struct:
for base in struct['bases']:
_get_referenced_types(types, {'user': base}, referenced_types)
for member in get_struct_members(types, struct):
_get_referenced_types(types, member['type'], referenced_types)
# Enum
elif 'enum' in user_type:
enum = user_type['enum']
if 'bases' in enum:
for base in enum['bases']:
_get_referenced_types(types, {'user': base}, referenced_types)
# Typedef?
elif 'typedef' in user_type:
typedef = user_type['typedef']
_get_referenced_types(types, typedef['type'], referenced_types)
# Action?
elif 'action' in user_type: # pragma: no branch
action = user_type['action']
if 'path' in action:
_get_referenced_types(types, {'user': action['path']}, referenced_types)
if 'query' in action:
_get_referenced_types(types, {'user': action['query']}, referenced_types)
if 'input' in action:
_get_referenced_types(types, {'user': action['input']}, referenced_types)
if 'output' in action:
_get_referenced_types(types, {'user': action['output']}, referenced_types)
if 'errors' in action:
_get_referenced_types(types, {'user': action['errors']}, referenced_types)
return referenced_types
class ValidationError(Exception):
"""
schema-markdown type model validation error
:param str msg: The error message
:param member_fqn: The fully qualified member name or None
:type member_fqn: str or None
"""
__slots__ = ('member',)
def __init__(self, msg, member_fqn=None):
super().__init__(msg)
#: The fully qualified member name or None
self.member = member_fqn
def validate_type(types, type_name, value, member_fqn=None):
"""
Type-validate a value using the schema-markdown user type model. Container values are duplicated
since some member types are transformed during validation.
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param str type_name: The type name
:param object value: The value object to validate
:param str member_fqn: The fully-qualified member name
:returns: The validated, transformed value object
:raises ValidationError: A validation error occurred
"""
if type_name not in types:
raise ValidationError(f"Unknown type {type_name!r}")
return _validate_type(types, {'user': type_name}, value, member_fqn)
def _validate_type(types, type_, value, member_fqn=None):
value_new = value
# Built-in type?
if 'builtin' in type_:
builtin = type_['builtin']
# string?
if builtin == 'string':
# Not a string?
if not isinstance(value, str):
raise _member_error(type_, value, member_fqn)
# int?
elif builtin == 'int':
# Convert string, float, or Decimal?
if isinstance(value, (str, float, Decimal)):
try:
value_new = int(value)
if not isinstance(value, str) and value_new != value:
raise ValueError()
except ValueError:
raise _member_error(type_, value, member_fqn) from None
# Not an int?
elif not isinstance(value, int) or isinstance(value, bool):
raise _member_error(type_, value, member_fqn)
# float?
elif builtin == 'float':
# Convert string, int, or Decimal?
if isinstance(value, (str, int, Decimal)) and not isinstance(value, bool):
try:
value_new = float(value)
if isnan(value_new) or isinf(value_new):
raise ValueError()
except ValueError:
raise _member_error(type_, value, member_fqn) from None
# Not a float?
elif not isinstance(value, float):
raise _member_error(type_, value, member_fqn)
# bool?
elif builtin == 'bool':
# Convert string?
if isinstance(value, str):
if value == 'true':
value_new = True
elif value == 'false':
value_new = False
else:
raise _member_error(type_, value, member_fqn)
# Not a bool?
elif not isinstance(value, bool):
raise _member_error(type_, value, member_fqn)
# date?
elif builtin == 'date':
# Convert string?
if isinstance(value, str):
try:
value_new = datetime.fromisoformat(value).date()
except ValueError:
raise _member_error(type_, value, member_fqn)
# Not a date?
elif not isinstance(value, date) or isinstance(value, datetime):
raise _member_error(type_, value, member_fqn)
# datetime?
elif builtin == 'datetime':
# Convert string?
if isinstance(value, str):
try:
value_new = datetime.fromisoformat(value)
except ValueError:
raise _member_error(type_, value, member_fqn)
# No timezone?
if value_new.tzinfo is None:
value_new = value_new.replace(tzinfo=timezone.utc)
# Not a datetime?
elif not isinstance(value, datetime):
raise _member_error(type_, value, member_fqn)
# uuid?
elif builtin == 'uuid':
# Convert string?
if isinstance(value, str):
try:
value_new = UUID(value)
except ValueError:
raise _member_error(type_, value, member_fqn)
# Not a UUID?
elif not isinstance(value, UUID):
raise _member_error(type_, value, member_fqn)
# array?
elif 'array' in type_:
# Valid value type?
array = type_['array']
array_type = array['type']
array_attr = array.get('attr')
if isinstance(value, str) and value == '':
value_new = []
elif not isinstance(value, (list, tuple)):
raise _member_error(type_, value, member_fqn)
# Validate the list contents
value_copy = []
array_value_nullable = array_attr is not None and 'nullable' in array_attr and array_attr['nullable']
for ix_array_value, array_value in enumerate(value_new):
member_fqn_value = f'{ix_array_value}' if member_fqn is None else f'{member_fqn}.{ix_array_value}'
if array_value_nullable and (array_value is None or array_value == 'null'):
array_value = None
else:
array_value = _validate_type(types, array_type, array_value, member_fqn_value)
_validate_attr(array_type, array_attr, array_value, member_fqn_value)
value_copy.append(array_value)
# Return the validated, transformed copy
value_new = value_copy
# dict?
elif 'dict' in type_:
# Valid value type?
dict_ = type_['dict']
dict_type = dict_['type']
dict_attr = dict_.get('attr')
dict_key_type = dict_['keyType'] if 'keyType' in dict_ else {'builtin': 'string'}
dict_key_attr = dict_.get('keyAttr')
if isinstance(value, str) and value == '':
value_new = {}
elif not isinstance(value, dict):
raise _member_error(type_, value, member_fqn)
# Validate the dict key/value pairs
value_copy = {}
dict_key_nullable = dict_key_attr is not None and 'nullable' in dict_key_attr and dict_key_attr['nullable']
dict_value_nullable = dict_attr is not None and 'nullable' in dict_attr and dict_attr['nullable']
for dict_key, dict_value in value_new.items():
member_fqn_key = dict_key if member_fqn is None else f'{member_fqn}.{dict_key}'
# Validate the key
if dict_key_nullable and (dict_key is None or dict_key == 'null'):
dict_key = None
else:
dict_key = _validate_type(types, dict_key_type, dict_key, member_fqn)
_validate_attr(dict_key_type, dict_key_attr, dict_key, member_fqn)
# Validate the value
if dict_value_nullable and (dict_value is None or dict_value == 'null'):
dict_value = None
else:
dict_value = _validate_type(types, dict_type, dict_value, member_fqn_key)
_validate_attr(dict_type, dict_attr, dict_value, member_fqn_key)
# Copy the key/value
value_copy[dict_key] = dict_value
# Return the validated, transformed copy
value_new = value_copy
# User type?
elif 'user' in type_:
user_type = types[type_['user']]
# action?
if 'action' in user_type:
raise _member_error(type_, value, member_fqn)
# typedef?
if 'typedef' in user_type:
typedef = user_type['typedef']
typedef_attr = typedef.get('attr')
# Validate the value
value_nullable = typedef_attr is not None and 'nullable' in typedef_attr and typedef_attr['nullable']
if value_nullable and (value is None or value == 'null'):
value_new = None
else:
value_new = _validate_type(types, typedef['type'], value, member_fqn)
_validate_attr(type_, typedef_attr, value_new, member_fqn)
# enum?
elif 'enum' in user_type:
enum = user_type['enum']
# Not a valid enum value?
if value not in (enum_value['name'] for enum_value in get_enum_values(types, enum)):
raise _member_error(type_, value, member_fqn)
# struct?
elif 'struct' in user_type:
struct = user_type['struct']
# Valid value type?
if isinstance(value, str) and value == '':
value_new = {}
elif not isinstance(value, dict):
raise _member_error({'user': struct['name']}, value, member_fqn)
# Valid union?
is_union = struct.get('union', False)
if is_union:
if len(value) != 1:
raise _member_error({'user': struct['name']}, value, member_fqn)
# Validate the struct members
value_copy = {}
for member in get_struct_members(types, struct):
member_name = member['name']
member_fqn_member = member_name if member_fqn is None else f'{member_fqn}.{member_name}'
member_optional = member.get('optional', False)
member_nullable = 'attr' in member and member['attr'].get('nullable', False)
# Missing non-optional member?
if member_name not in value_new:
if not member_optional and not is_union:
raise ValidationError(f"Required member {member_fqn_member!r} missing")
else:
# Validate the member value
member_value = value_new[member_name]
if member_nullable and (member_value is None or member_value == 'null'):
member_value = None
else:
member_value = _validate_type(types, member['type'], member_value, member_fqn_member)
_validate_attr(member['type'], member.get('attr'), member_value, member_fqn_member)
# Copy the validated member
value_copy[member_name] = member_value
# Any unknown members?
if len(value_copy) != len(value_new):
member_set = {member['name'] for member in get_struct_members(types, struct)}
unknown_key = next(value_name for value_name in value_new.keys() if value_name not in member_set) # pragma: no branch
unknown_fqn = unknown_key if member_fqn is None else f'{member_fqn}.{unknown_key}'
raise ValidationError(f"Unknown member {unknown_fqn!r:.100s}")
# Return the validated, transformed copy
value_new = value_copy
return value_new
def _member_error(type_, value, member_fqn, attr=None):
member_part = f" for member {member_fqn!r}" if member_fqn else ''
type_name = type_['builtin'] if 'builtin' in type_ else (
'array' if 'array' in type_ else ('dict' if 'dict' in type_ else type_['user']))
attr_part = f' [{attr}]' if attr else ''
msg = f"Invalid value {value!r:.1000s} (type {value.__class__.__name__!r}){member_part}, expected type {type_name!r}{attr_part}"
return ValidationError(msg, member_fqn)
def _validate_attr(type_, attr, value, member_fqn):
if attr is not None:
if 'eq' in attr and not value == attr['eq']:
raise _member_error(type_, value, member_fqn, f'== {attr["eq"]}')
if 'lt' in attr and not value < attr['lt']:
raise _member_error(type_, value, member_fqn, f'< {attr["lt"]}')
if 'lte' in attr and not value <= attr['lte']:
raise _member_error(type_, value, member_fqn, f'<= {attr["lte"]}')
if 'gt' in attr and not value > attr['gt']:
raise _member_error(type_, value, member_fqn, f'> {attr["gt"]}')
if 'gte' in attr and not value >= attr['gte']:
raise _member_error(type_, value, member_fqn, f'>= {attr["gte"]}')
if 'lenEq' in attr and not len(value) == attr['lenEq']:
raise _member_error(type_, value, member_fqn, f'len == {attr["lenEq"]}')
if 'lenLT' in attr and not len(value) < attr['lenLT']:
raise _member_error(type_, value, member_fqn, f'len < {attr["lenLT"]}')
if 'lenLTE' in attr and not len(value) <= attr['lenLTE']:
raise _member_error(type_, value, member_fqn, f'len <= {attr["lenLTE"]}')
if 'lenGT' in attr and not len(value) > attr['lenGT']:
raise _member_error(type_, value, member_fqn, f'len > {attr["lenGT"]}')
if 'lenGTE' in attr and not len(value) >= attr['lenGTE']:
raise _member_error(type_, value, member_fqn, f'len >= {attr["lenGTE"]}')
def get_struct_members(types, struct):
"""
Iterate the struct's members (inherited members first)
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param dict struct: The `struct model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Struct'>`__
:returns: An iterator of `struct member models <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='StructMember'>`__
"""
if 'bases' in struct:
for base in struct['bases']:
base_user_type = types[base]
while 'typedef' in base_user_type:
base_user_type = types[base_user_type['typedef']['type']['user']]
yield from get_struct_members(types, base_user_type['struct'])
if 'members' in struct:
yield from struct['members']
def get_enum_values(types, enum):
"""
Iterate the enum's values (inherited values first)
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param dict enum: The `enum model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Enum'>`__
:returns: An iterator of `enum value models <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='EnumValue'>`__
"""
if 'bases' in enum:
for base in enum['bases']:
base_user_type = types[base]
while 'typedef' in base_user_type:
base_user_type = types[base_user_type['typedef']['type']['user']]
yield from get_enum_values(types, base_user_type['enum'])
if 'values' in enum:
yield from enum['values']
def validate_type_model(types):
"""
Validate a user type model
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:returns: The validated `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:raises ValidationError: A validation error occurred
"""
# Validate with the type model
validated_types = validate_type(TYPE_MODEL, 'Types', types)
# Do additional type model validation
errors = validate_type_model_errors(validated_types)
if errors:
raise ValidationError('\n'.join(message for _, _, message in sorted(errors)))
return validated_types
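# Usage sketch (illustrative; the 'Point' type model below is a hypothetical example, not part of the package):
if __name__ == '__main__':  # pragma: no cover
    _example_types = {
        'Point': {'struct': {'name': 'Point', 'members': [
            {'name': 'x', 'type': {'builtin': 'int'}},
            {'name': 'y', 'type': {'builtin': 'int'}}
        ]}}
    }
    # String values are converted to the declared builtin types; prints {'x': 1, 'y': 2}
    print(validate_type(_example_types, 'Point', {'x': '1', 'y': 2}))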
|
schema-markdown
|
/schema_markdown-1.2.6-py3-none-any.whl/schema_markdown/schema.py
|
schema.py
|
from datetime import date, datetime, timezone
from decimal import Decimal
from math import isnan, isinf
from uuid import UUID
from .schema_util import validate_type_model_errors
from .type_model import TYPE_MODEL
def get_referenced_types(types, type_name, referenced_types=None):
"""
Get a type's referenced type model
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param str type_name: The type name
:param dict referenced_types: An optional map of referenced user type name to user type
:returns: The referenced `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
"""
return _get_referenced_types(types, {'user': type_name}, referenced_types)
def _get_referenced_types(types, type_, referenced_types=None):
# Create the referenced types dict, if necessary
if referenced_types is None:
referenced_types = {}
# Array?
if 'array' in type_:
array = type_['array']
_get_referenced_types(types, array['type'], referenced_types)
# Dict?
elif 'dict' in type_:
dict_ = type_['dict']
_get_referenced_types(types, dict_['type'], referenced_types)
if 'keyType' in dict_:
_get_referenced_types(types, dict_['keyType'], referenced_types)
# User type?
elif 'user' in type_:
type_name = type_['user']
# Already encountered?
if type_name not in referenced_types:
user_type = types[type_name]
referenced_types[type_name] = user_type
# Struct?
if 'struct' in user_type:
struct = user_type['struct']
if 'bases' in struct:
for base in struct['bases']:
_get_referenced_types(types, {'user': base}, referenced_types)
for member in get_struct_members(types, struct):
_get_referenced_types(types, member['type'], referenced_types)
# Enum
elif 'enum' in user_type:
enum = user_type['enum']
if 'bases' in enum:
for base in enum['bases']:
_get_referenced_types(types, {'user': base}, referenced_types)
# Typedef?
elif 'typedef' in user_type:
typedef = user_type['typedef']
_get_referenced_types(types, typedef['type'], referenced_types)
# Action?
elif 'action' in user_type: # pragma: no branch
action = user_type['action']
if 'path' in action:
_get_referenced_types(types, {'user': action['path']}, referenced_types)
if 'query' in action:
_get_referenced_types(types, {'user': action['query']}, referenced_types)
if 'input' in action:
_get_referenced_types(types, {'user': action['input']}, referenced_types)
if 'output' in action:
_get_referenced_types(types, {'user': action['output']}, referenced_types)
if 'errors' in action:
_get_referenced_types(types, {'user': action['errors']}, referenced_types)
return referenced_types
class ValidationError(Exception):
"""
schema-markdown type model validation error
:param str msg: The error message
:param member_fqn: The fully qualified member name or None
:type member_fqn: str or None
"""
__slots__ = ('member',)
def __init__(self, msg, member_fqn=None):
super().__init__(msg)
#: The fully qualified member name or None
self.member = member_fqn
def validate_type(types, type_name, value, member_fqn=None):
"""
Type-validate a value using the schema-markdown user type model. Container values are duplicated
since some member types are transformed during validation.
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param str type_name: The type name
:param object value: The value object to validate
:param str member_fqn: The fully-qualified member name
:returns: The validated, transformed value object
:raises ValidationError: A validation error occurred
"""
if type_name not in types:
raise ValidationError(f"Unknown type {type_name!r}")
return _validate_type(types, {'user': type_name}, value, member_fqn)
def _validate_type(types, type_, value, member_fqn=None):
value_new = value
# Built-in type?
if 'builtin' in type_:
builtin = type_['builtin']
# string?
if builtin == 'string':
# Not a string?
if not isinstance(value, str):
raise _member_error(type_, value, member_fqn)
# int?
elif builtin == 'int':
# Convert string, float, or Decimal?
if isinstance(value, (str, float, Decimal)):
try:
value_new = int(value)
if not isinstance(value, str) and value_new != value:
raise ValueError()
except ValueError:
raise _member_error(type_, value, member_fqn) from None
# Not an int?
elif not isinstance(value, int) or isinstance(value, bool):
raise _member_error(type_, value, member_fqn)
# float?
elif builtin == 'float':
# Convert string, int, or Decimal?
if isinstance(value, (str, int, Decimal)) and not isinstance(value, bool):
try:
value_new = float(value)
if isnan(value_new) or isinf(value_new):
raise ValueError()
except ValueError:
raise _member_error(type_, value, member_fqn) from None
# Not a float?
elif not isinstance(value, float):
raise _member_error(type_, value, member_fqn)
# bool?
elif builtin == 'bool':
# Convert string?
if isinstance(value, str):
if value == 'true':
value_new = True
elif value == 'false':
value_new = False
else:
raise _member_error(type_, value, member_fqn)
# Not a bool?
elif not isinstance(value, bool):
raise _member_error(type_, value, member_fqn)
# date?
elif builtin == 'date':
# Convert string?
if isinstance(value, str):
try:
value_new = datetime.fromisoformat(value).date()
except ValueError:
raise _member_error(type_, value, member_fqn)
# Not a date?
elif not isinstance(value, date) or isinstance(value, datetime):
raise _member_error(type_, value, member_fqn)
# datetime?
elif builtin == 'datetime':
# Convert string?
if isinstance(value, str):
try:
value_new = datetime.fromisoformat(value)
except ValueError:
raise _member_error(type_, value, member_fqn)
# No timezone?
if value_new.tzinfo is None:
value_new = value_new.replace(tzinfo=timezone.utc)
# Not a datetime?
elif not isinstance(value, datetime):
raise _member_error(type_, value, member_fqn)
# uuid?
elif builtin == 'uuid':
# Convert string?
if isinstance(value, str):
try:
value_new = UUID(value)
except ValueError:
raise _member_error(type_, value, member_fqn)
# Not a UUID?
elif not isinstance(value, UUID):
raise _member_error(type_, value, member_fqn)
# array?
elif 'array' in type_:
# Valid value type?
array = type_['array']
array_type = array['type']
array_attr = array.get('attr')
if isinstance(value, str) and value == '':
value_new = []
elif not isinstance(value, (list, tuple)):
raise _member_error(type_, value, member_fqn)
# Validate the list contents
value_copy = []
array_value_nullable = array_attr is not None and 'nullable' in array_attr and array_attr['nullable']
for ix_array_value, array_value in enumerate(value_new):
member_fqn_value = f'{ix_array_value}' if member_fqn is None else f'{member_fqn}.{ix_array_value}'
if array_value_nullable and (array_value is None or array_value == 'null'):
array_value = None
else:
array_value = _validate_type(types, array_type, array_value, member_fqn_value)
_validate_attr(array_type, array_attr, array_value, member_fqn_value)
value_copy.append(array_value)
# Return the validated, transformed copy
value_new = value_copy
# dict?
elif 'dict' in type_:
# Valid value type?
dict_ = type_['dict']
dict_type = dict_['type']
dict_attr = dict_.get('attr')
dict_key_type = dict_['keyType'] if 'keyType' in dict_ else {'builtin': 'string'}
dict_key_attr = dict_.get('keyAttr')
if isinstance(value, str) and value == '':
value_new = {}
elif not isinstance(value, dict):
raise _member_error(type_, value, member_fqn)
# Validate the dict key/value pairs
value_copy = {}
dict_key_nullable = dict_key_attr is not None and 'nullable' in dict_key_attr and dict_key_attr['nullable']
dict_value_nullable = dict_attr is not None and 'nullable' in dict_attr and dict_attr['nullable']
for dict_key, dict_value in value_new.items():
member_fqn_key = dict_key if member_fqn is None else f'{member_fqn}.{dict_key}'
# Validate the key
if dict_key_nullable and (dict_key is None or dict_key == 'null'):
dict_key = None
else:
dict_key = _validate_type(types, dict_key_type, dict_key, member_fqn)
_validate_attr(dict_key_type, dict_key_attr, dict_key, member_fqn)
# Validate the value
if dict_value_nullable and (dict_value is None or dict_value == 'null'):
dict_value = None
else:
dict_value = _validate_type(types, dict_type, dict_value, member_fqn_key)
_validate_attr(dict_type, dict_attr, dict_value, member_fqn_key)
# Copy the key/value
value_copy[dict_key] = dict_value
# Return the validated, transformed copy
value_new = value_copy
# User type?
elif 'user' in type_:
user_type = types[type_['user']]
# action?
if 'action' in user_type:
raise _member_error(type_, value, member_fqn)
# typedef?
if 'typedef' in user_type:
typedef = user_type['typedef']
typedef_attr = typedef.get('attr')
# Validate the value
value_nullable = typedef_attr is not None and 'nullable' in typedef_attr and typedef_attr['nullable']
if value_nullable and (value is None or value == 'null'):
value_new = None
else:
value_new = _validate_type(types, typedef['type'], value, member_fqn)
_validate_attr(type_, typedef_attr, value_new, member_fqn)
# enum?
elif 'enum' in user_type:
enum = user_type['enum']
# Not a valid enum value?
if value not in (enum_value['name'] for enum_value in get_enum_values(types, enum)):
raise _member_error(type_, value, member_fqn)
# struct?
elif 'struct' in user_type:
struct = user_type['struct']
# Valid value type?
if isinstance(value, str) and value == '':
value_new = {}
elif not isinstance(value, dict):
raise _member_error({'user': struct['name']}, value, member_fqn)
# Valid union?
is_union = struct.get('union', False)
if is_union:
if len(value) != 1:
raise _member_error({'user': struct['name']}, value, member_fqn)
# Validate the struct members
value_copy = {}
for member in get_struct_members(types, struct):
member_name = member['name']
member_fqn_member = member_name if member_fqn is None else f'{member_fqn}.{member_name}'
member_optional = member.get('optional', False)
member_nullable = 'attr' in member and member['attr'].get('nullable', False)
# Missing non-optional member?
if member_name not in value_new:
if not member_optional and not is_union:
raise ValidationError(f"Required member {member_fqn_member!r} missing")
else:
# Validate the member value
member_value = value_new[member_name]
if member_nullable and (member_value is None or member_value == 'null'):
member_value = None
else:
member_value = _validate_type(types, member['type'], member_value, member_fqn_member)
_validate_attr(member['type'], member.get('attr'), member_value, member_fqn_member)
# Copy the validated member
value_copy[member_name] = member_value
# Any unknown members?
if len(value_copy) != len(value_new):
member_set = {member['name'] for member in get_struct_members(types, struct)}
unknown_key = next(value_name for value_name in value_new.keys() if value_name not in member_set) # pragma: no branch
unknown_fqn = unknown_key if member_fqn is None else f'{member_fqn}.{unknown_key}'
raise ValidationError(f"Unknown member {unknown_fqn!r:.100s}")
# Return the validated, transformed copy
value_new = value_copy
return value_new
def _member_error(type_, value, member_fqn, attr=None):
member_part = f" for member {member_fqn!r}" if member_fqn else ''
type_name = type_['builtin'] if 'builtin' in type_ else (
'array' if 'array' in type_ else ('dict' if 'dict' in type_ else type_['user']))
attr_part = f' [{attr}]' if attr else ''
msg = f"Invalid value {value!r:.1000s} (type {value.__class__.__name__!r}){member_part}, expected type {type_name!r}{attr_part}"
return ValidationError(msg, member_fqn)
def _validate_attr(type_, attr, value, member_fqn):
if attr is not None:
if 'eq' in attr and not value == attr['eq']:
raise _member_error(type_, value, member_fqn, f'== {attr["eq"]}')
if 'lt' in attr and not value < attr['lt']:
raise _member_error(type_, value, member_fqn, f'< {attr["lt"]}')
if 'lte' in attr and not value <= attr['lte']:
raise _member_error(type_, value, member_fqn, f'<= {attr["lte"]}')
if 'gt' in attr and not value > attr['gt']:
raise _member_error(type_, value, member_fqn, f'> {attr["gt"]}')
if 'gte' in attr and not value >= attr['gte']:
raise _member_error(type_, value, member_fqn, f'>= {attr["gte"]}')
if 'lenEq' in attr and not len(value) == attr['lenEq']:
raise _member_error(type_, value, member_fqn, f'len == {attr["lenEq"]}')
if 'lenLT' in attr and not len(value) < attr['lenLT']:
raise _member_error(type_, value, member_fqn, f'len < {attr["lenLT"]}')
if 'lenLTE' in attr and not len(value) <= attr['lenLTE']:
raise _member_error(type_, value, member_fqn, f'len <= {attr["lenLTE"]}')
if 'lenGT' in attr and not len(value) > attr['lenGT']:
raise _member_error(type_, value, member_fqn, f'len > {attr["lenGT"]}')
if 'lenGTE' in attr and not len(value) >= attr['lenGTE']:
raise _member_error(type_, value, member_fqn, f'len >= {attr["lenGTE"]}')
def get_struct_members(types, struct):
"""
Iterate the struct's members (inherited members first)
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param dict struct: The `struct model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Struct'>`__
:returns: An iterator of `struct member models <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='StructMember'>`__
"""
if 'bases' in struct:
for base in struct['bases']:
base_user_type = types[base]
while 'typedef' in base_user_type:
base_user_type = types[base_user_type['typedef']['type']['user']]
yield from get_struct_members(types, base_user_type['struct'])
if 'members' in struct:
yield from struct['members']
def get_enum_values(types, enum):
"""
Iterate the enum's values (inherited values first)
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param dict enum: The `enum model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Enum'>`__
:returns: An iterator of `enum value models <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='EnumValue'>`__
"""
if 'bases' in enum:
for base in enum['bases']:
base_user_type = types[base]
while 'typedef' in base_user_type:
base_user_type = types[base_user_type['typedef']['type']['user']]
yield from get_enum_values(types, base_user_type['enum'])
if 'values' in enum:
yield from enum['values']
def validate_type_model(types):
"""
Validate a user type model
:param dict types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:returns: The validated `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:raises ValidationError: A validation error occurred
"""
# Validate with the type model
validated_types = validate_type(TYPE_MODEL, 'Types', types)
# Do additional type model validation
errors = validate_type_model_errors(validated_types)
if errors:
raise ValidationError('\n'.join(message for _, _, message in sorted(errors)))
return validated_types
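# Illustrative usage (not part of the original module): a minimal standalone
# sketch of validate_type() with a typedef and a range attribute. The
# "PositiveInt" type name is hypothetical.
from schema_markdown.schema import ValidationError, validate_type

example_types = {
    'PositiveInt': {'typedef': {'name': 'PositiveInt', 'type': {'builtin': 'int'}, 'attr': {'gt': 0}}}
}
print(validate_type(example_types, 'PositiveInt', '42'))  # string input is converted to int: 42
try:
    validate_type(example_types, 'PositiveInt', -1)
except ValidationError as exc:
    print(exc)  # Invalid value -1 (type 'int'), expected type 'PositiveInt' [> 0]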
| 0.818483 | 0.172102 |
from datetime import date, datetime, timezone
from decimal import Decimal
import json
from urllib.parse import quote, unquote
from uuid import UUID
class JSONEncoder(json.JSONEncoder):
"""
A :class:`~json.JSONEncoder` sub-class with support for :class:`~datetime.datetime`, :class:`~datetime.date`,
:class:`~decimal.Decimal`, and :class:`~uuid.UUID` objects.
"""
__slots__ = ()
def default(self, o):
"""
The override of the :meth:`~json.JSONEncoder.default` method to add support for :class:`~datetime.datetime`,
:class:`~datetime.date`, :class:`~decimal.Decimal`, and :class:`~uuid.UUID` objects.
"""
if isinstance(o, datetime):
return (o if o.tzinfo else o.replace(tzinfo=timezone.utc)).isoformat()
if isinstance(o, date):
return o.isoformat()
if isinstance(o, Decimal):
return float(o)
if isinstance(o, UUID):
return f'{o}'
return json.JSONEncoder.default(self, o)
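# Illustrative example (not part of the original source): naive datetimes are
# serialized as UTC when passed through this encoder, for instance:
#
#   >>> import datetime, json
#   >>> json.dumps(datetime.datetime(2024, 1, 2, 3, 4, 5), cls=JSONEncoder)
#   '"2024-01-02T03:04:05+00:00"'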
def encode_query_string(obj, encoding='utf-8'):
"""
Encode an object as a query string. Dictionaries, lists, and tuples are recursed. Each member key is expressed in
fully-qualified form. List keys are the index into the list and appear in order. For example:
>>> schema_markdown.encode_query_string({'a': 5, 'b': 3.14, 'c': {'d': 'foo', 'e': [1, 2, 3]}, 'f': [{'g': True}, {'g': False}]})
'a=5&b=3.14&c.d=foo&c.e.0=1&c.e.1=2&c.e.2=3&f.0.g=true&f.1.g=false'
:param object obj: The object to encode as a query string
:param str encoding: The query string encoding
:returns: The encoded query string
"""
return '&'.join(f'{v}' if k is None else f'{k}={v}' for k, v in _encode_query_string_items(obj, None, encoding))
def _encode_query_string_items(obj, parent, encoding):
if isinstance(obj, dict):
if obj:
for member, value in sorted(obj.items()):
member_quoted = quote(f'{member}', encoding=encoding)
parent_member = f'{parent}.{member_quoted}' if parent else member_quoted
yield from _encode_query_string_items(value, parent_member, encoding)
elif parent:
yield parent, ''
elif isinstance(obj, (list, tuple)):
if obj:
for idx, value in enumerate(obj):
parent_member = f'{parent}.{idx}' if parent else f'{idx}'
yield from _encode_query_string_items(value, parent_member, encoding)
elif parent:
yield parent, ''
else:
if isinstance(obj, bool):
yield parent, 'true' if obj else 'false' # quote safe
elif isinstance(obj, int):
yield parent, f'{obj}' # quote safe
elif isinstance(obj, datetime):
if not obj.tzinfo:
obj = obj.replace(tzinfo=timezone.utc)
yield parent, quote(obj.isoformat(), encoding=encoding)
elif isinstance(obj, date):
yield parent, obj.isoformat() # quote safe
elif isinstance(obj, UUID):
yield parent, f'{obj}' # quote safe
elif obj is None:
yield parent, 'null'
else: # str, float
yield parent, quote(f'{obj}', encoding=encoding)
def decode_query_string(query_string, encoding='utf-8'):
"""
Decode an object from a query string. Each member key of the query string is expressed in fully-qualified
form. List keys are the index into the list and must be in order. For example:
>>> schema_markdown.decode_query_string('a=5&b=3.14&c.d=foo&c.e.0=1&c.e.1=2&c.e.2=3&f.0.g=true&f.1.g=false')
{'a': '5', 'b': '3.14', 'c': {'d': 'foo', 'e': ['1', '2', '3']}, 'f': [{'g': 'true'}, {'g': 'false'}]}
:param str query_string: The query string
:param str encoding: The query string encoding
:returns: The decoded object
:raises ValueError: Query string is invalid
"""
# Build the object
result = [None]
key_values = query_string.split('&')
for ix_key_value, key_value in enumerate(key_values):
# Split the key/value string
try:
key_str, value_str = key_value.split('=', 1)
value = unquote(value_str, encoding=encoding)
except ValueError:
# Ignore hash IDs
if ix_key_value == len(key_values) - 1:
continue
raise ValueError(f"Invalid key/value pair {key_value!r:.100s}")
# Find/create the object on which to set the value
parent = result
key_parent = 0
for key in (unquote(key, encoding=encoding) for key in key_str.split('.')):
obj = parent[key_parent]
# Array key? The first key of an array must be "0".
if isinstance(obj, list) or (obj is None and key == '0'):
# Create this key's container, if necessary
if obj is None:
obj = parent[key_parent] = []
# Parse the key as an integer
try:
key = int(key)
except ValueError:
raise ValueError(f"Invalid array index {key!r:.100s} in key {key_str!r:.100s}") from None
# Append the value placeholder None
if key == len(obj):
obj.append(None)
elif key < 0 or key > len(obj):
raise ValueError(f"Invalid array index {key} in key {key_str!r:.100s}")
# Dictionary key
else:
# Create this key's container, if necessary
if obj is None:
obj = parent[key_parent] = {}
# Create the index for this key
if obj.get(key) is None:
obj[key] = None
# Update the parent object and key
parent = obj
key_parent = key
# Set the value
if parent[key_parent] is not None:
raise ValueError(f"Duplicate key {key_str!r:.100s}")
parent[key_parent] = value
return result[0] if result[0] is not None else {}
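# Illustrative usage (not part of the original module): a minimal round-trip
# sketch. decode_query_string() reverses the structure produced by
# encode_query_string(), but scalar values come back as strings.
from schema_markdown.encode import decode_query_string, encode_query_string

query = encode_query_string({'tag': 'a b', 'ids': [1, 2]})
print(query)                       # ids.0=1&ids.1=2&tag=a%20b
print(decode_query_string(query))  # {'ids': ['1', '2'], 'tag': 'a b'}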
|
schema-markdown
|
/schema_markdown-1.2.6-py3-none-any.whl/schema_markdown/encode.py
|
encode.py
|
from datetime import date, datetime, timezone
from decimal import Decimal
import json
from urllib.parse import quote, unquote
from uuid import UUID
class JSONEncoder(json.JSONEncoder):
"""
A :class:`~json.JSONEncoder` sub-class with support for :class:`~datetime.datetime`, :class:`~datetime.date`,
:class:`~decimal.Decimal`, and :class:`~uuid.UUID` objects.
"""
__slots__ = ()
def default(self, o):
"""
The override of the :meth:`~json.JSONEncoder.default` method to add support for :class:`~datetime.datetime`,
:class:`~datetime.date`, :class:`~decimal.Decimal`, and :class:`~uuid.UUID` objects.
"""
if isinstance(o, datetime):
return (o if o.tzinfo else o.replace(tzinfo=timezone.utc)).isoformat()
if isinstance(o, date):
return o.isoformat()
if isinstance(o, Decimal):
return float(o)
if isinstance(o, UUID):
return f'{o}'
return json.JSONEncoder.default(self, o)
def encode_query_string(obj, encoding='utf-8'):
"""
Encode an object as a query string. Dictionaries, lists, and tuples are recursed. Each member key is expressed in
fully-qualified form. List keys are the index into the list and appear in order. For example:
>>> schema_markdown.encode_query_string({'a': 5, 'b': 3.14, 'c': {'d': 'foo', 'e': [1, 2, 3]}, 'f': [{'g': True}, {'g': False}]})
'a=5&b=3.14&c.d=foo&c.e.0=1&c.e.1=2&c.e.2=3&f.0.g=true&f.1.g=false'
:param object obj: The object to encode as a query string
:param str encoding: The query string encoding
:returns: The encoded query string
"""
return '&'.join(f'{v}' if k is None else f'{k}={v}' for k, v in _encode_query_string_items(obj, None, encoding))
def _encode_query_string_items(obj, parent, encoding):
if isinstance(obj, dict):
if obj:
for member, value in sorted(obj.items()):
member_quoted = quote(f'{member}', encoding=encoding)
parent_member = f'{parent}.{member_quoted}' if parent else member_quoted
yield from _encode_query_string_items(value, parent_member, encoding)
elif parent:
yield parent, ''
elif isinstance(obj, (list, tuple)):
if obj:
for idx, value in enumerate(obj):
parent_member = f'{parent}.{idx}' if parent else f'{idx}'
yield from _encode_query_string_items(value, parent_member, encoding)
elif parent:
yield parent, ''
else:
if isinstance(obj, bool):
yield parent, 'true' if obj else 'false' # quote safe
elif isinstance(obj, int):
yield parent, f'{obj}' # quote safe
elif isinstance(obj, datetime):
if not obj.tzinfo:
obj = obj.replace(tzinfo=timezone.utc)
yield parent, quote(obj.isoformat(), encoding=encoding)
elif isinstance(obj, date):
yield parent, obj.isoformat() # quote safe
elif isinstance(obj, UUID):
yield parent, f'{obj}' # quote safe
elif obj is None:
yield parent, 'null'
else: # str, float
yield parent, quote(f'{obj}', encoding=encoding)
def decode_query_string(query_string, encoding='utf-8'):
"""
Decode an object from a query string. Each member key of the query string is expressed in fully-qualified
form. List keys are the index into the list and must be in order. For example:
>>> schema_markdown.decode_query_string('a=5&b=3.14&c.d=foo&c.e.0=1&c.e.1=2&c.e.2=3&f.0.g=true&f.1.g=false')
{'a': '5', 'b': '3.14', 'c': {'d': 'foo', 'e': ['1', '2', '3']}, 'f': [{'g': 'true'}, {'g': 'false'}]}
:param str query_string: The query string
:param str encoding: The query string encoding
:returns: The decoded object
:raises ValueError: Query string is invalid
"""
# Build the object
result = [None]
key_values = query_string.split('&')
for ix_key_value, key_value in enumerate(key_values):
# Split the key/value string
try:
key_str, value_str = key_value.split('=', 1)
value = unquote(value_str, encoding=encoding)
except ValueError:
# Ignore hash IDs
if ix_key_value == len(key_values) - 1:
continue
raise ValueError(f"Invalid key/value pair {key_value!r:.100s}")
# Find/create the object on which to set the value
parent = result
key_parent = 0
for key in (unquote(key, encoding=encoding) for key in key_str.split('.')):
obj = parent[key_parent]
# Array key? The first key of an array must be "0".
if isinstance(obj, list) or (obj is None and key == '0'):
# Create this key's container, if necessary
if obj is None:
obj = parent[key_parent] = []
# Parse the key as an integer
try:
key = int(key)
except ValueError:
raise ValueError(f"Invalid array index {key!r:.100s} in key {key_str!r:.100s}") from None
# Append the value placeholder None
if key == len(obj):
obj.append(None)
elif key < 0 or key > len(obj):
raise ValueError(f"Invalid array index {key} in key {key_str!r:.100s}")
# Dictionary key
else:
# Create this key's container, if necessary
if obj is None:
obj = parent[key_parent] = {}
# Create the index for this key
if obj.get(key) is None:
obj[key] = None
# Update the parent object and key
parent = obj
key_parent = key
# Set the value
if parent[key_parent] is not None:
raise ValueError(f"Duplicate key {key_str!r:.100s}")
parent[key_parent] = value
return result[0] if result[0] is not None else {}
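# Illustrative usage (not part of the original module): a minimal standalone
# sketch showing JSONEncoder plugged into the standard json module through the
# "cls" argument.
import datetime
import json
import uuid

from schema_markdown.encode import JSONEncoder

example = {'when': datetime.date(2024, 1, 2), 'id': uuid.UUID(int=1)}
print(json.dumps(example, cls=JSONEncoder))
# {"when": "2024-01-02", "id": "00000000-0000-0000-0000-000000000001"}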
| 0.854171 | 0.2901 |
from .parser import parse_schema_markdown
#: The Schema Markdown type model
TYPE_MODEL = parse_schema_markdown('''\
# Map of user type name to user type model
typedef UserType{len > 0} Types
# Union representing a member type
union Type
# A built-in type
BuiltinType builtin
# An array type
Array array
# A dictionary type
Dict dict
# A user type name
string user
# A type or member's attributes
struct Attributes
# If true, the value may be null
optional bool nullable
# The value is equal
optional float eq
# The value is less than
optional float lt
# The value is less than or equal to
optional float lte
# The value is greater than
optional float gt
# The value is greater than or equal to
optional float gte
# The length is equal to
optional int lenEq
# The length is less-than
optional int lenLT
# The length is less than or equal to
optional int lenLTE
# The length is greater than
optional int lenGT
# The length is greater than or equal to
optional int lenGTE
# The built-in type enumeration
enum BuiltinType
# The string type
string
# The integer type
int
# The float type
float
# The boolean type
bool
# A date formatted as an ISO-8601 date string
date
# A date/time formatted as an ISO-8601 date/time string
datetime
# A UUID formatted as string
uuid
# An object of any type
object
# An array type
struct Array
# The contained type
Type type
# The contained type's attributes
optional Attributes attr
# A dictionary type
struct Dict
# The contained value type
Type type
# The contained value type's attributes
optional Attributes attr
# The contained key type
optional Type keyType
# The contained key type's attributes
optional Attributes keyAttr
# A user type
union UserType
# An enumeration type
Enum enum
# A struct type
Struct struct
# A type definition
Typedef typedef
# A JSON web API (not reference-able)
Action action
# User type base struct
struct UserBase
# The user type name
string name
# The documentation markdown text lines
optional string[] doc
# The documentation group name
optional string docGroup
# An enumeration type
struct Enum (UserBase)
# The enum's base enumerations
optional string[len > 0] bases
# The enumeration values
optional EnumValue[len > 0] values
# An enumeration type value
struct EnumValue
# The value string
string name
# The documentation markdown text lines
optional string[] doc
# A struct type
struct Struct (UserBase)
# The struct's base classes
optional string[len > 0] bases
# If true, the struct is a union and exactly one of the optional members is present
optional bool union
# The struct members
optional StructMember[len > 0] members
# A struct member
struct StructMember
# The member name
string name
# The documentation markdown text lines
optional string[] doc
# The member type
Type type
# The member type attributes
optional Attributes attr
# If true, the member is optional and may not be present
optional bool optional
# A typedef type
struct Typedef (UserBase)
# The typedef's type
Type type
# The typedef's type attributes
optional Attributes attr
# A JSON web service API
struct Action (UserBase)
# The action's URLs
optional ActionURL[len > 0] urls
# The path parameters struct type name
optional string path
# The query parameters struct type name
optional string query
# The content body struct type name
optional string input
# The response body struct type name
optional string output
# The custom error response codes enum type name
optional string errors
# An action URL model
struct ActionURL
# The HTTP method. If not provided, matches all HTTP methods.
optional string method
# The URL path. If not provided, uses the default URL path of "/<actionName>".
optional string path
''')
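# Illustrative note (not part of the original module): TYPE_MODEL is itself an
# ordinary "Types" dict, so a standalone script can list the user type names
# defined above and validate the model against itself.
from schema_markdown.schema import validate_type_model
from schema_markdown.type_model import TYPE_MODEL

print(sorted(TYPE_MODEL))
# ['Action', 'ActionURL', 'Array', 'Attributes', 'BuiltinType', 'Dict', 'Enum', 'EnumValue',
#  'Struct', 'StructMember', 'Type', 'Typedef', 'Types', 'UserBase', 'UserType']
validate_type_model(TYPE_MODEL)  # completes without raising ValidationError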
|
schema-markdown
|
/schema_markdown-1.2.6-py3-none-any.whl/schema_markdown/type_model.py
|
type_model.py
|
from .parser import parse_schema_markdown
#: The Schema Markdown type model
TYPE_MODEL = parse_schema_markdown('''\
# Map of user type name to user type model
typedef UserType{len > 0} Types
# Union representing a member type
union Type
# A built-in type
BuiltinType builtin
# An array type
Array array
# A dictionary type
Dict dict
# A user type name
string user
# A type or member's attributes
struct Attributes
# If true, the value may be null
optional bool nullable
# The value is equal
optional float eq
# The value is less than
optional float lt
# The value is less than or equal to
optional float lte
# The value is greater than
optional float gt
# The value is greater than or equal to
optional float gte
# The length is equal to
optional int lenEq
# The length is less-than
optional int lenLT
# The length is less than or equal to
optional int lenLTE
# The length is greater than
optional int lenGT
# The length is greater than or equal to
optional int lenGTE
# The built-in type enumeration
enum BuiltinType
# The string type
string
# The integer type
int
# The float type
float
# The boolean type
bool
# A date formatted as an ISO-8601 date string
date
# A date/time formatted as an ISO-8601 date/time string
datetime
# A UUID formatted as string
uuid
# An object of any type
object
# An array type
struct Array
# The contained type
Type type
# The contained type's attributes
optional Attributes attr
# A dictionary type
struct Dict
# The contained value type
Type type
# The contained value type's attributes
optional Attributes attr
# The contained key type
optional Type keyType
# The contained key type's attributes
optional Attributes keyAttr
# A user type
union UserType
# An enumeration type
Enum enum
# A struct type
Struct struct
# A type definition
Typedef typedef
# A JSON web API (not reference-able)
Action action
# User type base struct
struct UserBase
# The user type name
string name
# The documentation markdown text lines
optional string[] doc
# The documentation group name
optional string docGroup
# An enumeration type
struct Enum (UserBase)
# The enum's base enumerations
optional string[len > 0] bases
# The enumeration values
optional EnumValue[len > 0] values
# An enumeration type value
struct EnumValue
# The value string
string name
# The documentation markdown text lines
optional string[] doc
# A struct type
struct Struct (UserBase)
# The struct's base classes
optional string[len > 0] bases
# If true, the struct is a union and exactly one of the optional members is present
optional bool union
# The struct members
optional StructMember[len > 0] members
# A struct member
struct StructMember
# The member name
string name
# The documentation markdown text lines
optional string[] doc
# The member type
Type type
# The member type attributes
optional Attributes attr
# If true, the member is optional and may not be present
optional bool optional
# A typedef type
struct Typedef (UserBase)
# The typedef's type
Type type
# The typedef's type attributes
optional Attributes attr
# A JSON web service API
struct Action (UserBase)
# The action's URLs
optional ActionURL[len > 0] urls
# The path parameters struct type name
optional string path
# The query parameters struct type name
optional string query
# The content body struct type name
optional string input
# The response body struct type name
optional string output
# The custom error response codes enum type name
optional string errors
# An action URL model
struct ActionURL
# The HTTP method. If not provided, matches all HTTP methods.
optional string method
# The URL path. If not provided, uses the default URL path of "/<actionName>".
optional string path
''')
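# Illustrative note (not part of the original module): because "Struct" above
# inherits from "UserBase", iterating its members with get_struct_members()
# yields the inherited members first. A standalone sketch:
from schema_markdown.schema import get_struct_members
from schema_markdown.type_model import TYPE_MODEL

struct_model = TYPE_MODEL['Struct']['struct']
print([member['name'] for member in get_struct_members(TYPE_MODEL, struct_model)])
# ['name', 'doc', 'docGroup', 'bases', 'union', 'members']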
| 0.797675 | 0.289786 |
from itertools import chain
import re
from .schema_util import validate_type_model_errors
# Built-in types
BUILTIN_TYPES = {'bool', 'date', 'datetime', 'float', 'int', 'object', 'string', 'uuid'}
# Schema Markdown regex
RE_PART_ID = r'(?:[A-Za-z]\w*)'
RE_PART_ATTR_GROUP = \
r'(?:(?P<nullable>nullable)' \
r'|(?P<op><=|<|>|>=|==)\s*(?P<opnum>-?\d+(?:\.\d+)?)' \
r'|(?P<ltype>len)\s*(?P<lop><=|<|>|>=|==)\s*(?P<lopnum>\d+))'
RE_PART_ATTR = re.sub(r'\(\?P<[^>]+>', r'(?:', RE_PART_ATTR_GROUP)
RE_PART_ATTRS = r'(?:' + RE_PART_ATTR + r'(?:\s*,\s*' + RE_PART_ATTR + r')*)'
RE_ATTR_GROUP = re.compile(RE_PART_ATTR_GROUP)
RE_FIND_ATTRS = re.compile(RE_PART_ATTR + r'(?:\s*,\s*|\s*\Z)')
RE_LINE_CONT = re.compile(r'\\\s*$')
RE_COMMENT = re.compile(r'^\s*(?:#-.*|#(?P<doc>.*))?$')
RE_GROUP = re.compile(r'^group(?:\s+"(?P<group>.+?)")?\s*$')
RE_ACTION = re.compile(r'^action\s+(?P<id>' + RE_PART_ID + r')')
RE_PART_BASE_IDS = r'(?:\s*\(\s*(?P<base_ids>' + RE_PART_ID + r'(?:\s*,\s*' + RE_PART_ID + r')*)\s*\)\s*)'
RE_BASE_IDS_SPLIT = re.compile(r'\s*,\s*')
RE_DEFINITION = re.compile(r'^(?P<type>struct|union|enum)\s+(?P<id>' + RE_PART_ID + r')' + RE_PART_BASE_IDS + r'?\s*$')
RE_SECTION = re.compile(r'^\s+(?P<type>path|query|input|output|errors)' + RE_PART_BASE_IDS + r'?\s*$')
RE_SECTION_PLAIN = re.compile(r'^\s+(?P<type>urls)\s*$')
RE_PART_TYPEDEF = \
r'(?P<type>' + RE_PART_ID + r')' \
r'(?:\s*\(\s*(?P<attrs>' + RE_PART_ATTRS + r')\s*\))?' \
r'(?:' \
r'(?:\s*\[\s*(?P<array>' + RE_PART_ATTRS + r'?)\s*\])?' \
r'|' \
r'(?:' \
r'\s*:\s*(?P<dictValueType>' + RE_PART_ID + r')' \
r'(?:\s*\(\s*(?P<dictValueAttrs>' + RE_PART_ATTRS + r')\s*\))?' \
r')?' \
r'(?:\s*\{\s*(?P<dict>' + RE_PART_ATTRS + r'?)\s*\})?' \
r')' \
r'\s+(?P<id>' + RE_PART_ID + r')'
RE_TYPEDEF = re.compile(r'^typedef\s+' + RE_PART_TYPEDEF + r'\s*$')
RE_MEMBER = re.compile(r'^\s+(?P<optional>optional\s+)?' + RE_PART_TYPEDEF + r'\s*$')
RE_VALUE = re.compile(r'^\s+(?P<id>' + RE_PART_ID + r')\s*$')
RE_VALUE_QUOTED = re.compile(r'^\s+"(?P<id>.*?)"\s*$')
RE_URL = re.compile(r'^\s+(?P<method>[A-Za-z]+|\*)(?:\s+(?P<path>/\S*))?')
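# Illustrative example (not part of the original source): RE_MEMBER splits a
# struct member line into its named groups, for instance:
#
#   >>> match = RE_MEMBER.search('    optional int(> 0) count')
#   >>> match.group('optional'), match.group('type'), match.group('attrs'), match.group('id')
#   ('optional ', 'int', '> 0', 'count')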
def parse_schema_markdown(text, types=None, filename='', validate=True):
"""
Parse Schema Markdown from a string or an iterator of strings
:param text: The Schema Markdown text
:type text: str or ~collections.abc.Iterable(str)
:param object types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param str filename: The name of file being parsed (for error messages)
:param bool validate: If True, validate after parsing
:returns: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:raises SchemaMarkdownParserError: A parsing error occurred
"""
# Current parser state
if types is None:
types = {}
error_map = {}
filepos = {}
action = None
urls = None
user_type = None
doc = []
doc_group = None
linenum = 0
# Helper function to add an error message
def add_error(msg, error_filename, error_linenum):
error_msg = f'{error_filename}:{error_linenum}: error: {msg}'
error_map[error_msg] = (error_filename, error_linenum, error_msg)
# Helper function to get documentation strings
def get_doc():
nonlocal doc
result = None
if doc:
result = doc
doc = []
return result
# Line-split all script text
if isinstance(text, str):
lines = text.splitlines()
else:
lines = list(chain.from_iterable(text_part.splitlines() for text_part in text))
lines.append('')
# Process each line
line_continuation = []
for line_part in lines:
linenum += 1
# Line continuation?
line_part_no_continuation = RE_LINE_CONT.sub('', line_part)
if line_continuation or line_part_no_continuation is not line_part:
line_continuation.append(line_part_no_continuation)
if line_part_no_continuation is not line_part:
continue
if line_continuation:
line = ''.join(line_continuation)
del line_continuation[:]
else:
line = line_part
# Match syntax
match_name, match = 'comment', RE_COMMENT.search(line)
if match is None:
match_name, match = 'group', RE_GROUP.search(line)
if match is None:
match_name, match = 'action', RE_ACTION.search(line)
if match is None:
match_name, match = 'definition', RE_DEFINITION.search(line)
if match is None and action is not None:
match_name, match = 'section', RE_SECTION.search(line)
if match is None and action is not None:
match_name, match = 'section_plain', RE_SECTION_PLAIN.search(line)
if match is None and user_type is not None and 'enum' in user_type:
match_value = RE_VALUE.search(line)
if match_value is not None:
match_name, match = 'value', match_value
else:
match_name, match = 'value', RE_VALUE_QUOTED.search(line)
if match is None and user_type is not None and 'struct' in user_type:
match_name, match = 'member', RE_MEMBER.search(line)
if match is None and urls is not None:
match_name, match = 'urls', RE_URL.search(line)
if match is None:
match_name, match = 'typedef', RE_TYPEDEF.search(line)
if match is None:
match_name = None
# Comment?
if match_name == 'comment':
doc_string = match.group('doc')
if doc_string is not None:
doc.append(doc_string if not doc_string.startswith(' ') else doc_string[1:])
# Documentation group?
elif match_name == 'group':
doc_group = match.group('group')
if doc_group is not None:
doc_group = doc_group.strip()
else:
doc_group = None
# Action?
elif match_name == 'action':
action_id = match.group('id')
# Action already defined?
if action_id in types:
add_error(f"Redefinition of action '{action_id}'", filename, linenum)
# Clear parser state
urls = None
user_type = None
action_doc = get_doc()
# Create the new action
action = {'name': action_id}
types[action_id] = {'action': action}
if action_doc is not None:
action['doc'] = action_doc
if doc_group is not None:
action['docGroup'] = doc_group
# Definition?
elif match_name == 'definition':
definition_string = match.group('type')
definition_id = match.group('id')
definition_base_ids = match.group('base_ids')
# Type already defined?
if definition_id in BUILTIN_TYPES or definition_id in types:
add_error(f"Redefinition of type '{definition_id}'", filename, linenum)
# Clear parser state
action = None
urls = None
definition_doc = get_doc()
# Struct definition
if definition_string in ('struct', 'union'):
# Create the new struct type
struct = {'name': definition_id}
user_type = types[definition_id] = {'struct': struct}
if definition_doc is not None:
struct['doc'] = definition_doc
if doc_group is not None:
struct['docGroup'] = doc_group
if definition_string == 'union':
struct['union'] = True
if definition_base_ids is not None:
struct['bases'] = RE_BASE_IDS_SPLIT.split(definition_base_ids)
# Enum definition
else: # definition_string == 'enum':
# Create the new enum type
enum = {'name': definition_id}
user_type = types[definition_id] = {'enum': enum}
if definition_doc is not None:
enum['doc'] = definition_doc
if doc_group is not None:
enum['docGroup'] = doc_group
if definition_base_ids is not None:
enum['bases'] = RE_BASE_IDS_SPLIT.split(definition_base_ids)
# Record finalization information
filepos[definition_id] = linenum
# Action section?
elif match_name == 'section':
section_string = match.group('type')
section_base_ids = match.group('base_ids')
# Action section redefinition?
if section_string in action:
add_error(f'Redefinition of action {section_string}', filename, linenum)
# Clear parser state
urls = None
# Set the action section type
section_type_name = f'{action["name"]}_{section_string}'
action[section_string] = section_type_name
if section_string == 'errors':
enum = {'name': section_type_name}
user_type = types[section_type_name] = {'enum': enum}
if section_base_ids is not None:
enum['bases'] = RE_BASE_IDS_SPLIT.split(section_base_ids)
else:
struct = {'name': section_type_name}
user_type = types[section_type_name] = {'struct': struct}
if section_base_ids is not None:
struct['bases'] = RE_BASE_IDS_SPLIT.split(section_base_ids)
# Record finalization information
filepos[section_type_name] = linenum
# Plain action section?
elif match_name == 'section_plain':
section_string = match.group('type')
# Action section redefinition?
if section_string in action:
add_error(f'Redefinition of action {section_string}', filename, linenum)
# Clear parser state
user_type = None
# Update the parser state
urls = []
# Enum value?
elif match_name == 'value':
value_string = match.group('id')
# Add the enum value
enum = user_type['enum']
if 'values' not in enum:
enum['values'] = []
enum_value = {'name': value_string}
enum['values'].append(enum_value)
enum_value_doc = get_doc()
if enum_value_doc is not None:
enum_value['doc'] = enum_value_doc
# Record finalization information
filepos[f'{enum["name"]}.{value_string}'] = linenum
# Struct member?
elif match_name == 'member':
optional = match.group('optional') is not None
member_name = match.group('id')
# Add the member
struct = user_type['struct']
if 'members' not in struct:
struct['members'] = []
member_type, member_attr = _parse_typedef(match)
member_doc = get_doc()
member = {
'name': member_name,
'type': member_type
}
struct['members'].append(member)
if member_attr is not None:
member['attr'] = member_attr
if member_doc is not None:
member['doc'] = member_doc
if optional:
member['optional'] = True
# Record finalization information
filepos[f'{struct["name"]}.{member_name}'] = linenum
# URL?
elif match_name == 'urls':
method = match.group('method')
path = match.group('path')
# Create the action URL object
action_url = {}
if method != '*':
action_url['method'] = method
if path is not None:
action_url['path'] = path
# Duplicate URL?
if action_url in urls:
padded_path = "" if path is None else f' {path}'
add_error(f'Duplicate URL: {method}{padded_path}', filename, linenum)
# Add the URL
if 'urls' not in action:
action['urls'] = urls
urls.append(action_url)
# Typedef?
elif match_name == 'typedef':
definition_id = match.group('id')
# Type already defined?
if definition_id in BUILTIN_TYPES or definition_id in types:
add_error(f"Redefinition of type '{definition_id}'", filename, linenum)
# Clear parser state
action = None
urls = None
user_type = None
typedef_doc = get_doc()
# Create the typedef
typedef_type, typedef_attr = _parse_typedef(match)
typedef = {
'name': definition_id,
'type': typedef_type
}
types[definition_id] = {'typedef': typedef}
if typedef_attr is not None:
typedef['attr'] = typedef_attr
if typedef_doc is not None:
typedef['doc'] = typedef_doc
if doc_group is not None:
typedef['docGroup'] = doc_group
# Record finalization information
filepos[definition_id] = linenum
# Unrecognized line syntax
else:
add_error('Syntax error', filename, linenum)
# Validate the type model, if requested
if validate:
for type_name, member_name, error_msg in validate_type_model_errors(types):
error_filename = filename
error_linenum = None
if member_name is not None:
error_linenum = filepos.get(f'{type_name}.{member_name}')
if error_linenum is None:
error_linenum = filepos.get(type_name)
if error_linenum is None:
error_filename = ''
error_linenum = 1
add_error(error_msg, error_filename, error_linenum)
# Raise a parser exception if there are any errors
errors = [msg for _, _, msg in sorted(error_map.values())]
if errors:
raise SchemaMarkdownParserError(errors)
return types
# Helper function to parse a typedef - returns a type-model and attributes-model tuple
def _parse_typedef(match_typedef):
array_attrs_string = match_typedef.group('array')
dict_attrs_string = match_typedef.group('dict')
# Array type?
if array_attrs_string is not None:
value_type_name = match_typedef.group('type')
value_attr = _parse_attr(match_typedef.group('attrs'))
array_type = {'type': _create_type(value_type_name)}
if value_attr is not None:
array_type['attr'] = value_attr
return {'array': array_type}, _parse_attr(array_attrs_string)
# Dictionary type?
if dict_attrs_string is not None:
value_type_name = match_typedef.group('dictValueType')
if value_type_name is not None:
value_attr = _parse_attr(match_typedef.group('dictValueAttrs'))
key_type_name = match_typedef.group('type')
key_attr = _parse_attr(match_typedef.group('attrs'))
dict_type = {
'type': _create_type(value_type_name),
'keyType': _create_type(key_type_name)
}
if value_attr is not None:
dict_type['attr'] = value_attr
if key_attr is not None:
dict_type['keyAttr'] = key_attr
else:
value_type_name = match_typedef.group('type')
value_attr = _parse_attr(match_typedef.group('attrs'))
dict_type = {'type': _create_type(value_type_name)}
if value_attr is not None:
dict_type['attr'] = value_attr
return {'dict': dict_type}, _parse_attr(dict_attrs_string)
# Non-container type...
member_type_name = match_typedef.group('type')
return _create_type(member_type_name), _parse_attr(match_typedef.group('attrs'))
# Helper function to create a type model
def _create_type(type_name):
if type_name in BUILTIN_TYPES:
return {'builtin': type_name}
return {'user': type_name}
# Helper function to parse an attributes string - returns an attributes model
def _parse_attr(attrs_string):
attrs = None
if attrs_string is not None:
for attr_string in RE_FIND_ATTRS.findall(attrs_string):
if attrs is None:
attrs = {}
match_attr = RE_ATTR_GROUP.match(attr_string)
attr_op = match_attr.group('op')
attr_length_op = match_attr.group('lop') if attr_op is None else None
if match_attr.group('nullable') is not None:
attrs['nullable'] = True
elif attr_op is not None:
attr_value = float(match_attr.group('opnum'))
if attr_op == '<':
attrs['lt'] = attr_value
elif attr_op == '<=':
attrs['lte'] = attr_value
elif attr_op == '>':
attrs['gt'] = attr_value
elif attr_op == '>=':
attrs['gte'] = attr_value
else: # ==
attrs['eq'] = attr_value
else: # attr_length_op is not None:
attr_value = int(match_attr.group('lopnum'))
if attr_length_op == '<':
attrs['lenLT'] = attr_value
elif attr_length_op == '<=':
attrs['lenLTE'] = attr_value
elif attr_length_op == '>':
attrs['lenGT'] = attr_value
elif attr_length_op == '>=':
attrs['lenGTE'] = attr_value
else: # ==
attrs['lenEq'] = attr_value
return attrs
class SchemaMarkdownParserError(Exception):
"""
Schema Markdown parser exception
:param list(str) errors: The list of error strings
"""
__slots__ = ('errors',)
def __init__(self, errors):
super().__init__('\n'.join(errors))
#: The list of error strings
self.errors = errors
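# Illustrative usage (not part of the original module): a standalone sketch
# that parses a small Schema Markdown snippet and validates a value against
# it. The "Point" type is hypothetical.
from schema_markdown.parser import parse_schema_markdown
from schema_markdown.schema import validate_type

example_types = parse_schema_markdown('''\
# A 2D point
struct Point
    float x
    float y
    optional string label
''')
print(validate_type(example_types, 'Point', {'x': '1.5', 'y': 2}))
# Strings and ints are converted to float: {'x': 1.5, 'y': 2.0}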
|
schema-markdown
|
/schema_markdown-1.2.6-py3-none-any.whl/schema_markdown/parser.py
|
parser.py
|
from itertools import chain
import re
from .schema_util import validate_type_model_errors
# Built-in types
BUILTIN_TYPES = {'bool', 'date', 'datetime', 'float', 'int', 'object', 'string', 'uuid'}
# Schema Markdown regex
RE_PART_ID = r'(?:[A-Za-z]\w*)'
RE_PART_ATTR_GROUP = \
r'(?:(?P<nullable>nullable)' \
r'|(?P<op><=|<|>|>=|==)\s*(?P<opnum>-?\d+(?:\.\d+)?)' \
r'|(?P<ltype>len)\s*(?P<lop><=|<|>|>=|==)\s*(?P<lopnum>\d+))'
RE_PART_ATTR = re.sub(r'\(\?P<[^>]+>', r'(?:', RE_PART_ATTR_GROUP)
RE_PART_ATTRS = r'(?:' + RE_PART_ATTR + r'(?:\s*,\s*' + RE_PART_ATTR + r')*)'
RE_ATTR_GROUP = re.compile(RE_PART_ATTR_GROUP)
RE_FIND_ATTRS = re.compile(RE_PART_ATTR + r'(?:\s*,\s*|\s*\Z)')
RE_LINE_CONT = re.compile(r'\\\s*$')
RE_COMMENT = re.compile(r'^\s*(?:#-.*|#(?P<doc>.*))?$')
RE_GROUP = re.compile(r'^group(?:\s+"(?P<group>.+?)")?\s*$')
RE_ACTION = re.compile(r'^action\s+(?P<id>' + RE_PART_ID + r')')
RE_PART_BASE_IDS = r'(?:\s*\(\s*(?P<base_ids>' + RE_PART_ID + r'(?:\s*,\s*' + RE_PART_ID + r')*)\s*\)\s*)'
RE_BASE_IDS_SPLIT = re.compile(r'\s*,\s*')
RE_DEFINITION = re.compile(r'^(?P<type>struct|union|enum)\s+(?P<id>' + RE_PART_ID + r')' + RE_PART_BASE_IDS + r'?\s*$')
RE_SECTION = re.compile(r'^\s+(?P<type>path|query|input|output|errors)' + RE_PART_BASE_IDS + r'?\s*$')
RE_SECTION_PLAIN = re.compile(r'^\s+(?P<type>urls)\s*$')
RE_PART_TYPEDEF = \
r'(?P<type>' + RE_PART_ID + r')' \
r'(?:\s*\(\s*(?P<attrs>' + RE_PART_ATTRS + r')\s*\))?' \
r'(?:' \
r'(?:\s*\[\s*(?P<array>' + RE_PART_ATTRS + r'?)\s*\])?' \
r'|' \
r'(?:' \
r'\s*:\s*(?P<dictValueType>' + RE_PART_ID + r')' \
r'(?:\s*\(\s*(?P<dictValueAttrs>' + RE_PART_ATTRS + r')\s*\))?' \
r')?' \
r'(?:\s*\{\s*(?P<dict>' + RE_PART_ATTRS + r'?)\s*\})?' \
r')' \
r'\s+(?P<id>' + RE_PART_ID + r')'
RE_TYPEDEF = re.compile(r'^typedef\s+' + RE_PART_TYPEDEF + r'\s*$')
RE_MEMBER = re.compile(r'^\s+(?P<optional>optional\s+)?' + RE_PART_TYPEDEF + r'\s*$')
RE_VALUE = re.compile(r'^\s+(?P<id>' + RE_PART_ID + r')\s*$')
RE_VALUE_QUOTED = re.compile(r'^\s+"(?P<id>.*?)"\s*$')
RE_URL = re.compile(r'^\s+(?P<method>[A-Za-z]+|\*)(?:\s+(?P<path>/\S*))?')
def parse_schema_markdown(text, types=None, filename='', validate=True):
"""
Parse Schema Markdown from a string or an iterator of strings
:param text: The Schema Markdown text
:type text: str or ~collections.abc.Iterable(str)
:param object types: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:param str filename: The name of file being parsed (for error messages)
:param bool validate: If True, validate after parsing
:returns: The `type model <https://craigahobbs.github.io/schema-markdown-doc/doc/#var.vName='Types'>`__
:raises SchemaMarkdownParserError: A parsing error occurred
"""
# Current parser state
if types is None:
types = {}
error_map = {}
filepos = {}
action = None
urls = None
user_type = None
doc = []
doc_group = None
linenum = 0
# Helper function to add an error message
def add_error(msg, error_filename, error_linenum):
error_msg = f'{error_filename}:{error_linenum}: error: {msg}'
error_map[error_msg] = (error_filename, error_linenum, error_msg)
# Helper function to get documentation strings
def get_doc():
nonlocal doc
result = None
if doc:
result = doc
doc = []
return result
# Line-split all script text
if isinstance(text, str):
lines = text.splitlines()
else:
lines = list(chain.from_iterable(text_part.splitlines() for text_part in text))
lines.append('')
# Process each line
line_continuation = []
for line_part in lines:
linenum += 1
# Line continuation?
line_part_no_continuation = RE_LINE_CONT.sub('', line_part)
if line_continuation or line_part_no_continuation is not line_part:
line_continuation.append(line_part_no_continuation)
if line_part_no_continuation is not line_part:
continue
if line_continuation:
line = ''.join(line_continuation)
del line_continuation[:]
else:
line = line_part
# Match syntax
match_name, match = 'comment', RE_COMMENT.search(line)
if match is None:
match_name, match = 'group', RE_GROUP.search(line)
if match is None:
match_name, match = 'action', RE_ACTION.search(line)
if match is None:
match_name, match = 'definition', RE_DEFINITION.search(line)
if match is None and action is not None:
match_name, match = 'section', RE_SECTION.search(line)
if match is None and action is not None:
match_name, match = 'section_plain', RE_SECTION_PLAIN.search(line)
if match is None and user_type is not None and 'enum' in user_type:
match_value = RE_VALUE.search(line)
if match_value is not None:
match_name, match = 'value', match_value
else:
match_name, match = 'value', RE_VALUE_QUOTED.search(line)
if match is None and user_type is not None and 'struct' in user_type:
match_name, match = 'member', RE_MEMBER.search(line)
if match is None and urls is not None:
match_name, match = 'urls', RE_URL.search(line)
if match is None:
match_name, match = 'typedef', RE_TYPEDEF.search(line)
if match is None:
match_name = None
# Comment?
if match_name == 'comment':
doc_string = match.group('doc')
if doc_string is not None:
doc.append(doc_string if not doc_string.startswith(' ') else doc_string[1:])
# Documentation group?
elif match_name == 'group':
doc_group = match.group('group')
if doc_group is not None:
doc_group = doc_group.strip()
else:
doc_group = None
# Action?
elif match_name == 'action':
action_id = match.group('id')
# Action already defined?
if action_id in types:
add_error(f"Redefinition of action '{action_id}'", filename, linenum)
# Clear parser state
urls = None
user_type = None
action_doc = get_doc()
# Create the new action
action = {'name': action_id}
types[action_id] = {'action': action}
if action_doc is not None:
action['doc'] = action_doc
if doc_group is not None:
action['docGroup'] = doc_group
# Definition?
elif match_name == 'definition':
definition_string = match.group('type')
definition_id = match.group('id')
definition_base_ids = match.group('base_ids')
# Type already defined?
if definition_id in BUILTIN_TYPES or definition_id in types:
add_error(f"Redefinition of type '{definition_id}'", filename, linenum)
# Clear parser state
action = None
urls = None
definition_doc = get_doc()
# Struct definition
if definition_string in ('struct', 'union'):
# Create the new struct type
struct = {'name': definition_id}
user_type = types[definition_id] = {'struct': struct}
if definition_doc is not None:
struct['doc'] = definition_doc
if doc_group is not None:
struct['docGroup'] = doc_group
if definition_string == 'union':
struct['union'] = True
if definition_base_ids is not None:
struct['bases'] = RE_BASE_IDS_SPLIT.split(definition_base_ids)
# Enum definition
else: # definition_string == 'enum':
# Create the new enum type
enum = {'name': definition_id}
user_type = types[definition_id] = {'enum': enum}
if definition_doc is not None:
enum['doc'] = definition_doc
if doc_group is not None:
enum['docGroup'] = doc_group
if definition_base_ids is not None:
enum['bases'] = RE_BASE_IDS_SPLIT.split(definition_base_ids)
# Record finalization information
filepos[definition_id] = linenum
# Action section?
elif match_name == 'section':
section_string = match.group('type')
section_base_ids = match.group('base_ids')
# Action section redefinition?
if section_string in action:
add_error(f'Redefinition of action {section_string}', filename, linenum)
# Clear parser state
urls = None
# Set the action section type
section_type_name = f'{action["name"]}_{section_string}'
action[section_string] = section_type_name
if section_string == 'errors':
enum = {'name': section_type_name}
user_type = types[section_type_name] = {'enum': enum}
if section_base_ids is not None:
enum['bases'] = RE_BASE_IDS_SPLIT.split(section_base_ids)
else:
struct = {'name': section_type_name}
user_type = types[section_type_name] = {'struct': struct}
if section_base_ids is not None:
struct['bases'] = RE_BASE_IDS_SPLIT.split(section_base_ids)
# Record finalization information
filepos[section_type_name] = linenum
# Plain action section?
elif match_name == 'section_plain':
section_string = match.group('type')
# Action section redefinition?
if section_string in action:
add_error(f'Redefinition of action {section_string}', filename, linenum)
# Clear parser state
user_type = None
# Update the parser state
urls = []
# Enum value?
elif match_name == 'value':
value_string = match.group('id')
# Add the enum value
enum = user_type['enum']
if 'values' not in enum:
enum['values'] = []
enum_value = {'name': value_string}
enum['values'].append(enum_value)
enum_value_doc = get_doc()
if enum_value_doc is not None:
enum_value['doc'] = enum_value_doc
# Record finalization information
filepos[f'{enum["name"]}.{value_string}'] = linenum
# Struct member?
elif match_name == 'member':
optional = match.group('optional') is not None
member_name = match.group('id')
# Add the member
struct = user_type['struct']
if 'members' not in struct:
struct['members'] = []
member_type, member_attr = _parse_typedef(match)
member_doc = get_doc()
member = {
'name': member_name,
'type': member_type
}
struct['members'].append(member)
if member_attr is not None:
member['attr'] = member_attr
if member_doc is not None:
member['doc'] = member_doc
if optional:
member['optional'] = True
# Record finalization information
filepos[f'{struct["name"]}.{member_name}'] = linenum
# URL?
elif match_name == 'urls':
method = match.group('method')
path = match.group('path')
# Create the action URL object
action_url = {}
if method != '*':
action_url['method'] = method
if path is not None:
action_url['path'] = path
# Duplicate URL?
if action_url in urls:
padded_path = "" if path is None else f' {path}'
add_error(f'Duplicate URL: {method}{padded_path}', filename, linenum)
# Add the URL
if 'urls' not in action:
action['urls'] = urls
urls.append(action_url)
# Typedef?
elif match_name == 'typedef':
definition_id = match.group('id')
# Type already defined?
if definition_id in BUILTIN_TYPES or definition_id in types:
add_error(f"Redefinition of type '{definition_id}'", filename, linenum)
# Clear parser state
action = None
urls = None
user_type = None
typedef_doc = get_doc()
# Create the typedef
typedef_type, typedef_attr = _parse_typedef(match)
typedef = {
'name': definition_id,
'type': typedef_type
}
types[definition_id] = {'typedef': typedef}
if typedef_attr is not None:
typedef['attr'] = typedef_attr
if typedef_doc is not None:
typedef['doc'] = typedef_doc
if doc_group is not None:
typedef['docGroup'] = doc_group
# Record finalization information
filepos[definition_id] = linenum
# Unrecognized line syntax
else:
add_error('Syntax error', filename, linenum)
# Validate the type model, if requested
if validate:
for type_name, member_name, error_msg in validate_type_model_errors(types):
error_filename = filename
error_linenum = None
if member_name is not None:
error_linenum = filepos.get(f'{type_name}.{member_name}')
if error_linenum is None:
error_linenum = filepos.get(type_name)
if error_linenum is None:
error_filename = ''
error_linenum = 1
add_error(error_msg, error_filename, error_linenum)
# Raise a parser exception if there are any errors
errors = [msg for _, _, msg in sorted(error_map.values())]
if errors:
raise SchemaMarkdownParserError(errors)
return types
# Helper function to parse a typedef - returns a type-model and attributes-model tuple
def _parse_typedef(match_typedef):
array_attrs_string = match_typedef.group('array')
dict_attrs_string = match_typedef.group('dict')
# Array type?
if array_attrs_string is not None:
value_type_name = match_typedef.group('type')
value_attr = _parse_attr(match_typedef.group('attrs'))
array_type = {'type': _create_type(value_type_name)}
if value_attr is not None:
array_type['attr'] = value_attr
return {'array': array_type}, _parse_attr(array_attrs_string)
# Dictionary type?
if dict_attrs_string is not None:
value_type_name = match_typedef.group('dictValueType')
if value_type_name is not None:
value_attr = _parse_attr(match_typedef.group('dictValueAttrs'))
key_type_name = match_typedef.group('type')
key_attr = _parse_attr(match_typedef.group('attrs'))
dict_type = {
'type': _create_type(value_type_name),
'keyType': _create_type(key_type_name)
}
if value_attr is not None:
dict_type['attr'] = value_attr
if key_attr is not None:
dict_type['keyAttr'] = key_attr
else:
value_type_name = match_typedef.group('type')
value_attr = _parse_attr(match_typedef.group('attrs'))
dict_type = {'type': _create_type(value_type_name)}
if value_attr is not None:
dict_type['attr'] = value_attr
return {'dict': dict_type}, _parse_attr(dict_attrs_string)
# Non-container type...
member_type_name = match_typedef.group('type')
return _create_type(member_type_name), _parse_attr(match_typedef.group('attrs'))
# Helper function to create a type model
def _create_type(type_name):
if type_name in BUILTIN_TYPES:
return {'builtin': type_name}
return {'user': type_name}
# Helper function to parse an attributes string - returns an attributes model
def _parse_attr(attrs_string):
attrs = None
if attrs_string is not None:
for attr_string in RE_FIND_ATTRS.findall(attrs_string):
if attrs is None:
attrs = {}
match_attr = RE_ATTR_GROUP.match(attr_string)
attr_op = match_attr.group('op')
attr_length_op = match_attr.group('lop') if attr_op is None else None
if match_attr.group('nullable') is not None:
attrs['nullable'] = True
elif attr_op is not None:
attr_value = float(match_attr.group('opnum'))
if attr_op == '<':
attrs['lt'] = attr_value
elif attr_op == '<=':
attrs['lte'] = attr_value
elif attr_op == '>':
attrs['gt'] = attr_value
elif attr_op == '>=':
attrs['gte'] = attr_value
else: # ==
attrs['eq'] = attr_value
else: # attr_length_op is not None:
attr_value = int(match_attr.group('lopnum'))
if attr_length_op == '<':
attrs['lenLT'] = attr_value
elif attr_length_op == '<=':
attrs['lenLTE'] = attr_value
elif attr_length_op == '>':
attrs['lenGT'] = attr_value
elif attr_length_op == '>=':
attrs['lenGTE'] = attr_value
else: # ==
attrs['lenEq'] = attr_value
return attrs
class SchemaMarkdownParserError(Exception):
"""
Schema Markdown parser exception
:param list(str) errors: The list of error strings
"""
__slots__ = ('errors',)
def __init__(self, errors):
super().__init__('\n'.join(errors))
#: The list of error strings
self.errors = errors
[](https://pypi.org/project/schema-matching/)
# Python Schema Matching by XGboost and Sentence-Transformers
A Python tool that uses XGBoost and sentence-transformers to perform schema matching on tables. It supports multi-language column name and instance matching, can be used without column names, and accepts both CSV and JSON input.
## What is schema matching?

Schema matching is the problem of finding potential associations between elements (most often attributes or relations) of two schemas.
[source](https://link.springer.com/referenceworkentry/10.1007/978-3-319-77525-8_20)
## Dependencies
- numpy==1.19.5
- pandas==1.1.5
- nltk==3.6.5
- python-dateutil==2.8.2
- sentence-transformers==2.1.0
- xgboost==1.5.2
- strsimpy==0.2.1
## Package usage
### Install
```
pip install schema-matching
```
### Conduct schema matching
```
from schema_matching import schema_matching
df_pred,df_pred_labels,predicted_pairs = schema_matching("Test Data/QA/Table1.json","Test Data/QA/Table2.json")
print(df_pred)
print(df_pred_labels)
for pair_tuple in predicted_pairs:
print(pair_tuple)
```
#### Return:
- df_pred: Matrix of predicted scores (pd.DataFrame).
- df_pred_labels: Matrix of predicted 0/1 labels (pd.DataFrame).
- predicted_pairs: Column pairs predicted as matches (label == 1), as (column1, column2, score) tuples.
#### Parameters:
- table1_pth: Path to your first **csv, json or jsonl file**.
- table2_pth: Path to your second **csv, json or jsonl file**.
- threshold: Matching threshold. A value around 0.9 is suggested for easy matching (column names are very similar). If omitted, the threshold learned from the training data (roughly 0.15-0.2) is used, which is intended for difficult matching (column names masked or very different).
- strategy: Matching strategy; one of "one-to-one", "one-to-many" or "many-to-many". "one-to-one" matches each column to at most one column; "one-to-many" matches each column in Table1 to at most one column in Table2; "many-to-many" imposes no restriction. Default is "many-to-many". See the example below.
- model_pth: Path to a trained model folder, which must contain at least one pair of ".model" and ".threshold" files. If omitted, the model bundled with the package is used.
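For example, a call that overrides these parameters could look like the following sketch (the paths point at the sample tables in Test Data; replace them with your own files):
```
from schema_matching import schema_matching

df_pred, df_pred_labels, predicted_pairs = schema_matching(
    "Test Data/QA/Table1.json",   # first table
    "Test Data/QA/Table2.json",   # second table
    threshold=0.9,                # easy matching: column names are very similar
    strategy="one-to-one",        # each column matched to at most one column
)
for column1, column2, score in predicted_pairs:
    print(column1, column2, score)
```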
## Raw code usage: Training
### Data
See the data format in the Training Data and Test Data folders. For training, put mapping.txt, Table1.csv and Table2.csv in a new folder under Training Data; for Test Data, mapping.txt is not needed. A sketch of the expected layout is shown below.
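For illustration, the expected layout looks roughly like this ("my_dataset" is a placeholder folder name; "self" is one of the shipped test folders):
```
Training Data/
    my_dataset/
        Table1.csv
        Table2.csv
        mapping.txt      # matched column pairs, one pair per line
Test Data/
    self/
        Table1.csv
        Table2.csv
```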
### 1.Construct features
```
python relation_features.py
```
### 2.Train xgboost models
```
python train.py
```
### 3.Calculate similarity matrix (inference)
```
Example:
python cal_column_similarity.py -p Test\ Data/self -m /model/2022-04-12-12-06-32 -s one-to-one
python cal_column_similarity.py -p Test\ Data/authors -m /model/2022-04-12-12-06-32-11 -t 0.9
```
Parameters:
- -p: Path to the test data folder; must contain **"Table1.csv" and "Table2.csv"** or **"Table1.json" and "Table2.json"**.
- -m: Path to a trained model folder, which must contain at least one pair of ".model" and ".threshold" files.
- -t: Matching threshold. A value around 0.9 is suggested for easy matching (column names are very similar); if omitted, the threshold learned from the training data (roughly 0.15-0.2) is used, which is intended for difficult matching (column names masked or very different).
- -s: Matching strategy; one of "one-to-one", "one-to-many" or "many-to-many", with the same semantics as described above. Default is "many-to-many".
Output:
- similarity_matrix_label.csv: 0/1 labels for each column pair.
- similarity_matrix_value.csv: Average of the raw scores computed by all the xgboost models.
## Feature Engineering
Features: "is_url","is_numeric","is_date","is_string","numeric:mean", "numeric:min", "numeric:max", "numeric:variance","numeric:cv", "numeric:unique/len(data_list)", "length:mean", "length:min", "length:max", "length:variance","length:cv", "length:unique/len(data_list)", "whitespace_ratios:mean","punctuation_ratios:mean","special_character_ratios:mean","numeric_ratios:mean", "whitespace_ratios:cv","punctuation_ratios:cv","special_character_ratios:cv","numeric_ratios:cv", "colname:bleu_score", "colname:edit_distance","colname:lcs","colname:tsm_cosine", "colname:one_in_one", "instance_similarity:cosine"
- tsm_cosine: Cosine similarity of column names computed by sentence-transformers with "paraphrase-multilingual-mpnet-base-v2"; this is what enables multi-language column name matching (see the sketch below).
- instance_similarity:cosine: Sample up to 20 instances from each string column, compute their mean embedding with sentence-transformers, and take the cosine similarity of the two mean embeddings for each column pair.
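As a rough illustration (not the package's exact code path), the colname:tsm_cosine feature can be reproduced with sentence-transformers directly:
```
from sentence_transformers import SentenceTransformer, util

# Model name taken from the feature description above.
model = SentenceTransformer("paraphrase-multilingual-mpnet-base-v2")

emb1 = model.encode("release date")     # column name from Table1 (example values)
emb2 = model.encode("发布日期")           # column name from Table2
print(float(util.cos_sim(emb1, emb2)))  # cosine similarity used as colname:tsm_cosine
```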
## Performance
### Cross-validation on Training Data (each table pair used as test data in turn)
- Average Precision: 0.755
- Average Recall: 0.829
- Average F1: 0.766
Average Confusion Matrix:
| | Negative(Truth) | Positive(Truth) |
|----------------|-----------------|-----------------|
| Negative(pred) | 0.94343111 | 0.05656889 |
| Positive(pred) | 0.17135417 | 0.82864583 |
### Inference on Test Data (with deliberately confusing column names)
Data: https://github.com/fireindark707/Schema_Matching_XGboost/tree/main/Test%20Data/self
| | title | text | summary | keywords | url | country | language | domain | name | timestamp |
|---------|------------|------------|------------|------------|------------|------------|------------|------------|-------|------------|
| col1 | 1(FN) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| col2 | 0 | 1(TP) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| col3 | 0 | 0 | 1(TP) | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| words | 0 | 0 | 0 | 1(TP) | 0 | 0 | 0 | 0 | 0 | 0 |
| link | 0 | 0 | 0 | 0 | 1(TP) | 0 | 0 | 0 | 0 | 0 |
| col6 | 0 | 0 | 0 | 0 | 0 | 1(TP) | 0 | 0 | 0 | 0 |
| lang | 0 | 0 | 0 | 0 | 0 | 0 | 1(TP) | 0 | 0 | 0 |
| col8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1(TP) | 0 | 0 |
| website | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0(FN) | 0 |
| col10 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1(TP) |
**F1 score: 0.889**
## Cite
```
@software{fireinfark707_Schema_Matching_by_2022,
author = {fireinfark707},
license = {MIT},
month = {4},
title = {{Schema Matching by XGboost}},
url = {https://github.com/fireindark707/Schema_Matching_XGboost},
year = {2022}
}
```
|
schema-matching
|
/schema_matching-1.0.4.tar.gz/schema_matching-1.0.4/README.md
|
README.md
|
import pandas as pd
import numpy as np
import os
import xgboost as xgb
import datetime
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score, precision_score, recall_score
import warnings
warnings.filterwarnings("ignore")
feature_names = ["is_url","is_numeric","is_date","is_string","numeric:mean", "numeric:min", "numeric:max", "numeric:variance","numeric:cv", "numeric:unique/len(data_list)",
"length:mean", "length:min", "length:max", "length:variance","length:cv", "length:unique/len(data_list)",
"whitespace_ratios:mean","punctuation_ratios:mean","special_character_ratios:mean","numeric_ratios:mean",
"whitespace_ratios:cv","punctuation_ratios:cv","special_character_ratios:cv","numeric_ratios:cv",
"colname:bleu_score", "colname:edit_distance","colname:lcs","colname:tsm_cosine", "colname:one_in_one","instance_similarity:cosine",
]
params = {
'max_depth': 4,
'eta': 0.1,
'objective': 'binary:logistic',
'eval_metric': 'logloss',
}
def train(train_features,train_labels,num_round):
dtrain = xgb.DMatrix(train_features, label=train_labels)
bst = xgb.train(params, dtrain, num_round)
# get best_threshold
best_f1 = 0
best_threshold = 0
for threshold in range(100):
threshold = threshold / 100
pred_labels = np.where(bst.predict(dtrain) > threshold, 1, 0)
f1 = f1_score(train_labels, pred_labels,average="binary",pos_label=1)
if f1 > best_f1:
best_f1 = f1
best_threshold = threshold
return bst,best_threshold
def test(bst,best_threshold, test_features, test_labels, type="evaluation"):
dtest = xgb.DMatrix(test_features, label=test_labels)
pred = bst.predict(dtest)
if type == "inference":
pred_labels = np.where(pred > best_threshold, 1, 0)
return pred,pred_labels
# compute precision, recall, and F1 score
pred_labels = np.where(pred > best_threshold, 1, 0)
precision = precision_score(test_labels, pred_labels,average="binary",pos_label=1)
recall = recall_score(test_labels, pred_labels,average="binary",pos_label=1)
f1 = f1_score(test_labels, pred_labels,average="binary",pos_label=1)
c_matrix = confusion_matrix(test_labels, pred_labels)
return precision, recall, f1, c_matrix
def merge_features(path):
files = os.listdir(path)
files.sort()
merged_features = []
for file in files:
if not "features" in file:
continue
features = np.load(path + file)
merged_features.append(features)
return np.concatenate(merged_features)
def get_labels(path):
files = os.listdir(path)
files.sort()
labels = []
for file in files:
if not "labels" in file:
continue
labels.append(np.load(path + file))
return np.concatenate(labels)
def preprocess(path):
train_path = path + "/train/"
test_path = path + "/test/"
train_features = merge_features(train_path)
train_labels = get_labels(train_path)
test_features = merge_features(test_path)
test_labels = get_labels(test_path)
return train_features, train_labels, test_features, test_labels
def get_feature_importances(bst):
    # bst.get_fscore() returns {"f<index>": score}; map each index back to a readable feature name.
    importance = bst.get_fscore()
    importance = [(im, feature_names[int(im[0].replace("f", ""))]) for im in importance.items()]
    # Sort by the importance score, which is the second element of the original (key, score) item.
    importance = sorted(importance, key=lambda x: x[0][1], reverse=True)
return importance
def train_loop(num_round=300):
precision_list = []
recall_list = []
f1_list = []
c_matrix_list = []
feature_importance_list = []
for i in range(len(os.listdir("Input"))):
train_features, train_labels, test_features, test_labels = preprocess("Input/" + str(i))
bst, best_threshold = train(train_features, train_labels, num_round)
precision, recall, f1, c_matrix = test(bst,best_threshold, test_features, test_labels)
feature_importance = get_feature_importances(bst)
c_matrix_norm = c_matrix.astype('float') / c_matrix.sum(axis=1)[:, np.newaxis]
precision_list.append(precision)
recall_list.append(recall)
f1_list.append(f1)
c_matrix_list.append(c_matrix_norm)
feature_importance_list.append(feature_importance)
bst.save_model(model_save_pth+f"/{i}.model")
with open(model_save_pth+f"/{i}.threshold",'w') as f:
f.write(str(best_threshold))
# evaluate feature importance
feature_name_importance = {}
for feature_importance in feature_importance_list:
for (im,feature_name) in feature_importance:
if feature_name in feature_name_importance:
feature_name_importance[feature_name] += im[1]
else:
feature_name_importance[feature_name] = im[1]
feature_name_importance = sorted(feature_name_importance.items(), key=lambda x: x[1], reverse=True)
return precision_list, recall_list, f1_list, c_matrix_list, feature_name_importance
def optimize_hyperparameter(eta_candid,max_depth_candid,num_round_candid):
best_f1 = 0
for eta in eta_candid:
for max_depth in max_depth_candid:
for num_round in num_round_candid:
print(eta, max_depth, num_round)
params["eta"] = eta
params["max_depth"] = max_depth
precision_list, recall_list, f1_list, c_matrix_list, feature_name_importance = train_loop(num_round)
print("Average Precision: %.3f" % np.mean(precision_list))
print("Average Recall: %.3f" % np.mean(recall_list))
print("Average F1: %.3f" % np.mean(f1_list))
                if np.mean(f1_list) > best_f1:
                    best_f1 = np.mean(f1_list)
                    # copy params so later mutations of the shared dict don't overwrite the best setting
                    best_params = dict(params)
                    best_precision = np.mean(precision_list)
                    best_recall = np.mean(recall_list)
                    best_params["num_round"] = num_round
return best_params, best_precision, best_recall, best_f1
if __name__ == '__main__':
model_save_pth = "model/"+datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
if not os.path.exists(model_save_pth):
os.makedirs(model_save_pth)
precision_list, recall_list, f1_list, c_matrix_list, feature_name_importance = train_loop()
# give evaluation results
print("Average Precision: %.3f" % np.mean(precision_list))
print("Average Recall: %.3f" % np.mean(recall_list))
print("Average F1: %.3f" % np.mean(f1_list))
print(f1_list)
print("Average Confusion Matrix: \n", np.mean(c_matrix_list,axis=0))
print("Feature Importance:")
for importance in feature_name_importance:
print(f"{importance[0]}: {importance[1]}")
# tune parameters
if False:
eta_candidate = [0.08,0.05,0.03, 0.01]
max_depth_candidate = [3,4,5,6,7,8,9,10,12,15,20]
num_round_candidate = [100,200,300,400,500,600,700,800,900,1000]
best_params,best_precision, best_recall, best_f1 = optimize_hyperparameter(eta_candidate,max_depth_candidate,num_round_candidate)
print(best_params)
print(best_precision)
print(best_recall)
print(best_f1)
|
schema-matching
|
/schema_matching-1.0.4.tar.gz/schema_matching-1.0.4/src/schema_matching/train.py
|
train.py
|
import pandas as pd
import json
from collections import defaultdict
import re
def find_all_keys_values(json_data,parent_key):
"""
    Recursively collect all keys whose values are not lists or dictionaries, together with their values.
    Each key is stored prefixed with its parent key, e.g. {"a": {"b": 1}} yields {"a.b": [1]}.
"""
key_values = defaultdict(list)
for key, value in json_data.items():
if isinstance(value, dict):
child_key_values = find_all_keys_values(value,key)
for child_key, child_value in child_key_values.items():
key_values[child_key].extend(child_value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
child_key_values = find_all_keys_values(item,key)
for child_key, child_value in child_key_values.items():
key_values[child_key].extend(child_value)
else:
key_values[parent_key+"."+key].append(item)
else:
key_values[parent_key+"."+key].append(value)
return key_values
def make_csv_from_json(file_path):
"""
Make csv file from json file.
"""
if file_path.endswith(".json"):
with open(file_path, 'r', encoding='utf-8') as f:
data = json.load(f)
elif file_path.endswith(".jsonl"):
data = []
with open(file_path, 'r') as json_file:
json_list = list(json_file)
for json_str in json_list:
data.append(json.loads(json_str))
# find key_values
if isinstance(data, dict):
key_values = find_all_keys_values(data,"")
elif isinstance(data, list):
key_values = find_all_keys_values({"TOPLEVEL":data},"TOPLEVEL")
else:
raise ValueError('Your input JsonData is not a dictionary or list')
key_values = {k.replace("TOPLEVEL.",""):v for k,v in key_values.items() if len(v)>1}
df = pd.DataFrame({k:pd.Series(v) for k,v in key_values.items()})
# save to csv
save_pth = re.sub(r'\.jsonl?','.csv',file_path)
df.to_csv(save_pth, index=False, encoding='utf-8')
return df
def table_column_filter(table_df):
"""
    Drop columns that have at most one usable instance (ignoring NaN and "--"), as well as unnamed ("Unnamed:") columns.
"""
original_columns = table_df.columns
for column in table_df.columns:
        # d == d filters out NaN values (NaN != NaN); "--" marks missing entries
        column_data = [d for d in list(table_df[column]) if d == d and d != "--"]
if len(column_data) <= 1:
table_df = table_df.drop(column, axis=1)
continue
if "Unnamed:" in column:
table_df = table_df.drop(column, axis=1)
continue
remove_columns = list(set(original_columns) - set(table_df.columns))
if len(remove_columns) > 0:
print("Removed columns:", remove_columns)
return table_df
|
schema-matching
|
/schema_matching-1.0.4.tar.gz/schema_matching-1.0.4/src/schema_matching/utils.py
|
utils.py
|
from . import init
from .self_features import make_self_features_from
from .utils import table_column_filter
import pandas as pd
import numpy as np
from numpy.linalg import norm
import random
import os
import subprocess
from strsimpy.metric_lcs import MetricLCS
from strsimpy.damerau import Damerau
from nltk.translate import bleu
from nltk.translate.bleu_score import SmoothingFunction
from sentence_transformers import util
import re
model = init.model
smoothie = SmoothingFunction().method4
metriclcs = MetricLCS()
damerau = Damerau()
seed = 200
random.seed(seed)
def preprocess_text(text):
text = text.lower()
text = re.split(r'[\s\_\.]', text)
text = " ".join(text).strip()
return text
def transformer_similarity(text1, text2):
"""
Use sentence transformer to calculate similarity between two sentences.
"""
embeddings1 = model.encode(text1)
embeddings2 = model.encode(text2)
cosine_similarity = util.cos_sim(embeddings1, embeddings2)
return cosine_similarity
def read_mapping(mapping_file):
"""
Read mapping file and return a set.
"""
if mapping_file is None or not os.path.exists(mapping_file):
return set()
with open(mapping_file, 'r') as f:
readed = f.readlines()
readed = [x.strip() for x in readed]
mapping = set()
for map in readed:
map = map.split(",")
map = [m.strip("< >") for m in map]
mapping.add(tuple(map))
return mapping
def make_combinations_labels(columns1, columns2, mapping ,type="train"):
"""
Make combinations from columns1 list and columns2 list. Label them using mapping.
"""
labels = {}
for i,c1 in enumerate(columns1):
for j,c2 in enumerate(columns2):
if (c1, c2) in mapping or (c2, c1) in mapping:
labels[(i, j)] = 1
else:
labels[(i, j)] = 0
    # Downsample negative pairs so that positive pairs make up at least ~10% of the training combinations
    if type == "train":
combinations_count = len(labels)
for i in range(combinations_count*2):
if sum(labels.values()) >= 0.1 * len(labels):
break
c1 = random.choice(range(len(columns1)))
c2 = random.choice(range(len(columns2)))
if (c1, c2) in labels and labels[c1, c2] == 0:
del labels[(c1, c2)]
return labels
def get_colnames_features(text1,text2,column_name_embeddings):
"""
    Compute column-name features: BLEU score, Damerau edit distance, metric LCS,
    sentence-transformer cosine similarity and a substring-containment indicator.
"""
bleu_score = bleu([text1], text2, smoothing_function=smoothie)
edit_distance = damerau.distance(text1, text2)
lcs = metriclcs.distance(text1, text2)
transformer_score = util.cos_sim(column_name_embeddings[text1], column_name_embeddings[text2])
one_in_one = text1 in text2 or text2 in text1
colnames_features = np.array([bleu_score, edit_distance, lcs,transformer_score, one_in_one])
return colnames_features
def get_instance_similarity(embeddings1, embeddings2):
"""
    Cosine similarity between two mean instance embeddings.
"""
cosine_similarity = np.inner(embeddings1, embeddings2) / (norm(embeddings1) * norm(embeddings2))
return np.array([cosine_similarity])
def make_data_from(table1_df, table2_df,mapping_file=None,type="train"):
"""
Read data from 2 table dataframe, mapping file path and make relational features and labels as a matrix.
"""
mapping = read_mapping(mapping_file)
columns1 = list(table1_df.columns)
columns2 = list(table2_df.columns)
combinations_labels = make_combinations_labels(columns1, columns2, mapping,type)
table1_features = make_self_features_from(table1_df)
table2_features = make_self_features_from(table2_df)
column_name_embeddings = {preprocess_text(k):model.encode(preprocess_text(k)) for k in columns1+columns2}
additional_feature_num = 6
output_feature_table = np.zeros((len(combinations_labels), table1_features.shape[1] - 768+ additional_feature_num), dtype=np.float32)
output_labels = np.zeros(len(combinations_labels), dtype=np.int32)
for i, (combination,label) in enumerate(combinations_labels.items()):
c1,c2 = combination
c1_name = columns1[c1]
c2_name = columns2[c2]
difference_features_percent = np.abs(table1_features[c1] - table2_features[c2]) / (table1_features[c1] + table2_features[c2] + 1e-8)
c1_name = preprocess_text(c1_name)
c2_name = preprocess_text(c2_name)
colnames_features = get_colnames_features(c1_name, c2_name,column_name_embeddings)
instance_similarity = get_instance_similarity(table1_features[c1][-768:], table2_features[c2][-768:])
output_feature_table[i,:] = np.concatenate((difference_features_percent[:-768], colnames_features, instance_similarity))
output_labels[i] = label
        # For every 5th training pair, also add a copy whose column-name features are replaced
        # with "masked" placeholder values, so the model learns to match columns even when
        # column names are unavailable or misleading.
        if type == "train" and i % 5 == 0:
            colnames_features = np.array([0, 12, 0, 0.2, 0])
added_features = np.concatenate((difference_features_percent[:-768], colnames_features, instance_similarity))
added_features = added_features.reshape((1, added_features.shape[0]))
output_feature_table = np.concatenate((output_feature_table, added_features), axis=0)
output_labels = np.concatenate((output_labels, np.array([label])))
return output_feature_table, output_labels
if __name__ == '__main__':
if os.path.exists("Input"):
#remove existing Input folder
subprocess.call(["rm", "-r", "Input"])
# make folders
os.mkdir("Input")
folder_list = os.listdir("Training Data")
train_features = {}
train_labels = {}
test_features = {}
test_labels = {}
for folder in folder_list:
print("start extracting data from " + folder)
data_folder = "Training Data/" + folder
table1_df = pd.read_csv(data_folder + "/Table1.csv")
table2_df = pd.read_csv(data_folder + "/Table2.csv")
table1_df = table_column_filter(table1_df)
table2_df = table_column_filter(table2_df)
mapping_file = data_folder + "/mapping.txt"
features,labels = make_data_from(table1_df, table2_df, mapping_file,type="train")
train_features[folder] = features
train_labels[folder] = labels
features,labels = make_data_from(table1_df, table2_df, mapping_file,type="test")
test_features[folder] = features
test_labels[folder] = labels
# save data using cross validation
for i in range(len(folder_list)):
os.mkdir("Input/" + str(i))
os.mkdir("Input/" + str(i) + "/train")
os.mkdir("Input/" + str(i) + "/test")
test_folder = folder_list[i]
train_folders = folder_list[:i] + folder_list[i+1:]
for folder in train_folders:
np.save("Input/"+ str(i) +"/train/" +folder.split('/')[-1]+ "_features.npy", train_features[folder])
np.save("Input/"+ str(i) +"/train/" +folder.split('/')[-1]+ "_labels.npy", train_labels[folder])
np.save("Input/"+ str(i) +"/test/" +test_folder.split('/')[-1]+ "_features.npy", test_features[test_folder])
np.save("Input/"+ str(i) +"/test/" +test_folder.split('/')[-1]+ "_labels.npy", test_labels[test_folder])
|
schema-matching
|
/schema_matching-1.0.4.tar.gz/schema_matching-1.0.4/src/schema_matching/relation_features.py
|
relation_features.py
|
from . import init
import pandas as pd
import numpy as np
import re
import random
from dateutil.parser import parse as parse_date
model = init.model
unit_dict = {"万": 10000, "亿": 100000000, "萬": 10000, "億": 100000000, "K+": 1000, "M+": 1000000, "B+": 1000000000}
def load_table(filepath):
"""
Loads the data from the given filepath.
"""
df = pd.read_csv(filepath)
return df
def strict_numeric(data_list,verbose=False):
"""
Checks if the given data is numeric.
"""
cnt = 0
for x in data_list:
try:
y = float(x)
if verbose:
print(x)
print(y)
cnt += 1
except:
continue
if cnt >= 0.95*len(data_list):
return True
return False
def mainly_numeric(data_list):
"""
Checks if the given data list is mostly numeric.
"""
cnt = 0
for data in data_list:
data = str(data)
data = data.replace(",", "")
for unit in unit_dict.keys():
data = data.replace(unit, "")
numeric_part = re.findall(r'\d+', data)
if len(numeric_part) > 0 and sum(len(x) for x in numeric_part) >= 0.5*len(data):
cnt += 1
if cnt >= 0.9*len(data_list):
return True
return False
def extract_numeric(data_list):
"""
Extracts numeric part(including float) from string list
"""
try:
data_list = [float(d) for d in data_list]
except:
pass
numeric_part = []
unit = []
for data in data_list:
data = str(data)
data = data.replace(",", "")
numeric_part.append(re.findall(r'([-]?([0-9]*[.])?[0-9]+)', data))
this_unit = 1
for unit_key in unit_dict.keys():
if unit_key in data:
this_unit = unit_dict[unit_key]
break
unit.append(this_unit)
numeric_part = [x for x in numeric_part if len(x) > 0]
if len(numeric_part) != len(data_list):
print(f"Warning: extract_numeric() found different number of numeric part({len(numeric_part)}) and data list({len(data_list)})")
numeric_part = [float(x[0][0])*unit[i] for i,x in enumerate(numeric_part)]
return numeric_part
def numeric_features(data_list):
"""
    Extracts numeric features from the given data: mean, min, max, variance,
    coefficient of variation (computed here as variance/mean) and the ratio of unique values.
"""
mean = np.mean(data_list)
min = np.min(data_list)
max = np.max(data_list)
variance = np.var(data_list)
cv = np.var(data_list)/mean
unique = len(set(data_list))
return np.array([mean, min, max, variance,cv, unique/len(data_list)])
def is_url(data_list):
"""
Checks if the given data is in URL format.
"""
cnt = 0
for data in data_list:
if type(data) != str:
continue
if re.search(r'[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)', data):
cnt += 1
if cnt >= 0.9*len(data_list):
return True
return False
def is_date(data_list):
"""
Checks if the given data is in Date format.
"""
cnt = 0
for data in data_list:
if type(data) != str:
continue
if "月" in data or "日" in data or "年" in data:
cnt += 1
try:
date = parse_date(data)
# check if the date is near to today
if date.year < 2000 or date.year > 2030:
continue
cnt += 1
except:
continue
if cnt >= 0.9*len(data_list):
return True
return False
def character_features(data_list):
"""
Extracts character features from the given data.
"""
# Ratio of whitespace to length
# Ratio of punctuation to length
# Ratio of special characters to length
punctuations = [",",".",";","!","?",",","。",";","!","?"]
special_characters = ["/","/","\\","-","_","+","=","*","&","^","%","$","#","@","~","`","(",")","[","]","{","}","<",">","|","'","\""]
whitespace_ratios = []
punctuation_ratios = []
special_character_ratios = []
numeric_ratios = []
for data in data_list:
whitespace_ratio = (data.count(" ") + data.count("\t") + data.count("\n"))/len(data)
punctuation_ratio = sum(1 for x in data if x in punctuations)/len(data)
special_character_ratio = sum(1 for x in data if x in special_characters)/len(data)
numeric_ratio = sum(1 for x in data if x.isdigit())/len(data)
whitespace_ratios.append(whitespace_ratio)
punctuation_ratios.append(punctuation_ratio)
special_character_ratios.append(special_character_ratio)
numeric_ratios.append(numeric_ratio)
    # A small epsilon keeps the coefficient-of-variation denominators non-zero.
    epsilon = np.array([1e-12] * len(data_list))
    whitespace_ratios = np.array(whitespace_ratios) + epsilon
    punctuation_ratios = np.array(punctuation_ratios) + epsilon
    special_character_ratios = np.array(special_character_ratios) + epsilon
    numeric_ratios = np.array(numeric_ratios) + epsilon
return np.array([np.mean(whitespace_ratios), np.mean(punctuation_ratios), np.mean(special_character_ratios), np.mean(numeric_ratios),
np.var(whitespace_ratios)/np.mean(whitespace_ratios), np.var(punctuation_ratios)/np.mean(punctuation_ratios),
np.var(special_character_ratios)/np.mean(special_character_ratios), np.var(numeric_ratios)/np.mean(numeric_ratios)])
def deep_embedding(data_list):
"""
Extracts deep embedding features from the given data using sentence-transformers.
"""
if len(data_list) < 20:
selected_data = data_list
else:
selected_data = random.sample(data_list,20)
embeddings = [model.encode(str(data)) for data in selected_data]
embeddings = np.array(embeddings)
return np.mean(embeddings, axis=0)
def extract_features(data_list):
"""
Extract some features from the given data(column) or list
"""
    # d == d filters out NaN values (NaN != NaN); "--" marks missing entries
    data_list = [d for d in data_list if d == d and d != "--"]
data_types = ("url","numeric","date","string")
# Classify the data's type, URL or Date or Numeric
if is_url(data_list):
data_type = "url"
elif is_date(data_list):
data_type = "date"
elif strict_numeric(data_list) or mainly_numeric(data_list):
data_type = "numeric"
else:
data_type = "string"
# Make data type feature one hot encoding
data_type_feature = np.zeros(len(data_types))
data_type_feature[data_types.index(data_type)] = 1
# Give numeric features if the data is mostly numeric
if data_type == "numeric":
data_numeric = extract_numeric(data_list)
num_fts = numeric_features(data_numeric)
else:
num_fts = np.array([-1]*6)
# If data is not numeric, give length features
length_fts = numeric_features([len(str(d)) for d in data_list])
# Give character features and deep embeddings if the data is string
if data_type == "string" or (not strict_numeric(data_list) and mainly_numeric(data_list)):
char_fts = character_features(data_list)
deep_fts = deep_embedding(data_list)
else:
char_fts = np.array([-1]*8)
deep_fts = np.array([-999]*768)
output_features = np.concatenate((data_type_feature, num_fts, length_fts, char_fts, deep_fts))
return output_features
def make_self_features_from(table_df):
"""
Extracts features from the given table path and returns a feature table.
"""
features = None
for column in table_df.columns:
if "Unnamed:" in column:
continue
fts = extract_features(table_df[column])
fts = fts.reshape(1, -1)
if features is None:
features = fts
else:
features = np.concatenate((features, fts), axis=0)
return features
if __name__ == '__main__':
    # make_self_features_from expects a DataFrame, so load the CSV first
    features = make_self_features_from(load_table("Test Data/0archive/Table2.csv"))
print(features)
print(features.shape)
|
schema-matching
|
/schema_matching-1.0.4.tar.gz/schema_matching-1.0.4/src/schema_matching/self_features.py
|
self_features.py
|
from . import init
from .relation_features import make_data_from
from .utils import make_csv_from_json,table_column_filter
from .train import test
import numpy as np
import pandas as pd
import xgboost as xgb
import os
import argparse
import time
from pathlib import Path
this_directory = Path(__file__).parent
parser = argparse.ArgumentParser()
parser.add_argument("-p","--path", help="path to the folder containing the test data")
parser.add_argument("-m", "--model", help="path to the model")
parser.add_argument("-t", "--threshold", help="threshold for inference")
parser.add_argument("-s", "--strategy", help="one-to-one or many-to-many or one-to-many", default="many-to-many")
args = parser.parse_args()
def create_similarity_matrix(table1_df,table2_df,preds,pred_labels_list,strategy="many-to-many"):
"""
Create a similarity matrix from the prediction
"""
predicted_pairs = []
preds = np.array(preds)
preds = np.mean(preds,axis=0)
pred_labels_list = np.array(pred_labels_list)
pred_labels = np.mean(pred_labels_list,axis=0)
pred_labels = np.where(pred_labels>0.5,1,0)
# read column names
df1_cols = table1_df.columns
df2_cols = table2_df.columns
# create similarity matrix for pred values
preds_matrix = np.array(preds).reshape(len(df1_cols),len(df2_cols))
# create similarity matrix for pred labels
if strategy == "many-to-many":
pred_labels_matrix = np.array(pred_labels).reshape(len(df1_cols),len(df2_cols))
else:
pred_labels_matrix = np.zeros((len(df1_cols),len(df2_cols)))
for i in range(len(df1_cols)):
for j in range(len(df2_cols)):
if pred_labels[i*len(df2_cols)+j] == 1:
if strategy == "one-to-one":
max_row = max(preds_matrix[i,:])
max_col = max(preds_matrix[:,j])
if preds_matrix[i,j] == max_row and preds_matrix[i,j] == max_col:
pred_labels_matrix[i,j] = 1
elif strategy == "one-to-many":
max_row = max(preds_matrix[i,:])
if preds_matrix[i,j] == max_row:
pred_labels_matrix[i,j] = 1
df_pred = pd.DataFrame(preds_matrix,columns=df2_cols,index=df1_cols)
df_pred_labels = pd.DataFrame(pred_labels_matrix,columns=df2_cols,index=df1_cols)
for i in range(len(df_pred_labels)):
for j in range(len(df_pred_labels.iloc[i])):
if df_pred_labels.iloc[i,j] == 1:
predicted_pairs.append((df_pred.index[i],df_pred.columns[j],df_pred.iloc[i,j]))
return df_pred,df_pred_labels,predicted_pairs
def schema_matching(table1_pth,table2_pth,threshold=None,strategy="many-to-many",model_pth=None):
"""
Do schema matching!
"""
if model_pth is None:
model_pth = str(this_directory / "model" / "2022-04-12-12-06-32")
# transform jsonl or json file to csv
if table1_pth.endswith('.json') or table1_pth.endswith('.jsonl'):
table1_df = make_csv_from_json(table1_pth)
else:
table1_df = pd.read_csv(table1_pth)
if table2_pth.endswith('.json') or table2_pth.endswith('.jsonl'):
table2_df = make_csv_from_json(table2_pth)
else:
table2_df = pd.read_csv(table2_pth)
# filter columns
table1_df = table_column_filter(table1_df)
table2_df = table_column_filter(table2_df)
# extract features
features,_ = make_data_from(table1_df, table2_df, type="test")
# load model and predict on features
preds = []
pred_labels_list = []
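    # The model directory is expected to contain one "<i>.model" (XGBoost booster) and
    # one "<i>.threshold" (its decision threshold) file per ensemble member.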
for i in range(len(os.listdir(model_pth))//2):
bst = xgb.Booster({'nthread': 4}) # init model
bst.load_model(model_pth+"/"+str(i)+".model")
if threshold is not None:
best_threshold = float(threshold)
else:
with open(model_pth+"/"+str(i)+".threshold",'r') as f:
best_threshold = float(f.read())
pred, pred_labels = test(bst, best_threshold, features, test_labels=np.ones(len(features)), type="inference")
preds.append(pred)
pred_labels_list.append(pred_labels)
del bst
df_pred,df_pred_labels,predicted_pairs = create_similarity_matrix(table1_df, table2_df, preds, pred_labels_list, strategy=strategy)
return df_pred,df_pred_labels,predicted_pairs
if __name__ == '__main__':
start = time.time()
args.path = args.path.rstrip("/")
df_pred,df_pred_labels,predicted_pairs = schema_matching(args.path+"/Table1.csv",args.path+"/Table2.csv",threshold=args.threshold,strategy=args.strategy,model_pth=args.model)
df_pred.to_csv(args.path+"/similarity_matrix_value.csv",index=True)
df_pred_labels.to_csv(args.path+"/similarity_matrix_label.csv",index=True)
for pair_tuple in predicted_pairs:
print(pair_tuple)
print("schema_matching|Time taken:",time.time()-start)
|
schema-matching
|
/schema_matching-1.0.4.tar.gz/schema_matching-1.0.4/src/schema_matching/cal_column_similarity.py
|
cal_column_similarity.py
|
| 0.304972 | 0.279738 |
from functools import lru_cache
import json
from typing import Any, Dict, List, Optional, Set
from requests_toolbelt.sessions import BaseUrlSession
from bs4 import BeautifulSoup
from bs4.element import ResultSet
from schema_registry.config import get_logger
from schema_registry.registries.entities import EnrichedNamespace, VersionedType
class HttpSchemaRegistry:
def __init__(self, *, url: str) -> None:
self.url = url
self.session = BaseUrlSession(base_url=self.url)
self.log = get_logger(self.__class__.__name__)
self._namespaces: Set[str] = set()
self._types: Dict[str, Set[VersionedType]] = {}
@lru_cache(maxsize=128)
def get(
self, *, namespace: str, type_name: str, version: Optional[int] = None
) -> Optional[Dict[str, Any]]:
if version is None:
version = self.find_latest_version(namespace=namespace, type_name=type_name)
req = self.session.get(
f"/registry/{namespace}/{type_name}/{version}/schema.json"
)
self.log.debug(req)
if req.status_code != 200:
return None
return req.json() # type: ignore
@property
def namespaces(self) -> Set[str]:
if not self._namespaces:
self._initialize_namespaces()
return self._namespaces
def types(self, *, namespace: str) -> Set[VersionedType]:
preliminary_result = self._types.get(namespace)
if preliminary_result is None:
self.log.debug(f"Fetching types for {namespace} from remote")
self._initialize_types(namespace)
return self._types.get(namespace, set())
def _initialize_namespaces(self) -> None:
index = self.session.get("/registry/index.json").json()
schemes = index.get("schemes", [])
self.log.debug(schemes)
self._namespaces = {schema["ns"] for schema in schemes}
def _initialize_types(self, namespace: str) -> None:
index = self.session.get("/registry/index.json").json()
schemes = index.get("schemes", [])
filtered = filter(lambda x: x["ns"] == namespace, schemes)
self._types[namespace] = {
VersionedType(name=schema["type"], version=schema["version"])
for schema in filtered
}
def find_latest_version(self, *, namespace: str, type_name: str) -> int:
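        """Return the latest known version of the given type within the namespace."""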
self.log.debug("Fetching version")
types_of_namespace = self.types(namespace=namespace)
found_types = list(filter(lambda x: x.name == type_name, types_of_namespace))
found_types.sort()
self.log.debug(f"Found {found_types}")
        if not found_types:
            # Fail with a clear error instead of an IndexError when the type is unknown.
            raise LookupError(f"No type named {type_name!r} found in namespace {namespace!r}")
        version = found_types[-1].version
if len(found_types) != 1:
self.log.info(
f"Found {len(found_types)} versions using latest version ({version})"
)
return version
def refresh(self) -> None:
self._initialize_namespaces()
self._types = {}
self.get.cache_clear()
# TODO Transformation stuff
class EnrichingHttpSchemaRegistry:
def __init__(self, *, url: str) -> None:
self.url = url
self.session = BaseUrlSession(base_url=self.url)
self.http_schema_registry = HttpSchemaRegistry(url=url)
self.log = get_logger(self.__class__.__name__)
self._namespaces: Set[EnrichedNamespace] = set()
self._types: Dict[str, Set[VersionedType]] = {}
@lru_cache(maxsize=128)
def get(
self, *, namespace: str, type_name: str, version: Optional[int] = None
) -> Optional[Dict[str, Any]]:
if version is None:
version = self.http_schema_registry.find_latest_version(
namespace=namespace, type_name=type_name
)
schema = None
try:
schema_code, _ = self._get_code_elements(
namespace=namespace, type_name=type_name, version=version
) # type: ignore
schema_description = self._get_schema_description(
namespace=namespace, type_name=type_name
)
schema = json.loads(schema_code.get_text())
schema["title"] = "\n".join(schema_description)
self.log.debug(f"Found schema: {schema}")
except TypeError:
self.log.info("Requested schema not found.")
except json.JSONDecodeError:
self.log.warning(
"Could not translate remote schema into JSON. HTML parsing or the page might be off."
)
return schema
def _get_code_elements(
self, *, namespace: str, type_name: str, version: int
) -> Optional[ResultSet]:
        # As currently written, two requests are made if both the example and the
        # schema for the same event are requested. This is based on the assumption
        # that users are interested in one or the other but rarely in both,
        # thereby trading off network traffic against memory footprint.
        # * Should this prove false, we want lru_caching here as well, or maybe even
        # * a more powerful caching mechanism.
req = self.session.get(f"/{namespace}/{type_name.lower()}/{version}/index.html")
if req.status_code != 200:
return None
raw_schema_html = req.content
schema_html = BeautifulSoup(raw_schema_html, features="lxml")
code_elements = schema_html.find_all("code")
self.log.debug(f"Found {len(code_elements)} code elements")
        if len(code_elements) != 2:
            raise AttributeError(
                f"Expected exactly 2 code elements, found {len(code_elements)}; cannot parse schema"
            )
return code_elements
def _get_schema_description(self, namespace: str, type_name: str) -> List[str]:
req = self.session.get(f"/{namespace}/{type_name.lower()}/index.html")
raw_schema_html = req.content
schema_html = BeautifulSoup(raw_schema_html, features="lxml")
headline = schema_html.body.h1
description = []
next_sibling = headline.next_sibling
while next_sibling.name != "h2":
self.log.debug(f"Sibling: {next_sibling}: {next_sibling.name}")
if next_sibling.string and next_sibling.string.strip():
description.append(next_sibling.string.strip())
next_sibling = next_sibling.next_sibling
return description
@lru_cache(maxsize=128)
def example(
self, *, namespace: str, type_name: str, version: Optional[int] = None
) -> Optional[Dict[str, Any]]:
if version is None:
version = self.http_schema_registry.find_latest_version(
namespace=namespace, type_name=type_name
)
example = None
try:
_, example_code = self._get_code_elements(
namespace=namespace, type_name=type_name, version=version
) # type: ignore
example = json.loads(example_code.get_text())
self.log.debug(f"Found example: {example}")
except TypeError:
self.log.info("Requested example not found.")
except json.JSONDecodeError:
self.log.warning(
"Could not translate remote example into JSON. HTML parsing or the page might be off."
)
return example
@property
def namespaces(self) -> Set[EnrichedNamespace]:
if not self._namespaces:
self._initialize_namespaces()
return self._namespaces
def _initialize_namespaces(self) -> None:
namespaces = self.http_schema_registry.namespaces
self._namespaces = set()
for namespace in namespaces:
req = self.session.get(f"/{namespace}")
raw_schema_html = req.content
namespace_html = BeautifulSoup(raw_schema_html, features="lxml")
description = namespace_html.find("div", {"id": "body-inner"}).p
if description and description.string:
title = description.string
else:
title = ""
self._namespaces.add(EnrichedNamespace(name=namespace, title=title))
def types(self, *, namespace: str) -> Set[VersionedType]:
return self.http_schema_registry.types(namespace=namespace)
def refresh(self) -> None:
self.http_schema_registry.refresh()
self.get.cache_clear()
# TODO Transformation stuff
|
schema-registry
|
/schema-registry-0.0.6.tar.gz/schema-registry-0.0.6/src/schema_registry/registries/HttpRegistry.py
|
HttpRegistry.py
|
| 0.63114 | 0.113506 |
from pathlib import Path
from copy import deepcopy
import json
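# SchemaRenderer loads a JSON Schema from disk and recursively inlines "$ref"
# references, resolving both in-document fragments and references to other schema
# files relative to the current working path; "definitions" blocks are dropped
# from the rendered output.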
class SchemaRenderer:
list_resolver = {"allOf", "anyOf", "oneOf", "items"}
schema_cache = {}
def __init__(self, path):
pth = Path(path)
self.root_directory = pth.parent
        with open(path) as schema_file:
            self.schema = json.load(schema_file)
def _resolve_ref(self, ref, working_schema, working_path):
path, _, fragment = ref.partition("#")
other_schema = None
new_working_path = None
if path:
path = Path(working_path, path)
new_working_path = path.parent.resolve()
            with open(path) as ref_file:
                other_schema = json.load(ref_file)
working_schema = deepcopy(other_schema or working_schema)
doc_part = deepcopy(other_schema or working_schema)
fragment_parts = [part for part in fragment.split("/") if part]
for fragment_part in fragment_parts:
doc_part = doc_part[fragment_part]
return doc_part, working_schema, new_working_path or working_path
def _resolve_dict(self, dictionary, working_schema, working_path):
data = dict()
if "$ref" in dictionary:
loaded_data, temp_working_schema, temp_working_path = self._resolve_ref(
dictionary["$ref"], working_schema, working_path
)
return self._resolve_dict(
loaded_data, temp_working_schema, temp_working_path
)
for key, item in dictionary.items():
new_value = item
if isinstance(item, dict):
if key == "definitions":
continue
new_value = self._resolve_dict(item, working_schema, working_path)
elif isinstance(item, list) and key in SchemaRenderer.list_resolver:
new_value = [
self._resolve_dict(it, working_schema, working_path) for it in item
]
data[key] = new_value
return data
def render(self):
return self._resolve_dict(self.schema, self.schema, self.root_directory)
|
schema-renderer
|
/schema_renderer-0.1.0-py3-none-any.whl/schema_renderer/schema_renderer.py
|
schema_renderer.py
|
| 0.524882 | 0.142113 |
================
Schema Resources
================
.. image:: https://travis-ci.org/klmitch/schema-resource.svg?branch=master
:target: https://travis-ci.org/klmitch/schema-resource
The ``schema-resource`` package is a simple library for loading
``jsonschema`` schemas using ``pkg_resources``. This means that
Python packages utilizing the ``schema-resource`` package can bundle
schemas for validating API or user configuration as separate files
with the package source. Further, those schemas may then reference
other schema files within the package.
Simple Usage
============
The simplest way to use ``schema-resource`` begins by understanding
the resource URI. A resource URI is a URI with the scheme "res". The
"network location"--the part that appears after the "//" in a URI--is
the package name, as understood by ``pkg_resources``. The path is
then interpreted relative to the root directory of the package. For
instance, if the package "spam" has a schema named "schema.yaml", the
resource URI would be "res://spam/schema.yaml". This schema can then
be loaded using ``schema_res.load_schema()``, which takes the resource
URI as its first argument; the result will be an object conforming to
the ``jsonschema.IValidator`` interface documented in the
``jsonschema`` documentation.
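For example, loading and using such a bundled schema might look like this (the
package and schema file names are purely illustrative)::
    import schema_res
    validator = schema_res.load_schema("res://spam/schema.yaml")
    validator.validate({"spam": "eggs"})  # raises jsonschema.ValidationError on mismatch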
This schema could, of course, be loaded by using a combination of
``jsonschema`` and ``pkg_resources`` directly; however,
``schema-resource`` creates the schema with a special
``jsonschema.RefResolver`` that understands these resource URIs; this
enhancement allows one schema to refer to another, or part of another,
resource schema directly.
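For instance, a schema bundled with the "spam" package could pull in part of
another bundled schema by referencing its resource URI (an illustrative YAML
fragment)::
    type: object
    properties:
      bacon:
        $ref: "res://spam/other.yaml#/definitions/Bacon"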
Class Attributes
================
Often, a class needs to use a particular schema in order to validate
input, often from an API or a configuration file. This can be
simplified through the use of ``schema_res.SchemaDescriptor``. This
class implements the Python "descriptor" protocol, meaning that, when
assigned to a class attribute, references to the value of the
attribute will cause a method of ``schema_res.SchemaDescriptor`` to be
called. That method implements an on-demand loading of a schema
resource, constructing the resource URI if needed from the class's
``__module__`` attribute. For instance, assume that the ``Spam``
class below needs to validate data fed to a class method::
class Spam(object):
schema = schema_res.SchemaDescriptor("spam.yaml")
@classmethod
def from_data(cls, data):
cls.schema.validate(data)
return cls(**data)
...
This class first validates the data against the schema loaded from the
"spam.yaml" file bundled with the package sources, loading the schema
the first time the method is called. (The
``jsonschema.IValidator.validate()`` method raises a
``jsonschema.ValidationError`` exception if the ``data`` doesn't match
the requirements of the schema.)
Validating Schemas
==================
It is a good idea for the test suite for a package to verify that the
schemas it bundles are valid. This could be done by simply using the
``schema_res.load_schema()`` function, calling it for each resource
URI and passing ``validate=True``, within the package's test suite.
However, there's also a simple helper: ``schema_res.validate()`` takes
one or more resource URIs and calls ``schema_res.load_schema()`` on
each, passing ``validate=True``. This means that this entire test can
be written as a single function call, like so::
class TestSchemas(object):
def test_valid(self):
schema_res.validate(
"res://spam/schema1.yaml",
"res://spam/schema2.yaml",
"res://spam/schema3.yaml",
)
Schema Format
=============
In all the examples so far, the schema filenames have had the ".yaml"
extension. There is no specific need to use this extension, nor even
for the files to be in YAML format: JSON is a subset of YAML, so the
schema files can be written in regular JSON. However, by using a YAML
parser to load the schema files, they may be expressed in YAML format,
which this programmer finds easier to write and to read than strict
JSON syntax.
|
schema-resource
|
/schema-resource-0.0.1.tar.gz/schema-resource-0.0.1/README.rst
|
README.rst
|
| 0.919376 | 0.531209 |
from six.moves.urllib import parse as urlparse
from schema_res import loader
class SchemaDescriptor(object):
"""
A class implementing the descriptor protocol to automatically load
schemas from package resources. Values are always
``jsonschema.IValidator`` instances, regardless of how the
descriptor is accessed.
"""
def __init__(self, uri_or_fname):
"""
Initialize a ``SchemaDescriptor`` instance.
:param str uri_or_fname: The URI or filename of the schema to
load. Only "res:"-style URIs are
recognized; this cannot be used to
load a schema over the network, in
order to discourage that non-portable
practice. If a bare filename is
provided, it will be interpreted
relative to the ``__module__``
attribute of the owning class.
"""
# Save the URI
self.uri = urlparse.urlparse(uri_or_fname)
# Cache of the loaded schema
self.schema = None
def __get__(self, inst, owner):
"""
Retrieve the specified schema object.
:param inst: An instance of the class. Ignored by this
implementation of the descriptor protocol.
:param owner: The class containing the descriptor.
:returns: The desired schema, loading it as necessary.
:rtype: ``jsonschema.IValidator``
"""
# Do we need to load the schema?
if self.schema is None:
# Is it a fully qualified URL?
if self.uri.scheme:
uri = urlparse.urlunparse(self.uri)
else:
uri = urlparse.urlunparse((
'res', owner.__module__,
self.uri.path, self.uri.params, self.uri.query,
self.uri.fragment,
))
# Load the schema
self.schema = loader.load_schema(uri)
return self.schema
|
schema-resource
|
/schema-resource-0.0.1.tar.gz/schema-resource-0.0.1/schema_res/descriptor.py
|
descriptor.py
|
| 0.868032 | 0.277825 |
import jsonschema
import pkg_resources
from six.moves.urllib import parse as urlparse
import yaml
def _res_handler(uri):
"""
Handler for "res:" URIs. A "res:" URI resolves a resource using
``pkg_resources.resource_stream``; the "netloc" part of the URI
(the part after the "res://") should be the package or
requirement, and the "path" part of the URI will be interpreted
relative to the top level of that package or requirement.
:param str uri: The resource URI.
:returns: The result of loading the URI as a YAML file (of which
JSON is a subset).
:rtype: ``dict`` or ``bool``
"""
# Split the URI and extract the parts we're interested in
urisplit = urlparse.urlparse(uri)
if (urisplit.scheme != 'res' or not urisplit.netloc or
urisplit.path in {'', '/'}):
raise ValueError('invalid URI "%s"' % uri)
pkg = urisplit.netloc
path = urisplit.path[1:]
# Return the result of loading the URI
with pkg_resources.resource_stream(pkg, path) as f:
return yaml.safe_load(f)
def load_schema(uri, validate=False):
"""
Loads a schema from a specified resource URI. A resource URI has
the scheme "res:"; the "netloc" part of the URI (the part after
the "res://") should be the package or requirement, and the "path"
part of the URI will be interpreted relative to the top level of
that package or requirement. The schema is loaded as a YAML file
(of which JSON is a subset), and an appropriate ``jsonschema``
validator is returned.
:param str uri: The resource URI.
:param bool validate: If ``True``, the schema will be validated.
Defaults to ``False``.
:returns: A suitable schema validator.
:rtype: ``jsonschema.IValidator``
"""
# Begin by loading the root schema
sch = _res_handler(uri)
# Construct a RefResolver
resolver = jsonschema.RefResolver(
uri, sch,
handlers={'res': _res_handler},
)
# Pick the correct validator matching the schema
val = jsonschema.validators.validator_for(sch)
# Perform the schema validation
if validate:
val.check_schema(sch)
    # Return the constructed validator for the schema
    return val(sch, resolver=resolver)
|
schema-resource
|
/schema-resource-0.0.1.tar.gz/schema-resource-0.0.1/schema_res/loader.py
|
loader.py
|
| 0.77949 | 0.278836 |
How to add new types to the local Typeshed
------------------------------------------
If when running ``make mypy`` you receive errors about modules that can't be
found you may need to add type stubs for new modules to the ``mypy-stubs/``
directory.
::
stubgen -o mypy-stubs module_name
make mypy
Note: the module name is not always the name of the PyPI package
(``CacheControl`` vs ``cachecontrol``).
Stubs are just that: you will still need to annotate whichever functions you
call.
Oftentimes it is simpler to comment out imports in the ``.pyi`` stubs that are
not needed yet. The goal is to represent the public API, or at least the part we
use.
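For example, a hand-trimmed stub might comment out imports that are not needed
yet and keep only the names that are actually used (an illustrative sketch, not
a real stub from this repository)::
    # mypy-stubs/module_name/__init__.pyi
    # from .helpers import Helper  # not needed yet
    def public_function(arg: str) -> bool: ...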
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/docs/typeshed.rst
|
typeshed.rst
|
| 0.717903 | 0.32146 |
import logging
import unicodedata
from typing import (
Any,
Dict,
Iterable,
List,
MutableMapping,
MutableSequence,
Optional,
Tuple,
Union,
cast,
)
from urllib.parse import urldefrag, urlsplit
import rdflib
import rdflib.namespace
from rdflib import Graph, URIRef
from rdflib.namespace import RDF, RDFS
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from .exceptions import SchemaException
from .utils import ContextType, aslist, json_dumps
_logger = logging.getLogger("salad")
def pred(
datatype: MutableMapping[str, Union[Dict[str, str], str]],
field: Optional[Dict[str, Any]],
name: str,
context: ContextType,
defaultBase: str,
namespaces: Dict[str, rdflib.namespace.Namespace],
) -> Union[Dict[str, Union[str, None]], str]:
split = urlsplit(name)
vee = None # type: Optional[str]
if split.scheme != "":
vee = name
(ns, ln) = rdflib.namespace.split_uri(str(vee))
name = ln
if ns[0:-1] in namespaces:
vee = str(namespaces[ns[0:-1]][ln])
_logger.debug("name, v %s %s", name, vee)
v = None # type: Optional[Union[Dict[str, Union[str, None]], str]]
if field is not None and "jsonldPredicate" in field:
if isinstance(field["jsonldPredicate"], MutableMapping):
v = {}
for k, val in field["jsonldPredicate"].items():
v[("@" + k[1:] if k.startswith("_") else k)] = val
if "@id" not in v:
v["@id"] = vee
else:
v = field["jsonldPredicate"]
elif "jsonldPredicate" in datatype:
if isinstance(datatype["jsonldPredicate"], Iterable):
for d in datatype["jsonldPredicate"]:
if isinstance(d, MutableMapping):
if d["symbol"] == name:
v = d["predicate"]
else:
raise SchemaException(
"entries in the jsonldPredicate List must be " "Dictionaries"
)
else:
raise SchemaException("jsonldPredicate must be a List of Dictionaries.")
ret = v or vee
if not ret:
ret = defaultBase + name
if name in context:
if context[name] != ret:
raise SchemaException(f"Predicate collision on {name}, {context[name]!r} != {ret!r}")
else:
_logger.debug("Adding to context '%s' %s (%s)", name, ret, type(ret))
context[name] = ret
return ret
def process_type(
t: MutableMapping[str, Any],
g: Graph,
context: ContextType,
defaultBase: str,
namespaces: Dict[str, rdflib.namespace.Namespace],
defaultPrefix: str,
) -> None:
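    """Add RDF class/property triples and JSON-LD context entries for a single record or enum type."""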
if t["type"] not in ("record", "enum"):
return
if "name" in t:
recordname = t["name"]
_logger.debug("Processing %s %s\n", t.get("type"), t)
classnode = URIRef(recordname)
g.add((classnode, RDF.type, RDFS.Class))
split = urlsplit(recordname)
predicate = recordname
if t.get("inVocab", True):
if split.scheme:
(ns, ln) = rdflib.namespace.split_uri(str(recordname))
predicate = recordname
recordname = ln
else:
predicate = f"{defaultPrefix}:{recordname}"
if context.get(recordname, predicate) != predicate:
raise SchemaException(
f"Predicate collision on {recordname!r}, "
f"{context[recordname]!r} != {predicate!r}"
)
if not recordname:
raise SchemaException(f"Unable to find/derive recordname for {t}")
_logger.debug("Adding to context '%s' %s (%s)", recordname, predicate, type(predicate))
context[recordname] = predicate
if t["type"] == "record":
for i in t.get("fields", []):
fieldname = i["name"]
_logger.debug("Processing field %s", i)
v = pred(
t, i, fieldname, context, defaultPrefix, namespaces
) # type: Union[Dict[Any, Any], str, None]
if isinstance(v, str):
v = v if v[0] != "@" else None
elif v is not None:
v = v["_@id"] if v.get("_@id", "@")[0] != "@" else None
if bool(v):
try:
(ns, ln) = rdflib.namespace.split_uri(str(v))
except ValueError:
# rdflib 5.0.0 compatibility
uri = str(v)
colon_index = str(v).rfind(":")
if colon_index < 0:
raise
split_start = rdflib.namespace.SPLIT_START_CATEGORIES
for j in range(-1 - colon_index, len(uri)):
if unicodedata.category(uri[j]) in split_start or uri[j] == "_":
# _ prevents early split, roundtrip not generate
ns = uri[:j]
if not ns:
break
ln = uri[j:]
break
if not ns or not ln:
raise
if ns[0:-1] in namespaces:
propnode = namespaces[ns[0:-1]][ln]
else:
propnode = URIRef(v)
g.add((propnode, RDF.type, RDF.Property))
g.add((propnode, RDFS.domain, classnode))
# TODO generate range from datatype.
if isinstance(i["type"], MutableMapping):
process_type(i["type"], g, context, defaultBase, namespaces, defaultPrefix)
if "extends" in t:
for e in aslist(t["extends"]):
g.add((classnode, RDFS.subClassOf, URIRef(e)))
elif t["type"] == "enum":
_logger.debug("Processing enum %s", t.get("name"))
for i in t["symbols"]:
pred(t, None, i, context, defaultBase, namespaces)
def salad_to_jsonld_context(
j: Iterable[MutableMapping[str, Any]], schema_ctx: MutableMapping[str, Any]
) -> Tuple[ContextType, Graph]:
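    """Build a JSON-LD context and an RDF graph from a list of Schema Salad type definitions."""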
context = {} # type: ContextType
namespaces = {}
g = Graph()
defaultPrefix = ""
for k, v in schema_ctx.items():
context[k] = v
namespaces[k] = rdflib.namespace.Namespace(v)
if "@base" in context:
defaultBase = cast(str, context["@base"])
del context["@base"]
else:
defaultBase = ""
for k, v in namespaces.items():
g.bind(str(k), v)
for t in j:
process_type(t, g, context, defaultBase, namespaces, defaultPrefix)
return (context, g)
def fix_jsonld_ids(obj: Union[CommentedMap, float, str, CommentedSeq], ids: List[str]) -> None:
"""Add missing identity entries."""
if isinstance(obj, MutableMapping):
for i in ids:
if i in obj:
obj["@id"] = obj[i]
for v in obj.values():
fix_jsonld_ids(v, ids)
if isinstance(obj, MutableSequence):
for entry in obj:
fix_jsonld_ids(entry, ids)
def makerdf(
workflow: Optional[str],
wf: Union[CommentedMap, float, str, CommentedSeq],
ctx: ContextType,
graph: Optional[Graph] = None,
) -> Graph:
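    """Render a document into an RDF graph using the given JSON-LD context."""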
prefixes = {}
idfields = []
for k, v in ctx.items():
if isinstance(v, MutableMapping):
url = v["@id"]
else:
url = v
if url == "@id":
idfields.append(k)
doc_url, frg = urldefrag(url)
if "/" in frg:
p = frg.split("/")[0]
prefixes[p] = f"{doc_url}#{p}/"
fix_jsonld_ids(wf, idfields)
g = Graph() if graph is None else graph
if isinstance(wf, MutableSequence):
for w in wf:
w["@context"] = ctx
g.parse(
data=json_dumps(w, default=str),
format="json-ld",
publicID=str(workflow),
)
elif isinstance(wf, MutableMapping):
wf["@context"] = ctx
g.parse(data=json_dumps(wf, default=str), format="json-ld", publicID=str(workflow))
else:
raise SchemaException(f"{wf} is not a workflow")
# Bug in json-ld loader causes @id fields to be added to the graph
for sub, pred, obj in g.triples((None, URIRef("@id"), None)):
g.remove((sub, pred, obj))
for k2, v2 in prefixes.items():
g.namespace_manager.bind(k2, v2)
return g
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/jsonld_context.py
|
jsonld_context.py
|
| 0.482917 | 0.202838 |
import sys
from io import TextIOWrapper
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
TextIO,
Union,
)
from urllib.parse import urlsplit
from . import schema
from .codegen_base import CodeGenBase
from .cpp_codegen import CppCodeGen
from .dlang_codegen import DlangCodeGen
from .dotnet_codegen import DotNetCodeGen
from .exceptions import SchemaSaladException
from .java_codegen import JavaCodeGen
from .python_codegen import PythonCodeGen
from .ref_resolver import Loader
from .schema import shortname
from .typescript_codegen import TypeScriptCodeGen
from .utils import aslist
FIELD_SORT_ORDER = ["id", "class", "name"]
def codegen(
lang: str,
i: List[Dict[str, str]],
schema_metadata: Dict[str, Any],
loader: Loader,
target: Optional[str] = None,
examples: Optional[str] = None,
package: Optional[str] = None,
copyright: Optional[str] = None,
parser_info: Optional[str] = None,
) -> None:
"""Generate classes with loaders for the given Schema Salad description."""
j = schema.extend_and_specialize(i, loader)
gen: Optional[CodeGenBase] = None
base = schema_metadata.get("$base", schema_metadata.get("id"))
# ``urlsplit`` decides whether to return an encoded result based
# on the object type. To ensure the code behaves the same for Py
# 3.6+, we enforce that the input value is of type ``str``.
if base is None:
base = ""
sp = urlsplit(base)
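    # Derive a default package name from the schema base URI (reversed host name
    # segments plus the path components) when no explicit package is given.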
pkg = (
package
if package
else ".".join(list(reversed(sp.netloc.split("."))) + sp.path.strip("/").split("/"))
)
info = parser_info or pkg
salad_version = schema_metadata.get("saladVersion", "v1.1")
    if lang in {"python", "cpp", "dlang"}:
if target:
dest: Union[TextIOWrapper, TextIO] = open(target, mode="w", encoding="utf-8")
else:
dest = sys.stdout
if lang == "cpp":
gen = CppCodeGen(
base,
dest,
examples,
pkg,
copyright,
)
gen.parse(j)
return
if lang == "dlang":
gen = DlangCodeGen(
base,
dest,
examples,
pkg,
copyright,
info,
salad_version,
)
gen.parse(j)
return
gen = PythonCodeGen(
dest, copyright=copyright, parser_info=info, salad_version=salad_version
)
elif lang == "java":
gen = JavaCodeGen(
base,
target=target,
examples=examples,
package=pkg,
copyright=copyright,
)
elif lang == "typescript":
gen = TypeScriptCodeGen(base, target=target, package=pkg, examples=examples)
elif lang == "dotnet":
gen = DotNetCodeGen(base, target=target, package=pkg, examples=examples)
else:
raise SchemaSaladException(f"Unsupported code generation language {lang!r}")
gen.prologue()
document_roots = []
for rec in j:
if rec["type"] in ("enum", "record"):
gen.type_loader(rec)
gen.add_vocab(shortname(rec["name"]), rec["name"])
for rec in j:
if rec["type"] == "enum":
for symbol in rec["symbols"]:
gen.add_vocab(shortname(symbol), symbol)
if rec["type"] == "record":
if rec.get("documentRoot"):
document_roots.append(rec["name"])
field_names = []
optional_fields = set()
for field in rec.get("fields", []):
field_name = shortname(field["name"])
field_names.append(field_name)
tp = field["type"]
if isinstance(tp, MutableSequence) and tp[0] == "https://w3id.org/cwl/salad#null":
optional_fields.add(field_name)
idfield = ""
for field in rec.get("fields", []):
if field.get("jsonldPredicate") == "@id":
idfield = field.get("name")
gen.begin_class(
rec["name"],
aslist(rec.get("extends", [])),
rec.get("doc", ""),
rec.get("abstract", False),
field_names,
idfield,
optional_fields,
)
gen.add_vocab(shortname(rec["name"]), rec["name"])
sorted_fields = sorted(
rec.get("fields", []),
key=lambda i: FIELD_SORT_ORDER.index(i["name"].split("/")[-1])
if i["name"].split("/")[-1] in FIELD_SORT_ORDER
else 100,
)
for field in sorted_fields:
if field.get("jsonldPredicate") == "@id":
subscope = field.get("subscope")
fieldpred = field["name"]
optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
uri_loader = gen.uri_loader(gen.type_loader(field["type"]), True, False, None)
gen.declare_id_field(
fieldpred,
uri_loader,
field.get("doc"),
optional,
)
break
for field in sorted_fields:
optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
type_loader = gen.type_loader(field["type"])
jld = field.get("jsonldPredicate")
fieldpred = field["name"]
subscope = None
if isinstance(jld, MutableMapping):
ref_scope = jld.get("refScope")
subscope = jld.get("subscope")
if jld.get("typeDSL"):
type_loader = gen.typedsl_loader(type_loader, ref_scope)
elif jld.get("secondaryFilesDSL"):
type_loader = gen.secondaryfilesdsl_loader(type_loader)
elif jld.get("_type") == "@id":
type_loader = gen.uri_loader(
type_loader, jld.get("identity", False), False, ref_scope
)
elif jld.get("_type") == "@vocab":
type_loader = gen.uri_loader(type_loader, False, True, ref_scope)
map_subject = jld.get("mapSubject")
if map_subject:
type_loader = gen.idmap_loader(
field["name"],
type_loader,
map_subject,
jld.get("mapPredicate"),
)
if "_id" in jld and jld["_id"][0] != "@":
fieldpred = jld["_id"]
if jld == "@id":
continue
gen.declare_field(fieldpred, type_loader, field.get("doc"), optional, subscope)
gen.end_class(rec["name"], field_names)
root_type = list(document_roots)
root_type.append({"type": "array", "items": document_roots})
gen.epilogue(gen.type_loader(root_type))
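# Minimal usage sketch (not part of the source): only the codegen() signature
# above is grounded in this module; how `schema_docs`, `schema_metadata` and
# `loader` are obtained (normally via schema-salad's schema loading helpers)
# is assumed here.
#
#     from schema_salad import codegen
#
#     codegen.codegen(
#         "python",            # or "cpp", "dlang", "java", "typescript", "dotnet"
#         schema_docs,         # List[Dict[str, str]]: record/enum definitions
#         schema_metadata,     # dict carrying "$base"/"id" and "saladVersion"
#         loader,              # ref_resolver.Loader used by extend_and_specialize
#         target="parser.py",  # for python/cpp/dlang, omit to write to stdout
#     )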
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/codegen.py
|
codegen.py
|
| 0.517083 | 0.112747 |
import os
import shutil
import string
from io import StringIO
from pathlib import Path
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Set,
Union,
)
from xml.sax.saxutils import escape # nosec
from importlib_resources import files
from . import _logger, schema
from .codegen_base import CodeGenBase, TypeDef
from .exceptions import SchemaException
from .java_codegen import _ensure_directory_and_write, _safe_makedirs
from .schema import shortname
def doc_to_doc_string(doc: Optional[str], indent_level: int = 0) -> str:
"""Generate a documentation string from a schema salad doc field."""
lead = "" + " " * indent_level + "/// "
if doc:
doc_str = "\n".join([f"{lead}{escape(line)}" for line in doc.split("\n")])
else:
doc_str = ""
return doc_str
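# Behaviour sketch for the helper above (not from the source); with
# indent_level=1 each output line gains one leading space before the marker:
#
#     doc_to_doc_string("First line\nSecond line", indent_level=1)
#     # -> " /// First line\n /// Second line"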
_string_type_def = TypeDef(
instance_type="string",
init="new PrimitiveLoader<string>()",
name="StringInstance",
loader_type="ILoader<string>",
)
_int_type_def = TypeDef(
instance_type="int",
init="new PrimitiveLoader<int>()",
name="IntegerInstance",
loader_type="ILoader<int>",
)
_long_type_def = TypeDef(
instance_type="long",
name="LongInstance",
loader_type="ILoader<long>",
init="new PrimitiveLoader<long>()",
)
_float_type_def = TypeDef(
instance_type="double",
name="DoubleInstance",
loader_type="ILoader<double>",
init="new PrimitiveLoader<double>()",
)
_bool_type_def = TypeDef(
instance_type="bool",
name="BooleanInstance",
loader_type="ILoader<bool>",
init="new PrimitiveLoader<bool>()",
)
_null_type_def = TypeDef(
instance_type="None",
name="NullInstance",
loader_type="ILoader<object>",
init="new NullLoader()",
)
_any_type_def = TypeDef(
instance_type="object",
name="AnyInstance",
init="new AnyLoader()",
loader_type="ILoader<object>",
)
prims = {
"http://www.w3.org/2001/XMLSchema#string": _string_type_def,
"http://www.w3.org/2001/XMLSchema#int": _int_type_def,
"http://www.w3.org/2001/XMLSchema#long": _long_type_def,
"http://www.w3.org/2001/XMLSchema#float": _float_type_def,
"http://www.w3.org/2001/XMLSchema#double": _float_type_def,
"http://www.w3.org/2001/XMLSchema#boolean": _bool_type_def,
"https://w3id.org/cwl/salad#null": _null_type_def,
"https://w3id.org/cwl/salad#Any": _any_type_def,
"string": _string_type_def,
"int": _int_type_def,
"long": _long_type_def,
"float": _float_type_def,
"double": _float_type_def,
"boolean": _bool_type_def,
"null": _null_type_def,
"Any": _any_type_def,
}
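# Each TypeDef above is later emitted by epilogue() into the generated
# LoaderInstances source as
#     internal static readonly {loader_type} {name} = {init};
# so _int_type_def, for example, becomes (illustrative, derived from that
# template rather than copied from generated output):
#     internal static readonly ILoader<int> IntegerInstance = new PrimitiveLoader<int>();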
class DotNetCodeGen(CodeGenBase):
"""Generation of TypeScript code for a given Schema Salad definition."""
def __init__(
self, base: str, examples: Optional[str], target: Optional[str], package: str
) -> None:
"""Initialize the TypeScript codegen."""
super().__init__()
self.target_dir = Path(target or ".").resolve()
self.main_src_dir = self.target_dir / package / "src"
self.test_src_dir = self.target_dir / "Test"
self.test_resources_dir = self.test_src_dir / "data"
self.package = package
self.base_uri = base
self.examples = examples
def prologue(self) -> None:
"""Trigger to generate the prolouge code."""
for src_dir in [self.main_src_dir]:
_safe_makedirs(src_dir)
for primitive in prims.values():
self.declare_type(primitive)
@staticmethod
def safe_name(name: str) -> str:
"""Generate a safe version of the given name."""
avn = schema.avro_field_name(name)
if avn.startswith("anon."):
avn = avn[5:]
if avn in (
"class",
"in",
"extends",
"abstract",
"default",
"package",
"arguments",
"out",
):
# reserved words
avn = avn + "_"
return avn
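# Behaviour sketch for safe_name() (assumes schema.avro_field_name() reduces a
# field URI to its trailing Avro-safe short name):
#
#     DotNetCodeGen.safe_name("https://w3id.org/cwl/cwl#Workflow/label")  # -> "label"
#     DotNetCodeGen.safe_name("https://w3id.org/cwl/cwl#Workflow/class")  # -> "class_" (reserved word)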
def begin_class(
self, # pylint: disable=too-many-arguments
classname: str,
extends: MutableSequence[str],
doc: str,
abstract: bool,
field_names: MutableSequence[str],
idfield: str,
optional_fields: Set[str],
) -> None:
"""Produce the header for the given class."""
self.current_interface = "I" + self.safe_name(classname)
cls = self.safe_name(classname)
self.current_class = cls
self.current_class_is_abstract = abstract
interface_module_name = self.current_interface
self.current_interface_target_file = self.main_src_dir / f"{interface_module_name}.cs"
class_module_name = self.current_class
self.current_class_target_file = self.main_src_dir / f"{class_module_name}.cs"
self.current_constructor_signature = StringIO()
self.current_constructor_signature_optionals = StringIO()
self.current_constructor_body = StringIO()
self.current_loader = StringIO()
self.current_serializer = StringIO()
self.current_fieldtypes: Dict[str, TypeDef] = {}
self.optional_field_names: List[str] = []
self.mandatory_field_names: List[str] = []
self.idfield = idfield
doc_string = f"""
/// <summary>
/// Auto-generated interface for {classname}
"""
if doc:
doc_string += "///\n"
doc_string += doc_to_doc_string(doc)
doc_string += "\n"
doc_string += "/// </summary>"
with open(self.current_interface_target_file, "w") as f:
_logger.info("Writing file: %s", self.current_interface_target_file)
if extends:
ext = " : " + ", ".join("I" + self.safe_name(e) for e in extends)
else:
ext = ""
f.write(
"""#pragma warning disable CS0108
namespace {package};
{docstring}
public interface {cls}{ext}
{{
""".format(
docstring=doc_string,
cls=f"{self.current_interface}",
ext=ext,
package=self.package,
)
)
if self.current_class_is_abstract:
return
doc_string = f"""
/// <summary>
/// Auto-generated class implementation for {classname}
"""
if doc:
doc_string += "///\n"
doc_string += doc_to_doc_string(doc)
doc_string += "\n"
doc_string += "/// </summary>"
with open(self.current_class_target_file, "w") as f:
_logger.info("Writing file: %s", self.current_class_target_file)
f.write(
"""using System.Collections;
using OneOf;
using OneOf.Types;
namespace {package};
{docstring}
public class {cls} : {current_interface}, ISaveable
{{
readonly LoadingOptions loadingOptions;
readonly Dictionary<object, object> extensionFields;
""".format(
cls=cls,
current_interface=self.current_interface,
docstring=doc_string,
package=self.package,
)
)
self.current_constructor_signature.write(
"\n"
+ "\n"
+ " public {cls}(".format(
cls=cls,
)
)
self.current_constructor_body.write(
"""
this.loadingOptions = loadingOptions ?? new LoadingOptions();
this.extensionFields = extensionFields ?? new Dictionary<object, object>();
"""
)
self.current_loader.write(
"""
public static ISaveable FromDoc(object doc__, string baseUri, LoadingOptions loadingOptions,
string? docRoot = null)
{
List<ValidationException> errors = new();
if (doc__ is not IDictionary)
{
throw new ValidationException("Document has to be of type Dictionary");
}
Dictionary<object, object> doc_ = ((IDictionary)doc__)
.Cast<dynamic>()
.ToDictionary(entry => entry.Key, entry => entry.Value);
"""
)
self.current_serializer.write(
"""
public Dictionary<object, object> Save(bool top = false, string baseUrl = "",
bool relativeUris = true)
{
Dictionary<object, object> r = new();
foreach (KeyValuePair<object, object> ef in extensionFields)
{
r[loadingOptions.PrefixUrl((string)ef.Value)] = ef.Value;
}
"""
)
def end_class(self, classname: str, field_names: List[str]) -> None:
"""Signal that we are done with this class."""
with open(self.current_interface_target_file, "a") as f:
f.write("}\n")
if self.current_class_is_abstract:
return
self.current_constructor_signature.write(
self.current_constructor_signature_optionals.getvalue()
)
self.current_constructor_signature.write(
"LoadingOptions? loadingOptions = null, "
"Dictionary<object, object>? extensionFields = null)"
"\n "
"{"
)
self.current_constructor_body.write(" }\n")
self.current_loader.write(
"""
Dictionary<object, object> extensionFields = new();
foreach (KeyValuePair<object, object> v in doc_)
{{
if (!attr.Contains(v.Key))
{{
if (((string)v.Key).Contains(':'))
{{
string ex = loadingOptions.ExpandUrl((string)v.Key, "", false, false, null);
extensionFields[ex] = v.Value;
}}
else
{{
errors.Add(
new ValidationException($"invalid field {{v.Key}}," +
"expected one of {fields}"));
break;
}}
}}
}}
if (errors.Count > 0)
{{
throw new ValidationException("", errors);
}}
{classname} res__ = new(
""".format(
classname=self.current_class,
fields=", ".join(["`" + f + "`" for f in field_names]),
)
)
self.current_loader.write("loadingOptions: loadingOptions")
if len(self.mandatory_field_names) > 0:
self.current_loader.write(
",\n "
+ ",\n ".join(f + ": " + f for f in self.mandatory_field_names)
)
self.current_loader.write("\n );\n")
for optionalField in self.optional_field_names:
self.current_loader.write(
f"""
if ({optionalField} != null)
{{
res__.{optionalField} = {optionalField};
}}
"""
)
self.current_loader.write("\n return res__;")
self.current_loader.write("\n " + "}" + "\n")
self.current_serializer.write(
"""
if (top)
{
if (loadingOptions.namespaces != null)
{
r["$namespaces"] = loadingOptions.namespaces;
}
if (this.loadingOptions.schemas != null)
{
r["$schemas"] = loadingOptions.schemas;
}
}
return r;
}
"""
)
with open(
self.current_class_target_file,
"a",
) as f:
f.write(self.current_constructor_signature.getvalue())
f.write(self.current_constructor_body.getvalue())
f.write(self.current_loader.getvalue())
f.write(self.current_serializer.getvalue())
f.write(
" static readonly System.Collections.Generic.HashSet<string>"
+ " attr = new() { "
+ ", ".join(['"' + shortname(f) + '"' for f in field_names])
+ " };"
)
f.write(
"""
}
"""
)
def type_loader(self, type_declaration: Union[List[Any], Dict[str, Any], str]) -> TypeDef:
"""Parse the given type declaration and declare its components."""
if isinstance(type_declaration, MutableSequence):
sub_types = [self.type_loader(i) for i in type_declaration]
sub_names: List[str] = list(dict.fromkeys([i.name for i in sub_types]))
sub_instance_types: List[str] = list(
dict.fromkeys([i.instance_type for i in sub_types if i.instance_type is not None])
)
return self.declare_type(
TypeDef(
name="union_of_{}".format("_or_".join(sub_names)),
init="new UnionLoader(new List<ILoader> {{ {} }})".format(", ".join(sub_names)),
instance_type="OneOf<" + ", ".join(sub_instance_types) + ">",
loader_type="ILoader<object>",
)
)
if isinstance(type_declaration, MutableMapping):
if type_declaration["type"] in (
"array",
"https://w3id.org/cwl/salad#array",
):
i = self.type_loader(type_declaration["items"])
return self.declare_type(
TypeDef(
instance_type=f"List<{i.instance_type}>",
name=f"array_of_{i.name}",
loader_type=f"ILoader<List<{i.instance_type}>>",
init=f"new ArrayLoader<{i.instance_type}>({i.name})",
)
)
if type_declaration["type"] in ("enum", "https://w3id.org/cwl/salad#enum"):
return self.type_loader_enum(type_declaration)
if type_declaration["type"] in (
"record",
"https://w3id.org/cwl/salad#record",
):
return self.declare_type(
TypeDef(
instance_type=self.safe_name(type_declaration["name"]),
name=self.safe_name(type_declaration["name"]) + "Loader",
init="new RecordLoader<{}>()".format(
self.safe_name(type_declaration["name"]),
),
loader_type="ILoader<{}>".format(self.safe_name(type_declaration["name"])),
abstract=type_declaration.get("abstract", False),
)
)
raise SchemaException("wft {}".format(type_declaration["type"]))
if type_declaration in prims:
return prims[type_declaration]
if type_declaration in ("Expression", "https://w3id.org/cwl/cwl#Expression"):
return self.declare_type(
TypeDef(
name=self.safe_name(type_declaration) + "Loader",
init="new ExpressionLoader()",
loader_type="ILoader<string>",
instance_type="string",
)
)
return self.collected_types[self.safe_name(type_declaration) + "Loader"]
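# Sketch of how a union declaration is collapsed (loader names follow the
# prims table above; derived from the MutableSequence branch, not from
# generated output):
#
#     type_loader(["null", "string"])
#     # -> TypeDef(name="union_of_NullInstance_or_StringInstance",
#     #            instance_type="OneOf<None, string>",
#     #            init="new UnionLoader(new List<ILoader> { NullInstance, StringInstance })")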
def type_loader_enum(self, type_declaration: Dict[str, Any]) -> TypeDef:
for sym in type_declaration["symbols"]:
self.add_vocab(shortname(sym), sym)
enum_name = self.safe_name(type_declaration["name"])
enum_module_name = enum_name
enum_path = self.main_src_dir / f"{enum_module_name}.cs"
with open(enum_path, "w") as f:
_logger.info("Writing file: %s", enum_path)
f.write(
"""namespace {package};
public class {enum_name} : IEnumClass<{enum_name}>
{{
private string _Name;
private static readonly List<{enum_name}> members = new();
""".format(
enum_name=enum_name, package=self.package
)
)
for sym in type_declaration["symbols"]:
const = self.safe_name(sym).replace("-", "_").replace(".", "_").upper()
f.write(
""" public static readonly {enum_name} {const} =
new("{val}");\n""".format(
const=const, val=self.safe_name(sym), enum_name=enum_name
)
)
f.write(
"""
public string Name
{{
get {{ return _Name; }}
private set {{ _Name = value; }}
}}
public static IList<{enum_name}> Members
{{
get {{ return members; }}
}}
private {enum_name}(string name)
{{
_Name = name;
members.Add(this);
}}
public static {enum_name} Parse(string toParse)
{{
foreach ({enum_name} s in Members)
{{
if (toParse == s.Name)
return s;
}}
throw new FormatException("Could not parse string.");
}}
public static bool Contains(string value)
{{
bool contains = false;
foreach ({enum_name} s in Members)
{{
if (value == s.Name)
{{
contains = true;
return contains;
}}
}}
return contains;
}}
public static List<string> Symbols()
{{
return members.Select(m => m.Name).ToList();
}}
public override string ToString()
{{
return _Name;
}}
}}
""".format(
enum_name=enum_name
)
)
return self.declare_type(
TypeDef(
instance_type=enum_name,
name=self.safe_name(type_declaration["name"] + "Loader"),
init=f"new EnumLoader<{enum_name}>()",
loader_type=f"ILoader<{enum_name}>",
)
)
def declare_field(
self,
name: str,
fieldtype: TypeDef,
doc: Optional[str],
optional: bool,
subscope: str,
) -> None:
"""Output the code to load the given field."""
if self.current_class_is_abstract:
return
safename = self.safe_name(name)
fieldname = shortname(name)
self.current_fieldtypes[safename] = fieldtype
if optional:
self.optional_field_names.append(safename)
if fieldtype.instance_type is not None and not fieldtype.instance_type.startswith(
"OneOf<None"
):
optionalstring = "?"
else:
optionalstring = ""
else:
self.mandatory_field_names.append(safename)
optionalstring = ""
with open(self.current_class_target_file, "a") as f:
if doc:
f.write(
"""
/// <summary>
{doc_str}
/// </summary>
""".format(
doc_str=doc_to_doc_string(doc, indent_level=1)
)
)
f.write(
" public {type}{optionalstring} {safename} {{ get; set; }}\n".format(
safename=safename,
type=fieldtype.instance_type,
optionalstring=optionalstring,
)
)
if fieldname == "class":
if fieldtype.instance_type == "string":
self.current_constructor_signature_optionals.write(
'string {safename} = "{val}", '.format(
safename=safename, val=self.current_class
)
)
else:
self.current_constructor_signature_optionals.write(
"{type}? {safename} = null, ".format(
safename=safename, type=fieldtype.instance_type
)
)
else:
if not optional:
self.current_constructor_signature.write(
"{type} {safename}, ".format(
safename=safename,
type=fieldtype.instance_type,
)
)
else:
if fieldtype.instance_type is not None and fieldtype.instance_type.startswith(
"OneOf<None"
):
self.current_constructor_signature_optionals.write(
"{type} {safename} = default, ".format(
safename=safename,
type=fieldtype.instance_type,
)
)
else:
self.current_constructor_signature_optionals.write(
"{type}? {safename} = null, ".format(
safename=safename,
type=fieldtype.instance_type,
)
)
if fieldname == "class" and fieldtype.instance_type != "string":
self.current_constructor_body.write(
" this.{safeName} = {safeName} ?? {type}.{val};\n".format(
safeName=safename,
type=fieldtype.instance_type,
val=self.current_class.replace("-", "_").replace(".", "_").upper(),
)
)
else:
self.current_constructor_body.write(
" this.{safeName} = {safeName};\n".format(safeName=safename)
)
self.current_loader.write(
"""
dynamic {safename} = default!;""".format(
safename=safename
)
)
if optional:
self.current_loader.write(
"""
if (doc_.ContainsKey("{fieldname}"))
{{""".format(
fieldname=fieldname
)
)
spc = " "
else:
spc = " "
self.current_loader.write(
"""
{spc} try
{spc} {{
{spc} {safename} = LoaderInstances.{fieldtype}
{spc} .LoadField(doc_.GetValueOrDefault("{fieldname}", null!), baseUri,
{spc} loadingOptions);
{spc} }}
{spc} catch (ValidationException e)
{spc} {{
{spc} errors.Add(
{spc} new ValidationException("the `{fieldname}` field is not valid because: ", e)
{spc} );
{spc} }}
""".format(
safename=safename,
fieldname=fieldname,
fieldtype=fieldtype.name,
spc=spc,
)
)
if optional:
self.current_loader.write(" }\n")
if name == self.idfield or not self.idfield:
baseurl = "baseUrl"
elif self.id_field_type.instance_type is not None:
if self.id_field_type.instance_type.startswith("OneOf"):
baseurl = (
f"(this.{self.safe_name(self.idfield)}.Value is "
f'None ? "" : {self.safe_name(self.idfield)}.Value)'
)
else:
baseurl = f"this.{self.safe_name(self.idfield)}"
if fieldtype.is_uri:
self.current_serializer.write(
"""
object? {safename}Val = ISaveable.SaveRelativeUri({safename}, {scoped_id},
relativeUris, {ref_scope}, (string){base_url}!);
if ({safename}Val is not null)
{{
r["{fieldname}"] = {safename}Val;
}}
""".format(
safename=self.safe_name(name),
fieldname=shortname(name).strip(),
base_url=baseurl,
scoped_id=self.to_dotnet(fieldtype.scoped_id),
ref_scope=self.to_dotnet(fieldtype.ref_scope),
)
)
else:
self.current_serializer.write(
"""
object? {safename}Val = ISaveable.Save({safename},
false, (string){base_url}!, relativeUris);
if ({safename}Val is not null)
{{
r["{fieldname}"] = {safename}Val;
}}
""".format(
safename=self.safe_name(name),
fieldname=shortname(name).strip(),
base_url=baseurl,
)
)
def declare_id_field(
self,
name: str,
fieldtype: TypeDef,
doc: str,
optional: bool,
) -> None:
"""Output the code to handle the given ID field."""
self.id_field_type = fieldtype
if self.current_class_is_abstract:
return
self.declare_field(name, fieldtype, doc, True, "")
if optional:
opt = f"""{self.safe_name(name)} = "_" + Guid.NewGuid();"""
else:
opt = """throw new ValidationException("Missing {fieldname}");""".format(
fieldname=shortname(name)
)
self.current_loader.write(
"""
if ({safename} == null)
{{
if (docRoot != null)
{{
{safename} = docRoot;
}}
else
{{
{opt}
}}
}}
else
{{
baseUri = (string){safename};
}}
""".format(
safename=self.safe_name(name), opt=opt
)
)
def to_dotnet(self, val: Any) -> Any:
"""Convert a Python keyword to a DotNet keyword."""
if val is True:
return "true"
elif val is None:
return "null"
elif val is False:
return "false"
return val
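# Quick reference for the literal conversion above (sketch):
#     self.to_dotnet(True)   # -> "true"
#     self.to_dotnet(False)  # -> "false"
#     self.to_dotnet(None)   # -> "null"
#     self.to_dotnet(2)      # -> 2 (any other value is passed through unchanged)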
def uri_loader(
self,
inner: TypeDef,
scoped_id: bool,
vocab_term: bool,
ref_scope: Optional[int],
) -> TypeDef:
"""Construct the TypeDef for the given URI loader."""
instance_type = inner.instance_type or "object"
return self.declare_type(
TypeDef(
instance_type=instance_type,
name=f"uri{inner.name}{scoped_id}{vocab_term}{ref_scope}",
loader_type="ILoader<object>",
init="new UriLoader({}, {}, {}, {})".format(
inner.name,
self.to_dotnet(scoped_id),
self.to_dotnet(vocab_term),
self.to_dotnet(ref_scope),
),
is_uri=True,
scoped_id=scoped_id,
ref_scope=ref_scope,
)
)
def idmap_loader(
self, field: str, inner: TypeDef, map_subject: str, map_predicate: Optional[str]
) -> TypeDef:
"""Construct the TypeDef for the given mapped ID loader."""
instance_type = inner.instance_type or "object"
return self.declare_type(
TypeDef(
instance_type=instance_type,
name=f"idmap{self.safe_name(field)}{inner.name}",
loader_type="ILoader<object>",
init='new IdMapLoader({}, "{}", "{}")'.format(
inner.name, map_subject, map_predicate
),
)
)
def typedsl_loader(self, inner: TypeDef, ref_scope: Optional[int]) -> TypeDef:
"""Construct the TypeDef for the given DSL loader."""
instance_type = inner.instance_type or "object"
return self.declare_type(
TypeDef(
instance_type=instance_type,
name=f"typedsl{self.safe_name(inner.name)}{ref_scope}",
loader_type="ILoader<object>",
init=(f"new TypeDSLLoader" f"({self.safe_name(inner.name)}, {ref_scope})"),
)
)
def epilogue(self, root_loader: TypeDef) -> None:
"""Trigger to generate the epilouge code."""
pd = "This project contains .Net objects and utilities "
pd = pd + 'auto-generated by <a href="https://github.com/'
pd = pd + 'common-workflow-language/schema_salad">Schema Salad</a>'
pd = pd + " for parsing documents corresponding to the "
pd = pd + str(self.base_uri) + " schema."
template_vars: MutableMapping[str, str] = dict(
project_name=self.package,
version="0.0.1-SNAPSHOT",
project_description=pd,
license_name="Apache License, Version 2.0",
)
def template_from_resource(resource: Path) -> string.Template:
template_str = resource.read_text("utf-8")
template = string.Template(template_str)
return template
def expand_resource_template_to(resource: str, path: Path) -> None:
template = template_from_resource(files("schema_salad").joinpath(f"dotnet/{resource}"))
src = template.safe_substitute(template_vars)
_ensure_directory_and_write(path, src)
expand_resource_template_to("editorconfig", self.target_dir / ".editorconfig")
expand_resource_template_to("gitignore", self.target_dir / ".gitignore")
expand_resource_template_to("LICENSE", self.target_dir / "LICENSE")
expand_resource_template_to("README.md", self.target_dir / "README.md")
expand_resource_template_to("Solution.sln", self.target_dir / "Solution.sln")
expand_resource_template_to(
"Project.csproj.template",
self.target_dir / self.package / Path(self.package + ".csproj"),
)
expand_resource_template_to(
"Test.csproj.template",
self.test_src_dir / "Test.csproj",
)
expand_resource_template_to(
"docfx.json",
self.target_dir / self.package / "docfx.json",
)
expand_resource_template_to(
"AssemblyInfo.cs",
self.target_dir / self.package / "Properties" / "AssemblyInfo.cs",
)
vocab = ",\n ".join(
f"""["{k}"] = "{self.vocab[k]}\"""" for k in sorted(self.vocab.keys()) # noqa: B907
)
rvocab = ",\n ".join(
f"""["{self.vocab[k]}"] = "{k}\"""" for k in sorted(self.vocab.keys()) # noqa: B907
)
loader_instances = ""
for _, collected_type in self.collected_types.items():
if not collected_type.abstract:
loader_instances += " internal static readonly {} {} = {};\n".format(
collected_type.loader_type, collected_type.name, collected_type.init
)
example_tests = ""
if self.examples:
_safe_makedirs(self.test_resources_dir)
utils_resources = self.test_resources_dir / "examples"
if os.path.exists(utils_resources):
shutil.rmtree(utils_resources)
shutil.copytree(self.examples, utils_resources)
for example_name in os.listdir(self.examples):
if example_name.startswith("valid"):
basename = os.path.basename(example_name).rsplit(".", 1)[0]
example_tests += """
[TestMethod]
public void Test{basename}()
{{
string? file = System.IO.File.ReadAllText("data/examples/{example_name}");
RootLoader.LoadDocument(file!,
new Uri(Path.GetFullPath("data/examples/{example_name}")).AbsoluteUri);
}}
""".format(
basename=basename.replace("-", "_").replace(".", "_"),
example_name=example_name,
)
template_args: MutableMapping[str, str] = dict(
project_name=self.package,
loader_instances=loader_instances,
vocab=vocab,
rvocab=rvocab,
root_loader=root_loader.name,
root_loader_type=root_loader.instance_type or "object",
tests=example_tests,
project_description=pd,
)
util_src_dirs = {
"util": self.main_src_dir / "util",
"Test": self.test_src_dir,
"DocFx": self.target_dir / "DocFx",
}
def copy_utils_recursive(util_src: str, util_target: Path) -> None:
for util in files("schema_salad").joinpath(f"dotnet/{util_src}").iterdir():
if util.is_dir():
copy_utils_recursive(os.path.join(util_src, util.name), util_target / util.name)
continue
src_path = util_target / util.name
src_template = template_from_resource(util)
src = src_template.safe_substitute(template_args)
_ensure_directory_and_write(src_path, src)
for util_src, util_target in util_src_dirs.items():
copy_utils_recursive(util_src, util_target)
def secondaryfilesdsl_loader(self, inner: TypeDef) -> TypeDef:
"""Construct the TypeDef for secondary files."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
name=f"secondaryfilesdsl{inner.name}",
init=f"new SecondaryDSLLoader({inner.name})",
loader_type="ILoader<object>",
instance_type=instance_type,
)
)
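# How this generator is driven (a sketch mirroring the codegen() driver in
# codegen.py above; the package name is a hypothetical placeholder and this is
# not a standalone program):
#
#     gen = DotNetCodeGen(base_uri, examples=None, target="out", package="CWLDotNet")
#     gen.prologue()              # creates <package>/src and the primitive loaders
#     # per record: gen.begin_class(...), gen.declare_id_field(...)/gen.declare_field(...),
#     #             gen.end_class(...)
#     gen.epilogue(root_loader)   # expands templates, writes LoaderInstances and tests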
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/dotnet_codegen.py
|
dotnet_codegen.py
|
| 0.680135 | 0.154121 |
import copy
import logging
import os
import pathlib
import tempfile
import uuid as _uuid__ # pylint: disable=unused-import # noqa: F401
import xml.sax # nosec
from abc import ABC, abstractmethod
from io import StringIO
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from urllib.parse import quote, urldefrag, urlparse, urlsplit, urlunsplit
from urllib.request import pathname2url
from rdflib import Graph
from rdflib.plugins.parsers.notation3 import BadSyntax
from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import SchemaSaladException, ValidationException
from schema_salad.fetcher import DefaultFetcher, Fetcher, MemoryCachingFetcher
from schema_salad.sourceline import SourceLine, add_lc_filename
from schema_salad.utils import CacheType, yaml_no_ts # requires schema-salad v8.2+
_vocab: Dict[str, str] = {}
_rvocab: Dict[str, str] = {}
_logger = logging.getLogger("salad")
IdxType = MutableMapping[str, Tuple[Any, "LoadingOptions"]]
class LoadingOptions:
idx: IdxType
fileuri: Optional[str]
baseuri: str
namespaces: MutableMapping[str, str]
schemas: MutableSequence[str]
original_doc: Optional[Any]
addl_metadata: MutableMapping[str, Any]
fetcher: Fetcher
vocab: Dict[str, str]
rvocab: Dict[str, str]
cache: CacheType
imports: List[str]
includes: List[str]
def __init__(
self,
fetcher: Optional[Fetcher] = None,
namespaces: Optional[Dict[str, str]] = None,
schemas: Optional[List[str]] = None,
fileuri: Optional[str] = None,
copyfrom: Optional["LoadingOptions"] = None,
original_doc: Optional[Any] = None,
addl_metadata: Optional[Dict[str, str]] = None,
baseuri: Optional[str] = None,
idx: Optional[IdxType] = None,
imports: Optional[List[str]] = None,
includes: Optional[List[str]] = None,
) -> None:
"""Create a LoadingOptions object."""
self.original_doc = original_doc
if idx is not None:
self.idx = idx
else:
self.idx = copyfrom.idx if copyfrom is not None else {}
if fileuri is not None:
self.fileuri = fileuri
else:
self.fileuri = copyfrom.fileuri if copyfrom is not None else None
if baseuri is not None:
self.baseuri = baseuri
else:
self.baseuri = copyfrom.baseuri if copyfrom is not None else ""
if namespaces is not None:
self.namespaces = namespaces
else:
self.namespaces = copyfrom.namespaces if copyfrom is not None else {}
if schemas is not None:
self.schemas = schemas
else:
self.schemas = copyfrom.schemas if copyfrom is not None else []
if addl_metadata is not None:
self.addl_metadata = addl_metadata
else:
self.addl_metadata = copyfrom.addl_metadata if copyfrom is not None else {}
if imports is not None:
self.imports = imports
else:
self.imports = copyfrom.imports if copyfrom is not None else []
if includes is not None:
self.includes = includes
else:
self.includes = copyfrom.includes if copyfrom is not None else []
if fetcher is not None:
self.fetcher = fetcher
elif copyfrom is not None:
self.fetcher = copyfrom.fetcher
else:
import requests
from cachecontrol.caches import FileCache
from cachecontrol.wrapper import CacheControl
root = pathlib.Path(os.environ.get("HOME", tempfile.gettempdir()))
session = CacheControl(
requests.Session(),
cache=FileCache(root / ".cache" / "salad"),
)
self.fetcher: Fetcher = DefaultFetcher({}, session)
self.cache = self.fetcher.cache if isinstance(self.fetcher, MemoryCachingFetcher) else {}
self.vocab = _vocab
self.rvocab = _rvocab
if namespaces is not None:
self.vocab = self.vocab.copy()
self.rvocab = self.rvocab.copy()
for k, v in namespaces.items():
self.vocab[k] = v
self.rvocab[v] = k
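# Note (illustrative): user-supplied namespaces are merged into copies of the module-level
# vocab/rvocab tables rather than mutating the globals, so namespaces={"edam": "http://edamontology.org/"}
# makes self.vocab["edam"] == "http://edamontology.org/" and
# self.rvocab["http://edamontology.org/"] == "edam" for this LoadingOptions only.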
@property
def graph(self) -> Graph:
"""Generate a merged rdflib.Graph from all entries in self.schemas."""
graph = Graph()
if not self.schemas:
return graph
key = str(hash(tuple(self.schemas)))
if key in self.cache:
return cast(Graph, self.cache[key])
for schema in self.schemas:
fetchurl = (
self.fetcher.urljoin(self.fileuri, schema)
if self.fileuri is not None
else pathlib.Path(schema).resolve().as_uri()
)
if fetchurl not in self.cache or self.cache[fetchurl] is True:
_logger.debug("Getting external schema %s", fetchurl)
try:
content = self.fetcher.fetch_text(fetchurl)
except Exception as e:
_logger.warning("Could not load extension schema %s: %s", fetchurl, str(e))
continue
newGraph = Graph()
err_msg = "unknown error"
for fmt in ["xml", "turtle"]:
try:
newGraph.parse(data=content, format=fmt, publicID=str(fetchurl))
self.cache[fetchurl] = newGraph
graph += newGraph
break
except (xml.sax.SAXParseException, TypeError, BadSyntax) as e:
err_msg = str(e)
else:
_logger.warning("Could not load extension schema %s: %s", fetchurl, err_msg)
self.cache[key] = graph
return graph
class Saveable(ABC):
"""Mark classes than have a save() and fromDoc() function."""
@classmethod
@abstractmethod
def fromDoc(
cls,
_doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "Saveable":
"""Construct this object from the result of yaml.load()."""
@abstractmethod
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
"""Convert this object to a JSON/YAML friendly dictionary."""
def load_field(val, fieldtype, baseuri, loadingOptions):
# type: (Union[str, Dict[str, str]], _Loader, str, LoadingOptions) -> Any
if isinstance(val, MutableMapping):
if "$import" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"])
result, metadata = _document_load_by_url(
fieldtype,
url,
loadingOptions,
)
loadingOptions.imports.append(url)
return result
if "$include" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"])
val = loadingOptions.fetcher.fetch_text(url)
loadingOptions.includes.append(url)
return fieldtype.load(val, baseuri, loadingOptions)
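# Behavior sketch (illustrative file names): a mapping value of {"$import": "types.yml"}
# resolves the URL against loadingOptions.fileuri, loads the referenced document with
# `fieldtype`, and records the URL in loadingOptions.imports; {"$include": "snippet.txt"}
# instead fetches the raw text, records the URL in loadingOptions.includes, and passes the
# text to fieldtype.load() as a plain string.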
save_type = Optional[Union[MutableMapping[str, Any], MutableSequence[Any], int, float, bool, str]]
def save(
val: Any,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
if isinstance(val, Saveable):
return val.save(top=top, base_url=base_url, relative_uris=relative_uris)
if isinstance(val, MutableSequence):
return [save(v, top=False, base_url=base_url, relative_uris=relative_uris) for v in val]
if isinstance(val, MutableMapping):
newdict = {}
for key in val:
newdict[key] = save(val[key], top=False, base_url=base_url, relative_uris=relative_uris)
return newdict
if val is None or isinstance(val, (int, float, bool, str)):
return val
raise Exception("Not Saveable: %s" % type(val))
def save_with_metadata(
val: Any,
valLoadingOpts: LoadingOptions,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
"""Save and set $namespaces, $schemas, $base and any other metadata fields at the top level."""
saved_val = save(val, top, base_url, relative_uris)
newdict: MutableMapping[str, Any] = {}
if isinstance(saved_val, MutableSequence):
newdict = {"$graph": saved_val}
elif isinstance(saved_val, MutableMapping):
newdict = saved_val
if valLoadingOpts.namespaces:
newdict["$namespaces"] = valLoadingOpts.namespaces
if valLoadingOpts.schemas:
newdict["$schemas"] = valLoadingOpts.schemas
if valLoadingOpts.baseuri:
newdict["$base"] = valLoadingOpts.baseuri
for k, v in valLoadingOpts.addl_metadata.items():
if k not in newdict:
newdict[k] = v
return newdict
def expand_url(
url, # type: str
base_url, # type: str
loadingOptions, # type: LoadingOptions
scoped_id=False, # type: bool
vocab_term=False, # type: bool
scoped_ref=None, # type: Optional[int]
):
# type: (...) -> str
if url in ("@id", "@type"):
return url
if vocab_term and url in loadingOptions.vocab:
return url
if bool(loadingOptions.vocab) and ":" in url:
prefix = url.split(":")[0]
if prefix in loadingOptions.vocab:
url = loadingOptions.vocab[prefix] + url[len(prefix) + 1 :]
split = urlsplit(url)
if (
(bool(split.scheme) and split.scheme in loadingOptions.fetcher.supported_schemes())
or url.startswith("$(")
or url.startswith("${")
):
pass
elif scoped_id and not bool(split.fragment):
splitbase = urlsplit(base_url)
frg = ""
if bool(splitbase.fragment):
frg = splitbase.fragment + "/" + split.path
else:
frg = split.path
pt = splitbase.path if splitbase.path != "" else "/"
url = urlunsplit((splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
elif scoped_ref is not None and not bool(split.fragment):
splitbase = urlsplit(base_url)
sp = splitbase.fragment.split("/")
n = scoped_ref
while n > 0 and len(sp) > 0:
sp.pop()
n -= 1
sp.append(url)
url = urlunsplit(
(
splitbase.scheme,
splitbase.netloc,
splitbase.path,
splitbase.query,
"/".join(sp),
)
)
else:
url = loadingOptions.fetcher.urljoin(base_url, url)
if vocab_term:
split = urlsplit(url)
if bool(split.scheme):
if url in loadingOptions.rvocab:
return loadingOptions.rvocab[url]
else:
raise ValidationException(f"Term {url!r} not in vocabulary")
return url
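# Illustrative examples (assuming "sld" is a prefix in loadingOptions.vocab):
# expand_url("sld:name", base, opts) expands the prefix to the full namespace URI;
# expand_url("field", "file:///doc.yml#Rec", opts, scoped_id=True) yields
# "file:///doc.yml#Rec/field"; with vocab_term=True, a term already in the vocabulary
# (e.g. "string") is returned unchanged.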
class _Loader:
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
pass
class _AnyLoader(_Loader):
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc is not None:
return doc
raise ValidationException("Expected non-null")
class _PrimitiveLoader(_Loader):
def __init__(self, tp):
# type: (Union[type, Tuple[Type[str], Type[str]]]) -> None
self.tp = tp
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, self.tp):
raise ValidationException(
"Expected a {} but got {}".format(
self.tp.__class__.__name__, doc.__class__.__name__
)
)
return doc
def __repr__(self): # type: () -> str
return str(self.tp)
class _ArrayLoader(_Loader):
def __init__(self, items):
# type: (_Loader) -> None
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableSequence):
raise ValidationException(f"Expected a list, was {type(doc)}")
r = [] # type: List[Any]
errors = [] # type: List[SchemaSaladException]
for i in range(0, len(doc)):
try:
lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)
if isinstance(lf, MutableSequence):
r.extend(lf)
else:
r.append(lf)
except ValidationException as e:
errors.append(e.with_sourceline(SourceLine(doc, i, str)))
if errors:
raise ValidationException("", None, errors)
return r
def __repr__(self): # type: () -> str
return f"array<{self.items}>"
class _EnumLoader(_Loader):
def __init__(self, symbols: Sequence[str], name: str) -> None:
self.symbols = symbols
self.name = name
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc in self.symbols:
return doc
raise ValidationException(f"Expected one of {self.symbols}")
def __repr__(self): # type: () -> str
return self.name
class _SecondaryDSLLoader(_Loader):
def __init__(self, inner):
# type: (_Loader) -> None
self.inner = inner
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
r: List[Dict[str, Any]] = []
if isinstance(doc, MutableSequence):
for d in doc:
if isinstance(d, str):
if d.endswith("?"):
r.append({"pattern": d[:-1], "required": False})
else:
r.append({"pattern": d})
elif isinstance(d, dict):
new_dict: Dict[str, Any] = {}
dict_copy = copy.deepcopy(d)
if "pattern" in dict_copy:
new_dict["pattern"] = dict_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {d}"
)
new_dict["required"] = (
dict_copy.pop("required") if "required" in dict_copy else None
)
if len(dict_copy):
raise ValidationException(
"Unallowed values in secondaryFiles specification entry: {}".format(
dict_copy
)
)
r.append(new_dict)
else:
raise ValidationException(
"Expected a string or sequence of (strings or mappings)."
)
elif isinstance(doc, MutableMapping):
new_dict = {}
doc_copy = copy.deepcopy(doc)
if "pattern" in doc_copy:
new_dict["pattern"] = doc_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {doc}"
)
new_dict["required"] = doc_copy.pop("required") if "required" in doc_copy else None
if len(doc_copy):
raise ValidationException(
f"Unallowed values in secondaryFiles specification entry: {doc_copy}"
)
r.append(new_dict)
elif isinstance(doc, str):
if doc.endswith("?"):
r.append({"pattern": doc[:-1], "required": False})
else:
r.append({"pattern": doc})
else:
raise ValidationException("Expected str or sequence of str")
return self.inner.load(r, baseuri, loadingOptions, docRoot)
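# Illustrative normalization performed above before delegating to self.inner:
# ".bai?" -> {"pattern": ".bai", "required": False}
# ".bai" -> {"pattern": ".bai"}
# {"pattern": "^.fa$", "required": True} -> passed through unchanged (unknown keys are rejected)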
class _RecordLoader(_Loader):
def __init__(self, classtype):
# type: (Type[Saveable]) -> None
self.classtype = classtype
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableMapping):
raise ValidationException(f"Expected a dict, was {type(doc)}")
return self.classtype.fromDoc(doc, baseuri, loadingOptions, docRoot=docRoot)
def __repr__(self): # type: () -> str
return str(self.classtype.__name__)
class _ExpressionLoader(_Loader):
def __init__(self, items: Type[str]) -> None:
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, str):
raise ValidationException(f"Expected a str, was {type(doc)}")
return doc
class _UnionLoader(_Loader):
def __init__(self, alternates: Sequence[_Loader]) -> None:
self.alternates = alternates
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
errors = []
for t in self.alternates:
try:
return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)
except ValidationException as e:
errors.append(ValidationException(f"tried {t} but", None, [e]))
raise ValidationException("", None, errors, "-")
def __repr__(self): # type: () -> str
return " | ".join(str(a) for a in self.alternates)
class _URILoader(_Loader):
def __init__(self, inner, scoped_id, vocab_term, scoped_ref):
# type: (_Loader, bool, bool, Union[int, None]) -> None
self.inner = inner
self.scoped_id = scoped_id
self.vocab_term = vocab_term
self.scoped_ref = scoped_ref
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
newdoc = []
for i in doc:
if isinstance(i, str):
newdoc.append(
expand_url(
i,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
)
else:
newdoc.append(i)
doc = newdoc
elif isinstance(doc, str):
doc = expand_url(
doc,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
return self.inner.load(doc, baseuri, loadingOptions)
class _TypeDSLLoader(_Loader):
def __init__(self, inner, refScope, salad_version):
# type: (_Loader, Union[int, None], str) -> None
self.inner = inner
self.refScope = refScope
self.salad_version = salad_version
def resolve(
self,
doc, # type: str
baseuri, # type: str
loadingOptions, # type: LoadingOptions
):
# type: (...) -> Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
doc_ = doc
optional = False
if doc_.endswith("?"):
optional = True
doc_ = doc_[0:-1]
if doc_.endswith("[]"):
salad_versions = [int(v) for v in self.salad_version[1:].split(".")]
items = "" # type: Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
rest = doc_[0:-2]
if salad_versions < [1, 3]:
if rest.endswith("[]"):
# To show the error message with the original type
return doc
else:
items = expand_url(rest, baseuri, loadingOptions, False, True, self.refScope)
else:
items = self.resolve(rest, baseuri, loadingOptions)
if isinstance(items, str):
items = expand_url(items, baseuri, loadingOptions, False, True, self.refScope)
expanded = {"type": "array", "items": items} # type: Union[Dict[str, Any], str]
else:
expanded = expand_url(doc_, baseuri, loadingOptions, False, True, self.refScope)
if optional:
return ["null", expanded]
else:
return expanded
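# Illustrative resolution (salad v1.3+, assuming "string" is a vocabulary term):
# "string?" -> ["null", "string"]
# "string[]" -> {"type": "array", "items": "string"}
# "string[]?" -> ["null", {"type": "array", "items": "string"}]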
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
r = [] # type: List[Any]
for d in doc:
if isinstance(d, str):
resolved = self.resolve(d, baseuri, loadingOptions)
if isinstance(resolved, MutableSequence):
for i in resolved:
if i not in r:
r.append(i)
else:
if resolved not in r:
r.append(resolved)
else:
r.append(d)
doc = r
elif isinstance(doc, str):
doc = self.resolve(doc, baseuri, loadingOptions)
return self.inner.load(doc, baseuri, loadingOptions)
class _IdMapLoader(_Loader):
def __init__(self, inner, mapSubject, mapPredicate):
# type: (_Loader, str, Union[str, None]) -> None
self.inner = inner
self.mapSubject = mapSubject
self.mapPredicate = mapPredicate
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableMapping):
r = [] # type: List[Any]
for k in sorted(doc.keys()):
val = doc[k]
if isinstance(val, CommentedMap):
v = copy.copy(val)
v.lc.data = val.lc.data
v.lc.filename = val.lc.filename
v[self.mapSubject] = k
r.append(v)
elif isinstance(val, MutableMapping):
v2 = copy.copy(val)
v2[self.mapSubject] = k
r.append(v2)
else:
if self.mapPredicate:
v3 = {self.mapPredicate: val}
v3[self.mapSubject] = k
r.append(v3)
else:
raise ValidationException("No mapPredicate")
doc = r
return self.inner.load(doc, baseuri, loadingOptions)
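# Illustrative idmap expansion with mapSubject="name" and mapPredicate="type":
# {"age": "int", "id": {"type": "string"}} becomes
# [{"type": "int", "name": "age"}, {"type": "string", "name": "id"}]
# (keys are processed in sorted order) before self.inner.load() sees it.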
def _document_load(
loader: _Loader,
doc: Union[str, MutableMapping[str, Any], MutableSequence[Any]],
baseuri: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if isinstance(doc, str):
return _document_load_by_url(
loader,
loadingOptions.fetcher.urljoin(baseuri, doc),
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
if isinstance(doc, MutableMapping):
addl_metadata = {}
if addl_metadata_fields is not None:
for mf in addl_metadata_fields:
if mf in doc:
addl_metadata[mf] = doc[mf]
docuri = baseuri
if "$base" in doc:
baseuri = doc["$base"]
loadingOptions = LoadingOptions(
copyfrom=loadingOptions,
namespaces=doc.get("$namespaces", None),
schemas=doc.get("$schemas", None),
baseuri=doc.get("$base", None),
addl_metadata=addl_metadata,
)
doc = {k: v for k, v in doc.items() if k not in ("$namespaces", "$schemas", "$base")}
if "$graph" in doc:
loadingOptions.idx[baseuri] = (
loader.load(doc["$graph"], baseuri, loadingOptions),
loadingOptions,
)
else:
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions, docRoot=baseuri),
loadingOptions,
)
if docuri != baseuri:
loadingOptions.idx[docuri] = loadingOptions.idx[baseuri]
return loadingOptions.idx[baseuri]
if isinstance(doc, MutableSequence):
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions),
loadingOptions,
)
return loadingOptions.idx[baseuri]
raise ValidationException(
"Expected URI string, MutableMapping or MutableSequence, got %s" % type(doc)
)
def _document_load_by_url(
loader: _Loader,
url: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if url in loadingOptions.idx:
return loadingOptions.idx[url]
doc_url, frg = urldefrag(url)
text = loadingOptions.fetcher.fetch_text(doc_url)
textIO = StringIO(text)
textIO.name = str(doc_url)
yaml = yaml_no_ts()
result = yaml.load(textIO)
add_lc_filename(result, doc_url)
loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=doc_url)
_document_load(
loader,
result,
doc_url,
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
return loadingOptions.idx[url]
def file_uri(path, split_frag=False): # type: (str, bool) -> str
if path.startswith("file://"):
return path
if split_frag:
pathsp = path.split("#", 2)
frag = "#" + quote(str(pathsp[1])) if len(pathsp) == 2 else ""
urlpath = pathname2url(str(pathsp[0]))
else:
urlpath = pathname2url(path)
frag = ""
if urlpath.startswith("//"):
return f"file:{urlpath}{frag}"
return f"file://{urlpath}{frag}"
def prefix_url(url: str, namespaces: Dict[str, str]) -> str:
"""Expand short forms into full URLs using the given namespace dictionary."""
for k, v in namespaces.items():
if url.startswith(v):
return k + ":" + url[len(v) :]
return url
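# Illustrative: prefix_url("https://w3id.org/cwl/salad#type", {"sld": "https://w3id.org/cwl/salad#"})
# returns "sld:type"; URLs that match no namespace are returned unchanged.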
def save_relative_uri(
uri: Any,
base_url: str,
scoped_id: bool,
ref_scope: Optional[int],
relative_uris: bool,
) -> Any:
"""Convert any URI to a relative one, obeying the scoping rules."""
if isinstance(uri, MutableSequence):
return [save_relative_uri(u, base_url, scoped_id, ref_scope, relative_uris) for u in uri]
elif isinstance(uri, str):
if not relative_uris or uri == base_url:
return uri
urisplit = urlsplit(uri)
basesplit = urlsplit(base_url)
if urisplit.scheme == basesplit.scheme and urisplit.netloc == basesplit.netloc:
if urisplit.path != basesplit.path:
p = os.path.relpath(urisplit.path, os.path.dirname(basesplit.path))
if urisplit.fragment:
p = p + "#" + urisplit.fragment
return p
basefrag = basesplit.fragment + "/"
if ref_scope:
sp = basefrag.split("/")
i = 0
while i < ref_scope:
sp.pop()
i += 1
basefrag = "/".join(sp)
if urisplit.fragment.startswith(basefrag):
return urisplit.fragment[len(basefrag) :]
return urisplit.fragment
return uri
else:
return save(uri, top=False, base_url=base_url, relative_uris=relative_uris)
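# Illustrative: with relative_uris=True, a child fragment is made relative to its base, e.g.
# save_relative_uri("file:///doc#Rec/field", "file:///doc#Rec", True, None, True) -> "field";
# URIs on a different scheme/netloc, or any URI when relative_uris=False, are returned as-is.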
def shortname(inputid: str) -> str:
"""
Compute the shortname of a fully qualified identifier.
See https://w3id.org/cwl/v1.2/SchemaSalad.html#Short_names.
"""
parsed_id = urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split("/")[-1]
return parsed_id.path.split("/")[-1]
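# Illustrative only (not part of the generated parser): a minimal, never-called sketch of the
# URI helpers above; the URLs used here are made-up examples.
def _uri_helper_examples() -> None:
    assert shortname("https://w3id.org/cwl/salad#Record/name") == "name"
    assert shortname("https://example.com/types/MyType") == "MyType"
    assert prefix_url("https://w3id.org/cwl/salad#type", {"sld": "https://w3id.org/cwl/salad#"}) == "sld:type"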
def parser_info() -> str:
return "org.w3id.cwl.salad"
class Documented(Saveable):
pass
class RecordField(Documented):
"""
A field of a record.
"""
def __init__(
self,
name: Any,
type: Any,
doc: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.doc = doc
self.name = name
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, RecordField):
return bool(
self.doc == other.doc
and self.name == other.name
and self.type == other.type
)
return False
def __hash__(self) -> int:
return hash((self.doc, self.name, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "RecordField":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
try:
type = load_field(
_doc.get("type"),
typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `doc`, `name`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'RecordField'", None, _errors__)
_constructed = cls(
doc=doc,
name=name,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
# Only attach $namespaces and $schemas metadata when saving the top-level document
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["doc", "name", "type"])
class RecordSchema(Saveable):
def __init__(
self,
type: Any,
fields: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.fields = fields
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, RecordSchema):
return bool(self.fields == other.fields and self.type == other.type)
return False
def __hash__(self) -> int:
return hash((self.fields, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "RecordSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "fields" in _doc:
try:
fields = load_field(
_doc.get("fields"),
idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'fields' field is not valid because:",
SourceLine(_doc, "fields", str),
[e],
)
)
else:
fields = None
try:
type = load_field(
_doc.get("type"),
typedsl_Record_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `fields`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'RecordSchema'", None, _errors__)
_constructed = cls(
fields=fields,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.fields is not None:
r["fields"] = save(
self.fields, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
# Only attach $namespaces and $schemas metadata when saving the top-level document
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["fields", "type"])
class EnumSchema(Saveable):
"""
Define an enumerated type.
"""
def __init__(
self,
symbols: Any,
type: Any,
name: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.symbols = symbols
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, EnumSchema):
return bool(
self.name == other.name
and self.symbols == other.symbols
and self.type == other.type
)
return False
def __hash__(self) -> int:
return hash((self.name, self.symbols, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "EnumSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_union_of_None_type_or_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
name = "_:" + str(_uuid__.uuid4())
if not __original_name_is_none:
baseuri = name
try:
symbols = load_field(
_doc.get("symbols"),
uri_array_of_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'symbols' field is not valid because:",
SourceLine(_doc, "symbols", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Enum_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `symbols`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'EnumSchema'", None, _errors__)
_constructed = cls(
name=name,
symbols=symbols,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.symbols is not None:
u = save_relative_uri(self.symbols, self.name, True, None, relative_uris)
r["symbols"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
# Only attach $namespaces and $schemas metadata when saving the top-level document
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["name", "symbols", "type"])
class ArraySchema(Saveable):
def __init__(
self,
items: Any,
type: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.items = items
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, ArraySchema):
return bool(self.items == other.items and self.type == other.type)
return False
def __hash__(self) -> int:
return hash((self.items, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "ArraySchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
try:
items = load_field(
_doc.get("items"),
uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'items' field is not valid because:",
SourceLine(_doc, "items", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Array_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `items`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'ArraySchema'", None, _errors__)
_constructed = cls(
items=items,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.items is not None:
u = save_relative_uri(self.items, base_url, False, 2, relative_uris)
r["items"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
# Only attach $namespaces and $schemas metadata when saving the top-level document
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["items", "type"])
class JsonldPredicate(Saveable):
"""
Attached to a record field to define how the parent record field is handled for
URI resolution and JSON-LD context generation.
"""
def __init__(
self,
_id: Optional[Any] = None,
_type: Optional[Any] = None,
_container: Optional[Any] = None,
identity: Optional[Any] = None,
noLinkCheck: Optional[Any] = None,
mapSubject: Optional[Any] = None,
mapPredicate: Optional[Any] = None,
refScope: Optional[Any] = None,
typeDSL: Optional[Any] = None,
secondaryFilesDSL: Optional[Any] = None,
subscope: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self._id = _id
self._type = _type
self._container = _container
self.identity = identity
self.noLinkCheck = noLinkCheck
self.mapSubject = mapSubject
self.mapPredicate = mapPredicate
self.refScope = refScope
self.typeDSL = typeDSL
self.secondaryFilesDSL = secondaryFilesDSL
self.subscope = subscope
def __eq__(self, other: Any) -> bool:
if isinstance(other, JsonldPredicate):
return bool(
self._id == other._id
and self._type == other._type
and self._container == other._container
and self.identity == other.identity
and self.noLinkCheck == other.noLinkCheck
and self.mapSubject == other.mapSubject
and self.mapPredicate == other.mapPredicate
and self.refScope == other.refScope
and self.typeDSL == other.typeDSL
and self.secondaryFilesDSL == other.secondaryFilesDSL
and self.subscope == other.subscope
)
return False
def __hash__(self) -> int:
return hash(
(
self._id,
self._type,
self._container,
self.identity,
self.noLinkCheck,
self.mapSubject,
self.mapPredicate,
self.refScope,
self.typeDSL,
self.secondaryFilesDSL,
self.subscope,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "JsonldPredicate":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "_id" in _doc:
try:
_id = load_field(
_doc.get("_id"),
uri_union_of_None_type_or_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the '_id' field is not valid because:",
SourceLine(_doc, "_id", str),
[e],
)
)
else:
_id = None
if "_type" in _doc:
try:
_type = load_field(
_doc.get("_type"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the '_type' field is not valid because:",
SourceLine(_doc, "_type", str),
[e],
)
)
else:
_type = None
if "_container" in _doc:
try:
_container = load_field(
_doc.get("_container"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the '_container' field is not valid because:",
SourceLine(_doc, "_container", str),
[e],
)
)
else:
_container = None
if "identity" in _doc:
try:
identity = load_field(
_doc.get("identity"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'identity' field is not valid because:",
SourceLine(_doc, "identity", str),
[e],
)
)
else:
identity = None
if "noLinkCheck" in _doc:
try:
noLinkCheck = load_field(
_doc.get("noLinkCheck"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'noLinkCheck' field is not valid because:",
SourceLine(_doc, "noLinkCheck", str),
[e],
)
)
else:
noLinkCheck = None
if "mapSubject" in _doc:
try:
mapSubject = load_field(
_doc.get("mapSubject"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'mapSubject' field is not valid because:",
SourceLine(_doc, "mapSubject", str),
[e],
)
)
else:
mapSubject = None
if "mapPredicate" in _doc:
try:
mapPredicate = load_field(
_doc.get("mapPredicate"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'mapPredicate' field is not valid because:",
SourceLine(_doc, "mapPredicate", str),
[e],
)
)
else:
mapPredicate = None
if "refScope" in _doc:
try:
refScope = load_field(
_doc.get("refScope"),
union_of_None_type_or_inttype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'refScope' field is not valid because:",
SourceLine(_doc, "refScope", str),
[e],
)
)
else:
refScope = None
if "typeDSL" in _doc:
try:
typeDSL = load_field(
_doc.get("typeDSL"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'typeDSL' field is not valid because:",
SourceLine(_doc, "typeDSL", str),
[e],
)
)
else:
typeDSL = None
if "secondaryFilesDSL" in _doc:
try:
secondaryFilesDSL = load_field(
_doc.get("secondaryFilesDSL"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'secondaryFilesDSL' field is not valid because:",
SourceLine(_doc, "secondaryFilesDSL", str),
[e],
)
)
else:
secondaryFilesDSL = None
if "subscope" in _doc:
try:
subscope = load_field(
_doc.get("subscope"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'subscope' field is not valid because:",
SourceLine(_doc, "subscope", str),
[e],
)
)
else:
subscope = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `_id`, `_type`, `_container`, `identity`, `noLinkCheck`, `mapSubject`, `mapPredicate`, `refScope`, `typeDSL`, `secondaryFilesDSL`, `subscope`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'JsonldPredicate'", None, _errors__)
_constructed = cls(
_id=_id,
_type=_type,
_container=_container,
identity=identity,
noLinkCheck=noLinkCheck,
mapSubject=mapSubject,
mapPredicate=mapPredicate,
refScope=refScope,
typeDSL=typeDSL,
secondaryFilesDSL=secondaryFilesDSL,
subscope=subscope,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self._id is not None:
u = save_relative_uri(self._id, base_url, True, None, relative_uris)
r["_id"] = u
if self._type is not None:
r["_type"] = save(
self._type, top=False, base_url=base_url, relative_uris=relative_uris
)
if self._container is not None:
r["_container"] = save(
self._container,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.identity is not None:
r["identity"] = save(
self.identity, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.noLinkCheck is not None:
r["noLinkCheck"] = save(
self.noLinkCheck,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.mapSubject is not None:
r["mapSubject"] = save(
self.mapSubject,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.mapPredicate is not None:
r["mapPredicate"] = save(
self.mapPredicate,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.refScope is not None:
r["refScope"] = save(
self.refScope, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.typeDSL is not None:
r["typeDSL"] = save(
self.typeDSL, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.secondaryFilesDSL is not None:
r["secondaryFilesDSL"] = save(
self.secondaryFilesDSL,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.subscope is not None:
r["subscope"] = save(
self.subscope, top=False, base_url=base_url, relative_uris=relative_uris
)
# Only attach $namespaces and $schemas metadata when saving the top-level document
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"_id",
"_type",
"_container",
"identity",
"noLinkCheck",
"mapSubject",
"mapPredicate",
"refScope",
"typeDSL",
"secondaryFilesDSL",
"subscope",
]
)
class SpecializeDef(Saveable):
def __init__(
self,
specializeFrom: Any,
specializeTo: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.specializeFrom = specializeFrom
self.specializeTo = specializeTo
def __eq__(self, other: Any) -> bool:
if isinstance(other, SpecializeDef):
return bool(
self.specializeFrom == other.specializeFrom
and self.specializeTo == other.specializeTo
)
return False
def __hash__(self) -> int:
return hash((self.specializeFrom, self.specializeTo))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SpecializeDef":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
try:
specializeFrom = load_field(
_doc.get("specializeFrom"),
uri_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'specializeFrom' field is not valid because:",
SourceLine(_doc, "specializeFrom", str),
[e],
)
)
try:
specializeTo = load_field(
_doc.get("specializeTo"),
uri_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'specializeTo' field is not valid because:",
SourceLine(_doc, "specializeTo", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `specializeFrom`, `specializeTo`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SpecializeDef'", None, _errors__)
_constructed = cls(
specializeFrom=specializeFrom,
specializeTo=specializeTo,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.specializeFrom is not None:
u = save_relative_uri(
self.specializeFrom, base_url, False, 1, relative_uris
)
r["specializeFrom"] = u
if self.specializeTo is not None:
u = save_relative_uri(self.specializeTo, base_url, False, 1, relative_uris)
r["specializeTo"] = u
# Only attach $namespaces and $schemas metadata when saving the top-level document
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["specializeFrom", "specializeTo"])
class NamedType(Saveable):
pass
class DocType(Documented):
pass
class SchemaDefinedType(DocType):
"""
Abstract base for schema-defined types.
"""
pass
class SaladRecordField(RecordField):
"""
A field of a record.
"""
def __init__(
self,
name: Any,
type: Any,
doc: Optional[Any] = None,
jsonldPredicate: Optional[Any] = None,
default: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.doc = doc
self.name = name
self.type = type
self.jsonldPredicate = jsonldPredicate
self.default = default
def __eq__(self, other: Any) -> bool:
if isinstance(other, SaladRecordField):
return bool(
self.doc == other.doc
and self.name == other.name
and self.type == other.type
and self.jsonldPredicate == other.jsonldPredicate
and self.default == other.default
)
return False
def __hash__(self) -> int:
return hash(
(self.doc, self.name, self.type, self.jsonldPredicate, self.default)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SaladRecordField":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
try:
type = load_field(
_doc.get("type"),
typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
if "jsonldPredicate" in _doc:
try:
jsonldPredicate = load_field(
_doc.get("jsonldPredicate"),
union_of_None_type_or_strtype_or_JsonldPredicateLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'jsonldPredicate' field is not valid because:",
SourceLine(_doc, "jsonldPredicate", str),
[e],
)
)
else:
jsonldPredicate = None
if "default" in _doc:
try:
default = load_field(
_doc.get("default"),
union_of_None_type_or_Any_type,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'default' field is not valid because:",
SourceLine(_doc, "default", str),
[e],
)
)
else:
default = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `doc`, `name`, `type`, `jsonldPredicate`, `default`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SaladRecordField'", None, _errors__)
_constructed = cls(
doc=doc,
name=name,
type=type,
jsonldPredicate=jsonldPredicate,
default=default,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.jsonldPredicate is not None:
r["jsonldPredicate"] = save(
self.jsonldPredicate,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.default is not None:
r["default"] = save(
self.default, top=False, base_url=self.name, relative_uris=relative_uris
)
# Only attach $namespaces and $schemas metadata when saving the top-level document
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["doc", "name", "type", "jsonldPredicate", "default"])
class SaladRecordSchema(NamedType, RecordSchema, SchemaDefinedType):
def __init__(
self,
name: Any,
type: Any,
inVocab: Optional[Any] = None,
fields: Optional[Any] = None,
doc: Optional[Any] = None,
docParent: Optional[Any] = None,
docChild: Optional[Any] = None,
docAfter: Optional[Any] = None,
jsonldPredicate: Optional[Any] = None,
documentRoot: Optional[Any] = None,
abstract: Optional[Any] = None,
extends: Optional[Any] = None,
specialize: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.inVocab = inVocab
self.fields = fields
self.type = type
self.doc = doc
self.docParent = docParent
self.docChild = docChild
self.docAfter = docAfter
self.jsonldPredicate = jsonldPredicate
self.documentRoot = documentRoot
self.abstract = abstract
self.extends = extends
self.specialize = specialize
def __eq__(self, other: Any) -> bool:
if isinstance(other, SaladRecordSchema):
return bool(
self.name == other.name
and self.inVocab == other.inVocab
and self.fields == other.fields
and self.type == other.type
and self.doc == other.doc
and self.docParent == other.docParent
and self.docChild == other.docChild
and self.docAfter == other.docAfter
and self.jsonldPredicate == other.jsonldPredicate
and self.documentRoot == other.documentRoot
and self.abstract == other.abstract
and self.extends == other.extends
and self.specialize == other.specialize
)
return False
def __hash__(self) -> int:
return hash(
(
self.name,
self.inVocab,
self.fields,
self.type,
self.doc,
self.docParent,
self.docChild,
self.docAfter,
self.jsonldPredicate,
self.documentRoot,
self.abstract,
self.extends,
self.specialize,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SaladRecordSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "inVocab" in _doc:
try:
inVocab = load_field(
_doc.get("inVocab"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'inVocab' field is not valid because:",
SourceLine(_doc, "inVocab", str),
[e],
)
)
else:
inVocab = None
if "fields" in _doc:
try:
fields = load_field(
_doc.get("fields"),
idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'fields' field is not valid because:",
SourceLine(_doc, "fields", str),
[e],
)
)
else:
fields = None
try:
type = load_field(
_doc.get("type"),
typedsl_Record_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
if "docParent" in _doc:
try:
docParent = load_field(
_doc.get("docParent"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docParent' field is not valid because:",
SourceLine(_doc, "docParent", str),
[e],
)
)
else:
docParent = None
if "docChild" in _doc:
try:
docChild = load_field(
_doc.get("docChild"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docChild' field is not valid because:",
SourceLine(_doc, "docChild", str),
[e],
)
)
else:
docChild = None
if "docAfter" in _doc:
try:
docAfter = load_field(
_doc.get("docAfter"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docAfter' field is not valid because:",
SourceLine(_doc, "docAfter", str),
[e],
)
)
else:
docAfter = None
if "jsonldPredicate" in _doc:
try:
jsonldPredicate = load_field(
_doc.get("jsonldPredicate"),
union_of_None_type_or_strtype_or_JsonldPredicateLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'jsonldPredicate' field is not valid because:",
SourceLine(_doc, "jsonldPredicate", str),
[e],
)
)
else:
jsonldPredicate = None
if "documentRoot" in _doc:
try:
documentRoot = load_field(
_doc.get("documentRoot"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'documentRoot' field is not valid because:",
SourceLine(_doc, "documentRoot", str),
[e],
)
)
else:
documentRoot = None
if "abstract" in _doc:
try:
abstract = load_field(
_doc.get("abstract"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'abstract' field is not valid because:",
SourceLine(_doc, "abstract", str),
[e],
)
)
else:
abstract = None
if "extends" in _doc:
try:
extends = load_field(
_doc.get("extends"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'extends' field is not valid because:",
SourceLine(_doc, "extends", str),
[e],
)
)
else:
extends = None
if "specialize" in _doc:
try:
specialize = load_field(
_doc.get("specialize"),
idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'specialize' field is not valid because:",
SourceLine(_doc, "specialize", str),
[e],
)
)
else:
specialize = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `inVocab`, `fields`, `type`, `doc`, `docParent`, `docChild`, `docAfter`, `jsonldPredicate`, `documentRoot`, `abstract`, `extends`, `specialize`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SaladRecordSchema'", None, _errors__)
_constructed = cls(
name=name,
inVocab=inVocab,
fields=fields,
type=type,
doc=doc,
docParent=docParent,
docChild=docChild,
docAfter=docAfter,
jsonldPredicate=jsonldPredicate,
documentRoot=documentRoot,
abstract=abstract,
extends=extends,
specialize=specialize,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.inVocab is not None:
r["inVocab"] = save(
self.inVocab, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.fields is not None:
r["fields"] = save(
self.fields, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.docParent is not None:
u = save_relative_uri(self.docParent, self.name, False, None, relative_uris)
r["docParent"] = u
if self.docChild is not None:
u = save_relative_uri(self.docChild, self.name, False, None, relative_uris)
r["docChild"] = u
if self.docAfter is not None:
u = save_relative_uri(self.docAfter, self.name, False, None, relative_uris)
r["docAfter"] = u
if self.jsonldPredicate is not None:
r["jsonldPredicate"] = save(
self.jsonldPredicate,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.documentRoot is not None:
r["documentRoot"] = save(
self.documentRoot,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.abstract is not None:
r["abstract"] = save(
self.abstract,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.extends is not None:
u = save_relative_uri(self.extends, self.name, False, 1, relative_uris)
r["extends"] = u
if self.specialize is not None:
r["specialize"] = save(
self.specialize,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
        # "top" means this object is the document root; only then emit $namespaces and $schemas
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"name",
"inVocab",
"fields",
"type",
"doc",
"docParent",
"docChild",
"docAfter",
"jsonldPredicate",
"documentRoot",
"abstract",
"extends",
"specialize",
]
)
class SaladEnumSchema(NamedType, EnumSchema, SchemaDefinedType):
"""
Define an enumerated type.
"""
def __init__(
self,
symbols: Any,
type: Any,
name: Optional[Any] = None,
inVocab: Optional[Any] = None,
doc: Optional[Any] = None,
docParent: Optional[Any] = None,
docChild: Optional[Any] = None,
docAfter: Optional[Any] = None,
jsonldPredicate: Optional[Any] = None,
documentRoot: Optional[Any] = None,
extends: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.inVocab = inVocab
self.symbols = symbols
self.type = type
self.doc = doc
self.docParent = docParent
self.docChild = docChild
self.docAfter = docAfter
self.jsonldPredicate = jsonldPredicate
self.documentRoot = documentRoot
self.extends = extends
def __eq__(self, other: Any) -> bool:
if isinstance(other, SaladEnumSchema):
return bool(
self.name == other.name
and self.inVocab == other.inVocab
and self.symbols == other.symbols
and self.type == other.type
and self.doc == other.doc
and self.docParent == other.docParent
and self.docChild == other.docChild
and self.docAfter == other.docAfter
and self.jsonldPredicate == other.jsonldPredicate
and self.documentRoot == other.documentRoot
and self.extends == other.extends
)
return False
def __hash__(self) -> int:
return hash(
(
self.name,
self.inVocab,
self.symbols,
self.type,
self.doc,
self.docParent,
self.docChild,
self.docAfter,
self.jsonldPredicate,
self.documentRoot,
self.extends,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SaladEnumSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_union_of_None_type_or_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
name = "_:" + str(_uuid__.uuid4())
if not __original_name_is_none:
baseuri = name
if "inVocab" in _doc:
try:
inVocab = load_field(
_doc.get("inVocab"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'inVocab' field is not valid because:",
SourceLine(_doc, "inVocab", str),
[e],
)
)
else:
inVocab = None
try:
symbols = load_field(
_doc.get("symbols"),
uri_array_of_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'symbols' field is not valid because:",
SourceLine(_doc, "symbols", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Enum_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
if "docParent" in _doc:
try:
docParent = load_field(
_doc.get("docParent"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docParent' field is not valid because:",
SourceLine(_doc, "docParent", str),
[e],
)
)
else:
docParent = None
if "docChild" in _doc:
try:
docChild = load_field(
_doc.get("docChild"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docChild' field is not valid because:",
SourceLine(_doc, "docChild", str),
[e],
)
)
else:
docChild = None
if "docAfter" in _doc:
try:
docAfter = load_field(
_doc.get("docAfter"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docAfter' field is not valid because:",
SourceLine(_doc, "docAfter", str),
[e],
)
)
else:
docAfter = None
if "jsonldPredicate" in _doc:
try:
jsonldPredicate = load_field(
_doc.get("jsonldPredicate"),
union_of_None_type_or_strtype_or_JsonldPredicateLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'jsonldPredicate' field is not valid because:",
SourceLine(_doc, "jsonldPredicate", str),
[e],
)
)
else:
jsonldPredicate = None
if "documentRoot" in _doc:
try:
documentRoot = load_field(
_doc.get("documentRoot"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'documentRoot' field is not valid because:",
SourceLine(_doc, "documentRoot", str),
[e],
)
)
else:
documentRoot = None
if "extends" in _doc:
try:
extends = load_field(
_doc.get("extends"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'extends' field is not valid because:",
SourceLine(_doc, "extends", str),
[e],
)
)
else:
extends = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `inVocab`, `symbols`, `type`, `doc`, `docParent`, `docChild`, `docAfter`, `jsonldPredicate`, `documentRoot`, `extends`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SaladEnumSchema'", None, _errors__)
_constructed = cls(
name=name,
inVocab=inVocab,
symbols=symbols,
type=type,
doc=doc,
docParent=docParent,
docChild=docChild,
docAfter=docAfter,
jsonldPredicate=jsonldPredicate,
documentRoot=documentRoot,
extends=extends,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.inVocab is not None:
r["inVocab"] = save(
self.inVocab, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.symbols is not None:
u = save_relative_uri(self.symbols, self.name, True, None, relative_uris)
r["symbols"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.docParent is not None:
u = save_relative_uri(self.docParent, self.name, False, None, relative_uris)
r["docParent"] = u
if self.docChild is not None:
u = save_relative_uri(self.docChild, self.name, False, None, relative_uris)
r["docChild"] = u
if self.docAfter is not None:
u = save_relative_uri(self.docAfter, self.name, False, None, relative_uris)
r["docAfter"] = u
if self.jsonldPredicate is not None:
r["jsonldPredicate"] = save(
self.jsonldPredicate,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.documentRoot is not None:
r["documentRoot"] = save(
self.documentRoot,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.extends is not None:
u = save_relative_uri(self.extends, self.name, False, 1, relative_uris)
r["extends"] = u
        # "top" means this object is the document root; only then emit $namespaces and $schemas
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"name",
"inVocab",
"symbols",
"type",
"doc",
"docParent",
"docChild",
"docAfter",
"jsonldPredicate",
"documentRoot",
"extends",
]
)
class Documentation(NamedType, DocType):
"""
A documentation section. This type exists to facilitate self-documenting
schemas but has no role in formal validation.
"""
def __init__(
self,
name: Any,
type: Any,
inVocab: Optional[Any] = None,
doc: Optional[Any] = None,
docParent: Optional[Any] = None,
docChild: Optional[Any] = None,
docAfter: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.inVocab = inVocab
self.doc = doc
self.docParent = docParent
self.docChild = docChild
self.docAfter = docAfter
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, Documentation):
return bool(
self.name == other.name
and self.inVocab == other.inVocab
and self.doc == other.doc
and self.docParent == other.docParent
and self.docChild == other.docChild
and self.docAfter == other.docAfter
and self.type == other.type
)
return False
def __hash__(self) -> int:
return hash(
(
self.name,
self.inVocab,
self.doc,
self.docParent,
self.docChild,
self.docAfter,
self.type,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "Documentation":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "inVocab" in _doc:
try:
inVocab = load_field(
_doc.get("inVocab"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'inVocab' field is not valid because:",
SourceLine(_doc, "inVocab", str),
[e],
)
)
else:
inVocab = None
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
if "docParent" in _doc:
try:
docParent = load_field(
_doc.get("docParent"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docParent' field is not valid because:",
SourceLine(_doc, "docParent", str),
[e],
)
)
else:
docParent = None
if "docChild" in _doc:
try:
docChild = load_field(
_doc.get("docChild"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docChild' field is not valid because:",
SourceLine(_doc, "docChild", str),
[e],
)
)
else:
docChild = None
if "docAfter" in _doc:
try:
docAfter = load_field(
_doc.get("docAfter"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docAfter' field is not valid because:",
SourceLine(_doc, "docAfter", str),
[e],
)
)
else:
docAfter = None
try:
type = load_field(
_doc.get("type"),
typedsl_Documentation_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `inVocab`, `doc`, `docParent`, `docChild`, `docAfter`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'Documentation'", None, _errors__)
_constructed = cls(
name=name,
inVocab=inVocab,
doc=doc,
docParent=docParent,
docChild=docChild,
docAfter=docAfter,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.inVocab is not None:
r["inVocab"] = save(
self.inVocab, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.docParent is not None:
u = save_relative_uri(self.docParent, self.name, False, None, relative_uris)
r["docParent"] = u
if self.docChild is not None:
u = save_relative_uri(self.docChild, self.name, False, None, relative_uris)
r["docChild"] = u
if self.docAfter is not None:
u = save_relative_uri(self.docAfter, self.name, False, None, relative_uris)
r["docAfter"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
        # "top" means this object is the document root; only then emit $namespaces and $schemas
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
["name", "inVocab", "doc", "docParent", "docChild", "docAfter", "type"]
)
_vocab = {
"Any": "https://w3id.org/cwl/salad#Any",
"ArraySchema": "https://w3id.org/cwl/salad#ArraySchema",
"DocType": "https://w3id.org/cwl/salad#DocType",
"Documentation": "https://w3id.org/cwl/salad#Documentation",
"Documented": "https://w3id.org/cwl/salad#Documented",
"EnumSchema": "https://w3id.org/cwl/salad#EnumSchema",
"JsonldPredicate": "https://w3id.org/cwl/salad#JsonldPredicate",
"NamedType": "https://w3id.org/cwl/salad#NamedType",
"PrimitiveType": "https://w3id.org/cwl/salad#PrimitiveType",
"RecordField": "https://w3id.org/cwl/salad#RecordField",
"RecordSchema": "https://w3id.org/cwl/salad#RecordSchema",
"SaladEnumSchema": "https://w3id.org/cwl/salad#SaladEnumSchema",
"SaladRecordField": "https://w3id.org/cwl/salad#SaladRecordField",
"SaladRecordSchema": "https://w3id.org/cwl/salad#SaladRecordSchema",
"SchemaDefinedType": "https://w3id.org/cwl/salad#SchemaDefinedType",
"SpecializeDef": "https://w3id.org/cwl/salad#SpecializeDef",
"array": "https://w3id.org/cwl/salad#array",
"boolean": "http://www.w3.org/2001/XMLSchema#boolean",
"documentation": "https://w3id.org/cwl/salad#documentation",
"double": "http://www.w3.org/2001/XMLSchema#double",
"enum": "https://w3id.org/cwl/salad#enum",
"float": "http://www.w3.org/2001/XMLSchema#float",
"int": "http://www.w3.org/2001/XMLSchema#int",
"long": "http://www.w3.org/2001/XMLSchema#long",
"null": "https://w3id.org/cwl/salad#null",
"record": "https://w3id.org/cwl/salad#record",
"string": "http://www.w3.org/2001/XMLSchema#string",
}
_rvocab = {
"https://w3id.org/cwl/salad#Any": "Any",
"https://w3id.org/cwl/salad#ArraySchema": "ArraySchema",
"https://w3id.org/cwl/salad#DocType": "DocType",
"https://w3id.org/cwl/salad#Documentation": "Documentation",
"https://w3id.org/cwl/salad#Documented": "Documented",
"https://w3id.org/cwl/salad#EnumSchema": "EnumSchema",
"https://w3id.org/cwl/salad#JsonldPredicate": "JsonldPredicate",
"https://w3id.org/cwl/salad#NamedType": "NamedType",
"https://w3id.org/cwl/salad#PrimitiveType": "PrimitiveType",
"https://w3id.org/cwl/salad#RecordField": "RecordField",
"https://w3id.org/cwl/salad#RecordSchema": "RecordSchema",
"https://w3id.org/cwl/salad#SaladEnumSchema": "SaladEnumSchema",
"https://w3id.org/cwl/salad#SaladRecordField": "SaladRecordField",
"https://w3id.org/cwl/salad#SaladRecordSchema": "SaladRecordSchema",
"https://w3id.org/cwl/salad#SchemaDefinedType": "SchemaDefinedType",
"https://w3id.org/cwl/salad#SpecializeDef": "SpecializeDef",
"https://w3id.org/cwl/salad#array": "array",
"http://www.w3.org/2001/XMLSchema#boolean": "boolean",
"https://w3id.org/cwl/salad#documentation": "documentation",
"http://www.w3.org/2001/XMLSchema#double": "double",
"https://w3id.org/cwl/salad#enum": "enum",
"http://www.w3.org/2001/XMLSchema#float": "float",
"http://www.w3.org/2001/XMLSchema#int": "int",
"http://www.w3.org/2001/XMLSchema#long": "long",
"https://w3id.org/cwl/salad#null": "null",
"https://w3id.org/cwl/salad#record": "record",
"http://www.w3.org/2001/XMLSchema#string": "string",
}
strtype = _PrimitiveLoader(str)
inttype = _PrimitiveLoader(int)
floattype = _PrimitiveLoader(float)
booltype = _PrimitiveLoader(bool)
None_type = _PrimitiveLoader(type(None))
Any_type = _AnyLoader()
PrimitiveTypeLoader = _EnumLoader(
(
"null",
"boolean",
"int",
"long",
"float",
"double",
"string",
),
"PrimitiveType",
)
"""
Names of salad data types (based on Avro schema declarations).
Refer to the [Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
detailed information.
null: no value
boolean: a binary value
int: 32-bit signed integer
long: 64-bit signed integer
float: single precision (32-bit) IEEE 754 floating-point number
double: double precision (64-bit) IEEE 754 floating-point number
string: Unicode character sequence
"""
AnyLoader = _EnumLoader(("Any",), "Any")
"""
The **Any** type validates for any non-null value.
"""
RecordFieldLoader = _RecordLoader(RecordField)
RecordSchemaLoader = _RecordLoader(RecordSchema)
EnumSchemaLoader = _RecordLoader(EnumSchema)
ArraySchemaLoader = _RecordLoader(ArraySchema)
JsonldPredicateLoader = _RecordLoader(JsonldPredicate)
SpecializeDefLoader = _RecordLoader(SpecializeDef)
SaladRecordFieldLoader = _RecordLoader(SaladRecordField)
SaladRecordSchemaLoader = _RecordLoader(SaladRecordSchema)
SaladEnumSchemaLoader = _RecordLoader(SaladEnumSchema)
DocumentationLoader = _RecordLoader(Documentation)
array_of_strtype = _ArrayLoader(strtype)
union_of_None_type_or_strtype_or_array_of_strtype = _UnionLoader(
(
None_type,
strtype,
array_of_strtype,
)
)
uri_strtype_True_False_None = _URILoader(strtype, True, False, None)
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader(
(
PrimitiveTypeLoader,
RecordSchemaLoader,
EnumSchemaLoader,
ArraySchemaLoader,
strtype,
)
)
array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _ArrayLoader(
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype
)
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader(
(
PrimitiveTypeLoader,
RecordSchemaLoader,
EnumSchemaLoader,
ArraySchemaLoader,
strtype,
array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,
)
)
typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,
2,
"v1.1",
)
array_of_RecordFieldLoader = _ArrayLoader(RecordFieldLoader)
union_of_None_type_or_array_of_RecordFieldLoader = _UnionLoader(
(
None_type,
array_of_RecordFieldLoader,
)
)
idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader = _IdMapLoader(
union_of_None_type_or_array_of_RecordFieldLoader, "name", "type"
)
Record_nameLoader = _EnumLoader(("record",), "Record_name")
typedsl_Record_nameLoader_2 = _TypeDSLLoader(Record_nameLoader, 2, "v1.1")
union_of_None_type_or_strtype = _UnionLoader(
(
None_type,
strtype,
)
)
uri_union_of_None_type_or_strtype_True_False_None = _URILoader(
union_of_None_type_or_strtype, True, False, None
)
uri_array_of_strtype_True_False_None = _URILoader(array_of_strtype, True, False, None)
Enum_nameLoader = _EnumLoader(("enum",), "Enum_name")
typedsl_Enum_nameLoader_2 = _TypeDSLLoader(Enum_nameLoader, 2, "v1.1")
uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2 = _URILoader(
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,
False,
True,
2,
)
Array_nameLoader = _EnumLoader(("array",), "Array_name")
typedsl_Array_nameLoader_2 = _TypeDSLLoader(Array_nameLoader, 2, "v1.1")
union_of_None_type_or_booltype = _UnionLoader(
(
None_type,
booltype,
)
)
union_of_None_type_or_inttype = _UnionLoader(
(
None_type,
inttype,
)
)
uri_strtype_False_False_1 = _URILoader(strtype, False, False, 1)
uri_union_of_None_type_or_strtype_False_False_None = _URILoader(
union_of_None_type_or_strtype, False, False, None
)
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None = _URILoader(
union_of_None_type_or_strtype_or_array_of_strtype, False, False, None
)
union_of_None_type_or_strtype_or_JsonldPredicateLoader = _UnionLoader(
(
None_type,
strtype,
JsonldPredicateLoader,
)
)
union_of_None_type_or_Any_type = _UnionLoader(
(
None_type,
Any_type,
)
)
array_of_SaladRecordFieldLoader = _ArrayLoader(SaladRecordFieldLoader)
union_of_None_type_or_array_of_SaladRecordFieldLoader = _UnionLoader(
(
None_type,
array_of_SaladRecordFieldLoader,
)
)
idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader = _IdMapLoader(
union_of_None_type_or_array_of_SaladRecordFieldLoader, "name", "type"
)
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1 = _URILoader(
union_of_None_type_or_strtype_or_array_of_strtype, False, False, 1
)
array_of_SpecializeDefLoader = _ArrayLoader(SpecializeDefLoader)
union_of_None_type_or_array_of_SpecializeDefLoader = _UnionLoader(
(
None_type,
array_of_SpecializeDefLoader,
)
)
idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader = _IdMapLoader(
union_of_None_type_or_array_of_SpecializeDefLoader, "specializeFrom", "specializeTo"
)
Documentation_nameLoader = _EnumLoader(("documentation",), "Documentation_name")
typedsl_Documentation_nameLoader_2 = _TypeDSLLoader(Documentation_nameLoader, 2, "v1.1")
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = (
_UnionLoader(
(
SaladRecordSchemaLoader,
SaladEnumSchemaLoader,
DocumentationLoader,
)
)
)
array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _ArrayLoader(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader
)
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _UnionLoader(
(
SaladRecordSchemaLoader,
SaladEnumSchemaLoader,
DocumentationLoader,
array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
)
)
def load_document(
doc: Any,
baseuri: Optional[str] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> Any:
if baseuri is None:
baseuri = file_uri(os.getcwd()) + "/"
if loadingOptions is None:
loadingOptions = LoadingOptions()
result, metadata = _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
doc,
baseuri,
loadingOptions,
)
return result
def load_document_with_metadata(
doc: Any,
baseuri: Optional[str] = None,
loadingOptions: Optional[LoadingOptions] = None,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Any:
if baseuri is None:
baseuri = file_uri(os.getcwd()) + "/"
if loadingOptions is None:
loadingOptions = LoadingOptions(fileuri=baseuri)
return _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
doc,
baseuri,
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
def load_document_by_string(
string: Any,
uri: str,
loadingOptions: Optional[LoadingOptions] = None,
) -> Any:
yaml = yaml_no_ts()
result = yaml.load(string)
add_lc_filename(result, uri)
if loadingOptions is None:
loadingOptions = LoadingOptions(fileuri=uri)
result, metadata = _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
result,
uri,
loadingOptions,
)
return result
def load_document_by_yaml(
yaml: Any,
uri: str,
loadingOptions: Optional[LoadingOptions] = None,
) -> Any:
"""
Shortcut to load via a YAML object.
yaml: must be from ruamel.yaml.main.YAML.load with preserve_quotes=True
"""
add_lc_filename(yaml, uri)
if loadingOptions is None:
loadingOptions = LoadingOptions(fileuri=uri)
result, metadata = _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
yaml,
uri,
loadingOptions,
)
return result
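# Illustrative sketch (added for this document, not part of the generated module):
# how the public entry point load_document_by_string() might be used to parse a
# small Salad enum schema. The YAML text and the file URI are invented examples.
def _example_load_document_by_string() -> Any:
    doc_text = (
        "- name: Colors\n"
        "  type: enum\n"
        "  symbols: [red, green, blue]\n"
    )
    return load_document_by_string(doc_text, "file:///example/colors.yml")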
| schema-salad | /schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/metaschema.py | metaschema.py |
import copy
import logging
import os
import pathlib
import tempfile
import uuid as _uuid__ # pylint: disable=unused-import # noqa: F401
import xml.sax # nosec
from abc import ABC, abstractmethod
from io import StringIO
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from urllib.parse import quote, urldefrag, urlparse, urlsplit, urlunsplit
from urllib.request import pathname2url
from rdflib import Graph
from rdflib.plugins.parsers.notation3 import BadSyntax
from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import SchemaSaladException, ValidationException
from schema_salad.fetcher import DefaultFetcher, Fetcher, MemoryCachingFetcher
from schema_salad.sourceline import SourceLine, add_lc_filename
from schema_salad.utils import CacheType, yaml_no_ts # requires schema-salad v8.2+
_vocab: Dict[str, str] = {}
_rvocab: Dict[str, str] = {}
_logger = logging.getLogger("salad")
IdxType = MutableMapping[str, Tuple[Any, "LoadingOptions"]]
class LoadingOptions:
idx: IdxType
fileuri: Optional[str]
baseuri: str
namespaces: MutableMapping[str, str]
schemas: MutableSequence[str]
original_doc: Optional[Any]
addl_metadata: MutableMapping[str, Any]
fetcher: Fetcher
vocab: Dict[str, str]
rvocab: Dict[str, str]
cache: CacheType
imports: List[str]
includes: List[str]
def __init__(
self,
fetcher: Optional[Fetcher] = None,
namespaces: Optional[Dict[str, str]] = None,
schemas: Optional[List[str]] = None,
fileuri: Optional[str] = None,
copyfrom: Optional["LoadingOptions"] = None,
original_doc: Optional[Any] = None,
addl_metadata: Optional[Dict[str, str]] = None,
baseuri: Optional[str] = None,
idx: Optional[IdxType] = None,
imports: Optional[List[str]] = None,
includes: Optional[List[str]] = None,
) -> None:
"""Create a LoadingOptions object."""
self.original_doc = original_doc
if idx is not None:
self.idx = idx
else:
self.idx = copyfrom.idx if copyfrom is not None else {}
if fileuri is not None:
self.fileuri = fileuri
else:
self.fileuri = copyfrom.fileuri if copyfrom is not None else None
if baseuri is not None:
self.baseuri = baseuri
else:
self.baseuri = copyfrom.baseuri if copyfrom is not None else ""
if namespaces is not None:
self.namespaces = namespaces
else:
self.namespaces = copyfrom.namespaces if copyfrom is not None else {}
if schemas is not None:
self.schemas = schemas
else:
self.schemas = copyfrom.schemas if copyfrom is not None else []
if addl_metadata is not None:
self.addl_metadata = addl_metadata
else:
self.addl_metadata = copyfrom.addl_metadata if copyfrom is not None else {}
if imports is not None:
self.imports = imports
else:
self.imports = copyfrom.imports if copyfrom is not None else []
if includes is not None:
self.includes = includes
else:
self.includes = copyfrom.includes if copyfrom is not None else []
if fetcher is not None:
self.fetcher = fetcher
elif copyfrom is not None:
self.fetcher = copyfrom.fetcher
else:
import requests
from cachecontrol.caches import FileCache
from cachecontrol.wrapper import CacheControl
root = pathlib.Path(os.environ.get("HOME", tempfile.gettempdir()))
session = CacheControl(
requests.Session(),
cache=FileCache(root / ".cache" / "salad"),
)
self.fetcher: Fetcher = DefaultFetcher({}, session)
self.cache = self.fetcher.cache if isinstance(self.fetcher, MemoryCachingFetcher) else {}
self.vocab = _vocab
self.rvocab = _rvocab
if namespaces is not None:
self.vocab = self.vocab.copy()
self.rvocab = self.rvocab.copy()
for k, v in namespaces.items():
self.vocab[k] = v
self.rvocab[v] = k
@property
def graph(self) -> Graph:
"""Generate a merged rdflib.Graph from all entries in self.schemas."""
graph = Graph()
if not self.schemas:
return graph
key = str(hash(tuple(self.schemas)))
if key in self.cache:
return cast(Graph, self.cache[key])
for schema in self.schemas:
fetchurl = (
self.fetcher.urljoin(self.fileuri, schema)
if self.fileuri is not None
else pathlib.Path(schema).resolve().as_uri()
)
if fetchurl not in self.cache or self.cache[fetchurl] is True:
_logger.debug("Getting external schema %s", fetchurl)
try:
content = self.fetcher.fetch_text(fetchurl)
except Exception as e:
_logger.warning("Could not load extension schema %s: %s", fetchurl, str(e))
continue
newGraph = Graph()
err_msg = "unknown error"
for fmt in ["xml", "turtle"]:
try:
newGraph.parse(data=content, format=fmt, publicID=str(fetchurl))
self.cache[fetchurl] = newGraph
graph += newGraph
break
except (xml.sax.SAXParseException, TypeError, BadSyntax) as e:
err_msg = str(e)
else:
_logger.warning("Could not load extension schema %s: %s", fetchurl, err_msg)
self.cache[key] = graph
return graph
class Saveable(ABC):
"""Mark classes than have a save() and fromDoc() function."""
@classmethod
@abstractmethod
def fromDoc(
cls,
_doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "Saveable":
"""Construct this object from the result of yaml.load()."""
@abstractmethod
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
"""Convert this object to a JSON/YAML friendly dictionary."""
def load_field(val, fieldtype, baseuri, loadingOptions):
# type: (Union[str, Dict[str, str]], _Loader, str, LoadingOptions) -> Any
if isinstance(val, MutableMapping):
if "$import" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"])
result, metadata = _document_load_by_url(
fieldtype,
url,
loadingOptions,
)
loadingOptions.imports.append(url)
return result
if "$include" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"])
val = loadingOptions.fetcher.fetch_text(url)
loadingOptions.includes.append(url)
return fieldtype.load(val, baseuri, loadingOptions)
save_type = Optional[Union[MutableMapping[str, Any], MutableSequence[Any], int, float, bool, str]]
def save(
val: Any,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
if isinstance(val, Saveable):
return val.save(top=top, base_url=base_url, relative_uris=relative_uris)
if isinstance(val, MutableSequence):
return [save(v, top=False, base_url=base_url, relative_uris=relative_uris) for v in val]
if isinstance(val, MutableMapping):
newdict = {}
for key in val:
newdict[key] = save(val[key], top=False, base_url=base_url, relative_uris=relative_uris)
return newdict
if val is None or isinstance(val, (int, float, bool, str)):
return val
raise Exception("Not Saveable: %s" % type(val))
def save_with_metadata(
val: Any,
valLoadingOpts: LoadingOptions,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
"""Save and set $namespaces, $schemas, $base and any other metadata fields at the top level."""
saved_val = save(val, top, base_url, relative_uris)
newdict: MutableMapping[str, Any] = {}
if isinstance(saved_val, MutableSequence):
newdict = {"$graph": saved_val}
elif isinstance(saved_val, MutableMapping):
newdict = saved_val
if valLoadingOpts.namespaces:
newdict["$namespaces"] = valLoadingOpts.namespaces
if valLoadingOpts.schemas:
newdict["$schemas"] = valLoadingOpts.schemas
if valLoadingOpts.baseuri:
newdict["$base"] = valLoadingOpts.baseuri
for k, v in valLoadingOpts.addl_metadata.items():
if k not in newdict:
newdict[k] = v
return newdict
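# Illustrative sketch (added for this document, not part of the generated module):
# saving a hand-built RecordField back to a plain dictionary with save(). The
# field name, type, and doc string are invented example values.
def _example_save_roundtrip() -> Dict[str, Any]:
    field = RecordField(name="file:///example/schema#width", type="int", doc="Width in pixels")
    return save(field, top=True, base_url="file:///example/schema")
    # expected result (roughly): {"name": "width", "doc": "Width in pixels", "type": "int"}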
def expand_url(
url, # type: str
base_url, # type: str
loadingOptions, # type: LoadingOptions
scoped_id=False, # type: bool
vocab_term=False, # type: bool
scoped_ref=None, # type: Optional[int]
):
# type: (...) -> str
if url in ("@id", "@type"):
return url
if vocab_term and url in loadingOptions.vocab:
return url
if bool(loadingOptions.vocab) and ":" in url:
prefix = url.split(":")[0]
if prefix in loadingOptions.vocab:
url = loadingOptions.vocab[prefix] + url[len(prefix) + 1 :]
split = urlsplit(url)
if (
(bool(split.scheme) and split.scheme in loadingOptions.fetcher.supported_schemes())
or url.startswith("$(")
or url.startswith("${")
):
pass
elif scoped_id and not bool(split.fragment):
splitbase = urlsplit(base_url)
frg = ""
if bool(splitbase.fragment):
frg = splitbase.fragment + "/" + split.path
else:
frg = split.path
pt = splitbase.path if splitbase.path != "" else "/"
url = urlunsplit((splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
elif scoped_ref is not None and not bool(split.fragment):
splitbase = urlsplit(base_url)
sp = splitbase.fragment.split("/")
n = scoped_ref
while n > 0 and len(sp) > 0:
sp.pop()
n -= 1
sp.append(url)
url = urlunsplit(
(
splitbase.scheme,
splitbase.netloc,
splitbase.path,
splitbase.query,
"/".join(sp),
)
)
else:
url = loadingOptions.fetcher.urljoin(base_url, url)
if vocab_term:
split = urlsplit(url)
if bool(split.scheme):
if url in loadingOptions.rvocab:
return loadingOptions.rvocab[url]
else:
raise ValidationException(f"Term {url!r} not in vocabulary")
return url
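# Illustrative sketch (added for this document, not part of the generated module):
# expand_url() resolves namespace prefixes registered via $namespaces. The "ex"
# prefix and URL below are invented for the example.
def _example_expand_url() -> str:
    opts = LoadingOptions(namespaces={"ex": "http://example.com/vocab#"})
    # "ex:thing" is expanded using the registered prefix.
    return expand_url("ex:thing", "", opts)  # -> "http://example.com/vocab#thing"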
class _Loader:
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
pass
class _AnyLoader(_Loader):
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc is not None:
return doc
raise ValidationException("Expected non-null")
class _PrimitiveLoader(_Loader):
def __init__(self, tp):
# type: (Union[type, Tuple[Type[str], Type[str]]]) -> None
self.tp = tp
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, self.tp):
raise ValidationException(
"Expected a {} but got {}".format(
                    self.tp, doc.__class__.__name__
)
)
return doc
def __repr__(self): # type: () -> str
return str(self.tp)
class _ArrayLoader(_Loader):
def __init__(self, items):
# type: (_Loader) -> None
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableSequence):
raise ValidationException(f"Expected a list, was {type(doc)}")
r = [] # type: List[Any]
errors = [] # type: List[SchemaSaladException]
for i in range(0, len(doc)):
try:
lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)
if isinstance(lf, MutableSequence):
r.extend(lf)
else:
r.append(lf)
except ValidationException as e:
errors.append(e.with_sourceline(SourceLine(doc, i, str)))
if errors:
raise ValidationException("", None, errors)
return r
def __repr__(self): # type: () -> str
return f"array<{self.items}>"
class _EnumLoader(_Loader):
def __init__(self, symbols: Sequence[str], name: str) -> None:
self.symbols = symbols
self.name = name
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc in self.symbols:
return doc
raise ValidationException(f"Expected one of {self.symbols}")
def __repr__(self): # type: () -> str
return self.name
class _SecondaryDSLLoader(_Loader):
def __init__(self, inner):
# type: (_Loader) -> None
self.inner = inner
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
r: List[Dict[str, Any]] = []
if isinstance(doc, MutableSequence):
for d in doc:
if isinstance(d, str):
if d.endswith("?"):
r.append({"pattern": d[:-1], "required": False})
else:
r.append({"pattern": d})
elif isinstance(d, dict):
new_dict: Dict[str, Any] = {}
dict_copy = copy.deepcopy(d)
if "pattern" in dict_copy:
new_dict["pattern"] = dict_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {d}"
)
new_dict["required"] = (
dict_copy.pop("required") if "required" in dict_copy else None
)
if len(dict_copy):
raise ValidationException(
"Unallowed values in secondaryFiles specification entry: {}".format(
dict_copy
)
)
r.append(new_dict)
else:
raise ValidationException(
"Expected a string or sequence of (strings or mappings)."
)
elif isinstance(doc, MutableMapping):
new_dict = {}
doc_copy = copy.deepcopy(doc)
if "pattern" in doc_copy:
new_dict["pattern"] = doc_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {doc}"
)
new_dict["required"] = doc_copy.pop("required") if "required" in doc_copy else None
if len(doc_copy):
raise ValidationException(
f"Unallowed values in secondaryFiles specification entry: {doc_copy}"
)
r.append(new_dict)
elif isinstance(doc, str):
if doc.endswith("?"):
r.append({"pattern": doc[:-1], "required": False})
else:
r.append({"pattern": doc})
else:
raise ValidationException("Expected str or sequence of str")
return self.inner.load(r, baseuri, loadingOptions, docRoot)
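# Illustrative sketch (added for this document, not part of the generated module):
# _SecondaryDSLLoader normalizes the secondaryFiles shorthand before handing the
# result to its inner loader. Any_type is used here only as a pass-through inner
# loader; this loader is not otherwise referenced by the Salad metaschema.
def _example_secondary_dsl() -> Any:
    loader = _SecondaryDSLLoader(Any_type)
    return loader.load("data.txt?", "", LoadingOptions())
    # -> [{"pattern": "data.txt", "required": False}]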
class _RecordLoader(_Loader):
def __init__(self, classtype):
# type: (Type[Saveable]) -> None
self.classtype = classtype
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableMapping):
raise ValidationException(f"Expected a dict, was {type(doc)}")
return self.classtype.fromDoc(doc, baseuri, loadingOptions, docRoot=docRoot)
def __repr__(self): # type: () -> str
return str(self.classtype.__name__)
class _ExpressionLoader(_Loader):
def __init__(self, items: Type[str]) -> None:
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, str):
raise ValidationException(f"Expected a str, was {type(doc)}")
return doc
class _UnionLoader(_Loader):
def __init__(self, alternates: Sequence[_Loader]) -> None:
self.alternates = alternates
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
errors = []
for t in self.alternates:
try:
return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)
except ValidationException as e:
errors.append(ValidationException(f"tried {t} but", None, [e]))
raise ValidationException("", None, errors, "-")
def __repr__(self): # type: () -> str
return " | ".join(str(a) for a in self.alternates)
class _URILoader(_Loader):
def __init__(self, inner, scoped_id, vocab_term, scoped_ref):
# type: (_Loader, bool, bool, Union[int, None]) -> None
self.inner = inner
self.scoped_id = scoped_id
self.vocab_term = vocab_term
self.scoped_ref = scoped_ref
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
newdoc = []
for i in doc:
if isinstance(i, str):
newdoc.append(
expand_url(
i,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
)
else:
newdoc.append(i)
doc = newdoc
elif isinstance(doc, str):
doc = expand_url(
doc,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
return self.inner.load(doc, baseuri, loadingOptions)
class _TypeDSLLoader(_Loader):
def __init__(self, inner, refScope, salad_version):
# type: (_Loader, Union[int, None], str) -> None
self.inner = inner
self.refScope = refScope
self.salad_version = salad_version
def resolve(
self,
doc, # type: str
baseuri, # type: str
loadingOptions, # type: LoadingOptions
):
# type: (...) -> Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
doc_ = doc
optional = False
if doc_.endswith("?"):
optional = True
doc_ = doc_[0:-1]
if doc_.endswith("[]"):
salad_versions = [int(v) for v in self.salad_version[1:].split(".")]
items = "" # type: Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
rest = doc_[0:-2]
if salad_versions < [1, 3]:
if rest.endswith("[]"):
# To show the error message with the original type
return doc
else:
items = expand_url(rest, baseuri, loadingOptions, False, True, self.refScope)
else:
items = self.resolve(rest, baseuri, loadingOptions)
if isinstance(items, str):
items = expand_url(items, baseuri, loadingOptions, False, True, self.refScope)
expanded = {"type": "array", "items": items} # type: Union[Dict[str, Any], str]
else:
expanded = expand_url(doc_, baseuri, loadingOptions, False, True, self.refScope)
if optional:
return ["null", expanded]
else:
return expanded
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
r = [] # type: List[Any]
for d in doc:
if isinstance(d, str):
resolved = self.resolve(d, baseuri, loadingOptions)
if isinstance(resolved, MutableSequence):
for i in resolved:
if i not in r:
r.append(i)
else:
if resolved not in r:
r.append(resolved)
else:
r.append(d)
doc = r
elif isinstance(doc, str):
doc = self.resolve(doc, baseuri, loadingOptions)
return self.inner.load(doc, baseuri, loadingOptions)
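# Illustrative sketch (added for this document, not part of the generated module):
# _TypeDSLLoader expands the type DSL shorthand: a trailing "?" adds "null" and
# "[]" wraps the type in an array. Any_type is used as a pass-through inner
# loader so the expanded structure is returned unchanged.
def _example_type_dsl() -> Any:
    loader = _TypeDSLLoader(Any_type, 2, "v1.1")
    return loader.load("string[]?", "", LoadingOptions())
    # -> ["null", {"type": "array", "items": "string"}]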
class _IdMapLoader(_Loader):
def __init__(self, inner, mapSubject, mapPredicate):
# type: (_Loader, str, Union[str, None]) -> None
self.inner = inner
self.mapSubject = mapSubject
self.mapPredicate = mapPredicate
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableMapping):
r = [] # type: List[Any]
for k in sorted(doc.keys()):
val = doc[k]
if isinstance(val, CommentedMap):
v = copy.copy(val)
v.lc.data = val.lc.data
v.lc.filename = val.lc.filename
v[self.mapSubject] = k
r.append(v)
elif isinstance(val, MutableMapping):
v2 = copy.copy(val)
v2[self.mapSubject] = k
r.append(v2)
else:
if self.mapPredicate:
v3 = {self.mapPredicate: val}
v3[self.mapSubject] = k
r.append(v3)
else:
raise ValidationException("No mapPredicate")
doc = r
return self.inner.load(doc, baseuri, loadingOptions)
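# Illustrative sketch (added for this document, not part of the generated module):
# _IdMapLoader turns the mapping form of a field list into the list form, using
# the configured mapSubject/mapPredicate keys. Any_type passes the result through.
def _example_idmap() -> Any:
    loader = _IdMapLoader(Any_type, "name", "type")
    return loader.load({"width": "int"}, "", LoadingOptions())
    # -> [{"type": "int", "name": "width"}]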
def _document_load(
loader: _Loader,
doc: Union[str, MutableMapping[str, Any], MutableSequence[Any]],
baseuri: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if isinstance(doc, str):
return _document_load_by_url(
loader,
loadingOptions.fetcher.urljoin(baseuri, doc),
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
if isinstance(doc, MutableMapping):
addl_metadata = {}
if addl_metadata_fields is not None:
for mf in addl_metadata_fields:
if mf in doc:
addl_metadata[mf] = doc[mf]
docuri = baseuri
if "$base" in doc:
baseuri = doc["$base"]
loadingOptions = LoadingOptions(
copyfrom=loadingOptions,
namespaces=doc.get("$namespaces", None),
schemas=doc.get("$schemas", None),
baseuri=doc.get("$base", None),
addl_metadata=addl_metadata,
)
doc = {k: v for k, v in doc.items() if k not in ("$namespaces", "$schemas", "$base")}
if "$graph" in doc:
loadingOptions.idx[baseuri] = (
loader.load(doc["$graph"], baseuri, loadingOptions),
loadingOptions,
)
else:
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions, docRoot=baseuri),
loadingOptions,
)
if docuri != baseuri:
loadingOptions.idx[docuri] = loadingOptions.idx[baseuri]
return loadingOptions.idx[baseuri]
if isinstance(doc, MutableSequence):
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions),
loadingOptions,
)
return loadingOptions.idx[baseuri]
raise ValidationException(
"Expected URI string, MutableMapping or MutableSequence, got %s" % type(doc)
)
def _document_load_by_url(
loader: _Loader,
url: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if url in loadingOptions.idx:
return loadingOptions.idx[url]
doc_url, frg = urldefrag(url)
text = loadingOptions.fetcher.fetch_text(doc_url)
textIO = StringIO(text)
textIO.name = str(doc_url)
yaml = yaml_no_ts()
result = yaml.load(textIO)
add_lc_filename(result, doc_url)
loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=doc_url)
_document_load(
loader,
result,
doc_url,
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
return loadingOptions.idx[url]
def file_uri(path, split_frag=False): # type: (str, bool) -> str
if path.startswith("file://"):
return path
if split_frag:
pathsp = path.split("#", 2)
frag = "#" + quote(str(pathsp[1])) if len(pathsp) == 2 else ""
urlpath = pathname2url(str(pathsp[0]))
else:
urlpath = pathname2url(path)
frag = ""
if urlpath.startswith("//"):
return f"file:{urlpath}{frag}"
return f"file://{urlpath}{frag}"
def prefix_url(url: str, namespaces: Dict[str, str]) -> str:
"""Expand short forms into full URLs using the given namespace dictionary."""
for k, v in namespaces.items():
if url.startswith(v):
return k + ":" + url[len(v) :]
return url
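# Illustrative sketch (added for this document, not part of the generated module):
# prefix_url() shortens a full URL back to its prefixed form when a matching
# namespace is registered. The "sld" prefix below is an invented example.
def _example_prefix_url() -> str:
    ns = {"sld": "https://w3id.org/cwl/salad#"}
    return prefix_url("https://w3id.org/cwl/salad#record", ns)  # -> "sld:record"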
def save_relative_uri(
uri: Any,
base_url: str,
scoped_id: bool,
ref_scope: Optional[int],
relative_uris: bool,
) -> Any:
"""Convert any URI to a relative one, obeying the scoping rules."""
if isinstance(uri, MutableSequence):
return [save_relative_uri(u, base_url, scoped_id, ref_scope, relative_uris) for u in uri]
elif isinstance(uri, str):
if not relative_uris or uri == base_url:
return uri
urisplit = urlsplit(uri)
basesplit = urlsplit(base_url)
if urisplit.scheme == basesplit.scheme and urisplit.netloc == basesplit.netloc:
if urisplit.path != basesplit.path:
p = os.path.relpath(urisplit.path, os.path.dirname(basesplit.path))
if urisplit.fragment:
p = p + "#" + urisplit.fragment
return p
basefrag = basesplit.fragment + "/"
if ref_scope:
sp = basefrag.split("/")
i = 0
while i < ref_scope:
sp.pop()
i += 1
basefrag = "/".join(sp)
if urisplit.fragment.startswith(basefrag):
return urisplit.fragment[len(basefrag) :]
return urisplit.fragment
return uri
else:
return save(uri, top=False, base_url=base_url, relative_uris=relative_uris)
def shortname(inputid: str) -> str:
"""
Compute the shortname of a fully qualified identifier.
See https://w3id.org/cwl/v1.2/SchemaSalad.html#Short_names.
"""
parsed_id = urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split("/")[-1]
return parsed_id.path.split("/")[-1]
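# Illustrative sketch (added for this document, not part of the generated module):
# shortname() keeps only the last segment of the fragment (or path) of a fully
# qualified identifier. The identifier below is an invented example.
def _example_shortname() -> str:
    return shortname("file:///example/schema.yml#MainRecord/width")  # -> "width"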
def parser_info() -> str:
return "org.w3id.cwl.salad"
class Documented(Saveable):
pass
class RecordField(Documented):
"""
A field of a record.
"""
def __init__(
self,
name: Any,
type: Any,
doc: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.doc = doc
self.name = name
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, RecordField):
return bool(
self.doc == other.doc
and self.name == other.name
and self.type == other.type
)
return False
def __hash__(self) -> int:
return hash((self.doc, self.name, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "RecordField":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
try:
type = load_field(
_doc.get("type"),
typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `doc`, `name`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'RecordField'", None, _errors__)
_constructed = cls(
doc=doc,
name=name,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
        # "top" means this object is the document root; only then emit $namespaces and $schemas
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["doc", "name", "type"])
class RecordSchema(Saveable):
def __init__(
self,
type: Any,
fields: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.fields = fields
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, RecordSchema):
return bool(self.fields == other.fields and self.type == other.type)
return False
def __hash__(self) -> int:
return hash((self.fields, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "RecordSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "fields" in _doc:
try:
fields = load_field(
_doc.get("fields"),
idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'fields' field is not valid because:",
SourceLine(_doc, "fields", str),
[e],
)
)
else:
fields = None
try:
type = load_field(
_doc.get("type"),
typedsl_Record_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `fields`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'RecordSchema'", None, _errors__)
_constructed = cls(
fields=fields,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.fields is not None:
r["fields"] = save(
self.fields, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["fields", "type"])
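
# Hypothetical usage sketch (editorial addition): a RecordSchema nests RecordField
# objects in its "fields" list, and save() recurses into each Saveable child.
# All field names here are invented example values.
def _example_record_schema() -> Dict[str, Any]:
    schema = RecordSchema(
        type="record",
        fields=[
            RecordField(name="id", type="string"),
            RecordField(name="count", type="int", doc="Number of items"),
        ],
    )
    return schema.save(top=False)
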
class EnumSchema(Saveable):
"""
Define an enumerated type.
"""
def __init__(
self,
symbols: Any,
type: Any,
name: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.symbols = symbols
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, EnumSchema):
return bool(
self.name == other.name
and self.symbols == other.symbols
and self.type == other.type
)
return False
def __hash__(self) -> int:
return hash((self.name, self.symbols, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "EnumSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_union_of_None_type_or_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
name = "_:" + str(_uuid__.uuid4())
if not __original_name_is_none:
baseuri = name
try:
symbols = load_field(
_doc.get("symbols"),
uri_array_of_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'symbols' field is not valid because:",
SourceLine(_doc, "symbols", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Enum_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `symbols`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'EnumSchema'", None, _errors__)
_constructed = cls(
name=name,
symbols=symbols,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.symbols is not None:
u = save_relative_uri(self.symbols, self.name, True, None, relative_uris)
r["symbols"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["name", "symbols", "type"])
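
# Hypothetical usage sketch (editorial addition): an EnumSchema carries its symbols
# as a list of strings.  Note that fromDoc() above falls back to a blank-node name
# ("_:" + uuid) when no name is given; here an explicit example name is supplied.
def _example_enum_schema() -> Dict[str, Any]:
    colours = EnumSchema(name="Colour", symbols=["red", "green", "blue"], type="enum")
    return colours.save(top=False)
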
class ArraySchema(Saveable):
def __init__(
self,
items: Any,
type: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.items = items
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, ArraySchema):
return bool(self.items == other.items and self.type == other.type)
return False
def __hash__(self) -> int:
return hash((self.items, self.type))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "ArraySchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
try:
items = load_field(
_doc.get("items"),
uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'items' field is not valid because:",
SourceLine(_doc, "items", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Array_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `items`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'ArraySchema'", None, _errors__)
_constructed = cls(
items=items,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.items is not None:
u = save_relative_uri(self.items, base_url, False, 2, relative_uris)
r["items"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["items", "type"])
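
# Hypothetical usage sketch (editorial addition): an ArraySchema only needs the
# item type and the literal type marker "array"; both values below are examples.
def _example_array_schema() -> Dict[str, Any]:
    strings = ArraySchema(items="string", type="array")
    return strings.save(top=False)
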
class JsonldPredicate(Saveable):
"""
Attached to a record field to define how the parent record field is handled for
URI resolution and JSON-LD context generation.
"""
def __init__(
self,
_id: Optional[Any] = None,
_type: Optional[Any] = None,
_container: Optional[Any] = None,
identity: Optional[Any] = None,
noLinkCheck: Optional[Any] = None,
mapSubject: Optional[Any] = None,
mapPredicate: Optional[Any] = None,
refScope: Optional[Any] = None,
typeDSL: Optional[Any] = None,
secondaryFilesDSL: Optional[Any] = None,
subscope: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self._id = _id
self._type = _type
self._container = _container
self.identity = identity
self.noLinkCheck = noLinkCheck
self.mapSubject = mapSubject
self.mapPredicate = mapPredicate
self.refScope = refScope
self.typeDSL = typeDSL
self.secondaryFilesDSL = secondaryFilesDSL
self.subscope = subscope
def __eq__(self, other: Any) -> bool:
if isinstance(other, JsonldPredicate):
return bool(
self._id == other._id
and self._type == other._type
and self._container == other._container
and self.identity == other.identity
and self.noLinkCheck == other.noLinkCheck
and self.mapSubject == other.mapSubject
and self.mapPredicate == other.mapPredicate
and self.refScope == other.refScope
and self.typeDSL == other.typeDSL
and self.secondaryFilesDSL == other.secondaryFilesDSL
and self.subscope == other.subscope
)
return False
def __hash__(self) -> int:
return hash(
(
self._id,
self._type,
self._container,
self.identity,
self.noLinkCheck,
self.mapSubject,
self.mapPredicate,
self.refScope,
self.typeDSL,
self.secondaryFilesDSL,
self.subscope,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "JsonldPredicate":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "_id" in _doc:
try:
_id = load_field(
_doc.get("_id"),
uri_union_of_None_type_or_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the '_id' field is not valid because:",
SourceLine(_doc, "_id", str),
[e],
)
)
else:
_id = None
if "_type" in _doc:
try:
_type = load_field(
_doc.get("_type"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the '_type' field is not valid because:",
SourceLine(_doc, "_type", str),
[e],
)
)
else:
_type = None
if "_container" in _doc:
try:
_container = load_field(
_doc.get("_container"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the '_container' field is not valid because:",
SourceLine(_doc, "_container", str),
[e],
)
)
else:
_container = None
if "identity" in _doc:
try:
identity = load_field(
_doc.get("identity"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'identity' field is not valid because:",
SourceLine(_doc, "identity", str),
[e],
)
)
else:
identity = None
if "noLinkCheck" in _doc:
try:
noLinkCheck = load_field(
_doc.get("noLinkCheck"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'noLinkCheck' field is not valid because:",
SourceLine(_doc, "noLinkCheck", str),
[e],
)
)
else:
noLinkCheck = None
if "mapSubject" in _doc:
try:
mapSubject = load_field(
_doc.get("mapSubject"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'mapSubject' field is not valid because:",
SourceLine(_doc, "mapSubject", str),
[e],
)
)
else:
mapSubject = None
if "mapPredicate" in _doc:
try:
mapPredicate = load_field(
_doc.get("mapPredicate"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'mapPredicate' field is not valid because:",
SourceLine(_doc, "mapPredicate", str),
[e],
)
)
else:
mapPredicate = None
if "refScope" in _doc:
try:
refScope = load_field(
_doc.get("refScope"),
union_of_None_type_or_inttype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'refScope' field is not valid because:",
SourceLine(_doc, "refScope", str),
[e],
)
)
else:
refScope = None
if "typeDSL" in _doc:
try:
typeDSL = load_field(
_doc.get("typeDSL"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'typeDSL' field is not valid because:",
SourceLine(_doc, "typeDSL", str),
[e],
)
)
else:
typeDSL = None
if "secondaryFilesDSL" in _doc:
try:
secondaryFilesDSL = load_field(
_doc.get("secondaryFilesDSL"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'secondaryFilesDSL' field is not valid because:",
SourceLine(_doc, "secondaryFilesDSL", str),
[e],
)
)
else:
secondaryFilesDSL = None
if "subscope" in _doc:
try:
subscope = load_field(
_doc.get("subscope"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'subscope' field is not valid because:",
SourceLine(_doc, "subscope", str),
[e],
)
)
else:
subscope = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `_id`, `_type`, `_container`, `identity`, `noLinkCheck`, `mapSubject`, `mapPredicate`, `refScope`, `typeDSL`, `secondaryFilesDSL`, `subscope`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'JsonldPredicate'", None, _errors__)
_constructed = cls(
_id=_id,
_type=_type,
_container=_container,
identity=identity,
noLinkCheck=noLinkCheck,
mapSubject=mapSubject,
mapPredicate=mapPredicate,
refScope=refScope,
typeDSL=typeDSL,
secondaryFilesDSL=secondaryFilesDSL,
subscope=subscope,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self._id is not None:
u = save_relative_uri(self._id, base_url, True, None, relative_uris)
r["_id"] = u
if self._type is not None:
r["_type"] = save(
self._type, top=False, base_url=base_url, relative_uris=relative_uris
)
if self._container is not None:
r["_container"] = save(
self._container,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.identity is not None:
r["identity"] = save(
self.identity, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.noLinkCheck is not None:
r["noLinkCheck"] = save(
self.noLinkCheck,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.mapSubject is not None:
r["mapSubject"] = save(
self.mapSubject,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.mapPredicate is not None:
r["mapPredicate"] = save(
self.mapPredicate,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.refScope is not None:
r["refScope"] = save(
self.refScope, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.typeDSL is not None:
r["typeDSL"] = save(
self.typeDSL, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.secondaryFilesDSL is not None:
r["secondaryFilesDSL"] = save(
self.secondaryFilesDSL,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.subscope is not None:
r["subscope"] = save(
self.subscope, top=False, base_url=base_url, relative_uris=relative_uris
)
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"_id",
"_type",
"_container",
"identity",
"noLinkCheck",
"mapSubject",
"mapPredicate",
"refScope",
"typeDSL",
"secondaryFilesDSL",
"subscope",
]
)
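
# Hypothetical usage sketch (editorial addition): every field of JsonldPredicate is
# optional, and instances are hashable (see __hash__ above), so they can be
# deduplicated in a set.  The predicate IRI below is an invented example.
def _example_jsonld_predicate() -> int:
    link = JsonldPredicate(_id="acme:homepage", _type="@id", identity=True)
    same_link = JsonldPredicate(_id="acme:homepage", _type="@id", identity=True)
    # __eq__/__hash__ compare field values, so the set collapses the duplicates.
    return len({link, same_link})
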
class SpecializeDef(Saveable):
def __init__(
self,
specializeFrom: Any,
specializeTo: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.specializeFrom = specializeFrom
self.specializeTo = specializeTo
def __eq__(self, other: Any) -> bool:
if isinstance(other, SpecializeDef):
return bool(
self.specializeFrom == other.specializeFrom
and self.specializeTo == other.specializeTo
)
return False
def __hash__(self) -> int:
return hash((self.specializeFrom, self.specializeTo))
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SpecializeDef":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
try:
specializeFrom = load_field(
_doc.get("specializeFrom"),
uri_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'specializeFrom' field is not valid because:",
SourceLine(_doc, "specializeFrom", str),
[e],
)
)
try:
specializeTo = load_field(
_doc.get("specializeTo"),
uri_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'specializeTo' field is not valid because:",
SourceLine(_doc, "specializeTo", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `specializeFrom`, `specializeTo`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SpecializeDef'", None, _errors__)
_constructed = cls(
specializeFrom=specializeFrom,
specializeTo=specializeTo,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.specializeFrom is not None:
u = save_relative_uri(
self.specializeFrom, base_url, False, 1, relative_uris
)
r["specializeFrom"] = u
if self.specializeTo is not None:
u = save_relative_uri(self.specializeTo, base_url, False, 1, relative_uris)
r["specializeTo"] = u
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["specializeFrom", "specializeTo"])
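
# Hypothetical usage sketch (editorial addition): SpecializeDef simply pairs the
# type being replaced with its replacement; both names below are invented.
def _example_specialize_def() -> Dict[str, Any]:
    swap = SpecializeDef(specializeFrom="GenericField", specializeTo="SpecializedField")
    return swap.save(top=False)
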
class NamedType(Saveable):
pass
class DocType(Documented):
pass
class SchemaDefinedType(DocType):
"""
Abstract base for schema-defined types.
"""
pass
class SaladRecordField(RecordField):
"""
A field of a record.
"""
def __init__(
self,
name: Any,
type: Any,
doc: Optional[Any] = None,
jsonldPredicate: Optional[Any] = None,
default: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.doc = doc
self.name = name
self.type = type
self.jsonldPredicate = jsonldPredicate
self.default = default
def __eq__(self, other: Any) -> bool:
if isinstance(other, SaladRecordField):
return bool(
self.doc == other.doc
and self.name == other.name
and self.type == other.type
and self.jsonldPredicate == other.jsonldPredicate
and self.default == other.default
)
return False
def __hash__(self) -> int:
return hash(
(self.doc, self.name, self.type, self.jsonldPredicate, self.default)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SaladRecordField":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
try:
type = load_field(
_doc.get("type"),
typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
if "jsonldPredicate" in _doc:
try:
jsonldPredicate = load_field(
_doc.get("jsonldPredicate"),
union_of_None_type_or_strtype_or_JsonldPredicateLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'jsonldPredicate' field is not valid because:",
SourceLine(_doc, "jsonldPredicate", str),
[e],
)
)
else:
jsonldPredicate = None
if "default" in _doc:
try:
default = load_field(
_doc.get("default"),
union_of_None_type_or_Any_type,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'default' field is not valid because:",
SourceLine(_doc, "default", str),
[e],
)
)
else:
default = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `doc`, `name`, `type`, `jsonldPredicate`, `default`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SaladRecordField'", None, _errors__)
_constructed = cls(
doc=doc,
name=name,
type=type,
jsonldPredicate=jsonldPredicate,
default=default,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.jsonldPredicate is not None:
r["jsonldPredicate"] = save(
self.jsonldPredicate,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.default is not None:
r["default"] = save(
self.default, top=False, base_url=self.name, relative_uris=relative_uris
)
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["doc", "name", "type", "jsonldPredicate", "default"])
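
# Hypothetical usage sketch (editorial addition): SaladRecordField extends
# RecordField with a default value and an optional jsonldPredicate, which may be
# given either as a plain string or as a JsonldPredicate object.  All values
# below are invented examples.
def _example_salad_record_field() -> Dict[str, Any]:
    label = SaladRecordField(
        name="label",
        type="string",
        default="unnamed",
        jsonldPredicate="rdfs:label",
    )
    return label.save(top=False)
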
class SaladRecordSchema(NamedType, RecordSchema, SchemaDefinedType):
def __init__(
self,
name: Any,
type: Any,
inVocab: Optional[Any] = None,
fields: Optional[Any] = None,
doc: Optional[Any] = None,
docParent: Optional[Any] = None,
docChild: Optional[Any] = None,
docAfter: Optional[Any] = None,
jsonldPredicate: Optional[Any] = None,
documentRoot: Optional[Any] = None,
abstract: Optional[Any] = None,
extends: Optional[Any] = None,
specialize: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.inVocab = inVocab
self.fields = fields
self.type = type
self.doc = doc
self.docParent = docParent
self.docChild = docChild
self.docAfter = docAfter
self.jsonldPredicate = jsonldPredicate
self.documentRoot = documentRoot
self.abstract = abstract
self.extends = extends
self.specialize = specialize
def __eq__(self, other: Any) -> bool:
if isinstance(other, SaladRecordSchema):
return bool(
self.name == other.name
and self.inVocab == other.inVocab
and self.fields == other.fields
and self.type == other.type
and self.doc == other.doc
and self.docParent == other.docParent
and self.docChild == other.docChild
and self.docAfter == other.docAfter
and self.jsonldPredicate == other.jsonldPredicate
and self.documentRoot == other.documentRoot
and self.abstract == other.abstract
and self.extends == other.extends
and self.specialize == other.specialize
)
return False
def __hash__(self) -> int:
return hash(
(
self.name,
self.inVocab,
self.fields,
self.type,
self.doc,
self.docParent,
self.docChild,
self.docAfter,
self.jsonldPredicate,
self.documentRoot,
self.abstract,
self.extends,
self.specialize,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SaladRecordSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "inVocab" in _doc:
try:
inVocab = load_field(
_doc.get("inVocab"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'inVocab' field is not valid because:",
SourceLine(_doc, "inVocab", str),
[e],
)
)
else:
inVocab = None
if "fields" in _doc:
try:
fields = load_field(
_doc.get("fields"),
idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'fields' field is not valid because:",
SourceLine(_doc, "fields", str),
[e],
)
)
else:
fields = None
try:
type = load_field(
_doc.get("type"),
typedsl_Record_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
if "docParent" in _doc:
try:
docParent = load_field(
_doc.get("docParent"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docParent' field is not valid because:",
SourceLine(_doc, "docParent", str),
[e],
)
)
else:
docParent = None
if "docChild" in _doc:
try:
docChild = load_field(
_doc.get("docChild"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docChild' field is not valid because:",
SourceLine(_doc, "docChild", str),
[e],
)
)
else:
docChild = None
if "docAfter" in _doc:
try:
docAfter = load_field(
_doc.get("docAfter"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docAfter' field is not valid because:",
SourceLine(_doc, "docAfter", str),
[e],
)
)
else:
docAfter = None
if "jsonldPredicate" in _doc:
try:
jsonldPredicate = load_field(
_doc.get("jsonldPredicate"),
union_of_None_type_or_strtype_or_JsonldPredicateLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'jsonldPredicate' field is not valid because:",
SourceLine(_doc, "jsonldPredicate", str),
[e],
)
)
else:
jsonldPredicate = None
if "documentRoot" in _doc:
try:
documentRoot = load_field(
_doc.get("documentRoot"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'documentRoot' field is not valid because:",
SourceLine(_doc, "documentRoot", str),
[e],
)
)
else:
documentRoot = None
if "abstract" in _doc:
try:
abstract = load_field(
_doc.get("abstract"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'abstract' field is not valid because:",
SourceLine(_doc, "abstract", str),
[e],
)
)
else:
abstract = None
if "extends" in _doc:
try:
extends = load_field(
_doc.get("extends"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'extends' field is not valid because:",
SourceLine(_doc, "extends", str),
[e],
)
)
else:
extends = None
if "specialize" in _doc:
try:
specialize = load_field(
_doc.get("specialize"),
idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'specialize' field is not valid because:",
SourceLine(_doc, "specialize", str),
[e],
)
)
else:
specialize = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `inVocab`, `fields`, `type`, `doc`, `docParent`, `docChild`, `docAfter`, `jsonldPredicate`, `documentRoot`, `abstract`, `extends`, `specialize`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SaladRecordSchema'", None, _errors__)
_constructed = cls(
name=name,
inVocab=inVocab,
fields=fields,
type=type,
doc=doc,
docParent=docParent,
docChild=docChild,
docAfter=docAfter,
jsonldPredicate=jsonldPredicate,
documentRoot=documentRoot,
abstract=abstract,
extends=extends,
specialize=specialize,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.inVocab is not None:
r["inVocab"] = save(
self.inVocab, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.fields is not None:
r["fields"] = save(
self.fields, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.docParent is not None:
u = save_relative_uri(self.docParent, self.name, False, None, relative_uris)
r["docParent"] = u
if self.docChild is not None:
u = save_relative_uri(self.docChild, self.name, False, None, relative_uris)
r["docChild"] = u
if self.docAfter is not None:
u = save_relative_uri(self.docAfter, self.name, False, None, relative_uris)
r["docAfter"] = u
if self.jsonldPredicate is not None:
r["jsonldPredicate"] = save(
self.jsonldPredicate,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.documentRoot is not None:
r["documentRoot"] = save(
self.documentRoot,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.abstract is not None:
r["abstract"] = save(
self.abstract,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.extends is not None:
u = save_relative_uri(self.extends, self.name, False, 1, relative_uris)
r["extends"] = u
if self.specialize is not None:
r["specialize"] = save(
self.specialize,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"name",
"inVocab",
"fields",
"type",
"doc",
"docParent",
"docChild",
"docAfter",
"jsonldPredicate",
"documentRoot",
"abstract",
"extends",
"specialize",
]
)
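
# Hypothetical usage sketch (editorial addition): SaladRecordSchema combines the
# record fields with schema-level metadata such as documentRoot.  The schema and
# field names are invented examples.
def _example_salad_record_schema() -> Dict[str, Any]:
    schema = SaladRecordSchema(
        name="ExampleRecord",
        type="record",
        documentRoot=True,
        fields=[SaladRecordField(name="label", type="string")],
    )
    return schema.save(top=False)
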
class SaladEnumSchema(NamedType, EnumSchema, SchemaDefinedType):
"""
Define an enumerated type.
"""
def __init__(
self,
symbols: Any,
type: Any,
name: Optional[Any] = None,
inVocab: Optional[Any] = None,
doc: Optional[Any] = None,
docParent: Optional[Any] = None,
docChild: Optional[Any] = None,
docAfter: Optional[Any] = None,
jsonldPredicate: Optional[Any] = None,
documentRoot: Optional[Any] = None,
extends: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.inVocab = inVocab
self.symbols = symbols
self.type = type
self.doc = doc
self.docParent = docParent
self.docChild = docChild
self.docAfter = docAfter
self.jsonldPredicate = jsonldPredicate
self.documentRoot = documentRoot
self.extends = extends
def __eq__(self, other: Any) -> bool:
if isinstance(other, SaladEnumSchema):
return bool(
self.name == other.name
and self.inVocab == other.inVocab
and self.symbols == other.symbols
and self.type == other.type
and self.doc == other.doc
and self.docParent == other.docParent
and self.docChild == other.docChild
and self.docAfter == other.docAfter
and self.jsonldPredicate == other.jsonldPredicate
and self.documentRoot == other.documentRoot
and self.extends == other.extends
)
return False
def __hash__(self) -> int:
return hash(
(
self.name,
self.inVocab,
self.symbols,
self.type,
self.doc,
self.docParent,
self.docChild,
self.docAfter,
self.jsonldPredicate,
self.documentRoot,
self.extends,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SaladEnumSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_union_of_None_type_or_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
name = "_:" + str(_uuid__.uuid4())
if not __original_name_is_none:
baseuri = name
if "inVocab" in _doc:
try:
inVocab = load_field(
_doc.get("inVocab"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'inVocab' field is not valid because:",
SourceLine(_doc, "inVocab", str),
[e],
)
)
else:
inVocab = None
try:
symbols = load_field(
_doc.get("symbols"),
uri_array_of_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'symbols' field is not valid because:",
SourceLine(_doc, "symbols", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Enum_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
if "docParent" in _doc:
try:
docParent = load_field(
_doc.get("docParent"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docParent' field is not valid because:",
SourceLine(_doc, "docParent", str),
[e],
)
)
else:
docParent = None
if "docChild" in _doc:
try:
docChild = load_field(
_doc.get("docChild"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docChild' field is not valid because:",
SourceLine(_doc, "docChild", str),
[e],
)
)
else:
docChild = None
if "docAfter" in _doc:
try:
docAfter = load_field(
_doc.get("docAfter"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docAfter' field is not valid because:",
SourceLine(_doc, "docAfter", str),
[e],
)
)
else:
docAfter = None
if "jsonldPredicate" in _doc:
try:
jsonldPredicate = load_field(
_doc.get("jsonldPredicate"),
union_of_None_type_or_strtype_or_JsonldPredicateLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'jsonldPredicate' field is not valid because:",
SourceLine(_doc, "jsonldPredicate", str),
[e],
)
)
else:
jsonldPredicate = None
if "documentRoot" in _doc:
try:
documentRoot = load_field(
_doc.get("documentRoot"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'documentRoot' field is not valid because:",
SourceLine(_doc, "documentRoot", str),
[e],
)
)
else:
documentRoot = None
if "extends" in _doc:
try:
extends = load_field(
_doc.get("extends"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'extends' field is not valid because:",
SourceLine(_doc, "extends", str),
[e],
)
)
else:
extends = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `inVocab`, `symbols`, `type`, `doc`, `docParent`, `docChild`, `docAfter`, `jsonldPredicate`, `documentRoot`, `extends`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SaladEnumSchema'", None, _errors__)
_constructed = cls(
name=name,
inVocab=inVocab,
symbols=symbols,
type=type,
doc=doc,
docParent=docParent,
docChild=docChild,
docAfter=docAfter,
jsonldPredicate=jsonldPredicate,
documentRoot=documentRoot,
extends=extends,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.inVocab is not None:
r["inVocab"] = save(
self.inVocab, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.symbols is not None:
u = save_relative_uri(self.symbols, self.name, True, None, relative_uris)
r["symbols"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.docParent is not None:
u = save_relative_uri(self.docParent, self.name, False, None, relative_uris)
r["docParent"] = u
if self.docChild is not None:
u = save_relative_uri(self.docChild, self.name, False, None, relative_uris)
r["docChild"] = u
if self.docAfter is not None:
u = save_relative_uri(self.docAfter, self.name, False, None, relative_uris)
r["docAfter"] = u
if self.jsonldPredicate is not None:
r["jsonldPredicate"] = save(
self.jsonldPredicate,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.documentRoot is not None:
r["documentRoot"] = save(
self.documentRoot,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
if self.extends is not None:
u = save_relative_uri(self.extends, self.name, False, 1, relative_uris)
r["extends"] = u
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"name",
"inVocab",
"symbols",
"type",
"doc",
"docParent",
"docChild",
"docAfter",
"jsonldPredicate",
"documentRoot",
"extends",
]
)
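
# Hypothetical usage sketch (editorial addition): SaladEnumSchema adds schema-level
# metadata (here `extends` and documentRoot) on top of the plain EnumSchema shown
# earlier.  All names and symbols are invented examples.
def _example_salad_enum_schema() -> Dict[str, Any]:
    directions = SaladEnumSchema(
        name="Direction",
        symbols=["north", "south", "east", "west"],
        type="enum",
        extends="BaseEnum",
        documentRoot=True,
    )
    return directions.save(top=False)
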
class Documentation(NamedType, DocType):
"""
A documentation section. This type exists to facilitate self-documenting
schemas but has no role in formal validation.
"""
def __init__(
self,
name: Any,
type: Any,
inVocab: Optional[Any] = None,
doc: Optional[Any] = None,
docParent: Optional[Any] = None,
docChild: Optional[Any] = None,
docAfter: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.name = name
self.inVocab = inVocab
self.doc = doc
self.docParent = docParent
self.docChild = docChild
self.docAfter = docAfter
self.type = type
def __eq__(self, other: Any) -> bool:
if isinstance(other, Documentation):
return bool(
self.name == other.name
and self.inVocab == other.inVocab
and self.doc == other.doc
and self.docParent == other.docParent
and self.docChild == other.docChild
and self.docAfter == other.docAfter
and self.type == other.type
)
return False
def __hash__(self) -> int:
return hash(
(
self.name,
self.inVocab,
self.doc,
self.docParent,
self.docChild,
self.docAfter,
self.type,
)
)
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "Documentation":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "name" in _doc:
try:
name = load_field(
_doc.get("name"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'name' field is not valid because:",
SourceLine(_doc, "name", str),
[e],
)
)
else:
name = None
__original_name_is_none = name is None
if name is None:
if docRoot is not None:
name = docRoot
else:
raise ValidationException("Missing name")
if not __original_name_is_none:
baseuri = name
if "inVocab" in _doc:
try:
inVocab = load_field(
_doc.get("inVocab"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'inVocab' field is not valid because:",
SourceLine(_doc, "inVocab", str),
[e],
)
)
else:
inVocab = None
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'doc' field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
if "docParent" in _doc:
try:
docParent = load_field(
_doc.get("docParent"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docParent' field is not valid because:",
SourceLine(_doc, "docParent", str),
[e],
)
)
else:
docParent = None
if "docChild" in _doc:
try:
docChild = load_field(
_doc.get("docChild"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docChild' field is not valid because:",
SourceLine(_doc, "docChild", str),
[e],
)
)
else:
docChild = None
if "docAfter" in _doc:
try:
docAfter = load_field(
_doc.get("docAfter"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'docAfter' field is not valid because:",
SourceLine(_doc, "docAfter", str),
[e],
)
)
else:
docAfter = None
try:
type = load_field(
_doc.get("type"),
typedsl_Documentation_nameLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the 'type' field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `name`, `inVocab`, `doc`, `docParent`, `docChild`, `docAfter`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'Documentation'", None, _errors__)
_constructed = cls(
name=name,
inVocab=inVocab,
doc=doc,
docParent=docParent,
docChild=docChild,
docAfter=docAfter,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
loadingOptions.idx[name] = (_constructed, loadingOptions)
return _constructed
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
if relative_uris:
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
else:
for ef in self.extension_fields:
r[ef] = self.extension_fields[ef]
if self.name is not None:
u = save_relative_uri(self.name, base_url, True, None, relative_uris)
r["name"] = u
if self.inVocab is not None:
r["inVocab"] = save(
self.inVocab, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.doc is not None:
r["doc"] = save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.docParent is not None:
u = save_relative_uri(self.docParent, self.name, False, None, relative_uris)
r["docParent"] = u
if self.docChild is not None:
u = save_relative_uri(self.docChild, self.name, False, None, relative_uris)
r["docChild"] = u
if self.docAfter is not None:
u = save_relative_uri(self.docAfter, self.name, False, None, relative_uris)
r["docAfter"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
        # top=True marks the document root, which alone carries the $namespaces/$schemas context
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
["name", "inVocab", "doc", "docParent", "docChild", "docAfter", "type"]
)
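
# Hypothetical usage sketch (editorial addition): a Documentation entry holds prose
# only; its doc field accepts either a single string or a list of strings.
def _example_documentation() -> Dict[str, Any]:
    intro = Documentation(
        name="Introduction",
        type="documentation",
        doc=["First paragraph.", "Second paragraph."],
    )
    return intro.save(top=False)
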
_vocab = {
"Any": "https://w3id.org/cwl/salad#Any",
"ArraySchema": "https://w3id.org/cwl/salad#ArraySchema",
"DocType": "https://w3id.org/cwl/salad#DocType",
"Documentation": "https://w3id.org/cwl/salad#Documentation",
"Documented": "https://w3id.org/cwl/salad#Documented",
"EnumSchema": "https://w3id.org/cwl/salad#EnumSchema",
"JsonldPredicate": "https://w3id.org/cwl/salad#JsonldPredicate",
"NamedType": "https://w3id.org/cwl/salad#NamedType",
"PrimitiveType": "https://w3id.org/cwl/salad#PrimitiveType",
"RecordField": "https://w3id.org/cwl/salad#RecordField",
"RecordSchema": "https://w3id.org/cwl/salad#RecordSchema",
"SaladEnumSchema": "https://w3id.org/cwl/salad#SaladEnumSchema",
"SaladRecordField": "https://w3id.org/cwl/salad#SaladRecordField",
"SaladRecordSchema": "https://w3id.org/cwl/salad#SaladRecordSchema",
"SchemaDefinedType": "https://w3id.org/cwl/salad#SchemaDefinedType",
"SpecializeDef": "https://w3id.org/cwl/salad#SpecializeDef",
"array": "https://w3id.org/cwl/salad#array",
"boolean": "http://www.w3.org/2001/XMLSchema#boolean",
"documentation": "https://w3id.org/cwl/salad#documentation",
"double": "http://www.w3.org/2001/XMLSchema#double",
"enum": "https://w3id.org/cwl/salad#enum",
"float": "http://www.w3.org/2001/XMLSchema#float",
"int": "http://www.w3.org/2001/XMLSchema#int",
"long": "http://www.w3.org/2001/XMLSchema#long",
"null": "https://w3id.org/cwl/salad#null",
"record": "https://w3id.org/cwl/salad#record",
"string": "http://www.w3.org/2001/XMLSchema#string",
}
_rvocab = {
"https://w3id.org/cwl/salad#Any": "Any",
"https://w3id.org/cwl/salad#ArraySchema": "ArraySchema",
"https://w3id.org/cwl/salad#DocType": "DocType",
"https://w3id.org/cwl/salad#Documentation": "Documentation",
"https://w3id.org/cwl/salad#Documented": "Documented",
"https://w3id.org/cwl/salad#EnumSchema": "EnumSchema",
"https://w3id.org/cwl/salad#JsonldPredicate": "JsonldPredicate",
"https://w3id.org/cwl/salad#NamedType": "NamedType",
"https://w3id.org/cwl/salad#PrimitiveType": "PrimitiveType",
"https://w3id.org/cwl/salad#RecordField": "RecordField",
"https://w3id.org/cwl/salad#RecordSchema": "RecordSchema",
"https://w3id.org/cwl/salad#SaladEnumSchema": "SaladEnumSchema",
"https://w3id.org/cwl/salad#SaladRecordField": "SaladRecordField",
"https://w3id.org/cwl/salad#SaladRecordSchema": "SaladRecordSchema",
"https://w3id.org/cwl/salad#SchemaDefinedType": "SchemaDefinedType",
"https://w3id.org/cwl/salad#SpecializeDef": "SpecializeDef",
"https://w3id.org/cwl/salad#array": "array",
"http://www.w3.org/2001/XMLSchema#boolean": "boolean",
"https://w3id.org/cwl/salad#documentation": "documentation",
"http://www.w3.org/2001/XMLSchema#double": "double",
"https://w3id.org/cwl/salad#enum": "enum",
"http://www.w3.org/2001/XMLSchema#float": "float",
"http://www.w3.org/2001/XMLSchema#int": "int",
"http://www.w3.org/2001/XMLSchema#long": "long",
"https://w3id.org/cwl/salad#null": "null",
"https://w3id.org/cwl/salad#record": "record",
"http://www.w3.org/2001/XMLSchema#string": "string",
}
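
# Hypothetical usage sketch (editorial addition): _vocab maps short vocabulary
# terms to full IRIs and _rvocab is its exact inverse, so terms round-trip.
def _example_vocab_roundtrip() -> str:
    iri = _vocab["record"]  # "https://w3id.org/cwl/salad#record"
    return _rvocab[iri]     # back to the short term "record"
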
strtype = _PrimitiveLoader(str)
inttype = _PrimitiveLoader(int)
floattype = _PrimitiveLoader(float)
booltype = _PrimitiveLoader(bool)
None_type = _PrimitiveLoader(type(None))
Any_type = _AnyLoader()
PrimitiveTypeLoader = _EnumLoader(
(
"null",
"boolean",
"int",
"long",
"float",
"double",
"string",
),
"PrimitiveType",
)
"""
Names of salad data types (based on Avro schema declarations).
Refer to the [Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
detailed information.
null: no value
boolean: a binary value
int: 32-bit signed integer
long: 64-bit signed integer
float: single precision (32-bit) IEEE 754 floating-point number
double: double precision (64-bit) IEEE 754 floating-point number
string: Unicode character sequence
"""
AnyLoader = _EnumLoader(("Any",), "Any")
"""
The **Any** type validates for any non-null value.
"""
RecordFieldLoader = _RecordLoader(RecordField)
RecordSchemaLoader = _RecordLoader(RecordSchema)
EnumSchemaLoader = _RecordLoader(EnumSchema)
ArraySchemaLoader = _RecordLoader(ArraySchema)
JsonldPredicateLoader = _RecordLoader(JsonldPredicate)
SpecializeDefLoader = _RecordLoader(SpecializeDef)
SaladRecordFieldLoader = _RecordLoader(SaladRecordField)
SaladRecordSchemaLoader = _RecordLoader(SaladRecordSchema)
SaladEnumSchemaLoader = _RecordLoader(SaladEnumSchema)
DocumentationLoader = _RecordLoader(Documentation)
array_of_strtype = _ArrayLoader(strtype)
union_of_None_type_or_strtype_or_array_of_strtype = _UnionLoader(
(
None_type,
strtype,
array_of_strtype,
)
)
uri_strtype_True_False_None = _URILoader(strtype, True, False, None)
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader(
(
PrimitiveTypeLoader,
RecordSchemaLoader,
EnumSchemaLoader,
ArraySchemaLoader,
strtype,
)
)
array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _ArrayLoader(
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype
)
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader(
(
PrimitiveTypeLoader,
RecordSchemaLoader,
EnumSchemaLoader,
ArraySchemaLoader,
strtype,
array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,
)
)
typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,
2,
"v1.1",
)
array_of_RecordFieldLoader = _ArrayLoader(RecordFieldLoader)
union_of_None_type_or_array_of_RecordFieldLoader = _UnionLoader(
(
None_type,
array_of_RecordFieldLoader,
)
)
idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader = _IdMapLoader(
union_of_None_type_or_array_of_RecordFieldLoader, "name", "type"
)
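
# Note (editorial): the idmap_* loaders in this module implement the Salad
# "identifier map" convenience form: `fields` may be written as a mapping keyed by
# field name, with each value being either the full field record or, as a further
# shorthand, just its `type`; the loader normalizes that mapping back into a list
# of field records before the inner loader validates it.
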
Record_nameLoader = _EnumLoader(("record",), "Record_name")
typedsl_Record_nameLoader_2 = _TypeDSLLoader(Record_nameLoader, 2, "v1.1")
union_of_None_type_or_strtype = _UnionLoader(
(
None_type,
strtype,
)
)
uri_union_of_None_type_or_strtype_True_False_None = _URILoader(
union_of_None_type_or_strtype, True, False, None
)
uri_array_of_strtype_True_False_None = _URILoader(array_of_strtype, True, False, None)
Enum_nameLoader = _EnumLoader(("enum",), "Enum_name")
typedsl_Enum_nameLoader_2 = _TypeDSLLoader(Enum_nameLoader, 2, "v1.1")
uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2 = _URILoader(
union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,
False,
True,
2,
)
Array_nameLoader = _EnumLoader(("array",), "Array_name")
typedsl_Array_nameLoader_2 = _TypeDSLLoader(Array_nameLoader, 2, "v1.1")
union_of_None_type_or_booltype = _UnionLoader(
(
None_type,
booltype,
)
)
union_of_None_type_or_inttype = _UnionLoader(
(
None_type,
inttype,
)
)
uri_strtype_False_False_1 = _URILoader(strtype, False, False, 1)
uri_union_of_None_type_or_strtype_False_False_None = _URILoader(
union_of_None_type_or_strtype, False, False, None
)
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None = _URILoader(
union_of_None_type_or_strtype_or_array_of_strtype, False, False, None
)
union_of_None_type_or_strtype_or_JsonldPredicateLoader = _UnionLoader(
(
None_type,
strtype,
JsonldPredicateLoader,
)
)
union_of_None_type_or_Any_type = _UnionLoader(
(
None_type,
Any_type,
)
)
array_of_SaladRecordFieldLoader = _ArrayLoader(SaladRecordFieldLoader)
union_of_None_type_or_array_of_SaladRecordFieldLoader = _UnionLoader(
(
None_type,
array_of_SaladRecordFieldLoader,
)
)
idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader = _IdMapLoader(
union_of_None_type_or_array_of_SaladRecordFieldLoader, "name", "type"
)
uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1 = _URILoader(
union_of_None_type_or_strtype_or_array_of_strtype, False, False, 1
)
array_of_SpecializeDefLoader = _ArrayLoader(SpecializeDefLoader)
union_of_None_type_or_array_of_SpecializeDefLoader = _UnionLoader(
(
None_type,
array_of_SpecializeDefLoader,
)
)
idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader = _IdMapLoader(
union_of_None_type_or_array_of_SpecializeDefLoader, "specializeFrom", "specializeTo"
)
Documentation_nameLoader = _EnumLoader(("documentation",), "Documentation_name")
typedsl_Documentation_nameLoader_2 = _TypeDSLLoader(Documentation_nameLoader, 2, "v1.1")
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = (
_UnionLoader(
(
SaladRecordSchemaLoader,
SaladEnumSchemaLoader,
DocumentationLoader,
)
)
)
array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _ArrayLoader(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader
)
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _UnionLoader(
(
SaladRecordSchemaLoader,
SaladEnumSchemaLoader,
DocumentationLoader,
array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
)
)
def load_document(
doc: Any,
baseuri: Optional[str] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> Any:
if baseuri is None:
baseuri = file_uri(os.getcwd()) + "/"
if loadingOptions is None:
loadingOptions = LoadingOptions()
result, metadata = _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
doc,
baseuri,
loadingOptions,
)
return result
def load_document_with_metadata(
doc: Any,
baseuri: Optional[str] = None,
loadingOptions: Optional[LoadingOptions] = None,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Any:
if baseuri is None:
baseuri = file_uri(os.getcwd()) + "/"
if loadingOptions is None:
loadingOptions = LoadingOptions(fileuri=baseuri)
return _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
doc,
baseuri,
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
def load_document_by_string(
string: Any,
uri: str,
loadingOptions: Optional[LoadingOptions] = None,
) -> Any:
yaml = yaml_no_ts()
result = yaml.load(string)
add_lc_filename(result, uri)
if loadingOptions is None:
loadingOptions = LoadingOptions(fileuri=uri)
result, metadata = _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
result,
uri,
loadingOptions,
)
return result
def load_document_by_yaml(
yaml: Any,
uri: str,
loadingOptions: Optional[LoadingOptions] = None,
) -> Any:
"""
Shortcut to load via a YAML object.
yaml: must be from ruamel.yaml.main.YAML.load with preserve_quotes=True
"""
add_lc_filename(yaml, uri)
if loadingOptions is None:
loadingOptions = LoadingOptions(fileuri=uri)
result, metadata = _document_load(
union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,
yaml,
uri,
loadingOptions,
)
return result
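# --- Illustrative usage sketch (editor's addition; not part of the generated module). ---
# It shows one way to drive the load_document_* entry points defined above: parse a small
# SALAD enum from a YAML string. The schema text and the file:// URI are hypothetical and
# may need extra fields to validate against the full metaschema.
def _example_load_enum_from_string() -> Any:
    example_yaml = (
        "name: Colour\n"
        "type: enum\n"
        "symbols: [red, green, blue]\n"
    )
    # load_document_by_string() parses the YAML, attaches line/column info for error
    # reporting, and validates the result against the union of document-root loaders.
    return load_document_by_string(example_yaml, "file:///example/colour.yml")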
| 0.600774 | 0.081082 |
import os
import re
from typing import (
Any,
AnyStr,
Callable,
List,
MutableMapping,
MutableSequence,
Optional,
Tuple,
Union,
)
import ruamel.yaml
from ruamel.yaml.comments import CommentedBase, CommentedMap, CommentedSeq
lineno_re = re.compile("^(.*?:[0-9]+:[0-9]+: )(( *)(.*))")
def _add_lc_filename(r: ruamel.yaml.comments.CommentedBase, source: AnyStr) -> None:
if isinstance(r, ruamel.yaml.comments.CommentedBase):
r.lc.filename = source
if isinstance(r, MutableSequence):
for d in r:
_add_lc_filename(d, source)
elif isinstance(r, MutableMapping):
for d in r.values():
_add_lc_filename(d, source)
def relname(source: str) -> str:
if source.startswith("file://"):
source = source[7:]
source = os.path.relpath(source)
return source
def add_lc_filename(r: ruamel.yaml.comments.CommentedBase, source: str) -> None:
_add_lc_filename(r, relname(source))
def reflow_all(text: str, maxline: Optional[int] = None) -> str:
if maxline is None:
maxline = int(os.environ.get("COLUMNS", "100"))
maxno = 0
for line in text.splitlines():
g = lineno_re.match(line)
if not g:
continue
group = g.group(1)
assert group is not None # nosec
maxno = max(maxno, len(group))
maxno_text = maxline - maxno
msg = [] # type: List[str]
for line in text.splitlines():
g = lineno_re.match(line)
if not g:
msg.append(line)
continue
pre = g.group(1)
assert pre is not None # nosec
group2 = g.group(2)
assert group2 is not None # nosec
reflowed = reflow(group2, maxno_text, g.group(3)).splitlines()
msg.extend([pre.ljust(maxno, " ") + r for r in reflowed])
return "\n".join(msg)
def reflow(text: str, maxline: int, shift: Optional[str] = "") -> str:
maxline = max(maxline, 20)
if len(text) > maxline:
sp = text.rfind(" ", 0, maxline)
if sp < 1:
sp = text.find(" ", sp + 1)
if sp == -1:
sp = len(text)
if sp < len(text):
return f"{text[0:sp]}\n{shift}{reflow(text[sp + 1 :], maxline, shift)}"
return text
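# Editor's note: a minimal illustration of reflow(), which word-wraps a message at the
# given column and prefixes continuation lines with `shift`. The input text is arbitrary.
def _example_reflow() -> str:
    # Wraps near column 20; continuation lines start with two spaces.
    return reflow("The quick brown fox jumps over the lazy dog", 20, "  ")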
def indent(v: str, nolead: bool = False, shift: str = " ", bullet: str = " ") -> str:
if nolead:
return v.splitlines()[0] + "\n".join([shift + line for line in v.splitlines()[1:]])
def lineno(i: int, line: str) -> str:
r = lineno_re.match(line)
if r is not None:
group1 = r.group(1)
group2 = r.group(2)
assert group1 is not None # nosec
assert group2 is not None # nosec
return group1 + (bullet if i == 0 else shift) + group2
return (bullet if i == 0 else shift) + line
return "\n".join([lineno(i, line) for i, line in enumerate(v.splitlines())])
def bullets(textlist: List[str], bul: str) -> str:
if len(textlist) == 1:
return textlist[0]
return "\n".join(indent(t, bullet=bul) for t in textlist)
def strip_duplicated_lineno(text: str) -> str:
"""
Strip duplicated line numbers.
Same as :py:meth:`strip_dup_lineno` but without reflow.
"""
pre = None # type: Optional[str]
msg = []
for line in text.splitlines():
g = lineno_re.match(line)
if not g:
msg.append(line)
continue
if g.group(1) != pre:
msg.append(line)
pre = g.group(1)
else:
group1 = g.group(1)
group2 = g.group(2)
assert group1 is not None # nosec
assert group2 is not None # nosec
msg.append(" " * len(group1) + group2)
return "\n".join(msg)
def strip_dup_lineno(text: str, maxline: Optional[int] = None) -> str:
if maxline is None:
maxline = int(os.environ.get("COLUMNS", "100"))
pre = None # type: Optional[str]
msg = []
maxno = 0
for line in text.splitlines():
g = lineno_re.match(line)
if not g:
continue
group1 = g.group(1)
assert group1 is not None # nosec
maxno = max(maxno, len(group1))
for line in text.splitlines():
g = lineno_re.match(line)
if not g:
msg.append(line)
continue
if g.group(1) != pre:
group3 = g.group(3)
assert group3 is not None # nosec
shift = maxno + len(group3)
group2 = g.group(2)
assert group2 is not None # nosec
g2 = reflow(group2, maxline - shift, " " * shift)
pre = g.group(1)
assert pre is not None # nosec
msg.append(pre + " " * (maxno - len(pre)) + g2)
else:
group2 = g.group(2)
assert group2 is not None # nosec
group3 = g.group(3)
assert group3 is not None # nosec
g2 = reflow(group2, maxline - maxno, " " * (maxno + len(group3)))
msg.append(" " * maxno + g2)
return "\n".join(msg)
def cmap(
d: Union[int, float, str, MutableMapping[str, Any], MutableSequence[Any], None],
lc: Optional[List[int]] = None,
fn: Optional[str] = None,
) -> Union[int, float, str, CommentedMap, CommentedSeq, None]:
if lc is None:
lc = [0, 0, 0, 0]
if fn is None:
fn = "test"
if isinstance(d, CommentedMap):
fn = d.lc.filename if hasattr(d.lc, "filename") else fn
for k, v in d.items():
if d.lc.data is not None and k in d.lc.data:
d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
else:
d[k] = cmap(v, lc, fn=fn)
return d
if isinstance(d, CommentedSeq):
fn = d.lc.filename if hasattr(d.lc, "filename") else fn
for k2, v2 in enumerate(d):
if d.lc.data is not None and k2 in d.lc.data:
d[k2] = cmap(v2, lc=d.lc.data[k2], fn=fn)
else:
d[k2] = cmap(v2, lc, fn=fn)
return d
if isinstance(d, MutableMapping):
cm = CommentedMap()
for k in sorted(d.keys()):
v = d[k]
if isinstance(v, CommentedBase):
uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
else:
uselc = lc
vfn = fn
cm[k] = cmap(v, lc=uselc, fn=vfn)
cm.lc.add_kv_line_col(k, uselc)
cm.lc.filename = fn
return cm
if isinstance(d, MutableSequence):
cs = CommentedSeq()
for k3, v3 in enumerate(d):
if isinstance(v3, CommentedBase):
uselc = [v3.lc.line, v3.lc.col, v3.lc.line, v3.lc.col]
vfn = v3.lc.filename if hasattr(v3.lc, "filename") else fn
else:
uselc = lc
vfn = fn
cs.append(cmap(v3, lc=uselc, fn=vfn))
cs.lc.add_kv_line_col(k3, uselc)
cs.lc.filename = fn
return cs
return d
class SourceLine:
def __init__(
self,
item: Any,
key: Optional[Any] = None,
raise_type: Callable[[str], Any] = str,
include_traceback: bool = False,
) -> None:
self.item = item
self.key = key
self.raise_type = raise_type
self.include_traceback = include_traceback
def __enter__(self) -> "SourceLine":
return self
def __exit__(
self,
exc_type: Any,
exc_value: Any,
tb: Any,
) -> None:
if not exc_value:
return
raise self.makeError(str(exc_value)) from exc_value
def file(self) -> Optional[str]:
if hasattr(self.item, "lc") and hasattr(self.item.lc, "filename"):
return str(self.item.lc.filename)
return None
def start(self) -> Optional[Tuple[int, int]]:
if self.file() is None:
return None
if self.key is None or self.item.lc.data is None or self.key not in self.item.lc.data:
return ((self.item.lc.line or 0) + 1, (self.item.lc.col or 0) + 1)
return (
(self.item.lc.data[self.key][0] or 0) + 1,
(self.item.lc.data[self.key][1] or 0) + 1,
)
def end(self) -> Optional[Tuple[int, int]]:
return None
def makeLead(self) -> str:
if self.file():
lcol = self.start()
line, col = lcol if lcol else ("", "")
return f"{self.file()}:{line}:{col}:"
return ""
def makeError(self, msg: str) -> Any:
if not isinstance(self.item, ruamel.yaml.comments.CommentedBase):
return self.raise_type(msg)
errs = []
lead = self.makeLead()
for m in msg.splitlines():
if bool(lineno_re.match(m)):
errs.append(m)
else:
errs.append(f"{lead} {m}")
return self.raise_type("\n".join(errs))
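# --- Illustrative usage sketch (editor's addition; not part of sourceline.py). ---
# SourceLine is normally used as a context manager around code that inspects one key of a
# ruamel.yaml CommentedMap: any exception raised inside the block is re-raised through
# `raise_type` with a "file:line:col:" lead derived from the node's position data.
def _example_sourceline_usage(doc: CommentedMap) -> None:
    # `doc` is assumed to be a CommentedMap produced by ruamel.yaml, so it carries lc data.
    with SourceLine(doc, "type", ValueError):
        if doc.get("type") not in ("record", "enum", "array"):
            raise ValueError(f"unsupported type: {doc.get('type')!r}")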
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/sourceline.py
|
sourceline.py
|
| 0.567697 | 0.190479 |
import copy
import hashlib
from typing import (
IO,
Any,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
)
from urllib.parse import urlparse
from importlib_resources import files
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.utils import (
CacheType,
ResolveType,
add_dictlist,
aslist,
convert_to_dict,
flatten,
json_dumps,
yaml_no_ts,
)
from . import _logger, jsonld_context, ref_resolver, validate
from .avro.schema import Names, SchemaParseException, is_subtype, make_avsc_object
from .exceptions import (
ClassValidationException,
SchemaSaladException,
ValidationException,
)
from .ref_resolver import Loader
from .sourceline import SourceLine, add_lc_filename, relname
SALAD_FILES = (
"metaschema.yml",
"metaschema_base.yml",
"salad.md",
"field_name.yml",
"import_include.md",
"link_res.yml",
"ident_res.yml",
"vocab_res.yml",
"vocab_res.yml",
"field_name_schema.yml",
"field_name_src.yml",
"field_name_proc.yml",
"ident_res_schema.yml",
"ident_res_src.yml",
"ident_res_proc.yml",
"link_res_schema.yml",
"link_res_src.yml",
"link_res_proc.yml",
"vocab_res_schema.yml",
"vocab_res_src.yml",
"vocab_res_proc.yml",
"map_res.yml",
"map_res_schema.yml",
"map_res_src.yml",
"map_res_proc.yml",
"typedsl_res.yml",
"typedsl_res_schema.yml",
"typedsl_res_src.yml",
"typedsl_res_proc.yml",
"sfdsl_res.yml",
"sfdsl_res_schema.yml",
"sfdsl_res_src.yml",
"sfdsl_res_proc.yml",
)
saladp = "https://w3id.org/cwl/salad#"
cached_metaschema: Optional[Tuple[Names, List[Dict[str, str]], Loader]] = None
def get_metaschema() -> Tuple[Names, List[Dict[str, str]], Loader]:
"""Instantiate the metaschema."""
global cached_metaschema
if cached_metaschema is not None:
return cached_metaschema
loader = ref_resolver.Loader(
{
"Any": saladp + "Any",
"ArraySchema": saladp + "ArraySchema",
"Array_symbol": saladp + "ArraySchema/type/Array_symbol",
"DocType": saladp + "DocType",
"Documentation": saladp + "Documentation",
"Documentation_symbol": saladp + "Documentation/type/Documentation_symbol",
"Documented": saladp + "Documented",
"EnumSchema": saladp + "EnumSchema",
"Enum_symbol": saladp + "EnumSchema/type/Enum_symbol",
"JsonldPredicate": saladp + "JsonldPredicate",
"NamedType": saladp + "NamedType",
"PrimitiveType": saladp + "PrimitiveType",
"RecordField": saladp + "RecordField",
"RecordSchema": saladp + "RecordSchema",
"Record_symbol": saladp + "RecordSchema/type/Record_symbol",
"SaladEnumSchema": saladp + "SaladEnumSchema",
"SaladRecordField": saladp + "SaladRecordField",
"SaladRecordSchema": saladp + "SaladRecordSchema",
"SchemaDefinedType": saladp + "SchemaDefinedType",
"SpecializeDef": saladp + "SpecializeDef",
"_container": saladp + "JsonldPredicate/_container",
"_id": {"@id": saladp + "_id", "@type": "@id", "identity": True},
"_type": saladp + "JsonldPredicate/_type",
"abstract": saladp + "SaladRecordSchema/abstract",
"array": saladp + "array",
"boolean": "http://www.w3.org/2001/XMLSchema#boolean",
"dct": "http://purl.org/dc/terms/",
"default": {"@id": saladp + "default", "noLinkCheck": True},
"doc": "rdfs:comment",
"docAfter": {"@id": saladp + "docAfter", "@type": "@id"},
"docChild": {"@id": saladp + "docChild", "@type": "@id"},
"docParent": {"@id": saladp + "docParent", "@type": "@id"},
"documentRoot": saladp + "SchemaDefinedType/documentRoot",
"documentation": saladp + "documentation",
"double": "http://www.w3.org/2001/XMLSchema#double",
"enum": saladp + "enum",
"extends": {"@id": saladp + "extends", "@type": "@id", "refScope": 1},
"fields": {
"@id": saladp + "fields",
"mapPredicate": "type",
"mapSubject": "name",
},
"float": "http://www.w3.org/2001/XMLSchema#float",
"identity": saladp + "JsonldPredicate/identity",
"inVocab": saladp + "NamedType/inVocab",
"int": "http://www.w3.org/2001/XMLSchema#int",
"items": {"@id": saladp + "items", "@type": "@vocab", "refScope": 2},
"jsonldPredicate": "sld:jsonldPredicate",
"long": "http://www.w3.org/2001/XMLSchema#long",
"mapPredicate": saladp + "JsonldPredicate/mapPredicate",
"mapSubject": saladp + "JsonldPredicate/mapSubject",
"name": "@id",
"noLinkCheck": saladp + "JsonldPredicate/noLinkCheck",
"null": saladp + "null",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"record": saladp + "record",
"refScope": saladp + "JsonldPredicate/refScope",
"sld": saladp,
"specialize": {
"@id": saladp + "specialize",
"mapPredicate": "specializeTo",
"mapSubject": "specializeFrom",
},
"specializeFrom": {
"@id": saladp + "specializeFrom",
"@type": "@id",
"refScope": 1,
},
"specializeTo": {
"@id": saladp + "specializeTo",
"@type": "@id",
"refScope": 1,
},
"string": "http://www.w3.org/2001/XMLSchema#string",
"subscope": saladp + "JsonldPredicate/subscope",
"symbols": {"@id": saladp + "symbols", "@type": "@id", "identity": True},
"type": {
"@id": saladp + "type",
"@type": "@vocab",
"refScope": 2,
"typeDSL": True,
},
"typeDSL": saladp + "JsonldPredicate/typeDSL",
"xsd": "http://www.w3.org/2001/XMLSchema#",
},
salad_version="v1.3",
)
for salad in SALAD_FILES:
loader.cache["https://w3id.org/cwl/" + salad] = (
files("schema_salad").joinpath("metaschema/" + salad).read_text("UTF-8")
)
loader.cache["https://w3id.org/cwl/salad"] = (
files("schema_salad").joinpath("metaschema/metaschema.yml").read_text("UTF-8")
)
yaml = yaml_no_ts()
j = yaml.load(loader.cache["https://w3id.org/cwl/salad"])
add_lc_filename(j, "metaschema.yml")
j2 = loader.resolve_all(j, saladp)[0]
if not isinstance(j2, list):
_logger.error("%s", j2)
raise SchemaParseException(f"Not a list: {j2}")
sch_obj = make_avro(j2, loader, loader.vocab)
try:
sch_names = make_avro_schema_from_avro(sch_obj)
except SchemaParseException:
_logger.error("Metaschema error, avro was:\n%s", json_dumps(sch_obj, indent=4))
raise
validate_doc(sch_names, j2, loader, strict=True)
cached_metaschema = (sch_names, j2, loader)
return cached_metaschema
def add_namespaces(metadata: Mapping[str, Any], namespaces: MutableMapping[str, str]) -> None:
"""Collect the provided namespaces, checking for conflicts."""
for key, value in metadata.items():
if key not in namespaces:
namespaces[key] = value
elif namespaces[key] != value:
raise ValidationException(
f"Namespace prefix {key!r} has conflicting definitions {namespaces[key]!r}"
" and {value!r}."
)
def collect_namespaces(metadata: Mapping[str, Any]) -> Dict[str, str]:
"""Walk through the metadata object, collecting namespace declarations."""
namespaces = {} # type: Dict[str, str]
if "$import_metadata" in metadata:
for value in metadata["$import_metadata"].values():
add_namespaces(collect_namespaces(value), namespaces)
if "$namespaces" in metadata:
add_namespaces(metadata["$namespaces"], namespaces)
return namespaces
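# Editor's sketch: collect_namespaces() merges "$namespaces" maps from the metadata itself
# and from any "$import_metadata" entries into one prefix -> URI dict. The metadata below
# is a hypothetical example, not taken from the original sources.
def _example_collect_namespaces() -> Dict[str, str]:
    # Expected result: {"dct": "http://purl.org/dc/terms/", "edam": "http://edamontology.org/"}
    return collect_namespaces(
        {
            "$namespaces": {"edam": "http://edamontology.org/"},
            "$import_metadata": {
                "hypothetical-import": {"$namespaces": {"dct": "http://purl.org/dc/terms/"}}
            },
        }
    )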
schema_type = Tuple[Loader, Union[Names, SchemaParseException], Dict[str, Any], Loader]
def load_schema(
schema_ref: ResolveType,
cache: Optional[CacheType] = None,
) -> schema_type:
"""
Load a schema that can be used to validate documents using load_and_validate.
:returns: document_loader, avsc_names, schema_metadata, metaschema_loader
"""
metaschema_names, _metaschema_doc, metaschema_loader = get_metaschema()
if cache is not None:
# we want to replace some items in the cache, so we need to
# make a new Loader with an empty index.
for k, v in metaschema_loader.cache.items():
if k not in cache:
cache[k] = v
metaschema_loader = Loader(
ctx=metaschema_loader.ctx, cache=cache, session=metaschema_loader.session
)
schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "")
if not isinstance(schema_doc, MutableSequence):
raise ValidationException("Schema reference must resolve to a list.")
validate_doc(metaschema_names, schema_doc, metaschema_loader, True)
metactx = schema_metadata.get("@context", {})
metactx.update(collect_namespaces(schema_metadata))
schema_ctx = jsonld_context.salad_to_jsonld_context(schema_doc, metactx)[0]
# Create the loader that will be used to load the target document.
document_loader = Loader(schema_ctx, cache=cache)
# Make the Avro validation that will be used to validate the target
# document
avsc_names = make_avro_schema(schema_doc, document_loader, metaschema_loader.vocab)
return document_loader, avsc_names, schema_metadata, metaschema_loader
def load_and_validate(
document_loader: Loader,
avsc_names: Names,
document: Union[CommentedMap, str],
strict: bool,
strict_foreign_properties: bool = False,
) -> Tuple[Any, Dict[str, Any]]:
"""Load a document and validate it with the provided schema.
return data, metadata
"""
try:
if isinstance(document, CommentedMap):
data, metadata = document_loader.resolve_all(
document,
document["id"],
checklinks=True,
strict_foreign_properties=strict_foreign_properties,
)
else:
data, metadata = document_loader.resolve_ref(
document,
checklinks=True,
strict_foreign_properties=strict_foreign_properties,
)
validate_doc(
avsc_names,
data,
document_loader,
strict,
strict_foreign_properties=strict_foreign_properties,
)
except ValidationException as exc:
raise ValidationException("", None, [exc]) from exc
return data, metadata
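# --- Illustrative usage sketch (editor's addition). ---
# A typical end-to-end flow: build the document loader and Avro names from a schema file,
# then load and validate an instance document against them. Both paths are hypothetical.
def _example_load_schema_and_document() -> Tuple[Any, Dict[str, Any]]:
    document_loader, avsc_names, _schema_metadata, _metaschema_loader = load_schema(
        "file:///example/my_schema.yml"
    )
    if isinstance(avsc_names, SchemaParseException):
        raise avsc_names
    return load_and_validate(
        document_loader, avsc_names, "file:///example/my_document.yml", strict=True
    )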
def validate_doc(
schema_names: Names,
doc: ResolveType,
loader: Loader,
strict: bool,
strict_foreign_properties: bool = False,
) -> None:
"""Validate a document using the provided schema."""
has_root = False
for root in schema_names.names.values():
if (hasattr(root, "get_prop") and root.get_prop("documentRoot")) or (
"documentRoot" in root.props
):
has_root = True
break
if not has_root:
raise ValidationException("No document roots defined in the schema")
if isinstance(doc, MutableSequence):
vdoc = doc
elif isinstance(doc, CommentedMap):
vdoc = CommentedSeq([doc])
vdoc.lc.add_kv_line_col(0, [doc.lc.line, doc.lc.col])
vdoc.lc.filename = doc.lc.filename
else:
raise ValidationException("Document must be dict or list")
roots = []
for root in schema_names.names.values():
if (hasattr(root, "get_prop") and root.get_prop("documentRoot")) or (
root.props.get("documentRoot")
):
roots.append(root)
anyerrors = []
for pos, item in enumerate(vdoc):
sourceline = SourceLine(vdoc, pos, str)
success = False
for root in roots:
success = validate.validate_ex(
root,
item,
loader.identifiers,
strict,
foreign_properties=loader.foreign_properties,
raise_ex=False,
skip_foreign_properties=loader.skip_schemas,
strict_foreign_properties=strict_foreign_properties,
vocab=loader.vocab,
)
if success:
break
if not success:
errors: List[SchemaSaladException] = []
for root in roots:
if hasattr(root, "get_prop"):
name = root.get_prop("name")
elif hasattr(root, "name"):
name = root.name
try:
validate.validate_ex(
root,
item,
loader.identifiers,
strict,
foreign_properties=loader.foreign_properties,
raise_ex=True,
skip_foreign_properties=loader.skip_schemas,
strict_foreign_properties=strict_foreign_properties,
vocab=loader.vocab,
)
except ClassValidationException as exc1:
errors = [
ClassValidationException(
f"tried {validate.friendly(name)!r} but", sourceline, [exc1]
)
]
break
except ValidationException as exc2:
errors.append(
ValidationException(
f"tried {validate.friendly(name)!r} but", sourceline, [exc2]
)
)
objerr = "Invalid"
for ident in loader.identifiers:
if ident in item:
objerr = f"Object {relname(item[ident])!r} is not valid because"
break
anyerrors.append(ValidationException(objerr, sourceline, errors, "-"))
if anyerrors:
raise ValidationException("", None, anyerrors, "*")
def get_anon_name(rec: MutableMapping[str, Union[str, Dict[str, str], List[str]]]) -> str:
"""Calculate a reproducible name for anonymous types."""
if "name" in rec:
name = rec["name"]
if isinstance(name, str):
return name
raise ValidationException(f"Expected name field to be a string, was {name}")
anon_name = ""
if rec["type"] in ("enum", saladp + "enum"):
for sym in rec["symbols"]:
anon_name += sym
return "anon.enum_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest() # nosec
if rec["type"] in ("record", saladp + "record"):
for field in rec["fields"]:
if isinstance(field, Mapping):
anon_name += field["name"]
else:
raise ValidationException(
f"Expected entries in 'fields' to also be maps, was {field}."
)
return "record_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest() # nosec
if rec["type"] in ("array", saladp + "array"):
return ""
raise ValidationException("Expected enum or record, was {rec['type'])}")
def replace_type(
items: Any,
spec: Dict[str, Any],
loader: Loader,
found: Set[str],
find_embeds: bool = True,
deepen: bool = True,
) -> Any:
"""Go through and replace types in the 'spec' mapping."""
if isinstance(items, MutableMapping):
# recursively check these fields for types to replace
if items.get("type") in ("record", "enum") and items.get("name"):
if items["name"] in found:
return items["name"]
found.add(items["name"])
if not deepen:
return items
items = copy.copy(items)
if not items.get("name"):
items["name"] = get_anon_name(items)
for name in ("type", "items", "fields"):
if name in items:
items[name] = replace_type(
items[name],
spec,
loader,
found,
find_embeds=find_embeds,
deepen=find_embeds,
)
if isinstance(items[name], MutableSequence):
items[name] = flatten(items[name])
return items
if isinstance(items, MutableSequence):
# recursively transform list
return [
replace_type(i, spec, loader, found, find_embeds=find_embeds, deepen=deepen)
for i in items
]
if isinstance(items, str):
# found a string which is a symbol corresponding to a type.
replace_with = None
if items in loader.vocab:
# If it's a vocabulary term, first expand it to its fully qualified
# URI
items = loader.vocab[items]
if items in spec:
# Look up in specialization map
replace_with = spec[items]
if replace_with:
return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds)
found.add(items)
return items
def avro_field_name(url: str) -> str:
"""
Turn a URL into an Avro-safe name.
If the URL has no fragment, return this plain URL.
Extract either the last part of the URL fragment past the slash, otherwise
the whole fragment.
"""
d = urlparse(url)
if d.fragment:
return d.fragment.split("/")[-1]
return d.path.split("/")[-1]
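# Editor's note, illustrative examples only (the second URL is hypothetical):
#   avro_field_name("https://w3id.org/cwl/salad#SaladRecordSchema/fields") -> "fields"
#   avro_field_name("https://example.com/types/MyRecord")                  -> "MyRecord"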
Avro = TypeVar("Avro", MutableMapping[str, Any], MutableSequence[Any], str)
def make_valid_avro(
items: Avro,
alltypes: Dict[str, Dict[str, Any]],
found: Set[str],
union: bool = False,
fielddef: bool = False,
vocab: Optional[Dict[str, str]] = None,
) -> Union[Avro, MutableMapping[str, str], str, List[Union[Any, MutableMapping[str, str], str]]]:
"""Convert our schema to be more avro like."""
if vocab is None:
_, _, metaschema_loader = get_metaschema()
vocab = metaschema_loader.vocab
# Possibly could be integrated into our fork of avro/schema.py?
if isinstance(items, MutableMapping):
avro = copy.copy(items)
if avro.get("name"):
if fielddef:
avro["name"] = avro_field_name(avro["name"])
else:
avro["name"] = validate.avro_type_name(avro["name"])
if "type" in avro and avro["type"] in (
saladp + "record",
saladp + "enum",
"record",
"enum",
):
if (hasattr(avro, "get") and avro.get("abstract")) or ("abstract" in avro):
return avro
if avro["name"] in found:
return cast(str, avro["name"])
found.add(avro["name"])
for field in ("type", "items", "values", "fields"):
if field in avro:
avro[field] = make_valid_avro(
avro[field],
alltypes,
found,
union=True,
fielddef=(field == "fields"),
vocab=vocab,
)
if "symbols" in avro:
avro["symbols"] = [avro_field_name(sym) for sym in avro["symbols"]]
return avro
if items and isinstance(items, MutableSequence):
ret = []
for i in items:
ret.append(
make_valid_avro(i, alltypes, found, union=union, fielddef=fielddef, vocab=vocab)
)
return ret
if union and isinstance(items, str):
if items in alltypes and validate.avro_type_name(items) not in found:
return make_valid_avro(alltypes[items], alltypes, found, union=union, vocab=vocab)
if items in vocab:
return validate.avro_type_name(vocab[items])
return validate.avro_type_name(items)
return items
def deepcopy_strip(item: Any) -> Any:
"""
Make a deep copy of list and dict objects.
Intentionally do not copy attributes. This is to discard CommentedMap and
CommentedSeq metadata which is very expensive with regular copy.deepcopy.
"""
if isinstance(item, MutableMapping):
return {k: deepcopy_strip(v) for k, v in item.items()}
if isinstance(item, MutableSequence):
return [deepcopy_strip(k) for k in item]
return item
def extend_and_specialize(items: List[Dict[str, Any]], loader: Loader) -> List[Dict[str, Any]]:
"""Apply 'extend' and 'specialize' to fully materialize derived record types."""
items2 = deepcopy_strip(items)
types = {i["name"]: i for i in items2} # type: Dict[str, Any]
results = []
for stype in items2:
if "extends" in stype:
specs = {} # type: Dict[str, str]
if "specialize" in stype:
for spec in aslist(stype["specialize"]):
specs[spec["specializeFrom"]] = spec["specializeTo"]
exfields = [] # type: List[Any]
exsym = [] # type: List[str]
for ex in aslist(stype["extends"]):
if ex not in types:
raise ValidationException(
f"Extends {stype['extends']} in {stype['name']} refers to invalid base type."
)
basetype = copy.copy(types[ex])
if stype["type"] == "record":
if specs:
basetype["fields"] = replace_type(
basetype.get("fields", []), specs, loader, set()
)
for field in basetype.get("fields", []):
if "inherited_from" not in field:
field["inherited_from"] = ex
exfields.extend(basetype.get("fields", []))
elif stype["type"] == "enum":
exsym.extend(basetype.get("symbols", []))
if stype["type"] == "record":
stype = copy.copy(stype)
combined_fields = []
fields = stype.get("fields", [])
# We use short names here so that if a type inherits a field
# (e.g. Child#id) from a parent (Parent#id) we avoid adding
# the same field twice (previously we had just
# ``exfields.extends(stype.fields)``).
sns_fields = {shortname(field["name"]): field for field in fields}
sns_exfields = {shortname(exfield["name"]): exfield for exfield in exfields}
# N.B.: This could be simpler. We could have a single loop
# to create the list of fields. The reason for this more
# convoluted solution is to make sure we keep the order
# of ``exfields`` first, and then the type fields. That's
# because we have unit tests that rely on the order that
# fields are written. Codegen output changes as well.
# We are relying on the insertion order preserving
                # property of Python dicts (i.e. relying on Py3.5+).
# First pass adding the exfields.
for sn_exfield, exfield in sns_exfields.items():
field = sns_fields.get(sn_exfield, None)
if field is None:
field = exfield
else:
# make sure field name has not been used yet
if not is_subtype(exfield["type"], field["type"]):
raise SchemaParseException(
f"Field name {field['name']} already in use with "
"incompatible type. "
f"{field['type']} vs {exfield['type']}."
)
combined_fields.append(field)
# Second pass, now add the ones that are specific to the subtype.
for field in sns_fields.values():
if field not in combined_fields:
combined_fields.append(field)
stype["fields"] = combined_fields
fieldnames = set() # type: Set[str]
for field in stype["fields"]:
if field["name"] in fieldnames:
raise ValidationException(
f"Field name {field['name']} appears twice in {stype['name']}"
)
fieldnames.add(field["name"])
elif stype["type"] == "enum":
stype = copy.copy(stype)
exsym.extend(stype.get("symbols", []))
stype["symbols"] = exsym
types[stype["name"]] = stype
results.append(stype)
ex_types = {}
for result in results:
ex_types[result["name"]] = result
extended_by = {} # type: Dict[str, str]
for result in results:
if "extends" in result:
for ex in aslist(result["extends"]):
if ex_types[ex].get("abstract"):
add_dictlist(extended_by, ex, ex_types[result["name"]])
add_dictlist(extended_by, validate.avro_type_name(ex), ex_types[ex])
for result in results:
if result.get("abstract") and result["name"] not in extended_by:
raise ValidationException(
f"{result['name']} is abstract but missing a concrete subtype"
)
for result in results:
if "fields" in result:
result["fields"] = replace_type(result["fields"], extended_by, loader, set())
return results
def make_avro(
i: List[Dict[str, Any]],
loader: Loader,
metaschema_vocab: Optional[Dict[str, str]] = None,
) -> List[Any]:
j = extend_and_specialize(i, loader)
name_dict = {} # type: Dict[str, Dict[str, Any]]
for entry in j:
name_dict[entry["name"]] = entry
avro = make_valid_avro(j, name_dict, set(), vocab=metaschema_vocab)
return [
t
for t in avro
if isinstance(t, MutableMapping)
and not t.get("abstract")
and t.get("type") != "org.w3id.cwl.salad.documentation"
]
def make_avro_schema(
i: List[Any], loader: Loader, metaschema_vocab: Optional[Dict[str, str]] = None
) -> Names:
"""
All in one convenience function.
Call make_avro() and make_avro_schema_from_avro() separately if you need
the intermediate result for diagnostic output.
"""
names = Names()
avro = make_avro(i, loader, metaschema_vocab)
make_avsc_object(convert_to_dict(avro), names)
return names
def make_avro_schema_from_avro(avro: List[Union[Avro, Dict[str, str], str]]) -> Names:
names = Names()
make_avsc_object(convert_to_dict(avro), names)
return names
def shortname(inputid: str) -> str:
"""Return the last segment of the provided fragment or path."""
parsed_id = urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split("/")[-1]
return parsed_id.path.split("/")[-1]
def print_inheritance(doc: List[Dict[str, Any]], stream: IO[Any]) -> None:
"""Write a Grapviz inheritance graph for the supplied document."""
stream.write("digraph {\n")
for entry in doc:
if entry["type"] == "record":
label = name = shortname(entry["name"])
fields = entry.get("fields", [])
if fields:
label += "\\n* {}\\l".format(
"\\l* ".join(shortname(field["name"]) for field in fields)
)
shape = "ellipse" if entry.get("abstract") else "box"
stream.write(f'"{name}" [shape={shape} label="{label}"];\n') # noqa: B907
if "extends" in entry:
for target in aslist(entry["extends"]):
stream.write(f'"{shortname(target)}" -> "{name}";\n') # noqa: B907
stream.write("}\n")
def print_fieldrefs(doc: List[Dict[str, Any]], loader: Loader, stream: IO[Any]) -> None:
"""Write a GraphViz graph of the relationships between the fields."""
obj = extend_and_specialize(doc, loader)
primitives = {
"http://www.w3.org/2001/XMLSchema#string",
"http://www.w3.org/2001/XMLSchema#boolean",
"http://www.w3.org/2001/XMLSchema#int",
"http://www.w3.org/2001/XMLSchema#long",
saladp + "null",
saladp + "enum",
saladp + "array",
saladp + "record",
saladp + "Any",
}
stream.write("digraph {\n")
for entry in obj:
if entry.get("abstract"):
continue
if entry["type"] == "record":
label = shortname(entry["name"])
for field in entry.get("fields", []):
found = set() # type: Set[str]
field_name = shortname(field["name"])
replace_type(field["type"], {}, loader, found, find_embeds=False)
for each_type in found:
if each_type not in primitives:
stream.write(
f"{label!r} -> {shortname(each_type)!r} [label={field_name!r}];\n"
)
stream.write("}\n")
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/schema.py
|
schema.py
|
import copy
import hashlib
from typing import (
IO,
Any,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
)
from urllib.parse import urlparse
from importlib_resources import files
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.utils import (
CacheType,
ResolveType,
add_dictlist,
aslist,
convert_to_dict,
flatten,
json_dumps,
yaml_no_ts,
)
from . import _logger, jsonld_context, ref_resolver, validate
from .avro.schema import Names, SchemaParseException, is_subtype, make_avsc_object
from .exceptions import (
ClassValidationException,
SchemaSaladException,
ValidationException,
)
from .ref_resolver import Loader
from .sourceline import SourceLine, add_lc_filename, relname
SALAD_FILES = (
"metaschema.yml",
"metaschema_base.yml",
"salad.md",
"field_name.yml",
"import_include.md",
"link_res.yml",
"ident_res.yml",
"vocab_res.yml",
"vocab_res.yml",
"field_name_schema.yml",
"field_name_src.yml",
"field_name_proc.yml",
"ident_res_schema.yml",
"ident_res_src.yml",
"ident_res_proc.yml",
"link_res_schema.yml",
"link_res_src.yml",
"link_res_proc.yml",
"vocab_res_schema.yml",
"vocab_res_src.yml",
"vocab_res_proc.yml",
"map_res.yml",
"map_res_schema.yml",
"map_res_src.yml",
"map_res_proc.yml",
"typedsl_res.yml",
"typedsl_res_schema.yml",
"typedsl_res_src.yml",
"typedsl_res_proc.yml",
"sfdsl_res.yml",
"sfdsl_res_schema.yml",
"sfdsl_res_src.yml",
"sfdsl_res_proc.yml",
)
saladp = "https://w3id.org/cwl/salad#"
cached_metaschema: Optional[Tuple[Names, List[Dict[str, str]], Loader]] = None
def get_metaschema() -> Tuple[Names, List[Dict[str, str]], Loader]:
"""Instantiate the metaschema."""
global cached_metaschema
if cached_metaschema is not None:
return cached_metaschema
loader = ref_resolver.Loader(
{
"Any": saladp + "Any",
"ArraySchema": saladp + "ArraySchema",
"Array_symbol": saladp + "ArraySchema/type/Array_symbol",
"DocType": saladp + "DocType",
"Documentation": saladp + "Documentation",
"Documentation_symbol": saladp + "Documentation/type/Documentation_symbol",
"Documented": saladp + "Documented",
"EnumSchema": saladp + "EnumSchema",
"Enum_symbol": saladp + "EnumSchema/type/Enum_symbol",
"JsonldPredicate": saladp + "JsonldPredicate",
"NamedType": saladp + "NamedType",
"PrimitiveType": saladp + "PrimitiveType",
"RecordField": saladp + "RecordField",
"RecordSchema": saladp + "RecordSchema",
"Record_symbol": saladp + "RecordSchema/type/Record_symbol",
"SaladEnumSchema": saladp + "SaladEnumSchema",
"SaladRecordField": saladp + "SaladRecordField",
"SaladRecordSchema": saladp + "SaladRecordSchema",
"SchemaDefinedType": saladp + "SchemaDefinedType",
"SpecializeDef": saladp + "SpecializeDef",
"_container": saladp + "JsonldPredicate/_container",
"_id": {"@id": saladp + "_id", "@type": "@id", "identity": True},
"_type": saladp + "JsonldPredicate/_type",
"abstract": saladp + "SaladRecordSchema/abstract",
"array": saladp + "array",
"boolean": "http://www.w3.org/2001/XMLSchema#boolean",
"dct": "http://purl.org/dc/terms/",
"default": {"@id": saladp + "default", "noLinkCheck": True},
"doc": "rdfs:comment",
"docAfter": {"@id": saladp + "docAfter", "@type": "@id"},
"docChild": {"@id": saladp + "docChild", "@type": "@id"},
"docParent": {"@id": saladp + "docParent", "@type": "@id"},
"documentRoot": saladp + "SchemaDefinedType/documentRoot",
"documentation": saladp + "documentation",
"double": "http://www.w3.org/2001/XMLSchema#double",
"enum": saladp + "enum",
"extends": {"@id": saladp + "extends", "@type": "@id", "refScope": 1},
"fields": {
"@id": saladp + "fields",
"mapPredicate": "type",
"mapSubject": "name",
},
"float": "http://www.w3.org/2001/XMLSchema#float",
"identity": saladp + "JsonldPredicate/identity",
"inVocab": saladp + "NamedType/inVocab",
"int": "http://www.w3.org/2001/XMLSchema#int",
"items": {"@id": saladp + "items", "@type": "@vocab", "refScope": 2},
"jsonldPredicate": "sld:jsonldPredicate",
"long": "http://www.w3.org/2001/XMLSchema#long",
"mapPredicate": saladp + "JsonldPredicate/mapPredicate",
"mapSubject": saladp + "JsonldPredicate/mapSubject",
"name": "@id",
"noLinkCheck": saladp + "JsonldPredicate/noLinkCheck",
"null": saladp + "null",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"record": saladp + "record",
"refScope": saladp + "JsonldPredicate/refScope",
"sld": saladp,
"specialize": {
"@id": saladp + "specialize",
"mapPredicate": "specializeTo",
"mapSubject": "specializeFrom",
},
"specializeFrom": {
"@id": saladp + "specializeFrom",
"@type": "@id",
"refScope": 1,
},
"specializeTo": {
"@id": saladp + "specializeTo",
"@type": "@id",
"refScope": 1,
},
"string": "http://www.w3.org/2001/XMLSchema#string",
"subscope": saladp + "JsonldPredicate/subscope",
"symbols": {"@id": saladp + "symbols", "@type": "@id", "identity": True},
"type": {
"@id": saladp + "type",
"@type": "@vocab",
"refScope": 2,
"typeDSL": True,
},
"typeDSL": saladp + "JsonldPredicate/typeDSL",
"xsd": "http://www.w3.org/2001/XMLSchema#",
},
salad_version="v1.3",
)
for salad in SALAD_FILES:
loader.cache["https://w3id.org/cwl/" + salad] = (
files("schema_salad").joinpath("metaschema/" + salad).read_text("UTF-8")
)
loader.cache["https://w3id.org/cwl/salad"] = (
files("schema_salad").joinpath("metaschema/metaschema.yml").read_text("UTF-8")
)
yaml = yaml_no_ts()
j = yaml.load(loader.cache["https://w3id.org/cwl/salad"])
add_lc_filename(j, "metaschema.yml")
j2 = loader.resolve_all(j, saladp)[0]
if not isinstance(j2, list):
_logger.error("%s", j2)
raise SchemaParseException(f"Not a list: {j2}")
sch_obj = make_avro(j2, loader, loader.vocab)
try:
sch_names = make_avro_schema_from_avro(sch_obj)
except SchemaParseException:
_logger.error("Metaschema error, avro was:\n%s", json_dumps(sch_obj, indent=4))
raise
validate_doc(sch_names, j2, loader, strict=True)
cached_metaschema = (sch_names, j2, loader)
return cached_metaschema
def add_namespaces(metadata: Mapping[str, Any], namespaces: MutableMapping[str, str]) -> None:
"""Collect the provided namespaces, checking for conflicts."""
for key, value in metadata.items():
if key not in namespaces:
namespaces[key] = value
elif namespaces[key] != value:
raise ValidationException(
f"Namespace prefix {key!r} has conflicting definitions {namespaces[key]!r}"
" and {value!r}."
)
def collect_namespaces(metadata: Mapping[str, Any]) -> Dict[str, str]:
"""Walk through the metadata object, collecting namespace declarations."""
namespaces = {} # type: Dict[str, str]
if "$import_metadata" in metadata:
for value in metadata["$import_metadata"].values():
add_namespaces(collect_namespaces(value), namespaces)
if "$namespaces" in metadata:
add_namespaces(metadata["$namespaces"], namespaces)
return namespaces
schema_type = Tuple[Loader, Union[Names, SchemaParseException], Dict[str, Any], Loader]
def load_schema(
schema_ref: ResolveType,
cache: Optional[CacheType] = None,
) -> schema_type:
"""
Load a schema that can be used to validate documents using load_and_validate.
:returns: document_loader, avsc_names, schema_metadata, metaschema_loader
"""
metaschema_names, _metaschema_doc, metaschema_loader = get_metaschema()
if cache is not None:
# we want to replace some items in the cache, so we need to
# make a new Loader with an empty index.
for k, v in metaschema_loader.cache.items():
if k not in cache:
cache[k] = v
metaschema_loader = Loader(
ctx=metaschema_loader.ctx, cache=cache, session=metaschema_loader.session
)
schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "")
if not isinstance(schema_doc, MutableSequence):
raise ValidationException("Schema reference must resolve to a list.")
validate_doc(metaschema_names, schema_doc, metaschema_loader, True)
metactx = schema_metadata.get("@context", {})
metactx.update(collect_namespaces(schema_metadata))
schema_ctx = jsonld_context.salad_to_jsonld_context(schema_doc, metactx)[0]
# Create the loader that will be used to load the target document.
document_loader = Loader(schema_ctx, cache=cache)
# Make the Avro validation that will be used to validate the target
# document
avsc_names = make_avro_schema(schema_doc, document_loader, metaschema_loader.vocab)
return document_loader, avsc_names, schema_metadata, metaschema_loader
def load_and_validate(
document_loader: Loader,
avsc_names: Names,
document: Union[CommentedMap, str],
strict: bool,
strict_foreign_properties: bool = False,
) -> Tuple[Any, Dict[str, Any]]:
"""Load a document and validate it with the provided schema.
return data, metadata
"""
try:
if isinstance(document, CommentedMap):
data, metadata = document_loader.resolve_all(
document,
document["id"],
checklinks=True,
strict_foreign_properties=strict_foreign_properties,
)
else:
data, metadata = document_loader.resolve_ref(
document,
checklinks=True,
strict_foreign_properties=strict_foreign_properties,
)
validate_doc(
avsc_names,
data,
document_loader,
strict,
strict_foreign_properties=strict_foreign_properties,
)
except ValidationException as exc:
raise ValidationException("", None, [exc]) from exc
return data, metadata
def validate_doc(
schema_names: Names,
doc: ResolveType,
loader: Loader,
strict: bool,
strict_foreign_properties: bool = False,
) -> None:
"""Validate a document using the provided schema."""
has_root = False
for root in schema_names.names.values():
if (hasattr(root, "get_prop") and root.get_prop("documentRoot")) or (
"documentRoot" in root.props
):
has_root = True
break
if not has_root:
raise ValidationException("No document roots defined in the schema")
if isinstance(doc, MutableSequence):
vdoc = doc
elif isinstance(doc, CommentedMap):
vdoc = CommentedSeq([doc])
vdoc.lc.add_kv_line_col(0, [doc.lc.line, doc.lc.col])
vdoc.lc.filename = doc.lc.filename
else:
raise ValidationException("Document must be dict or list")
roots = []
for root in schema_names.names.values():
if (hasattr(root, "get_prop") and root.get_prop("documentRoot")) or (
root.props.get("documentRoot")
):
roots.append(root)
anyerrors = []
for pos, item in enumerate(vdoc):
sourceline = SourceLine(vdoc, pos, str)
success = False
for root in roots:
success = validate.validate_ex(
root,
item,
loader.identifiers,
strict,
foreign_properties=loader.foreign_properties,
raise_ex=False,
skip_foreign_properties=loader.skip_schemas,
strict_foreign_properties=strict_foreign_properties,
vocab=loader.vocab,
)
if success:
break
if not success:
errors: List[SchemaSaladException] = []
for root in roots:
if hasattr(root, "get_prop"):
name = root.get_prop("name")
elif hasattr(root, "name"):
name = root.name
try:
validate.validate_ex(
root,
item,
loader.identifiers,
strict,
foreign_properties=loader.foreign_properties,
raise_ex=True,
skip_foreign_properties=loader.skip_schemas,
strict_foreign_properties=strict_foreign_properties,
vocab=loader.vocab,
)
except ClassValidationException as exc1:
errors = [
ClassValidationException(
f"tried {validate.friendly(name)!r} but", sourceline, [exc1]
)
]
break
except ValidationException as exc2:
errors.append(
ValidationException(
f"tried {validate.friendly(name)!r} but", sourceline, [exc2]
)
)
objerr = "Invalid"
for ident in loader.identifiers:
if ident in item:
objerr = f"Object {relname(item[ident])!r} is not valid because"
break
anyerrors.append(ValidationException(objerr, sourceline, errors, "-"))
if anyerrors:
raise ValidationException("", None, anyerrors, "*")
def get_anon_name(rec: MutableMapping[str, Union[str, Dict[str, str], List[str]]]) -> str:
"""Calculate a reproducible name for anonymous types."""
if "name" in rec:
name = rec["name"]
if isinstance(name, str):
return name
raise ValidationException(f"Expected name field to be a string, was {name}")
anon_name = ""
if rec["type"] in ("enum", saladp + "enum"):
for sym in rec["symbols"]:
anon_name += sym
return "anon.enum_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest() # nosec
if rec["type"] in ("record", saladp + "record"):
for field in rec["fields"]:
if isinstance(field, Mapping):
anon_name += field["name"]
else:
raise ValidationException(
f"Expected entries in 'fields' to also be maps, was {field}."
)
return "record_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest() # nosec
if rec["type"] in ("array", saladp + "array"):
return ""
raise ValidationException("Expected enum or record, was {rec['type'])}")
def replace_type(
items: Any,
spec: Dict[str, Any],
loader: Loader,
found: Set[str],
find_embeds: bool = True,
deepen: bool = True,
) -> Any:
"""Go through and replace types in the 'spec' mapping."""
if isinstance(items, MutableMapping):
# recursively check these fields for types to replace
if items.get("type") in ("record", "enum") and items.get("name"):
if items["name"] in found:
return items["name"]
found.add(items["name"])
if not deepen:
return items
items = copy.copy(items)
if not items.get("name"):
items["name"] = get_anon_name(items)
for name in ("type", "items", "fields"):
if name in items:
items[name] = replace_type(
items[name],
spec,
loader,
found,
find_embeds=find_embeds,
deepen=find_embeds,
)
if isinstance(items[name], MutableSequence):
items[name] = flatten(items[name])
return items
if isinstance(items, MutableSequence):
# recursively transform list
return [
replace_type(i, spec, loader, found, find_embeds=find_embeds, deepen=deepen)
for i in items
]
if isinstance(items, str):
# found a string which is a symbol corresponding to a type.
replace_with = None
if items in loader.vocab:
# If it's a vocabulary term, first expand it to its fully qualified
# URI
items = loader.vocab[items]
if items in spec:
# Look up in specialization map
replace_with = spec[items]
if replace_with:
return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds)
found.add(items)
return items
def avro_field_name(url: str) -> str:
"""
Turn a URL into an Avro-safe name.
If the URL has no fragment, return this plain URL.
Extract either the last part of the URL fragment past the slash, otherwise
the whole fragment.
"""
d = urlparse(url)
if d.fragment:
return d.fragment.split("/")[-1]
return d.path.split("/")[-1]
Avro = TypeVar("Avro", MutableMapping[str, Any], MutableSequence[Any], str)
def make_valid_avro(
items: Avro,
alltypes: Dict[str, Dict[str, Any]],
found: Set[str],
union: bool = False,
fielddef: bool = False,
vocab: Optional[Dict[str, str]] = None,
) -> Union[Avro, MutableMapping[str, str], str, List[Union[Any, MutableMapping[str, str], str]]]:
"""Convert our schema to be more avro like."""
if vocab is None:
_, _, metaschema_loader = get_metaschema()
vocab = metaschema_loader.vocab
# Possibly could be integrated into our fork of avro/schema.py?
if isinstance(items, MutableMapping):
avro = copy.copy(items)
if avro.get("name"):
if fielddef:
avro["name"] = avro_field_name(avro["name"])
else:
avro["name"] = validate.avro_type_name(avro["name"])
if "type" in avro and avro["type"] in (
saladp + "record",
saladp + "enum",
"record",
"enum",
):
if (hasattr(avro, "get") and avro.get("abstract")) or ("abstract" in avro):
return avro
if avro["name"] in found:
return cast(str, avro["name"])
found.add(avro["name"])
for field in ("type", "items", "values", "fields"):
if field in avro:
avro[field] = make_valid_avro(
avro[field],
alltypes,
found,
union=True,
fielddef=(field == "fields"),
vocab=vocab,
)
if "symbols" in avro:
avro["symbols"] = [avro_field_name(sym) for sym in avro["symbols"]]
return avro
if items and isinstance(items, MutableSequence):
ret = []
for i in items:
ret.append(
make_valid_avro(i, alltypes, found, union=union, fielddef=fielddef, vocab=vocab)
)
return ret
if union and isinstance(items, str):
if items in alltypes and validate.avro_type_name(items) not in found:
return make_valid_avro(alltypes[items], alltypes, found, union=union, vocab=vocab)
if items in vocab:
return validate.avro_type_name(vocab[items])
return validate.avro_type_name(items)
return items
def deepcopy_strip(item: Any) -> Any:
"""
Make a deep copy of list and dict objects.
Intentionally do not copy attributes. This is to discard CommentedMap and
CommentedSeq metadata which is very expensive with regular copy.deepcopy.
"""
if isinstance(item, MutableMapping):
return {k: deepcopy_strip(v) for k, v in item.items()}
if isinstance(item, MutableSequence):
return [deepcopy_strip(k) for k in item]
return item
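# Illustrative note: deepcopy_strip(CommentedMap({"a": CommentedSeq([1, 2])})) returns
# the plain {"a": [1, 2]}, dropping ruamel.yaml line/comment metadata along the way.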
def extend_and_specialize(items: List[Dict[str, Any]], loader: Loader) -> List[Dict[str, Any]]:
"""Apply 'extend' and 'specialize' to fully materialize derived record types."""
items2 = deepcopy_strip(items)
types = {i["name"]: i for i in items2} # type: Dict[str, Any]
results = []
for stype in items2:
if "extends" in stype:
specs = {} # type: Dict[str, str]
if "specialize" in stype:
for spec in aslist(stype["specialize"]):
specs[spec["specializeFrom"]] = spec["specializeTo"]
exfields = [] # type: List[Any]
exsym = [] # type: List[str]
for ex in aslist(stype["extends"]):
if ex not in types:
raise ValidationException(
f"Extends {stype['extends']} in {stype['name']} refers to invalid base type."
)
basetype = copy.copy(types[ex])
if stype["type"] == "record":
if specs:
basetype["fields"] = replace_type(
basetype.get("fields", []), specs, loader, set()
)
for field in basetype.get("fields", []):
if "inherited_from" not in field:
field["inherited_from"] = ex
exfields.extend(basetype.get("fields", []))
elif stype["type"] == "enum":
exsym.extend(basetype.get("symbols", []))
if stype["type"] == "record":
stype = copy.copy(stype)
combined_fields = []
fields = stype.get("fields", [])
# We use short names here so that if a type inherits a field
# (e.g. Child#id) from a parent (Parent#id) we avoid adding
# the same field twice (previously we had just
            # ``exfields.extend(stype.fields)``).
sns_fields = {shortname(field["name"]): field for field in fields}
sns_exfields = {shortname(exfield["name"]): exfield for exfield in exfields}
# N.B.: This could be simpler. We could have a single loop
# to create the list of fields. The reason for this more
# convoluted solution is to make sure we keep the order
# of ``exfields`` first, and then the type fields. That's
# because we have unit tests that rely on the order that
# fields are written. Codegen output changes as well.
            # We are relying on the insertion-order-preserving
            # property of Python dicts (guaranteed since Python 3.7).
# First pass adding the exfields.
for sn_exfield, exfield in sns_exfields.items():
field = sns_fields.get(sn_exfield, None)
if field is None:
field = exfield
else:
# make sure field name has not been used yet
if not is_subtype(exfield["type"], field["type"]):
raise SchemaParseException(
f"Field name {field['name']} already in use with "
"incompatible type. "
f"{field['type']} vs {exfield['type']}."
)
combined_fields.append(field)
# Second pass, now add the ones that are specific to the subtype.
for field in sns_fields.values():
if field not in combined_fields:
combined_fields.append(field)
stype["fields"] = combined_fields
fieldnames = set() # type: Set[str]
for field in stype["fields"]:
if field["name"] in fieldnames:
raise ValidationException(
f"Field name {field['name']} appears twice in {stype['name']}"
)
fieldnames.add(field["name"])
elif stype["type"] == "enum":
stype = copy.copy(stype)
exsym.extend(stype.get("symbols", []))
stype["symbols"] = exsym
types[stype["name"]] = stype
results.append(stype)
ex_types = {}
for result in results:
ex_types[result["name"]] = result
extended_by = {} # type: Dict[str, str]
for result in results:
if "extends" in result:
for ex in aslist(result["extends"]):
if ex_types[ex].get("abstract"):
add_dictlist(extended_by, ex, ex_types[result["name"]])
add_dictlist(extended_by, validate.avro_type_name(ex), ex_types[ex])
for result in results:
if result.get("abstract") and result["name"] not in extended_by:
raise ValidationException(
f"{result['name']} is abstract but missing a concrete subtype"
)
for result in results:
if "fields" in result:
result["fields"] = replace_type(result["fields"], extended_by, loader, set())
return results
def make_avro(
i: List[Dict[str, Any]],
loader: Loader,
metaschema_vocab: Optional[Dict[str, str]] = None,
) -> List[Any]:
j = extend_and_specialize(i, loader)
name_dict = {} # type: Dict[str, Dict[str, Any]]
for entry in j:
name_dict[entry["name"]] = entry
avro = make_valid_avro(j, name_dict, set(), vocab=metaschema_vocab)
return [
t
for t in avro
if isinstance(t, MutableMapping)
and not t.get("abstract")
and t.get("type") != "org.w3id.cwl.salad.documentation"
]
def make_avro_schema(
i: List[Any], loader: Loader, metaschema_vocab: Optional[Dict[str, str]] = None
) -> Names:
"""
All in one convenience function.
Call make_avro() and make_avro_schema_from_avro() separately if you need
the intermediate result for diagnostic output.
"""
names = Names()
avro = make_avro(i, loader, metaschema_vocab)
make_avsc_object(convert_to_dict(avro), names)
return names
def make_avro_schema_from_avro(avro: List[Union[Avro, Dict[str, str], str]]) -> Names:
names = Names()
make_avsc_object(convert_to_dict(avro), names)
return names
def shortname(inputid: str) -> str:
"""Return the last segment of the provided fragment or path."""
parsed_id = urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split("/")[-1]
return parsed_id.path.split("/")[-1]
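# Illustrative examples (hypothetical identifiers):
#   shortname("file:///schemas/base.yml#MyRecord/my_field") -> "my_field"
#   shortname("https://example.com/types/MyRecord")         -> "MyRecord"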
def print_inheritance(doc: List[Dict[str, Any]], stream: IO[Any]) -> None:
"""Write a Grapviz inheritance graph for the supplied document."""
stream.write("digraph {\n")
for entry in doc:
if entry["type"] == "record":
label = name = shortname(entry["name"])
fields = entry.get("fields", [])
if fields:
label += "\\n* {}\\l".format(
"\\l* ".join(shortname(field["name"]) for field in fields)
)
shape = "ellipse" if entry.get("abstract") else "box"
stream.write(f'"{name}" [shape={shape} label="{label}"];\n') # noqa: B907
if "extends" in entry:
for target in aslist(entry["extends"]):
stream.write(f'"{shortname(target)}" -> "{name}";\n') # noqa: B907
stream.write("}\n")
def print_fieldrefs(doc: List[Dict[str, Any]], loader: Loader, stream: IO[Any]) -> None:
"""Write a GraphViz graph of the relationships between the fields."""
obj = extend_and_specialize(doc, loader)
primitives = {
"http://www.w3.org/2001/XMLSchema#string",
"http://www.w3.org/2001/XMLSchema#boolean",
"http://www.w3.org/2001/XMLSchema#int",
"http://www.w3.org/2001/XMLSchema#long",
saladp + "null",
saladp + "enum",
saladp + "array",
saladp + "record",
saladp + "Any",
}
stream.write("digraph {\n")
for entry in obj:
if entry.get("abstract"):
continue
if entry["type"] == "record":
label = shortname(entry["name"])
for field in entry.get("fields", []):
found = set() # type: Set[str]
field_name = shortname(field["name"])
replace_type(field["type"], {}, loader, found, find_embeds=False)
for each_type in found:
if each_type not in primitives:
stream.write(
f"{label!r} -> {shortname(each_type)!r} [label={field_name!r}];\n"
)
stream.write("}\n")
| 0.627609 | 0.255406 |
import argparse
import logging
import os
import sys
from typing import Any, Dict, List, Mapping, MutableSequence, Optional, Union, cast
from urllib.parse import urlparse
from rdflib import __version__ as rdflib_version
from rdflib.parser import Parser
from rdflib.plugin import register
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from . import codegen, jsonld_context, schema
from .avro.schema import SchemaParseException
from .exceptions import ValidationException, to_one_line_messages
from .makedoc import makedoc
from .ref_resolver import Loader, file_uri
from .utils import json_dump, stdout
if int(rdflib_version.split(".", maxsplit=1)[0]) < 6:
register("json-ld", Parser, "rdflib_jsonld.parser", "JsonLDParser")
if sys.version_info >= (3, 8):
import importlib.metadata as importlib_metadata
else:
import importlib_metadata
_logger = logging.getLogger("salad")
def printrdf(
workflow: str,
wf: Union[CommentedMap, CommentedSeq],
ctx: Dict[str, Any],
sr: str,
) -> None:
g = jsonld_context.makerdf(workflow, wf, ctx)
g.serialize(destination=stdout(), format=sr)
def arg_parser() -> argparse.ArgumentParser:
"""Build the argument parser."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--rdf-serializer",
help="Output RDF serialization format used by --print-rdf"
"(one of turtle (default), n3, nt, xml)",
default="turtle",
)
parser.add_argument(
"--skip-schemas",
action="store_true",
default=False,
help="If specified, ignore $schemas sections.",
)
parser.add_argument(
"--strict-foreign-properties",
action="store_true",
help="Strict checking of foreign properties",
default=False,
)
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument(
"--print-jsonld-context",
action="store_true",
help="Print JSON-LD context for schema",
)
exgroup.add_argument("--print-rdfs", action="store_true", help="Print RDF schema")
exgroup.add_argument("--print-avro", action="store_true", help="Print Avro schema")
exgroup.add_argument(
"--print-rdf",
action="store_true",
help="Print corresponding RDF graph for document",
)
exgroup.add_argument(
"--print-pre", action="store_true", help="Print document after preprocessing"
)
exgroup.add_argument("--print-index", action="store_true", help="Print node index")
exgroup.add_argument("--print-metadata", action="store_true", help="Print document metadata")
exgroup.add_argument(
"--print-inheritance-dot",
action="store_true",
help="Print graphviz file of inheritance",
)
exgroup.add_argument(
"--print-fieldrefs-dot",
action="store_true",
help="Print graphviz file of field refs",
)
exgroup.add_argument(
"--codegen",
type=str,
metavar="language",
help="Generate classes in target language, currently supported: "
"python, java, typescript, dotnet, cpp, dlang",
)
parser.add_argument(
"--codegen-target",
type=str,
default=None,
help="Defaults to sys.stdout for Python/C++/Dlang and ./ for " "Java/TypeScript/.Net",
)
parser.add_argument(
"--codegen-examples",
type=str,
metavar="directory",
default=None,
help="Directory of example documents for test case generation (Java/TypeScript/.Net/Dlang only).",
)
parser.add_argument(
"--codegen-package",
type=str,
metavar="dotted.package",
default=None,
help="Optional override of the package name which is other derived "
"from the base URL (Java/TypeScript/.Net/Dlang only).",
),
parser.add_argument(
"--codegen-copyright",
type=str,
metavar="copyright_string",
default=None,
help="Optional copyright of the input schema.",
    )
parser.add_argument(
"--codegen-parser-info",
metavar="parser_info",
type=str,
default=None,
help="Optional parser name which is accessible via resulted parser API (Python and Dlang only)",
)
exgroup.add_argument(
"--print-oneline",
action="store_true",
help="Print each error message in oneline",
)
exgroup.add_argument(
"--print-doc", action="store_true", help="Print HTML schema documentation page"
)
exgroup_strict = parser.add_mutually_exclusive_group()
exgroup_strict.add_argument(
"--strict",
action="store_true",
help="Strict validation (unrecognized or out of place fields are error)",
default=True,
dest="strict",
)
exgroup_strict.add_argument(
"--non-strict",
action="store_false",
help="Lenient validation (ignore unrecognized fields)",
default=True,
dest="strict",
)
exgroup_volume = parser.add_mutually_exclusive_group()
exgroup_volume.add_argument("--verbose", action="store_true", help="Default logging")
exgroup_volume.add_argument(
"--quiet", action="store_true", help="Only print warnings and errors."
)
exgroup_volume.add_argument("--debug", action="store_true", help="Print even more logging")
parser.add_argument(
"--only",
action="append",
help="Use with --print-doc, document only listed types",
)
parser.add_argument(
"--redirect",
action="append",
help="Use with --print-doc, override default link for type",
)
parser.add_argument("--brand", help="Use with --print-doc, set the 'brand' text in nav bar")
parser.add_argument(
"--brandlink",
help="Use with --print-doc, set the link for 'brand' in nav bar",
)
parser.add_argument(
"--brandstyle",
help="Use with --print-doc, HTML code to link to an external style sheet",
)
parser.add_argument(
"--brandinverse",
default=False,
action="store_true",
help="Use with --print-doc",
)
parser.add_argument(
"--primtype",
default="#PrimitiveType",
help="Use with --print-doc, link to use for primitive types (string, int etc)",
)
parser.add_argument("schema", type=str, nargs="?", default=None)
parser.add_argument("document", type=str, nargs="*", default=None)
parser.add_argument("--version", "-v", action="store_true", help="Print version", default=None)
return parser
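# Example invocations (a sketch; assumes the package's usual "schema-salad-tool"
# console entry point that wraps main() below, and hypothetical file names):
#   schema-salad-tool --print-avro myschema.yml
#   schema-salad-tool myschema.yml mydocument.yml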
def main(argsl: Optional[List[str]] = None) -> int:
if argsl is None:
argsl = sys.argv[1:]
args = arg_parser().parse_args(argsl)
if args.version is None and args.schema is None:
print(f"{sys.argv[0]}: error: too few arguments.")
return 1
if args.quiet:
_logger.setLevel(logging.WARN)
if args.debug:
_logger.setLevel(logging.DEBUG)
pkg = importlib_metadata.version("schema_salad")
if pkg:
if args.version:
print(f"{sys.argv[0]} Current version: {pkg}")
return 0
_logger.info("%s Current version: %s", sys.argv[0], pkg)
# Get the metaschema to validate the schema
metaschema_names, metaschema_doc, metaschema_loader = schema.get_metaschema()
# Load schema document and resolve refs
schema_uri = args.schema
if not (urlparse(schema_uri)[0] and urlparse(schema_uri)[0] in ["http", "https", "file"]):
schema_uri = file_uri(os.path.abspath(schema_uri))
schema_raw_doc = metaschema_loader.fetch(schema_uri)
try:
schema_doc, schema_metadata = metaschema_loader.resolve_all(schema_raw_doc, schema_uri)
except ValidationException as e:
_logger.error(
"Schema %r failed link checking:\n%s",
args.schema,
str(e),
exc_info=bool(args.debug),
)
_logger.debug("Index is %s", list(metaschema_loader.idx.keys()))
_logger.debug("Vocabulary is %s", list(metaschema_loader.vocab.keys()))
return 1
except RuntimeError as e:
_logger.error(
"Schema %r read error:\n%s",
args.schema,
str(e),
exc_info=bool(args.debug),
)
return 1
if args.print_doc:
makedoc(
sys.stdout,
args.schema,
args.redirect,
args.only,
args.brand,
args.brandlink,
args.primtype,
args.brandstyle,
args.brandinverse,
)
return 0
# Optionally print the schema after ref resolution
if not args.document and args.print_pre:
json_dump(schema_doc, fp=sys.stdout, indent=4, default=str)
return 0
if not args.document and args.print_index:
json_dump(list(metaschema_loader.idx.keys()), fp=sys.stdout, indent=4, default=str)
return 0
# Validate the schema document against the metaschema
try:
schema.validate_doc(metaschema_names, schema_doc, metaschema_loader, args.strict)
except ValidationException as e:
_logger.error("While validating schema %r:\n%s", args.schema, str(e))
return 1
# Get the json-ld context and RDFS representation from the schema
metactx = schema.collect_namespaces(schema_metadata)
if "$base" in schema_metadata:
metactx["@base"] = schema_metadata["$base"]
if isinstance(schema_doc, CommentedSeq):
(schema_ctx, rdfs) = jsonld_context.salad_to_jsonld_context(schema_doc, metactx)
else:
raise ValidationException(f"Expected a CommentedSeq, got {type(schema_doc)}: {schema_doc}.")
# Create the loader that will be used to load the target document.
schema_version = schema_metadata.get("saladVersion", None)
document_loader = Loader(
schema_ctx, skip_schemas=args.skip_schemas, salad_version=schema_version
)
if args.codegen:
codegen.codegen(
args.codegen,
cast(List[Dict[str, Any]], schema_doc),
schema_metadata,
document_loader,
target=args.codegen_target,
examples=args.codegen_examples,
package=args.codegen_package,
copyright=args.codegen_copyright,
parser_info=args.codegen_parser_info,
)
return 0
    # Build the Avro schema objects that will be used to validate the target
    # document
if isinstance(schema_doc, MutableSequence):
avsc_obj = schema.make_avro(schema_doc, document_loader)
try:
avsc_names = schema.make_avro_schema_from_avro(avsc_obj)
except SchemaParseException as err:
_logger.error(
"Schema %r error:\n%s",
args.schema,
str(err),
exc_info=((type(err), err, None) if args.debug else None),
)
if args.print_avro:
json_dump(avsc_obj, fp=sys.stdout, indent=4, default=str)
return 1
else:
_logger.error("Schema %r must be a list.", args.schema) # type: ignore[unreachable]
return 1
# Optionally print Avro-compatible schema from schema
if args.print_avro:
json_dump(avsc_obj, fp=sys.stdout, indent=4, default=str)
return 0
# Optionally print the json-ld context from the schema
if args.print_jsonld_context:
j = {"@context": schema_ctx}
json_dump(j, fp=sys.stdout, indent=4, sort_keys=True, default=str)
return 0
# Optionally print the RDFS graph from the schema
if args.print_rdfs:
rdfs.serialize(destination=stdout(), format=args.rdf_serializer)
return 0
if args.print_metadata and not args.document:
json_dump(schema_metadata, fp=sys.stdout, indent=4, default=str)
return 0
if args.print_inheritance_dot:
schema.print_inheritance(schema_doc, sys.stdout)
return 0
if args.print_fieldrefs_dot:
schema.print_fieldrefs(schema_doc, document_loader, sys.stdout)
return 0
# If no document specified, all done.
if not args.document:
print(f"Schema {args.schema!r} is valid")
return 0
# Load target document and resolve refs
for uri in args.document:
try:
document, doc_metadata = document_loader.resolve_ref(
uri, strict_foreign_properties=args.strict_foreign_properties
)
except ValidationException as e:
msg = to_one_line_messages(e) if args.print_oneline else str(e)
_logger.error(
"Document %r failed validation:\n%s",
args.document,
msg,
exc_info=args.debug,
)
return 1
# Optionally print the document after ref resolution
if args.print_pre:
json_dump(document, fp=sys.stdout, indent=4, default=str)
return 0
if args.print_index:
json_dump(list(document_loader.idx.keys()), fp=sys.stdout, indent=4, default=str)
return 0
# Validate the user document against the schema
try:
schema.validate_doc(
avsc_names,
document,
document_loader,
args.strict,
strict_foreign_properties=args.strict_foreign_properties,
)
except ValidationException as e:
msg2 = to_one_line_messages(e) if args.print_oneline else str(e)
_logger.error("While validating document %r:\n%s", uri, msg2)
return 1
# Optionally convert the document to RDF
if args.print_rdf:
if isinstance(document, (Mapping, MutableSequence)):
printrdf(uri, document, schema_ctx, args.rdf_serializer)
return 0
print("Document must be a dictionary or list.")
return 1
if args.print_metadata:
json_dump(doc_metadata, fp=sys.stdout, indent=4, default=str)
return 0
_logger.info("Document %r is valid", uri)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/main.py
|
main.py
|
| 0.568895 | 0.091382 |
import logging
import pprint
from typing import Any, List, Mapping, MutableMapping, MutableSequence, Optional, Set
from urllib.parse import urlsplit
from . import avro
from .avro.schema import Schema
from .exceptions import (
ClassValidationException,
SchemaSaladException,
ValidationException,
)
from .sourceline import SourceLine
_logger = logging.getLogger("salad")
def validate(
expected_schema: Schema,
datum: Any,
identifiers: Optional[List[str]] = None,
strict: bool = False,
foreign_properties: Optional[Set[str]] = None,
vocab: Optional[Mapping[str, str]] = None,
) -> bool:
if not identifiers:
identifiers = []
if not foreign_properties:
foreign_properties = set()
return validate_ex(
expected_schema,
datum,
identifiers,
strict=strict,
foreign_properties=foreign_properties,
raise_ex=False,
vocab=vocab,
)
INT_MIN_VALUE = -(1 << 31)
INT_MAX_VALUE = (1 << 31) - 1
LONG_MIN_VALUE = -(1 << 63)
LONG_MAX_VALUE = (1 << 63) - 1
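# Bounds of Avro's signed 32-bit "int" and signed 64-bit "long" types, used below
# to range-check Python integers against the declared schema type.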
def avro_shortname(name: str) -> str:
"""Produce an avro friendly short name."""
return name.split(".")[-1]
saladp = "https://w3id.org/cwl/salad#"
primitives = {
"http://www.w3.org/2001/XMLSchema#string": "string",
"http://www.w3.org/2001/XMLSchema#boolean": "boolean",
"http://www.w3.org/2001/XMLSchema#int": "int",
"http://www.w3.org/2001/XMLSchema#long": "long",
"http://www.w3.org/2001/XMLSchema#float": "float",
"http://www.w3.org/2001/XMLSchema#double": "double",
saladp + "null": "null",
saladp + "enum": "enum",
saladp + "array": "array",
saladp + "record": "record",
}
def avro_type_name(url: str) -> str:
"""
Turn a URL into an Avro-safe name.
If the URL has no fragment, return this plain URL.
Extract either the last part of the URL fragment past the slash, otherwise
the whole fragment.
"""
global primitives
if url in primitives:
return primitives[url]
u = urlsplit(url)
joined = filter(
lambda x: x,
list(reversed(u.netloc.split("."))) + u.path.split("/") + u.fragment.split("/"),
)
return ".".join(joined)
def friendly(v): # type: (Any) -> Any
if isinstance(v, avro.schema.NamedSchema):
return avro_shortname(v.name)
if isinstance(v, avro.schema.ArraySchema):
return f"array of <{friendly(v.items)}>"
if isinstance(v, avro.schema.PrimitiveSchema):
return v.type
if isinstance(v, avro.schema.UnionSchema):
return " or ".join([friendly(s) for s in v.schemas])
return avro_shortname(v)
def vpformat(datum): # type: (Any) -> str
a = pprint.pformat(datum)
if len(a) > 160:
a = a[0:160] + "[...]"
return a
def validate_ex(
expected_schema: Schema,
datum, # type: Any
identifiers=None, # type: Optional[List[str]]
strict=False, # type: bool
foreign_properties=None, # type: Optional[Set[str]]
raise_ex=True, # type: bool
strict_foreign_properties=False, # type: bool
logger=_logger, # type: logging.Logger
skip_foreign_properties=False, # type: bool
vocab=None, # type: Optional[Mapping[str, str]]
):
# type: (...) -> bool
"""Determine if a python datum is an instance of a schema."""
debug = _logger.isEnabledFor(logging.DEBUG)
if not identifiers:
identifiers = []
if not foreign_properties:
foreign_properties = set()
if vocab is None:
raise Exception("vocab must be provided")
schema_type = expected_schema.type
if schema_type == "null":
if datum is None:
return True
if raise_ex:
raise ValidationException("the value is not null")
return False
if schema_type == "boolean":
if isinstance(datum, bool):
return True
if raise_ex:
raise ValidationException("the value is not boolean")
return False
if schema_type == "string":
if isinstance(datum, str):
return True
if isinstance(datum, bytes):
return True
if raise_ex:
raise ValidationException("the value is not string")
return False
if schema_type == "int":
if isinstance(datum, int) and INT_MIN_VALUE <= datum <= INT_MAX_VALUE:
return True
if raise_ex:
raise ValidationException(f"{vpformat(datum)!r} is not int")
return False
if schema_type == "long":
if (isinstance(datum, int)) and LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE:
return True
if raise_ex:
raise ValidationException(f"the value {vpformat(datum)!r} is not long")
return False
if schema_type in ["float", "double"]:
if isinstance(datum, (int, float)):
return True
if raise_ex:
raise ValidationException(f"the value {vpformat(datum)!r} is not float or double")
return False
if isinstance(expected_schema, avro.schema.EnumSchema):
if expected_schema.name in ("org.w3id.cwl.salad.Any", "Any"):
if datum is not None:
return True
if raise_ex:
raise ValidationException("'Any' type must be non-null")
return False
if not isinstance(datum, str):
if raise_ex:
raise ValidationException(
f"value is a {type(datum).__name__} but expected a string"
)
return False
if expected_schema.name == "org.w3id.cwl.cwl.Expression":
if "$(" in datum or "${" in datum:
return True
if raise_ex:
raise ValidationException(
f"value {datum!r} does not contain an expression in the form $() or ${{}}"
)
return False
if datum in expected_schema.symbols:
return True
if raise_ex:
raise ValidationException(
"the value {} is not a valid {}, expected {}{}".format(
vpformat(datum),
friendly(expected_schema.name),
"one of " if len(expected_schema.symbols) > 1 else "",
"'" + "', '".join(expected_schema.symbols) + "'",
)
)
return False
if isinstance(expected_schema, avro.schema.ArraySchema):
if isinstance(datum, MutableSequence):
for i, d in enumerate(datum):
try:
sl = SourceLine(datum, i, ValidationException)
if not validate_ex(
expected_schema.items,
d,
identifiers,
strict=strict,
foreign_properties=foreign_properties,
raise_ex=raise_ex,
strict_foreign_properties=strict_foreign_properties,
logger=logger,
skip_foreign_properties=skip_foreign_properties,
vocab=vocab,
):
return False
except ValidationException as v:
if raise_ex:
source = v if debug else None
raise ValidationException("item is invalid because", sl, [v]) from source
return False
return True
if raise_ex:
raise ValidationException(
f"the value {vpformat(datum)} is not a list, "
f"expected list of {friendly(expected_schema.items)}"
)
return False
if isinstance(expected_schema, avro.schema.UnionSchema):
for s in expected_schema.schemas:
if validate_ex(
s,
datum,
identifiers,
strict=strict,
raise_ex=False,
strict_foreign_properties=strict_foreign_properties,
logger=logger,
skip_foreign_properties=skip_foreign_properties,
vocab=vocab,
):
return True
if not raise_ex:
return False
errors: List[SchemaSaladException] = []
checked = []
for s in expected_schema.schemas:
if isinstance(datum, MutableSequence) and not isinstance(s, avro.schema.ArraySchema):
continue
if isinstance(datum, MutableMapping) and not isinstance(s, avro.schema.RecordSchema):
continue
if isinstance(datum, (bool, int, float, str)) and isinstance(
s, (avro.schema.ArraySchema, avro.schema.RecordSchema)
):
continue
if datum is not None and s.type == "null":
continue
checked.append(s)
try:
validate_ex(
s,
datum,
identifiers,
strict=strict,
foreign_properties=foreign_properties,
raise_ex=True,
strict_foreign_properties=strict_foreign_properties,
logger=logger,
skip_foreign_properties=skip_foreign_properties,
vocab=vocab,
)
except ClassValidationException:
raise
except ValidationException as e:
errors.append(e)
if bool(errors):
raise ValidationException(
"",
None,
[
ValidationException(f"tried {friendly(check)} but", None, [err])
for (check, err) in zip(checked, errors)
],
"-",
)
raise ValidationException(
f"value is a {type(datum).__name__}, expected {friendly(expected_schema)}"
)
if isinstance(expected_schema, avro.schema.RecordSchema):
if not isinstance(datum, MutableMapping):
if raise_ex:
raise ValidationException(
f"is not a dict. Expected a {friendly(expected_schema.name)} object."
)
return False
classmatch = None
for f in expected_schema.fields:
if f.name in ("class",):
d = datum.get(f.name)
if not d:
if raise_ex:
raise ValidationException(f"Missing {f.name!r} field")
return False
avroname = None
if d in vocab:
avroname = avro_type_name(vocab[d])
if expected_schema.name not in (d, avroname):
if raise_ex:
raise ValidationException(
f"Expected class {expected_schema.name!r} but this is {d!r}"
)
return False
classmatch = d
break
errors = []
for f in expected_schema.fields:
if f.name in ("class",):
continue
if f.name in datum:
fieldval = datum[f.name]
else:
try:
fieldval = f.default
except KeyError:
fieldval = None
try:
sl = SourceLine(datum, f.name, str)
if not validate_ex(
f.type,
fieldval,
identifiers,
strict=strict,
foreign_properties=foreign_properties,
raise_ex=raise_ex,
strict_foreign_properties=strict_foreign_properties,
logger=logger,
skip_foreign_properties=skip_foreign_properties,
vocab=vocab,
):
return False
except ValidationException as v:
if f.name not in datum:
errors.append(ValidationException(f"missing required field {f.name!r}"))
else:
errors.append(
ValidationException(
f"the {f.name!r} field is not valid because",
sl,
[v],
)
)
for d in datum:
found = False
for f in expected_schema.fields:
if d == f.name:
found = True
if not found:
sl = SourceLine(datum, d, str)
if d is None:
err = ValidationException("mapping with implicit null key", sl)
if strict:
errors.append(err)
else:
logger.warning(err.as_warning())
continue
if d not in identifiers and d not in foreign_properties and d[0] not in ("@", "$"):
if (
(d not in identifiers and strict)
and (
d not in foreign_properties
and strict_foreign_properties
and not skip_foreign_properties
)
and not raise_ex
):
return False
split = urlsplit(d)
if split.scheme:
if not skip_foreign_properties:
err = ValidationException(
"unrecognized extension field {!r}{}.{}".format(
d,
" and strict_foreign_properties checking is enabled"
if strict_foreign_properties
else "",
"\nForeign properties from $schemas:\n {}".format(
"\n ".join(sorted(foreign_properties))
)
if len(foreign_properties) > 0
else "",
),
sl,
)
if strict_foreign_properties:
errors.append(err)
elif len(foreign_properties) > 0:
logger.warning(err.as_warning())
else:
err = ValidationException(
"invalid field {!r}, expected one of: {}".format(
d,
", ".join(f"{fn.name!r}" for fn in expected_schema.fields),
),
sl,
)
if strict:
errors.append(err)
else:
logger.warning(err.as_warning())
if bool(errors):
if raise_ex:
if classmatch:
raise ClassValidationException("", None, errors, "*")
raise ValidationException("", None, errors, "*")
return False
return True
if raise_ex:
raise ValidationException(f"Unrecognized schema_type {schema_type}")
return False
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/validate.py
|
validate.py
|
| 0.73431 | 0.211906 |
import json
import os
import sys
from io import BufferedWriter
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
Mapping,
MutableSequence,
Optional,
Tuple,
TypeVar,
Union,
)
import requests
from rdflib.graph import Graph
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.constructor import RoundTripConstructor
from ruamel.yaml.main import YAML
if TYPE_CHECKING:
from .fetcher import Fetcher
ContextType = Dict[str, Union[Dict[str, Any], str, Iterable[str]]]
DocumentType = TypeVar("DocumentType", CommentedSeq, CommentedMap)
DocumentOrStrType = TypeVar("DocumentOrStrType", CommentedSeq, CommentedMap, str)
FieldType = TypeVar("FieldType", str, CommentedSeq, CommentedMap)
MandatoryResolveType = Union[int, float, str, CommentedMap, CommentedSeq]
ResolveType = Optional[MandatoryResolveType]
ResolvedRefType = Tuple[ResolveType, CommentedMap]
IdxResultType = Union[CommentedMap, CommentedSeq, str, None]
IdxType = Dict[str, IdxResultType]
CacheType = Dict[str, Union[str, Graph, bool]]
FetcherCallableType = Callable[[CacheType, requests.sessions.Session], "Fetcher"]
AttachmentsType = Callable[[Union[CommentedMap, CommentedSeq]], bool]
def add_dictlist(di, key, val): # type: (Dict[Any, Any], Any, Any) -> None
if key not in di:
di[key] = []
di[key].append(val)
def aslist(thing: Any) -> MutableSequence[Any]:
"""
    Wrap a single item in a list.
    Return lists unchanged.
"""
if isinstance(thing, MutableSequence):
return thing
return [thing]
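# Illustrative examples:
#   aslist("single") -> ["single"]
#   aslist([1, 2])   -> [1, 2]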
def flatten(thing, ltypes=(list, tuple)):
# type: (Any, Any) -> Any
# http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
if thing is None:
return []
if not isinstance(thing, ltypes):
return [thing]
ltype = type(thing)
lst = list(thing)
i = 0
while i < len(lst):
while isinstance(lst[i], ltypes):
if not lst[i]:
lst.pop(i)
i -= 1
break
lst[i : i + 1] = lst[i]
i += 1
return ltype(lst)
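# Illustrative example: flatten([1, [2, (3, 4)], 5]) -> [1, 2, 3, 4, 5]; flatten(None) -> []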
# Check whether we are running on Windows.
def onWindows():
# type: () -> (bool)
return os.name == "nt"
def convert_to_dict(j4): # type: (Any) -> Any
if isinstance(j4, Mapping):
return {k: convert_to_dict(v) for k, v in j4.items()}
if isinstance(j4, MutableSequence):
return [convert_to_dict(v) for v in j4]
return j4
def json_dump(obj: Any, fp: IO[str], **kwargs: Any) -> None:
"""Force use of unicode."""
json.dump(convert_to_dict(obj), fp, **kwargs)
def json_dumps(
obj, # type: Any
**kwargs, # type: Any
): # type: (...) -> str
"""Force use of unicode."""
return json.dumps(convert_to_dict(obj), **kwargs)
def stdout() -> BufferedWriter:
"""Build a replacement for sys.stdout that allow for writing binary data."""
return os.fdopen(sys.stdout.fileno(), "wb", closefd=False)
class _RoundTripNoTimeStampConstructor(RoundTripConstructor):
def construct_yaml_timestamp(self: Any, node: Any, values: Any = None) -> Any:
return node.value
_RoundTripNoTimeStampConstructor.add_constructor(
"tag:yaml.org,2002:timestamp",
_RoundTripNoTimeStampConstructor.construct_yaml_timestamp,
)
# mypy: no-warn-unused-ignores
def yaml_no_ts() -> YAML:
"""
Get a YAML loader that won't parse timestamps into datetime objects.
Such datetime objects can't be easily dumped into JSON.
"""
yaml = YAML(typ="rt")
yaml.preserve_quotes = True # type: ignore
yaml.Constructor = _RoundTripNoTimeStampConstructor
return yaml
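# Minimal usage sketch (hypothetical document): timestamps load as plain strings,
# so the result can be passed straight to json_dump()/json_dumps() above.
#   yaml = yaml_no_ts()
#   data = yaml.load("created: 2023-08-08\nname: example\n")
#   assert data["created"] == "2023-08-08"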
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/utils.py
|
utils.py
|
import json
import os
import sys
from io import BufferedWriter
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
Mapping,
MutableSequence,
Optional,
Tuple,
TypeVar,
Union,
)
import requests
from rdflib.graph import Graph
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.constructor import RoundTripConstructor
from ruamel.yaml.main import YAML
if TYPE_CHECKING:
from .fetcher import Fetcher
ContextType = Dict[str, Union[Dict[str, Any], str, Iterable[str]]]
DocumentType = TypeVar("DocumentType", CommentedSeq, CommentedMap)
DocumentOrStrType = TypeVar("DocumentOrStrType", CommentedSeq, CommentedMap, str)
FieldType = TypeVar("FieldType", str, CommentedSeq, CommentedMap)
MandatoryResolveType = Union[int, float, str, CommentedMap, CommentedSeq]
ResolveType = Optional[MandatoryResolveType]
ResolvedRefType = Tuple[ResolveType, CommentedMap]
IdxResultType = Union[CommentedMap, CommentedSeq, str, None]
IdxType = Dict[str, IdxResultType]
CacheType = Dict[str, Union[str, Graph, bool]]
FetcherCallableType = Callable[[CacheType, requests.sessions.Session], "Fetcher"]
AttachmentsType = Callable[[Union[CommentedMap, CommentedSeq]], bool]
def add_dictlist(di, key, val): # type: (Dict[Any, Any], Any, Any) -> None
if key not in di:
di[key] = []
di[key].append(val)
def aslist(thing: Any) -> MutableSequence[Any]:
"""
    Wrap single items in a list.
Return lists unchanged.
"""
if isinstance(thing, MutableSequence):
return thing
return [thing]
def flatten(thing, ltypes=(list, tuple)):
# type: (Any, Any) -> Any
# http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
if thing is None:
return []
if not isinstance(thing, ltypes):
return [thing]
ltype = type(thing)
lst = list(thing)
i = 0
while i < len(lst):
while isinstance(lst[i], ltypes):
if not lst[i]:
lst.pop(i)
i -= 1
break
lst[i : i + 1] = lst[i]
i += 1
return ltype(lst)
# Check if we are running on Windows
def onWindows():
# type: () -> (bool)
return os.name == "nt"
def convert_to_dict(j4): # type: (Any) -> Any
if isinstance(j4, Mapping):
return {k: convert_to_dict(v) for k, v in j4.items()}
if isinstance(j4, MutableSequence):
return [convert_to_dict(v) for v in j4]
return j4
def json_dump(obj: Any, fp: IO[str], **kwargs: Any) -> None:
"""Force use of unicode."""
json.dump(convert_to_dict(obj), fp, **kwargs)
def json_dumps(
obj, # type: Any
**kwargs, # type: Any
): # type: (...) -> str
"""Force use of unicode."""
return json.dumps(convert_to_dict(obj), **kwargs)
def stdout() -> BufferedWriter:
"""Build a replacement for sys.stdout that allow for writing binary data."""
return os.fdopen(sys.stdout.fileno(), "wb", closefd=False)
class _RoundTripNoTimeStampConstructor(RoundTripConstructor):
def construct_yaml_timestamp(self: Any, node: Any, values: Any = None) -> Any:
return node.value
_RoundTripNoTimeStampConstructor.add_constructor(
"tag:yaml.org,2002:timestamp",
_RoundTripNoTimeStampConstructor.construct_yaml_timestamp,
)
# mypy: no-warn-unused-ignores
def yaml_no_ts() -> YAML:
"""
Get a YAML loader that won't parse timestamps into datetime objects.
Such datetime objects can't be easily dumped into JSON.
"""
yaml = YAML(typ="rt")
yaml.preserve_quotes = True # type: ignore
yaml.Constructor = _RoundTripNoTimeStampConstructor
return yaml
| 0.544075 | 0.217244 |
import copy
import logging
import os
import pathlib
import tempfile
import uuid as _uuid__ # pylint: disable=unused-import # noqa: F401
import xml.sax # nosec
from abc import ABC, abstractmethod
from io import StringIO
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from urllib.parse import quote, urldefrag, urlparse, urlsplit, urlunsplit
from urllib.request import pathname2url
from rdflib import Graph
from rdflib.plugins.parsers.notation3 import BadSyntax
from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import SchemaSaladException, ValidationException
from schema_salad.fetcher import DefaultFetcher, Fetcher, MemoryCachingFetcher
from schema_salad.sourceline import SourceLine, add_lc_filename
from schema_salad.utils import CacheType, yaml_no_ts # requires schema-salad v8.2+
_vocab: Dict[str, str] = {}
_rvocab: Dict[str, str] = {}
_logger = logging.getLogger("salad")
IdxType = MutableMapping[str, Tuple[Any, "LoadingOptions"]]
class LoadingOptions:
idx: IdxType
fileuri: Optional[str]
baseuri: str
namespaces: MutableMapping[str, str]
schemas: MutableSequence[str]
original_doc: Optional[Any]
addl_metadata: MutableMapping[str, Any]
fetcher: Fetcher
vocab: Dict[str, str]
rvocab: Dict[str, str]
cache: CacheType
imports: List[str]
includes: List[str]
def __init__(
self,
fetcher: Optional[Fetcher] = None,
namespaces: Optional[Dict[str, str]] = None,
schemas: Optional[List[str]] = None,
fileuri: Optional[str] = None,
copyfrom: Optional["LoadingOptions"] = None,
original_doc: Optional[Any] = None,
addl_metadata: Optional[Dict[str, str]] = None,
baseuri: Optional[str] = None,
idx: Optional[IdxType] = None,
imports: Optional[List[str]] = None,
includes: Optional[List[str]] = None,
) -> None:
"""Create a LoadingOptions object."""
self.original_doc = original_doc
if idx is not None:
self.idx = idx
else:
self.idx = copyfrom.idx if copyfrom is not None else {}
if fileuri is not None:
self.fileuri = fileuri
else:
self.fileuri = copyfrom.fileuri if copyfrom is not None else None
if baseuri is not None:
self.baseuri = baseuri
else:
self.baseuri = copyfrom.baseuri if copyfrom is not None else ""
if namespaces is not None:
self.namespaces = namespaces
else:
self.namespaces = copyfrom.namespaces if copyfrom is not None else {}
if schemas is not None:
self.schemas = schemas
else:
self.schemas = copyfrom.schemas if copyfrom is not None else []
if addl_metadata is not None:
self.addl_metadata = addl_metadata
else:
self.addl_metadata = copyfrom.addl_metadata if copyfrom is not None else {}
if imports is not None:
self.imports = imports
else:
self.imports = copyfrom.imports if copyfrom is not None else []
if includes is not None:
self.includes = includes
else:
self.includes = copyfrom.includes if copyfrom is not None else []
if fetcher is not None:
self.fetcher = fetcher
elif copyfrom is not None:
self.fetcher = copyfrom.fetcher
else:
import requests
from cachecontrol.caches import FileCache
from cachecontrol.wrapper import CacheControl
root = pathlib.Path(os.environ.get("HOME", tempfile.gettempdir()))
session = CacheControl(
requests.Session(),
cache=FileCache(root / ".cache" / "salad"),
)
self.fetcher: Fetcher = DefaultFetcher({}, session)
self.cache = self.fetcher.cache if isinstance(self.fetcher, MemoryCachingFetcher) else {}
self.vocab = _vocab
self.rvocab = _rvocab
if namespaces is not None:
self.vocab = self.vocab.copy()
self.rvocab = self.rvocab.copy()
for k, v in namespaces.items():
self.vocab[k] = v
self.rvocab[v] = k
@property
def graph(self) -> Graph:
"""Generate a merged rdflib.Graph from all entries in self.schemas."""
graph = Graph()
if not self.schemas:
return graph
key = str(hash(tuple(self.schemas)))
if key in self.cache:
return cast(Graph, self.cache[key])
for schema in self.schemas:
fetchurl = (
self.fetcher.urljoin(self.fileuri, schema)
if self.fileuri is not None
else pathlib.Path(schema).resolve().as_uri()
)
if fetchurl not in self.cache or self.cache[fetchurl] is True:
_logger.debug("Getting external schema %s", fetchurl)
try:
content = self.fetcher.fetch_text(fetchurl)
except Exception as e:
_logger.warning("Could not load extension schema %s: %s", fetchurl, str(e))
continue
newGraph = Graph()
err_msg = "unknown error"
for fmt in ["xml", "turtle"]:
try:
newGraph.parse(data=content, format=fmt, publicID=str(fetchurl))
self.cache[fetchurl] = newGraph
graph += newGraph
break
except (xml.sax.SAXParseException, TypeError, BadSyntax) as e:
err_msg = str(e)
else:
_logger.warning("Could not load extension schema %s: %s", fetchurl, err_msg)
self.cache[key] = graph
return graph
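# --- Illustrative usage sketch (editor's addition, not part of schema-salad; it assumes
# the library's runtime dependencies, requests and cachecontrol, are installed because
# the default fetcher is built lazily in __init__). ---
def _example_loading_options_usage() -> None:  # hypothetical helper, for illustration only
    opts = LoadingOptions(namespaces={"edam": "http://edamontology.org/"})
    # Namespaces are folded into the vocab/rvocab lookup tables.
    assert opts.vocab["edam"] == "http://edamontology.org/"
    assert opts.rvocab["http://edamontology.org/"] == "edam"
    # Deriving options via copyfrom= reuses the fetcher and the shared document index.
    derived = LoadingOptions(copyfrom=opts, fileuri="file:///tmp/doc.yml")
    assert derived.fetcher is opts.fetcher and derived.idx is opts.idx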
class Saveable(ABC):
"""Mark classes than have a save() and fromDoc() function."""
@classmethod
@abstractmethod
def fromDoc(
cls,
_doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "Saveable":
"""Construct this object from the result of yaml.load()."""
@abstractmethod
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
"""Convert this object to a JSON/YAML friendly dictionary."""
def load_field(val, fieldtype, baseuri, loadingOptions):
# type: (Union[str, Dict[str, str]], _Loader, str, LoadingOptions) -> Any
if isinstance(val, MutableMapping):
if "$import" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"])
result, metadata = _document_load_by_url(
fieldtype,
url,
loadingOptions,
)
loadingOptions.imports.append(url)
return result
if "$include" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"])
val = loadingOptions.fetcher.fetch_text(url)
loadingOptions.includes.append(url)
return fieldtype.load(val, baseuri, loadingOptions)
save_type = Optional[Union[MutableMapping[str, Any], MutableSequence[Any], int, float, bool, str]]
def save(
val: Any,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
if isinstance(val, Saveable):
return val.save(top=top, base_url=base_url, relative_uris=relative_uris)
if isinstance(val, MutableSequence):
return [save(v, top=False, base_url=base_url, relative_uris=relative_uris) for v in val]
if isinstance(val, MutableMapping):
newdict = {}
for key in val:
newdict[key] = save(val[key], top=False, base_url=base_url, relative_uris=relative_uris)
return newdict
if val is None or isinstance(val, (int, float, bool, str)):
return val
raise Exception("Not Saveable: %s" % type(val))
def save_with_metadata(
val: Any,
valLoadingOpts: LoadingOptions,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
"""Save and set $namespaces, $schemas, $base and any other metadata fields at the top level."""
saved_val = save(val, top, base_url, relative_uris)
newdict: MutableMapping[str, Any] = {}
if isinstance(saved_val, MutableSequence):
newdict = {"$graph": saved_val}
elif isinstance(saved_val, MutableMapping):
newdict = saved_val
if valLoadingOpts.namespaces:
newdict["$namespaces"] = valLoadingOpts.namespaces
if valLoadingOpts.schemas:
newdict["$schemas"] = valLoadingOpts.schemas
if valLoadingOpts.baseuri:
newdict["$base"] = valLoadingOpts.baseuri
for k, v in valLoadingOpts.addl_metadata.items():
if k not in newdict:
newdict[k] = v
return newdict
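# --- Illustrative usage sketch (editor's addition, not part of schema-salad; needs the
# default fetcher's dependencies installed, as above). ---
def _example_save_with_metadata_usage() -> None:  # hypothetical helper, for illustration only
    opts = LoadingOptions(namespaces={"edam": "http://edamontology.org/"})
    saved = save_with_metadata({"class": "File", "format": "edam:format_2330"}, opts)
    # The namespaces that were in effect while loading are re-attached at the top level.
    assert saved["$namespaces"] == {"edam": "http://edamontology.org/"}
    assert saved["class"] == "File"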
def expand_url(
url, # type: str
base_url, # type: str
loadingOptions, # type: LoadingOptions
scoped_id=False, # type: bool
vocab_term=False, # type: bool
scoped_ref=None, # type: Optional[int]
):
# type: (...) -> str
if url in ("@id", "@type"):
return url
if vocab_term and url in loadingOptions.vocab:
return url
if bool(loadingOptions.vocab) and ":" in url:
prefix = url.split(":")[0]
if prefix in loadingOptions.vocab:
url = loadingOptions.vocab[prefix] + url[len(prefix) + 1 :]
split = urlsplit(url)
if (
(bool(split.scheme) and split.scheme in loadingOptions.fetcher.supported_schemes())
or url.startswith("$(")
or url.startswith("${")
):
pass
elif scoped_id and not bool(split.fragment):
splitbase = urlsplit(base_url)
frg = ""
if bool(splitbase.fragment):
frg = splitbase.fragment + "/" + split.path
else:
frg = split.path
pt = splitbase.path if splitbase.path != "" else "/"
url = urlunsplit((splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
elif scoped_ref is not None and not bool(split.fragment):
splitbase = urlsplit(base_url)
sp = splitbase.fragment.split("/")
n = scoped_ref
while n > 0 and len(sp) > 0:
sp.pop()
n -= 1
sp.append(url)
url = urlunsplit(
(
splitbase.scheme,
splitbase.netloc,
splitbase.path,
splitbase.query,
"/".join(sp),
)
)
else:
url = loadingOptions.fetcher.urljoin(base_url, url)
if vocab_term:
split = urlsplit(url)
if bool(split.scheme):
if url in loadingOptions.rvocab:
return loadingOptions.rvocab[url]
else:
raise ValidationException(f"Term {url!r} not in vocabulary")
return url
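# --- Illustrative usage sketch (editor's addition, not part of schema-salad; the example
# URLs are made up and the default fetcher's dependencies must be installed). ---
def _example_expand_url_usage() -> None:  # hypothetical helper, for illustration only
    opts = LoadingOptions(namespaces={"edam": "http://edamontology.org/"})
    # A namespaced short form is expanded via the vocab table.
    assert expand_url("edam:format_2330", "", opts) == "http://edamontology.org/format_2330"
    # A scoped identifier is appended to the base document's fragment.
    assert (
        expand_url("out1", "https://example.com/wf.cwl#step1", opts, scoped_id=True)
        == "https://example.com/wf.cwl#step1/out1"
    )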
class _Loader:
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
pass
class _AnyLoader(_Loader):
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc is not None:
return doc
raise ValidationException("Expected non-null")
class _PrimitiveLoader(_Loader):
def __init__(self, tp):
# type: (Union[type, Tuple[Type[str], Type[str]]]) -> None
self.tp = tp
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, self.tp):
raise ValidationException(
"Expected a {} but got {}".format(
self.tp.__class__.__name__, doc.__class__.__name__
)
)
return doc
def __repr__(self): # type: () -> str
return str(self.tp)
class _ArrayLoader(_Loader):
def __init__(self, items):
# type: (_Loader) -> None
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableSequence):
raise ValidationException(f"Expected a list, was {type(doc)}")
r = [] # type: List[Any]
errors = [] # type: List[SchemaSaladException]
for i in range(0, len(doc)):
try:
lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)
if isinstance(lf, MutableSequence):
r.extend(lf)
else:
r.append(lf)
except ValidationException as e:
errors.append(e.with_sourceline(SourceLine(doc, i, str)))
if errors:
raise ValidationException("", None, errors)
return r
def __repr__(self): # type: () -> str
return f"array<{self.items}>"
class _EnumLoader(_Loader):
def __init__(self, symbols: Sequence[str], name: str) -> None:
self.symbols = symbols
self.name = name
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc in self.symbols:
return doc
raise ValidationException(f"Expected one of {self.symbols}")
def __repr__(self): # type: () -> str
return self.name
class _SecondaryDSLLoader(_Loader):
def __init__(self, inner):
# type: (_Loader) -> None
self.inner = inner
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
r: List[Dict[str, Any]] = []
if isinstance(doc, MutableSequence):
for d in doc:
if isinstance(d, str):
if d.endswith("?"):
r.append({"pattern": d[:-1], "required": False})
else:
r.append({"pattern": d})
elif isinstance(d, dict):
new_dict: Dict[str, Any] = {}
dict_copy = copy.deepcopy(d)
if "pattern" in dict_copy:
new_dict["pattern"] = dict_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {d}"
)
new_dict["required"] = (
dict_copy.pop("required") if "required" in dict_copy else None
)
if len(dict_copy):
raise ValidationException(
"Unallowed values in secondaryFiles specification entry: {}".format(
dict_copy
)
)
r.append(new_dict)
else:
raise ValidationException(
"Expected a string or sequence of (strings or mappings)."
)
elif isinstance(doc, MutableMapping):
new_dict = {}
doc_copy = copy.deepcopy(doc)
if "pattern" in doc_copy:
new_dict["pattern"] = doc_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {doc}"
)
new_dict["required"] = doc_copy.pop("required") if "required" in doc_copy else None
if len(doc_copy):
raise ValidationException(
f"Unallowed values in secondaryFiles specification entry: {doc_copy}"
)
r.append(new_dict)
elif isinstance(doc, str):
if doc.endswith("?"):
r.append({"pattern": doc[:-1], "required": False})
else:
r.append({"pattern": doc})
else:
raise ValidationException("Expected str or sequence of str")
return self.inner.load(r, baseuri, loadingOptions, docRoot)
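# --- Illustrative usage sketch (editor's addition, not part of schema-salad; _AnyLoader is
# used as the inner loader purely so the normalised list is returned unchanged). ---
def _example_secondary_dsl_usage() -> None:  # hypothetical helper, for illustration only
    loader = _SecondaryDSLLoader(_AnyLoader())
    opts = LoadingOptions()
    # "pattern?" marks an optional secondary file.
    assert loader.load(".bai?", "", opts) == [{"pattern": ".bai", "required": False}]
    # Strings and mappings can be mixed in one list.
    assert loader.load([".bai", {"pattern": ".idx", "required": True}], "", opts) == [
        {"pattern": ".bai"},
        {"pattern": ".idx", "required": True},
    ]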
class _RecordLoader(_Loader):
def __init__(self, classtype):
# type: (Type[Saveable]) -> None
self.classtype = classtype
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableMapping):
raise ValidationException(f"Expected a dict, was {type(doc)}")
return self.classtype.fromDoc(doc, baseuri, loadingOptions, docRoot=docRoot)
def __repr__(self): # type: () -> str
return str(self.classtype.__name__)
class _ExpressionLoader(_Loader):
def __init__(self, items: Type[str]) -> None:
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, str):
raise ValidationException(f"Expected a str, was {type(doc)}")
return doc
class _UnionLoader(_Loader):
def __init__(self, alternates: Sequence[_Loader]) -> None:
self.alternates = alternates
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
errors = []
for t in self.alternates:
try:
return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)
except ValidationException as e:
errors.append(ValidationException(f"tried {t} but", None, [e]))
raise ValidationException("", None, errors, "-")
def __repr__(self): # type: () -> str
return " | ".join(str(a) for a in self.alternates)
class _URILoader(_Loader):
def __init__(self, inner, scoped_id, vocab_term, scoped_ref):
# type: (_Loader, bool, bool, Union[int, None]) -> None
self.inner = inner
self.scoped_id = scoped_id
self.vocab_term = vocab_term
self.scoped_ref = scoped_ref
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
newdoc = []
for i in doc:
if isinstance(i, str):
newdoc.append(
expand_url(
i,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
)
else:
newdoc.append(i)
doc = newdoc
elif isinstance(doc, str):
doc = expand_url(
doc,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
return self.inner.load(doc, baseuri, loadingOptions)
class _TypeDSLLoader(_Loader):
def __init__(self, inner, refScope, salad_version):
# type: (_Loader, Union[int, None], str) -> None
self.inner = inner
self.refScope = refScope
self.salad_version = salad_version
def resolve(
self,
doc, # type: str
baseuri, # type: str
loadingOptions, # type: LoadingOptions
):
# type: (...) -> Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
doc_ = doc
optional = False
if doc_.endswith("?"):
optional = True
doc_ = doc_[0:-1]
if doc_.endswith("[]"):
salad_versions = [int(v) for v in self.salad_version[1:].split(".")]
items = "" # type: Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
rest = doc_[0:-2]
if salad_versions < [1, 3]:
if rest.endswith("[]"):
# To show the error message with the original type
return doc
else:
items = expand_url(rest, baseuri, loadingOptions, False, True, self.refScope)
else:
items = self.resolve(rest, baseuri, loadingOptions)
if isinstance(items, str):
items = expand_url(items, baseuri, loadingOptions, False, True, self.refScope)
expanded = {"type": "array", "items": items} # type: Union[Dict[str, Any], str]
else:
expanded = expand_url(doc_, baseuri, loadingOptions, False, True, self.refScope)
if optional:
return ["null", expanded]
else:
return expanded
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
r = [] # type: List[Any]
for d in doc:
if isinstance(d, str):
resolved = self.resolve(d, baseuri, loadingOptions)
if isinstance(resolved, MutableSequence):
for i in resolved:
if i not in r:
r.append(i)
else:
if resolved not in r:
r.append(resolved)
else:
r.append(d)
doc = r
elif isinstance(doc, str):
doc = self.resolve(doc, baseuri, loadingOptions)
return self.inner.load(doc, baseuri, loadingOptions)
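# --- Illustrative usage sketch (editor's addition, not part of schema-salad). In real
# generated parsers the vocab is pre-populated; here "string" is registered via the
# namespaces argument only so that this sketch is self-contained. ---
def _example_type_dsl_usage() -> None:  # hypothetical helper, for illustration only
    opts = LoadingOptions(namespaces={"string": "http://www.w3.org/2001/XMLSchema#string"})
    loader = _TypeDSLLoader(_AnyLoader(), refScope=2, salad_version="v1.2")
    # "string[]?" is shorthand for an optional array of strings.
    assert loader.resolve("string[]?", "", opts) == [
        "null",
        {"type": "array", "items": "string"},
    ]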
class _IdMapLoader(_Loader):
def __init__(self, inner, mapSubject, mapPredicate):
# type: (_Loader, str, Union[str, None]) -> None
self.inner = inner
self.mapSubject = mapSubject
self.mapPredicate = mapPredicate
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableMapping):
r = [] # type: List[Any]
for k in sorted(doc.keys()):
val = doc[k]
if isinstance(val, CommentedMap):
v = copy.copy(val)
v.lc.data = val.lc.data
v.lc.filename = val.lc.filename
v[self.mapSubject] = k
r.append(v)
elif isinstance(val, MutableMapping):
v2 = copy.copy(val)
v2[self.mapSubject] = k
r.append(v2)
else:
if self.mapPredicate:
v3 = {self.mapPredicate: val}
v3[self.mapSubject] = k
r.append(v3)
else:
raise ValidationException("No mapPredicate")
doc = r
return self.inner.load(doc, baseuri, loadingOptions)
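# --- Illustrative usage sketch (editor's addition, not part of schema-salad). ---
# An identifier map turns {key: value} mappings back into the list form, copying the
# key into mapSubject and wrapping bare values with mapPredicate.
def _example_idmap_usage() -> None:  # hypothetical helper, for illustration only
    loader = _IdMapLoader(_AnyLoader(), mapSubject="id", mapPredicate="type")
    opts = LoadingOptions()
    assert loader.load({"in1": {"type": "string"}, "in2": "int"}, "", opts) == [
        {"type": "string", "id": "in1"},
        {"type": "int", "id": "in2"},
    ]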
def _document_load(
loader: _Loader,
doc: Union[str, MutableMapping[str, Any], MutableSequence[Any]],
baseuri: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if isinstance(doc, str):
return _document_load_by_url(
loader,
loadingOptions.fetcher.urljoin(baseuri, doc),
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
if isinstance(doc, MutableMapping):
addl_metadata = {}
if addl_metadata_fields is not None:
for mf in addl_metadata_fields:
if mf in doc:
addl_metadata[mf] = doc[mf]
docuri = baseuri
if "$base" in doc:
baseuri = doc["$base"]
loadingOptions = LoadingOptions(
copyfrom=loadingOptions,
namespaces=doc.get("$namespaces", None),
schemas=doc.get("$schemas", None),
baseuri=doc.get("$base", None),
addl_metadata=addl_metadata,
)
doc = {k: v for k, v in doc.items() if k not in ("$namespaces", "$schemas", "$base")}
if "$graph" in doc:
loadingOptions.idx[baseuri] = (
loader.load(doc["$graph"], baseuri, loadingOptions),
loadingOptions,
)
else:
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions, docRoot=baseuri),
loadingOptions,
)
if docuri != baseuri:
loadingOptions.idx[docuri] = loadingOptions.idx[baseuri]
return loadingOptions.idx[baseuri]
if isinstance(doc, MutableSequence):
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions),
loadingOptions,
)
return loadingOptions.idx[baseuri]
raise ValidationException(
"Expected URI string, MutableMapping or MutableSequence, got %s" % type(doc)
)
def _document_load_by_url(
loader: _Loader,
url: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if url in loadingOptions.idx:
return loadingOptions.idx[url]
doc_url, frg = urldefrag(url)
text = loadingOptions.fetcher.fetch_text(doc_url)
textIO = StringIO(text)
textIO.name = str(doc_url)
yaml = yaml_no_ts()
result = yaml.load(textIO)
add_lc_filename(result, doc_url)
loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=doc_url)
_document_load(
loader,
result,
doc_url,
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
return loadingOptions.idx[url]
def file_uri(path, split_frag=False): # type: (str, bool) -> str
if path.startswith("file://"):
return path
if split_frag:
pathsp = path.split("#", 2)
frag = "#" + quote(str(pathsp[1])) if len(pathsp) == 2 else ""
urlpath = pathname2url(str(pathsp[0]))
else:
urlpath = pathname2url(path)
frag = ""
if urlpath.startswith("//"):
return f"file:{urlpath}{frag}"
return f"file://{urlpath}{frag}"
def prefix_url(url: str, namespaces: Dict[str, str]) -> str:
"""Expand short forms into full URLs using the given namespace dictionary."""
for k, v in namespaces.items():
if url.startswith(v):
return k + ":" + url[len(v) :]
return url
def save_relative_uri(
uri: Any,
base_url: str,
scoped_id: bool,
ref_scope: Optional[int],
relative_uris: bool,
) -> Any:
"""Convert any URI to a relative one, obeying the scoping rules."""
if isinstance(uri, MutableSequence):
return [save_relative_uri(u, base_url, scoped_id, ref_scope, relative_uris) for u in uri]
elif isinstance(uri, str):
if not relative_uris or uri == base_url:
return uri
urisplit = urlsplit(uri)
basesplit = urlsplit(base_url)
if urisplit.scheme == basesplit.scheme and urisplit.netloc == basesplit.netloc:
if urisplit.path != basesplit.path:
p = os.path.relpath(urisplit.path, os.path.dirname(basesplit.path))
if urisplit.fragment:
p = p + "#" + urisplit.fragment
return p
basefrag = basesplit.fragment + "/"
if ref_scope:
sp = basefrag.split("/")
i = 0
while i < ref_scope:
sp.pop()
i += 1
basefrag = "/".join(sp)
if urisplit.fragment.startswith(basefrag):
return urisplit.fragment[len(basefrag) :]
return urisplit.fragment
return uri
else:
return save(uri, top=False, base_url=base_url, relative_uris=relative_uris)
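# --- Illustrative usage sketch (editor's addition, not part of schema-salad; URLs are
# made up for the example). ---
def _example_save_relative_uri_usage() -> None:  # hypothetical helper, for illustration only
    uri = "https://example.com/wf.cwl#step1/out1"
    base = "https://example.com/wf.cwl#step1"
    assert save_relative_uri(uri, base, False, None, True) == "out1"
    # With relative_uris=False the URI is passed through untouched.
    assert save_relative_uri(uri, base, False, None, False) == uri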
def shortname(inputid: str) -> str:
"""
Compute the shortname of a fully qualified identifier.
See https://w3id.org/cwl/v1.2/SchemaSalad.html#Short_names.
"""
parsed_id = urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split("/")[-1]
return parsed_id.path.split("/")[-1]
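# --- Illustrative usage sketch (editor's addition, not part of schema-salad). ---
def _example_shortname_usage() -> None:  # hypothetical helper, for illustration only
    assert shortname("https://example.com/wf.cwl#step1/out1") == "out1"
    assert shortname("https://example.com/types/MyRecord") == "MyRecord"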
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/python_codegen_support.py
|
python_codegen_support.py
|
import copy
import logging
import os
import pathlib
import tempfile
import uuid as _uuid__ # pylint: disable=unused-import # noqa: F401
import xml.sax # nosec
from abc import ABC, abstractmethod
from io import StringIO
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from urllib.parse import quote, urldefrag, urlparse, urlsplit, urlunsplit
from urllib.request import pathname2url
from rdflib import Graph
from rdflib.plugins.parsers.notation3 import BadSyntax
from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import SchemaSaladException, ValidationException
from schema_salad.fetcher import DefaultFetcher, Fetcher, MemoryCachingFetcher
from schema_salad.sourceline import SourceLine, add_lc_filename
from schema_salad.utils import CacheType, yaml_no_ts # requires schema-salad v8.2+
_vocab: Dict[str, str] = {}
_rvocab: Dict[str, str] = {}
_logger = logging.getLogger("salad")
IdxType = MutableMapping[str, Tuple[Any, "LoadingOptions"]]
class LoadingOptions:
idx: IdxType
fileuri: Optional[str]
baseuri: str
namespaces: MutableMapping[str, str]
schemas: MutableSequence[str]
original_doc: Optional[Any]
addl_metadata: MutableMapping[str, Any]
fetcher: Fetcher
vocab: Dict[str, str]
rvocab: Dict[str, str]
cache: CacheType
imports: List[str]
includes: List[str]
def __init__(
self,
fetcher: Optional[Fetcher] = None,
namespaces: Optional[Dict[str, str]] = None,
schemas: Optional[List[str]] = None,
fileuri: Optional[str] = None,
copyfrom: Optional["LoadingOptions"] = None,
original_doc: Optional[Any] = None,
addl_metadata: Optional[Dict[str, str]] = None,
baseuri: Optional[str] = None,
idx: Optional[IdxType] = None,
imports: Optional[List[str]] = None,
includes: Optional[List[str]] = None,
) -> None:
"""Create a LoadingOptions object."""
self.original_doc = original_doc
if idx is not None:
self.idx = idx
else:
self.idx = copyfrom.idx if copyfrom is not None else {}
if fileuri is not None:
self.fileuri = fileuri
else:
self.fileuri = copyfrom.fileuri if copyfrom is not None else None
if baseuri is not None:
self.baseuri = baseuri
else:
self.baseuri = copyfrom.baseuri if copyfrom is not None else ""
if namespaces is not None:
self.namespaces = namespaces
else:
self.namespaces = copyfrom.namespaces if copyfrom is not None else {}
if schemas is not None:
self.schemas = schemas
else:
self.schemas = copyfrom.schemas if copyfrom is not None else []
if addl_metadata is not None:
self.addl_metadata = addl_metadata
else:
self.addl_metadata = copyfrom.addl_metadata if copyfrom is not None else {}
if imports is not None:
self.imports = imports
else:
self.imports = copyfrom.imports if copyfrom is not None else []
if includes is not None:
self.includes = includes
else:
self.includes = copyfrom.includes if copyfrom is not None else []
if fetcher is not None:
self.fetcher = fetcher
elif copyfrom is not None:
self.fetcher = copyfrom.fetcher
else:
import requests
from cachecontrol.caches import FileCache
from cachecontrol.wrapper import CacheControl
root = pathlib.Path(os.environ.get("HOME", tempfile.gettempdir()))
session = CacheControl(
requests.Session(),
cache=FileCache(root / ".cache" / "salad"),
)
self.fetcher: Fetcher = DefaultFetcher({}, session)
self.cache = self.fetcher.cache if isinstance(self.fetcher, MemoryCachingFetcher) else {}
self.vocab = _vocab
self.rvocab = _rvocab
if namespaces is not None:
self.vocab = self.vocab.copy()
self.rvocab = self.rvocab.copy()
for k, v in namespaces.items():
self.vocab[k] = v
self.rvocab[v] = k
@property
def graph(self) -> Graph:
"""Generate a merged rdflib.Graph from all entries in self.schemas."""
graph = Graph()
if not self.schemas:
return graph
key = str(hash(tuple(self.schemas)))
if key in self.cache:
return cast(Graph, self.cache[key])
for schema in self.schemas:
fetchurl = (
self.fetcher.urljoin(self.fileuri, schema)
if self.fileuri is not None
else pathlib.Path(schema).resolve().as_uri()
)
if fetchurl not in self.cache or self.cache[fetchurl] is True:
_logger.debug("Getting external schema %s", fetchurl)
try:
content = self.fetcher.fetch_text(fetchurl)
except Exception as e:
_logger.warning("Could not load extension schema %s: %s", fetchurl, str(e))
continue
newGraph = Graph()
err_msg = "unknown error"
for fmt in ["xml", "turtle"]:
try:
newGraph.parse(data=content, format=fmt, publicID=str(fetchurl))
self.cache[fetchurl] = newGraph
graph += newGraph
break
except (xml.sax.SAXParseException, TypeError, BadSyntax) as e:
err_msg = str(e)
else:
_logger.warning("Could not load extension schema %s: %s", fetchurl, err_msg)
self.cache[key] = graph
return graph
class Saveable(ABC):
"""Mark classes than have a save() and fromDoc() function."""
@classmethod
@abstractmethod
def fromDoc(
cls,
_doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "Saveable":
"""Construct this object from the result of yaml.load()."""
@abstractmethod
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
"""Convert this object to a JSON/YAML friendly dictionary."""
def load_field(val, fieldtype, baseuri, loadingOptions):
# type: (Union[str, Dict[str, str]], _Loader, str, LoadingOptions) -> Any
if isinstance(val, MutableMapping):
if "$import" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"])
result, metadata = _document_load_by_url(
fieldtype,
url,
loadingOptions,
)
loadingOptions.imports.append(url)
return result
if "$include" in val:
if loadingOptions.fileuri is None:
raise SchemaSaladException("Cannot load $import without fileuri")
url = loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"])
val = loadingOptions.fetcher.fetch_text(url)
loadingOptions.includes.append(url)
return fieldtype.load(val, baseuri, loadingOptions)
save_type = Optional[Union[MutableMapping[str, Any], MutableSequence[Any], int, float, bool, str]]
def save(
val: Any,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
if isinstance(val, Saveable):
return val.save(top=top, base_url=base_url, relative_uris=relative_uris)
if isinstance(val, MutableSequence):
return [save(v, top=False, base_url=base_url, relative_uris=relative_uris) for v in val]
if isinstance(val, MutableMapping):
newdict = {}
for key in val:
newdict[key] = save(val[key], top=False, base_url=base_url, relative_uris=relative_uris)
return newdict
if val is None or isinstance(val, (int, float, bool, str)):
return val
raise Exception("Not Saveable: %s" % type(val))
def save_with_metadata(
val: Any,
valLoadingOpts: LoadingOptions,
top: bool = True,
base_url: str = "",
relative_uris: bool = True,
) -> save_type:
"""Save and set $namespaces, $schemas, $base and any other metadata fields at the top level."""
saved_val = save(val, top, base_url, relative_uris)
newdict: MutableMapping[str, Any] = {}
if isinstance(saved_val, MutableSequence):
newdict = {"$graph": saved_val}
elif isinstance(saved_val, MutableMapping):
newdict = saved_val
if valLoadingOpts.namespaces:
newdict["$namespaces"] = valLoadingOpts.namespaces
if valLoadingOpts.schemas:
newdict["$schemas"] = valLoadingOpts.schemas
if valLoadingOpts.baseuri:
newdict["$base"] = valLoadingOpts.baseuri
for k, v in valLoadingOpts.addl_metadata.items():
if k not in newdict:
newdict[k] = v
return newdict
def expand_url(
url, # type: str
base_url, # type: str
loadingOptions, # type: LoadingOptions
scoped_id=False, # type: bool
vocab_term=False, # type: bool
scoped_ref=None, # type: Optional[int]
):
# type: (...) -> str
if url in ("@id", "@type"):
return url
if vocab_term and url in loadingOptions.vocab:
return url
if bool(loadingOptions.vocab) and ":" in url:
prefix = url.split(":")[0]
if prefix in loadingOptions.vocab:
url = loadingOptions.vocab[prefix] + url[len(prefix) + 1 :]
split = urlsplit(url)
if (
(bool(split.scheme) and split.scheme in loadingOptions.fetcher.supported_schemes())
or url.startswith("$(")
or url.startswith("${")
):
pass
elif scoped_id and not bool(split.fragment):
splitbase = urlsplit(base_url)
frg = ""
if bool(splitbase.fragment):
frg = splitbase.fragment + "/" + split.path
else:
frg = split.path
pt = splitbase.path if splitbase.path != "" else "/"
url = urlunsplit((splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
elif scoped_ref is not None and not bool(split.fragment):
splitbase = urlsplit(base_url)
sp = splitbase.fragment.split("/")
n = scoped_ref
while n > 0 and len(sp) > 0:
sp.pop()
n -= 1
sp.append(url)
url = urlunsplit(
(
splitbase.scheme,
splitbase.netloc,
splitbase.path,
splitbase.query,
"/".join(sp),
)
)
else:
url = loadingOptions.fetcher.urljoin(base_url, url)
if vocab_term:
split = urlsplit(url)
if bool(split.scheme):
if url in loadingOptions.rvocab:
return loadingOptions.rvocab[url]
else:
raise ValidationException(f"Term {url!r} not in vocabulary")
return url
class _Loader:
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
pass
class _AnyLoader(_Loader):
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc is not None:
return doc
raise ValidationException("Expected non-null")
class _PrimitiveLoader(_Loader):
def __init__(self, tp):
# type: (Union[type, Tuple[Type[str], Type[str]]]) -> None
self.tp = tp
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, self.tp):
raise ValidationException(
"Expected a {} but got {}".format(
self.tp.__class__.__name__, doc.__class__.__name__
)
)
return doc
def __repr__(self): # type: () -> str
return str(self.tp)
class _ArrayLoader(_Loader):
def __init__(self, items):
# type: (_Loader) -> None
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableSequence):
raise ValidationException(f"Expected a list, was {type(doc)}")
r = [] # type: List[Any]
errors = [] # type: List[SchemaSaladException]
for i in range(0, len(doc)):
try:
lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)
if isinstance(lf, MutableSequence):
r.extend(lf)
else:
r.append(lf)
except ValidationException as e:
errors.append(e.with_sourceline(SourceLine(doc, i, str)))
if errors:
raise ValidationException("", None, errors)
return r
def __repr__(self): # type: () -> str
return f"array<{self.items}>"
class _EnumLoader(_Loader):
def __init__(self, symbols: Sequence[str], name: str) -> None:
self.symbols = symbols
self.name = name
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if doc in self.symbols:
return doc
raise ValidationException(f"Expected one of {self.symbols}")
def __repr__(self): # type: () -> str
return self.name
class _SecondaryDSLLoader(_Loader):
def __init__(self, inner):
# type: (_Loader) -> None
self.inner = inner
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
r: List[Dict[str, Any]] = []
if isinstance(doc, MutableSequence):
for d in doc:
if isinstance(d, str):
if d.endswith("?"):
r.append({"pattern": d[:-1], "required": False})
else:
r.append({"pattern": d})
elif isinstance(d, dict):
new_dict: Dict[str, Any] = {}
dict_copy = copy.deepcopy(d)
if "pattern" in dict_copy:
new_dict["pattern"] = dict_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {d}"
)
new_dict["required"] = (
dict_copy.pop("required") if "required" in dict_copy else None
)
if len(dict_copy):
raise ValidationException(
"Unallowed values in secondaryFiles specification entry: {}".format(
dict_copy
)
)
r.append(new_dict)
else:
raise ValidationException(
"Expected a string or sequence of (strings or mappings)."
)
elif isinstance(doc, MutableMapping):
new_dict = {}
doc_copy = copy.deepcopy(doc)
if "pattern" in doc_copy:
new_dict["pattern"] = doc_copy.pop("pattern")
else:
raise ValidationException(
f"Missing pattern in secondaryFiles specification entry: {doc}"
)
new_dict["required"] = doc_copy.pop("required") if "required" in doc_copy else None
if len(doc_copy):
raise ValidationException(
f"Unallowed values in secondaryFiles specification entry: {doc_copy}"
)
r.append(new_dict)
elif isinstance(doc, str):
if doc.endswith("?"):
r.append({"pattern": doc[:-1], "required": False})
else:
r.append({"pattern": doc})
else:
raise ValidationException("Expected str or sequence of str")
return self.inner.load(r, baseuri, loadingOptions, docRoot)
class _RecordLoader(_Loader):
def __init__(self, classtype):
# type: (Type[Saveable]) -> None
self.classtype = classtype
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, MutableMapping):
raise ValidationException(f"Expected a dict, was {type(doc)}")
return self.classtype.fromDoc(doc, baseuri, loadingOptions, docRoot=docRoot)
def __repr__(self): # type: () -> str
return str(self.classtype.__name__)
class _ExpressionLoader(_Loader):
def __init__(self, items: Type[str]) -> None:
self.items = items
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if not isinstance(doc, str):
raise ValidationException(f"Expected a str, was {type(doc)}")
return doc
class _UnionLoader(_Loader):
def __init__(self, alternates: Sequence[_Loader]) -> None:
self.alternates = alternates
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
errors = []
for t in self.alternates:
try:
return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)
except ValidationException as e:
errors.append(ValidationException(f"tried {t} but", None, [e]))
raise ValidationException("", None, errors, "-")
def __repr__(self): # type: () -> str
return " | ".join(str(a) for a in self.alternates)
class _URILoader(_Loader):
def __init__(self, inner, scoped_id, vocab_term, scoped_ref):
# type: (_Loader, bool, bool, Union[int, None]) -> None
self.inner = inner
self.scoped_id = scoped_id
self.vocab_term = vocab_term
self.scoped_ref = scoped_ref
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
newdoc = []
for i in doc:
if isinstance(i, str):
newdoc.append(
expand_url(
i,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
)
else:
newdoc.append(i)
doc = newdoc
elif isinstance(doc, str):
doc = expand_url(
doc,
baseuri,
loadingOptions,
self.scoped_id,
self.vocab_term,
self.scoped_ref,
)
return self.inner.load(doc, baseuri, loadingOptions)
class _TypeDSLLoader(_Loader):
def __init__(self, inner, refScope, salad_version):
# type: (_Loader, Union[int, None], str) -> None
self.inner = inner
self.refScope = refScope
self.salad_version = salad_version
def resolve(
self,
doc, # type: str
baseuri, # type: str
loadingOptions, # type: LoadingOptions
):
# type: (...) -> Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
doc_ = doc
optional = False
if doc_.endswith("?"):
optional = True
doc_ = doc_[0:-1]
if doc_.endswith("[]"):
salad_versions = [int(v) for v in self.salad_version[1:].split(".")]
items = "" # type: Union[List[Union[Dict[str, Any], str]], Dict[str, Any], str]
rest = doc_[0:-2]
if salad_versions < [1, 3]:
if rest.endswith("[]"):
# To show the error message with the original type
return doc
else:
items = expand_url(rest, baseuri, loadingOptions, False, True, self.refScope)
else:
items = self.resolve(rest, baseuri, loadingOptions)
if isinstance(items, str):
items = expand_url(items, baseuri, loadingOptions, False, True, self.refScope)
expanded = {"type": "array", "items": items} # type: Union[Dict[str, Any], str]
else:
expanded = expand_url(doc_, baseuri, loadingOptions, False, True, self.refScope)
if optional:
return ["null", expanded]
else:
return expanded
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableSequence):
r = [] # type: List[Any]
for d in doc:
if isinstance(d, str):
resolved = self.resolve(d, baseuri, loadingOptions)
if isinstance(resolved, MutableSequence):
for i in resolved:
if i not in r:
r.append(i)
else:
if resolved not in r:
r.append(resolved)
else:
r.append(d)
doc = r
elif isinstance(doc, str):
doc = self.resolve(doc, baseuri, loadingOptions)
return self.inner.load(doc, baseuri, loadingOptions)
class _IdMapLoader(_Loader):
def __init__(self, inner, mapSubject, mapPredicate):
# type: (_Loader, str, Union[str, None]) -> None
self.inner = inner
self.mapSubject = mapSubject
self.mapPredicate = mapPredicate
def load(self, doc, baseuri, loadingOptions, docRoot=None):
# type: (Any, str, LoadingOptions, Optional[str]) -> Any
if isinstance(doc, MutableMapping):
r = [] # type: List[Any]
for k in sorted(doc.keys()):
val = doc[k]
if isinstance(val, CommentedMap):
v = copy.copy(val)
v.lc.data = val.lc.data
v.lc.filename = val.lc.filename
v[self.mapSubject] = k
r.append(v)
elif isinstance(val, MutableMapping):
v2 = copy.copy(val)
v2[self.mapSubject] = k
r.append(v2)
else:
if self.mapPredicate:
v3 = {self.mapPredicate: val}
v3[self.mapSubject] = k
r.append(v3)
else:
raise ValidationException("No mapPredicate")
doc = r
return self.inner.load(doc, baseuri, loadingOptions)
def _document_load(
loader: _Loader,
doc: Union[str, MutableMapping[str, Any], MutableSequence[Any]],
baseuri: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if isinstance(doc, str):
return _document_load_by_url(
loader,
loadingOptions.fetcher.urljoin(baseuri, doc),
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
if isinstance(doc, MutableMapping):
addl_metadata = {}
if addl_metadata_fields is not None:
for mf in addl_metadata_fields:
if mf in doc:
addl_metadata[mf] = doc[mf]
docuri = baseuri
if "$base" in doc:
baseuri = doc["$base"]
loadingOptions = LoadingOptions(
copyfrom=loadingOptions,
namespaces=doc.get("$namespaces", None),
schemas=doc.get("$schemas", None),
baseuri=doc.get("$base", None),
addl_metadata=addl_metadata,
)
doc = {k: v for k, v in doc.items() if k not in ("$namespaces", "$schemas", "$base")}
if "$graph" in doc:
loadingOptions.idx[baseuri] = (
loader.load(doc["$graph"], baseuri, loadingOptions),
loadingOptions,
)
else:
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions, docRoot=baseuri),
loadingOptions,
)
if docuri != baseuri:
loadingOptions.idx[docuri] = loadingOptions.idx[baseuri]
return loadingOptions.idx[baseuri]
if isinstance(doc, MutableSequence):
loadingOptions.idx[baseuri] = (
loader.load(doc, baseuri, loadingOptions),
loadingOptions,
)
return loadingOptions.idx[baseuri]
raise ValidationException(
"Expected URI string, MutableMapping or MutableSequence, got %s" % type(doc)
)
def _document_load_by_url(
loader: _Loader,
url: str,
loadingOptions: LoadingOptions,
addl_metadata_fields: Optional[MutableSequence[str]] = None,
) -> Tuple[Any, LoadingOptions]:
if url in loadingOptions.idx:
return loadingOptions.idx[url]
doc_url, frg = urldefrag(url)
text = loadingOptions.fetcher.fetch_text(doc_url)
textIO = StringIO(text)
textIO.name = str(doc_url)
yaml = yaml_no_ts()
result = yaml.load(textIO)
add_lc_filename(result, doc_url)
loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=doc_url)
_document_load(
loader,
result,
doc_url,
loadingOptions,
addl_metadata_fields=addl_metadata_fields,
)
return loadingOptions.idx[url]
def file_uri(path, split_frag=False): # type: (str, bool) -> str
if path.startswith("file://"):
return path
if split_frag:
pathsp = path.split("#", 2)
frag = "#" + quote(str(pathsp[1])) if len(pathsp) == 2 else ""
urlpath = pathname2url(str(pathsp[0]))
else:
urlpath = pathname2url(path)
frag = ""
if urlpath.startswith("//"):
return f"file:{urlpath}{frag}"
return f"file://{urlpath}{frag}"
def prefix_url(url: str, namespaces: Dict[str, str]) -> str:
"""Expand short forms into full URLs using the given namespace dictionary."""
for k, v in namespaces.items():
if url.startswith(v):
return k + ":" + url[len(v) :]
return url
def save_relative_uri(
uri: Any,
base_url: str,
scoped_id: bool,
ref_scope: Optional[int],
relative_uris: bool,
) -> Any:
"""Convert any URI to a relative one, obeying the scoping rules."""
if isinstance(uri, MutableSequence):
return [save_relative_uri(u, base_url, scoped_id, ref_scope, relative_uris) for u in uri]
elif isinstance(uri, str):
if not relative_uris or uri == base_url:
return uri
urisplit = urlsplit(uri)
basesplit = urlsplit(base_url)
if urisplit.scheme == basesplit.scheme and urisplit.netloc == basesplit.netloc:
if urisplit.path != basesplit.path:
p = os.path.relpath(urisplit.path, os.path.dirname(basesplit.path))
if urisplit.fragment:
p = p + "#" + urisplit.fragment
return p
basefrag = basesplit.fragment + "/"
if ref_scope:
sp = basefrag.split("/")
i = 0
while i < ref_scope:
sp.pop()
i += 1
basefrag = "/".join(sp)
if urisplit.fragment.startswith(basefrag):
return urisplit.fragment[len(basefrag) :]
return urisplit.fragment
return uri
else:
return save(uri, top=False, base_url=base_url, relative_uris=relative_uris)
def shortname(inputid: str) -> str:
"""
Compute the shortname of a fully qualified identifier.
See https://w3id.org/cwl/v1.2/SchemaSalad.html#Short_names.
"""
parsed_id = urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split("/")[-1]
return parsed_id.path.split("/")[-1]
| 0.600774 | 0.081082 |
import re
from typing import IO, Any, Dict, List, Optional, Tuple, Union, cast
from . import _logger
from .codegen_base import CodeGenBase, TypeDef
from .exceptions import SchemaException
from .schema import shortname
from .utils import aslist
def replaceKeywords(s: str) -> str:
"""Rename keywords that are reserved in C++."""
if s in (
"class",
"enum",
"int",
"long",
"float",
"double",
"default",
"stdin",
"stdout",
"stderr",
):
s = s + "_"
return s
def safename(name: str) -> str:
"""Create a C++ safe name."""
classname = re.sub("[^a-zA-Z0-9]", "_", name)
return replaceKeywords(classname)
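# --- Illustrative usage sketch (editor's addition, not part of schema-salad). ---
def _example_safename_usage() -> None:  # hypothetical helper, for illustration only
    assert safename("class") == "class_"  # reserved C++ keyword gets a trailing underscore
    assert safename("https://w3id.org/cwl/cwl") == "https___w3id_org_cwl_cwl"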
# TODO: ideally this helper should not need to exist
def safename2(name: Dict[str, str]) -> str:
"""Create a namespaced safename."""
return safename(name["namespace"]) + "::" + safename(name["classname"])
# Splits names like https://xyz.xyz/blub#cwl/class
# into the part before the '#' and the class path after it
def split_name(s: str) -> Tuple[str, str]:
t = s.split("#")
if len(t) != 2:
raise ValueError("Expected field to be formatted as 'https://xyz.xyz/blub#cwl/class'.")
return (t[0], t[1])
# similar to split_name but for field names
def split_field(s: str) -> Tuple[str, str, str]:
(namespace, field) = split_name(s)
t = field.split("/")
if len(t) != 2:
raise ValueError("Expected field to be formatted as 'https://xyz.xyz/blub#cwl/class'.")
return (namespace, t[0], t[1])
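# --- Illustrative usage sketch (editor's addition, not part of schema-salad). ---
def _example_split_usage() -> None:  # hypothetical helper, for illustration only
    assert split_name("https://w3id.org/cwl/cwl#CommandLineTool") == (
        "https://w3id.org/cwl/cwl",
        "CommandLineTool",
    )
    assert split_field("https://w3id.org/cwl/cwl#CommandLineTool/inputs") == (
        "https://w3id.org/cwl/cwl",
        "CommandLineTool",
        "inputs",
    )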
# Prototype of a class
class ClassDefinition:
def __init__(self, name: str):
self.fullName = name
self.extends: List[Dict[str, str]] = []
self.fields: List[FieldDefinition] = []
self.abstract = False
(self.namespace, self.classname) = split_name(name)
self.namespace = safename(self.namespace)
self.classname = safename(self.classname)
def writeFwdDeclaration(self, target: IO[str], fullInd: str, ind: str) -> None:
target.write(f"{fullInd}namespace {self.namespace} {{ struct {self.classname}; }}\n")
def writeDefinition(self, target: IO[Any], fullInd: str, ind: str) -> None:
target.write(f"{fullInd}namespace {self.namespace} {{\n")
target.write(f"{fullInd}struct {self.classname}")
extends = list(map(safename2, self.extends))
override = ""
virtual = "virtual "
if len(self.extends) > 0:
target.write(f"\n{fullInd}{ind}: ")
target.write(f"\n{fullInd}{ind}, ".join(extends))
override = " override"
virtual = ""
target.write(" {\n")
for field in self.fields:
field.writeDefinition(target, fullInd + ind, ind, self.namespace)
if self.abstract:
target.write(f"{fullInd}{ind}virtual ~{self.classname}() = 0;\n")
target.write(f"{fullInd}{ind}{virtual}auto toYaml() const -> YAML::Node{override};\n")
target.write(f"{fullInd}}};\n")
target.write(f"{fullInd}}}\n\n")
def writeImplDefinition(self, target: IO[str], fullInd: str, ind: str) -> None:
extends = list(map(safename2, self.extends))
if self.abstract:
target.write(
f"{fullInd}inline {self.namespace}::{self.classname}::~{self.classname}() = default;\n"
)
target.write(
f"""{fullInd}inline auto {self.namespace}::{self.classname}::toYaml() const -> YAML::Node {{
{fullInd}{ind}using ::toYaml;
{fullInd}{ind}auto n = YAML::Node{{}};
"""
)
for e in extends:
target.write(f"{fullInd}{ind}n = mergeYaml(n, {e}::toYaml());\n")
for field in self.fields:
fieldname = safename(field.name)
target.write(
f'{fullInd}{ind}addYamlField(n, "{field.name}", toYaml(*{fieldname}));\n' # noqa: B907
)
# target.write(f"{fullInd}{ind}addYamlIfNotEmpty(n, \"{field.name}\", toYaml(*{fieldname}));\n")
target.write(f"{fullInd}{ind}return n;\n{fullInd}}}\n")
# Prototype of a single field of a class
class FieldDefinition:
def __init__(self, name: str, typeStr: str, optional: bool):
self.name = name
self.typeStr = typeStr
self.optional = optional
def writeDefinition(self, target: IO[Any], fullInd: str, ind: str, namespace: str) -> None:
"""Write a C++ definition for the class field."""
name = safename(self.name)
typeStr = self.typeStr.replace(namespace + "::", "")
target.write(f"{fullInd}heap_object<{typeStr}> {name};\n")
# Prototype of an enum definition
class EnumDefinition:
def __init__(self, name: str, values: List[str]):
self.name = name
self.values = values
def writeDefinition(self, target: IO[str], ind: str) -> None:
namespace = ""
if len(self.name.split("#")) == 2:
(namespace, classname) = split_name(self.name)
namespace = safename(namespace)
classname = safename(classname)
name = namespace + "::" + classname
else:
name = safename(self.name)
classname = name
if len(namespace) > 0:
target.write(f"namespace {namespace} {{\n")
target.write(f"enum class {classname} : unsigned int {{\n{ind}")
target.write(f",\n{ind}".join(map(safename, self.values)))
target.write("\n};\n")
target.write(f"inline auto to_string({classname} v) {{\n")
target.write(f"{ind}static auto m = std::vector<std::string_view> {{\n")
target.write(f'{ind} "')
target.write(f'",\n{ind} "'.join(self.values))
target.write(f'"\n{ind}}};\n')
target.write(f"{ind}using U = std::underlying_type_t<{name}>;\n")
target.write(f"{ind}return m.at(static_cast<U>(v));\n}}\n")
if len(namespace) > 0:
target.write("}\n")
target.write(f"inline void to_enum(std::string_view v, {name}& out) {{\n")
target.write(f"{ind}static auto m = std::map<std::string, {name}, std::less<>> {{\n")
for v in self.values:
target.write(f'{ind}{ind}{{"{v}", {name}::{safename(v)}}},\n') # noqa: B907
target.write(f"{ind}}};\n{ind}out = m.find(v)->second;\n}}\n")
target.write(f"inline auto toYaml({name} v) {{\n")
target.write(f"{ind}return YAML::Node{{std::string{{to_string(v)}}}};\n}}\n")
target.write(f"inline auto yamlToEnum(YAML::Node n, {name}& out) {{\n")
target.write(f"{ind}to_enum(n.as<std::string>(), out);\n}}\n")
# !TODO: way too many functions; most of these shouldn't exist
def isPrimitiveType(v: Any) -> bool:
if not isinstance(v, str):
return False
return v in ["null", "boolean", "int", "long", "float", "double", "string"]
def hasFieldValue(e: Any, f: str, v: Any) -> bool:
if not isinstance(e, dict):
return False
if f not in e:
return False
return bool(e[f] == v)
def isRecordSchema(v: Any) -> bool:
return hasFieldValue(v, "type", "record")
def isEnumSchema(v: Any) -> bool:
if not hasFieldValue(v, "type", "enum"):
return False
if "symbols" not in v:
return False
if not isinstance(v["symbols"], list):
return False
return True
def isArray(v: Any) -> bool:
if not isinstance(v, list):
return False
for i in v:
if not pred(i):
return False
return True
def pred(i: Any) -> bool:
return (
isPrimitiveType(i)
or isRecordSchema(i)
or isEnumSchema(i)
or isArraySchema(i)
or isinstance(i, str)
)
def isArraySchema(v: Any) -> bool:
if not hasFieldValue(v, "type", "array"):
return False
if "items" not in v:
return False
if not isinstance(v["items"], list):
return False
for i in v["items"]:
if not (pred(i) or isArray(i)):
return False
return True
class CppCodeGen(CodeGenBase):
"""Generation of C++ code for a given Schema Salad definition."""
def __init__(
self,
base: str,
target: IO[str],
examples: Optional[str],
package: str,
copyright: Optional[str],
) -> None:
super().__init__()
self.base_uri = base
self.target = target
self.examples = examples
self.package = package
self.copyright = copyright
self.classDefinitions: Dict[str, ClassDefinition] = {}
self.enumDefinitions: Dict[str, EnumDefinition] = {}
def convertTypeToCpp(self, type_declaration: Union[List[Any], Dict[str, Any], str]) -> str:
"""Convert a Schema Salad type to a C++ type."""
if not isinstance(type_declaration, list):
return self.convertTypeToCpp([type_declaration])
if len(type_declaration) == 1:
if type_declaration[0] in ("null", "https://w3id.org/cwl/salad#null"):
return "std::monostate"
elif type_declaration[0] in (
"string",
"http://www.w3.org/2001/XMLSchema#string",
):
return "std::string"
elif type_declaration[0] in ("int", "http://www.w3.org/2001/XMLSchema#int"):
return "int32_t"
elif type_declaration[0] in (
"long",
"http://www.w3.org/2001/XMLSchema#long",
):
return "int64_t"
elif type_declaration[0] in (
"float",
"http://www.w3.org/2001/XMLSchema#float",
):
return "float"
elif type_declaration[0] in (
"double",
"http://www.w3.org/2001/XMLSchema#double",
):
return "double"
elif type_declaration[0] in (
"boolean",
"http://www.w3.org/2001/XMLSchema#boolean",
):
return "bool"
elif type_declaration[0] == "https://w3id.org/cwl/salad#Any":
return "std::any"
elif type_declaration[0] in (
"PrimitiveType",
"https://w3id.org/cwl/salad#PrimitiveType",
):
return "std::variant<bool, int32_t, int64_t, float, double, std::string>"
elif isinstance(type_declaration[0], dict):
if "type" in type_declaration[0] and type_declaration[0]["type"] in (
"enum",
"https://w3id.org/cwl/salad#enum",
):
name = type_declaration[0]["name"]
if name not in self.enumDefinitions:
self.enumDefinitions[name] = EnumDefinition(
type_declaration[0]["name"],
list(map(shortname, type_declaration[0]["symbols"])),
)
if len(name.split("#")) != 2:
return safename(name)
(namespace, classname) = name.split("#")
return safename(namespace) + "::" + safename(classname)
elif "type" in type_declaration[0] and type_declaration[0]["type"] in (
"array",
"https://w3id.org/cwl/salad#array",
):
items = type_declaration[0]["items"]
if isinstance(items, list):
ts = []
for i in items:
ts.append(self.convertTypeToCpp(i))
name = ", ".join(ts)
return f"std::vector<std::variant<{name}>>"
else:
i = self.convertTypeToCpp(items)
return f"std::vector<{i}>"
elif "type" in type_declaration[0] and type_declaration[0]["type"] in (
"record",
"https://w3id.org/cwl/salad#record",
):
n = type_declaration[0]["name"]
(namespace, classname) = split_name(n)
return safename(namespace) + "::" + safename(classname)
n = type_declaration[0]["type"]
(namespace, classname) = split_name(n)
return safename(namespace) + "::" + safename(classname)
if len(type_declaration[0].split("#")) != 2:
_logger.debug(f"// something weird2 about {type_declaration[0]}")
return cast(str, type_declaration[0])
(namespace, classname) = split_name(type_declaration[0])
return safename(namespace) + "::" + safename(classname)
type_declaration = list(map(self.convertTypeToCpp, type_declaration))
type_declaration = ", ".join(type_declaration)
return f"std::variant<{type_declaration}>"
# start of our generated file
def epilogue(self, root_loader: Optional[TypeDef]) -> None:
self.target.write(
"""#pragma once
// Generated by schema-salad code generator
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include <yaml-cpp/yaml.h>
#include <any>
inline auto mergeYaml(YAML::Node n1, YAML::Node n2) {
for (auto const& e : n1) {
n2[e.first.as<std::string>()] = e.second;
}
return n2;
}
// declaring toYaml
inline auto toYaml(bool v) {
return YAML::Node{v};
}
inline auto toYaml(float v) {
return YAML::Node{v};
}
inline auto toYaml(double v) {
return YAML::Node{v};
}
inline auto toYaml(int32_t v) {
return YAML::Node{v};
}
inline auto toYaml(int64_t v) {
return YAML::Node{v};
}
inline auto toYaml(std::any const&) {
return YAML::Node{};
}
inline auto toYaml(std::monostate const&) {
return YAML::Node(YAML::NodeType::Undefined);
}
inline auto toYaml(std::string const& v) {
return YAML::Node{v};
}
inline void addYamlField(YAML::Node node, std::string const& key, YAML::Node value) {
if (value.IsDefined()) {
node[key] = value;
}
}
// fwd declaring toYaml
template <typename T>
auto toYaml(std::vector<T> const& v) -> YAML::Node;
template <typename T>
auto toYaml(T const& t) -> YAML::Node;
template <typename ...Args>
auto toYaml(std::variant<Args...> const& t) -> YAML::Node;
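// heap_object: stores its value on the heap via std::unique_ptr but copies and
// assigns with value semantics; presumably this allows generated record fields
// to hold recursive or not-yet-complete types.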
template <typename T>
class heap_object {
std::unique_ptr<T> data = std::make_unique<T>();
public:
heap_object() = default;
heap_object(heap_object const& oth) {
*data = *oth;
}
heap_object(heap_object&& oth) {
*data = *oth;
}
template <typename T2>
heap_object(T2 const& oth) {
*data = oth;
}
template <typename T2>
heap_object(T2&& oth) {
*data = oth;
}
auto operator=(heap_object const& oth) -> heap_object& {
*data = *oth;
return *this;
}
auto operator=(heap_object&& oth) -> heap_object& {
*data = std::move(*oth);
return *this;
}
template <typename T2>
auto operator=(T2 const& oth) -> heap_object& {
*data = oth;
return *this;
}
template <typename T2>
auto operator=(T2&& oth) -> heap_object& {
*data = std::move(oth);
return *this;
}
auto operator->() -> T* {
return data.get();
}
auto operator->() const -> T const* {
return data.get();
}
auto operator*() -> T& {
return *data;
}
auto operator*() const -> T const& {
return *data;
}
};
"""
)
        # main body: print forward declarations, enum and class definitions, then implementations
for key in self.classDefinitions:
self.classDefinitions[key].writeFwdDeclaration(self.target, "", " ")
for key in self.enumDefinitions:
self.enumDefinitions[key].writeDefinition(self.target, " ")
for key in self.classDefinitions:
self.classDefinitions[key].writeDefinition(self.target, "", " ")
for key in self.classDefinitions:
self.classDefinitions[key].writeImplDefinition(self.target, "", " ")
self.target.write(
"""
template <typename T>
auto toYaml(std::vector<T> const& v) -> YAML::Node {
auto n = YAML::Node(YAML::NodeType::Sequence);
for (auto const& e : v) {
n.push_back(toYaml(e));
}
return n;
}
template <typename T>
auto toYaml(T const& t) -> YAML::Node {
if constexpr (std::is_enum_v<T>) {
return toYaml(t);
} else {
return t.toYaml();
}
}
template <typename ...Args>
auto toYaml(std::variant<Args...> const& t) -> YAML::Node {
return std::visit([](auto const& e) {
return toYaml(e);
}, t);
}
"""
)
def parseRecordField(self, field: Dict[str, Any]) -> FieldDefinition:
(namespace, classname, fieldname) = split_field(field["name"])
if isinstance(field["type"], dict):
if field["type"]["type"] == "enum":
fieldtype = "Enum"
else:
fieldtype = self.convertTypeToCpp(field["type"])
else:
fieldtype = field["type"]
fieldtype = self.convertTypeToCpp(fieldtype)
return FieldDefinition(name=fieldname, typeStr=fieldtype, optional=False)
def parseRecordSchema(self, stype: Dict[str, Any]) -> None:
cd = ClassDefinition(name=stype["name"])
cd.abstract = stype.get("abstract", False)
if "extends" in stype:
for ex in aslist(stype["extends"]):
(base_namespace, base_classname) = split_name(ex)
ext = {"namespace": base_namespace, "classname": base_classname}
cd.extends.append(ext)
if "fields" in stype:
for field in stype["fields"]:
cd.fields.append(self.parseRecordField(field))
self.classDefinitions[stype["name"]] = cd
def parseEnum(self, stype: Dict[str, Any]) -> str:
name = cast(str, stype["name"])
if name not in self.enumDefinitions:
self.enumDefinitions[name] = EnumDefinition(
name, list(map(shortname, stype["symbols"]))
)
return name
def parse(self, items: List[Dict[str, Any]]) -> None:
for stype in items:
if "type" in stype and stype["type"] == "documentation":
continue
if not (pred(stype) or isArray(stype)):
raise SchemaException("not a valid SaladRecordField")
# parsing a record
if isRecordSchema(stype):
self.parseRecordSchema(stype)
elif isEnumSchema(stype):
self.parseEnum(stype)
else:
_logger.error(f"not parsed{stype}")
self.epilogue(None)
self.target.close()
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/cpp_codegen.py
|
cpp_codegen.py
|
import re
from typing import IO, Any, Dict, List, Optional, Tuple, Union, cast
from . import _logger
from .codegen_base import CodeGenBase, TypeDef
from .exceptions import SchemaException
from .schema import shortname
from .utils import aslist
def replaceKeywords(s: str) -> str:
"""Rename keywords that are reserved in C++."""
if s in (
"class",
"enum",
"int",
"long",
"float",
"double",
"default",
"stdin",
"stdout",
"stderr",
):
s = s + "_"
return s
def safename(name: str) -> str:
"""Create a C++ safe name."""
classname = re.sub("[^a-zA-Z0-9]", "_", name)
return replaceKeywords(classname)
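# Illustrative examples (not part of the original source):
#   safename("https://w3id.org/cwl/salad#Any") -> "https___w3id_org_cwl_salad_Any"
#   safename("class")                          -> "class_"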
# TODO: ideally this helper should not exist
def safename2(name: Dict[str, str]) -> str:
"""Create a namespaced safename."""
return safename(name["namespace"]) + "::" + safename(name["classname"])
# Splits names like https://xyz.xyz/blub#cwl/class
# into the base URI and the fragment (the class path).
def split_name(s: str) -> Tuple[str, str]:
t = s.split("#")
if len(t) != 2:
raise ValueError("Expected field to be formatted as 'https://xyz.xyz/blub#cwl/class'.")
return (t[0], t[1])
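# Illustrative example (not part of the original source):
#   split_name("https://w3id.org/cwl/salad#RecordSchema")
#   -> ("https://w3id.org/cwl/salad", "RecordSchema")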
# similar to split_name but for field names
def split_field(s: str) -> Tuple[str, str, str]:
(namespace, field) = split_name(s)
t = field.split("/")
if len(t) != 2:
raise ValueError("Expected field to be formatted as 'https://xyz.xyz/blub#cwl/class'.")
return (namespace, t[0], t[1])
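# Illustrative example (not part of the original source):
#   split_field("https://w3id.org/cwl/salad#RecordField/name")
#   -> ("https://w3id.org/cwl/salad", "RecordField", "name")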
# Prototype of a class
class ClassDefinition:
def __init__(self, name: str):
self.fullName = name
self.extends: List[Dict[str, str]] = []
self.fields: List[FieldDefinition] = []
self.abstract = False
(self.namespace, self.classname) = split_name(name)
self.namespace = safename(self.namespace)
self.classname = safename(self.classname)
def writeFwdDeclaration(self, target: IO[str], fullInd: str, ind: str) -> None:
target.write(f"{fullInd}namespace {self.namespace} {{ struct {self.classname}; }}\n")
def writeDefinition(self, target: IO[Any], fullInd: str, ind: str) -> None:
target.write(f"{fullInd}namespace {self.namespace} {{\n")
target.write(f"{fullInd}struct {self.classname}")
extends = list(map(safename2, self.extends))
override = ""
virtual = "virtual "
if len(self.extends) > 0:
target.write(f"\n{fullInd}{ind}: ")
target.write(f"\n{fullInd}{ind}, ".join(extends))
override = " override"
virtual = ""
target.write(" {\n")
for field in self.fields:
field.writeDefinition(target, fullInd + ind, ind, self.namespace)
if self.abstract:
target.write(f"{fullInd}{ind}virtual ~{self.classname}() = 0;\n")
target.write(f"{fullInd}{ind}{virtual}auto toYaml() const -> YAML::Node{override};\n")
target.write(f"{fullInd}}};\n")
target.write(f"{fullInd}}}\n\n")
def writeImplDefinition(self, target: IO[str], fullInd: str, ind: str) -> None:
extends = list(map(safename2, self.extends))
if self.abstract:
target.write(
f"{fullInd}inline {self.namespace}::{self.classname}::~{self.classname}() = default;\n"
)
target.write(
f"""{fullInd}inline auto {self.namespace}::{self.classname}::toYaml() const -> YAML::Node {{
{fullInd}{ind}using ::toYaml;
{fullInd}{ind}auto n = YAML::Node{{}};
"""
)
for e in extends:
target.write(f"{fullInd}{ind}n = mergeYaml(n, {e}::toYaml());\n")
for field in self.fields:
fieldname = safename(field.name)
target.write(
f'{fullInd}{ind}addYamlField(n, "{field.name}", toYaml(*{fieldname}));\n' # noqa: B907
)
# target.write(f"{fullInd}{ind}addYamlIfNotEmpty(n, \"{field.name}\", toYaml(*{fieldname}));\n")
target.write(f"{fullInd}{ind}return n;\n{fullInd}}}\n")
# Prototype of a single field of a class
class FieldDefinition:
def __init__(self, name: str, typeStr: str, optional: bool):
self.name = name
self.typeStr = typeStr
self.optional = optional
def writeDefinition(self, target: IO[Any], fullInd: str, ind: str, namespace: str) -> None:
"""Write a C++ definition for the class field."""
name = safename(self.name)
typeStr = self.typeStr.replace(namespace + "::", "")
target.write(f"{fullInd}heap_object<{typeStr}> {name};\n")
# Prototype of an enum definition
class EnumDefinition:
def __init__(self, name: str, values: List[str]):
self.name = name
self.values = values
def writeDefinition(self, target: IO[str], ind: str) -> None:
namespace = ""
if len(self.name.split("#")) == 2:
(namespace, classname) = split_name(self.name)
namespace = safename(namespace)
classname = safename(classname)
name = namespace + "::" + classname
else:
name = safename(self.name)
classname = name
if len(namespace) > 0:
target.write(f"namespace {namespace} {{\n")
target.write(f"enum class {classname} : unsigned int {{\n{ind}")
target.write(f",\n{ind}".join(map(safename, self.values)))
target.write("\n};\n")
target.write(f"inline auto to_string({classname} v) {{\n")
target.write(f"{ind}static auto m = std::vector<std::string_view> {{\n")
target.write(f'{ind} "')
target.write(f'",\n{ind} "'.join(self.values))
target.write(f'"\n{ind}}};\n')
target.write(f"{ind}using U = std::underlying_type_t<{name}>;\n")
target.write(f"{ind}return m.at(static_cast<U>(v));\n}}\n")
if len(namespace) > 0:
target.write("}\n")
target.write(f"inline void to_enum(std::string_view v, {name}& out) {{\n")
target.write(f"{ind}static auto m = std::map<std::string, {name}, std::less<>> {{\n")
for v in self.values:
target.write(f'{ind}{ind}{{"{v}", {name}::{safename(v)}}},\n') # noqa: B907
target.write(f"{ind}}};\n{ind}out = m.find(v)->second;\n}}\n")
target.write(f"inline auto toYaml({name} v) {{\n")
target.write(f"{ind}return YAML::Node{{std::string{{to_string(v)}}}};\n}}\n")
target.write(f"inline auto yamlToEnum(YAML::Node n, {name}& out) {{\n")
target.write(f"{ind}to_enum(n.as<std::string>(), out);\n}}\n")
# TODO: far too many helper functions; most of these should not exist
def isPrimitiveType(v: Any) -> bool:
if not isinstance(v, str):
return False
return v in ["null", "boolean", "int", "long", "float", "double", "string"]
def hasFieldValue(e: Any, f: str, v: Any) -> bool:
if not isinstance(e, dict):
return False
if f not in e:
return False
return bool(e[f] == v)
def isRecordSchema(v: Any) -> bool:
return hasFieldValue(v, "type", "record")
def isEnumSchema(v: Any) -> bool:
if not hasFieldValue(v, "type", "enum"):
return False
if "symbols" not in v:
return False
if not isinstance(v["symbols"], list):
return False
return True
def isArray(v: Any) -> bool:
if not isinstance(v, list):
return False
for i in v:
if not pred(i):
return False
return True
def pred(i: Any) -> bool:
return (
isPrimitiveType(i)
or isRecordSchema(i)
or isEnumSchema(i)
or isArraySchema(i)
or isinstance(i, str)
)
def isArraySchema(v: Any) -> bool:
if not hasFieldValue(v, "type", "array"):
return False
if "items" not in v:
return False
if not isinstance(v["items"], list):
return False
for i in v["items"]:
if not (pred(i) or isArray(i)):
return False
return True
class CppCodeGen(CodeGenBase):
"""Generation of C++ code for a given Schema Salad definition."""
def __init__(
self,
base: str,
target: IO[str],
examples: Optional[str],
package: str,
copyright: Optional[str],
) -> None:
super().__init__()
self.base_uri = base
self.target = target
self.examples = examples
self.package = package
self.copyright = copyright
self.classDefinitions: Dict[str, ClassDefinition] = {}
self.enumDefinitions: Dict[str, EnumDefinition] = {}
def convertTypeToCpp(self, type_declaration: Union[List[Any], Dict[str, Any], str]) -> str:
"""Convert a Schema Salad type to a C++ type."""
if not isinstance(type_declaration, list):
return self.convertTypeToCpp([type_declaration])
if len(type_declaration) == 1:
if type_declaration[0] in ("null", "https://w3id.org/cwl/salad#null"):
return "std::monostate"
elif type_declaration[0] in (
"string",
"http://www.w3.org/2001/XMLSchema#string",
):
return "std::string"
elif type_declaration[0] in ("int", "http://www.w3.org/2001/XMLSchema#int"):
return "int32_t"
elif type_declaration[0] in (
"long",
"http://www.w3.org/2001/XMLSchema#long",
):
return "int64_t"
elif type_declaration[0] in (
"float",
"http://www.w3.org/2001/XMLSchema#float",
):
return "float"
elif type_declaration[0] in (
"double",
"http://www.w3.org/2001/XMLSchema#double",
):
return "double"
elif type_declaration[0] in (
"boolean",
"http://www.w3.org/2001/XMLSchema#boolean",
):
return "bool"
elif type_declaration[0] == "https://w3id.org/cwl/salad#Any":
return "std::any"
elif type_declaration[0] in (
"PrimitiveType",
"https://w3id.org/cwl/salad#PrimitiveType",
):
return "std::variant<bool, int32_t, int64_t, float, double, std::string>"
elif isinstance(type_declaration[0], dict):
if "type" in type_declaration[0] and type_declaration[0]["type"] in (
"enum",
"https://w3id.org/cwl/salad#enum",
):
name = type_declaration[0]["name"]
if name not in self.enumDefinitions:
self.enumDefinitions[name] = EnumDefinition(
type_declaration[0]["name"],
list(map(shortname, type_declaration[0]["symbols"])),
)
if len(name.split("#")) != 2:
return safename(name)
(namespace, classname) = name.split("#")
return safename(namespace) + "::" + safename(classname)
elif "type" in type_declaration[0] and type_declaration[0]["type"] in (
"array",
"https://w3id.org/cwl/salad#array",
):
items = type_declaration[0]["items"]
if isinstance(items, list):
ts = []
for i in items:
ts.append(self.convertTypeToCpp(i))
name = ", ".join(ts)
return f"std::vector<std::variant<{name}>>"
else:
i = self.convertTypeToCpp(items)
return f"std::vector<{i}>"
elif "type" in type_declaration[0] and type_declaration[0]["type"] in (
"record",
"https://w3id.org/cwl/salad#record",
):
n = type_declaration[0]["name"]
(namespace, classname) = split_name(n)
return safename(namespace) + "::" + safename(classname)
n = type_declaration[0]["type"]
(namespace, classname) = split_name(n)
return safename(namespace) + "::" + safename(classname)
if len(type_declaration[0].split("#")) != 2:
_logger.debug(f"// something weird2 about {type_declaration[0]}")
return cast(str, type_declaration[0])
(namespace, classname) = split_name(type_declaration[0])
return safename(namespace) + "::" + safename(classname)
type_declaration = list(map(self.convertTypeToCpp, type_declaration))
type_declaration = ", ".join(type_declaration)
return f"std::variant<{type_declaration}>"
# start of our generated file
def epilogue(self, root_loader: Optional[TypeDef]) -> None:
self.target.write(
"""#pragma once
// Generated by schema-salad code generator
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include <yaml-cpp/yaml.h>
#include <any>
inline auto mergeYaml(YAML::Node n1, YAML::Node n2) {
for (auto const& e : n1) {
n2[e.first.as<std::string>()] = e.second;
}
return n2;
}
// declaring toYaml
inline auto toYaml(bool v) {
return YAML::Node{v};
}
inline auto toYaml(float v) {
return YAML::Node{v};
}
inline auto toYaml(double v) {
return YAML::Node{v};
}
inline auto toYaml(int32_t v) {
return YAML::Node{v};
}
inline auto toYaml(int64_t v) {
return YAML::Node{v};
}
inline auto toYaml(std::any const&) {
return YAML::Node{};
}
inline auto toYaml(std::monostate const&) {
return YAML::Node(YAML::NodeType::Undefined);
}
inline auto toYaml(std::string const& v) {
return YAML::Node{v};
}
inline void addYamlField(YAML::Node node, std::string const& key, YAML::Node value) {
if (value.IsDefined()) {
node[key] = value;
}
}
// fwd declaring toYaml
template <typename T>
auto toYaml(std::vector<T> const& v) -> YAML::Node;
template <typename T>
auto toYaml(T const& t) -> YAML::Node;
template <typename ...Args>
auto toYaml(std::variant<Args...> const& t) -> YAML::Node;
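// heap_object: stores its value on the heap via std::unique_ptr but copies and
// assigns with value semantics; presumably this allows generated record fields
// to hold recursive or not-yet-complete types.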
template <typename T>
class heap_object {
std::unique_ptr<T> data = std::make_unique<T>();
public:
heap_object() = default;
heap_object(heap_object const& oth) {
*data = *oth;
}
heap_object(heap_object&& oth) {
*data = *oth;
}
template <typename T2>
heap_object(T2 const& oth) {
*data = oth;
}
template <typename T2>
heap_object(T2&& oth) {
*data = oth;
}
auto operator=(heap_object const& oth) -> heap_object& {
*data = *oth;
return *this;
}
auto operator=(heap_object&& oth) -> heap_object& {
*data = std::move(*oth);
return *this;
}
template <typename T2>
auto operator=(T2 const& oth) -> heap_object& {
*data = oth;
return *this;
}
template <typename T2>
auto operator=(T2&& oth) -> heap_object& {
*data = std::move(oth);
return *this;
}
auto operator->() -> T* {
return data.get();
}
auto operator->() const -> T const* {
return data.get();
}
auto operator*() -> T& {
return *data;
}
auto operator*() const -> T const& {
return *data;
}
};
"""
)
        # main body: print forward declarations, enum and class definitions, then implementations
for key in self.classDefinitions:
self.classDefinitions[key].writeFwdDeclaration(self.target, "", " ")
for key in self.enumDefinitions:
self.enumDefinitions[key].writeDefinition(self.target, " ")
for key in self.classDefinitions:
self.classDefinitions[key].writeDefinition(self.target, "", " ")
for key in self.classDefinitions:
self.classDefinitions[key].writeImplDefinition(self.target, "", " ")
self.target.write(
"""
template <typename T>
auto toYaml(std::vector<T> const& v) -> YAML::Node {
auto n = YAML::Node(YAML::NodeType::Sequence);
for (auto const& e : v) {
n.push_back(toYaml(e));
}
return n;
}
template <typename T>
auto toYaml(T const& t) -> YAML::Node {
if constexpr (std::is_enum_v<T>) {
return toYaml(t);
} else {
return t.toYaml();
}
}
template <typename ...Args>
auto toYaml(std::variant<Args...> const& t) -> YAML::Node {
return std::visit([](auto const& e) {
return toYaml(e);
}, t);
}
"""
)
def parseRecordField(self, field: Dict[str, Any]) -> FieldDefinition:
(namespace, classname, fieldname) = split_field(field["name"])
if isinstance(field["type"], dict):
if field["type"]["type"] == "enum":
fieldtype = "Enum"
else:
fieldtype = self.convertTypeToCpp(field["type"])
else:
fieldtype = field["type"]
fieldtype = self.convertTypeToCpp(fieldtype)
return FieldDefinition(name=fieldname, typeStr=fieldtype, optional=False)
def parseRecordSchema(self, stype: Dict[str, Any]) -> None:
cd = ClassDefinition(name=stype["name"])
cd.abstract = stype.get("abstract", False)
if "extends" in stype:
for ex in aslist(stype["extends"]):
(base_namespace, base_classname) = split_name(ex)
ext = {"namespace": base_namespace, "classname": base_classname}
cd.extends.append(ext)
if "fields" in stype:
for field in stype["fields"]:
cd.fields.append(self.parseRecordField(field))
self.classDefinitions[stype["name"]] = cd
def parseEnum(self, stype: Dict[str, Any]) -> str:
name = cast(str, stype["name"])
if name not in self.enumDefinitions:
self.enumDefinitions[name] = EnumDefinition(
name, list(map(shortname, stype["symbols"]))
)
return name
def parse(self, items: List[Dict[str, Any]]) -> None:
for stype in items:
if "type" in stype and stype["type"] == "documentation":
continue
if not (pred(stype) or isArray(stype)):
raise SchemaException("not a valid SaladRecordField")
# parsing a record
if isRecordSchema(stype):
self.parseRecordSchema(stype)
elif isEnumSchema(stype):
self.parseEnum(stype)
else:
_logger.error(f"not parsed{stype}")
self.epilogue(None)
self.target.close()
| 0.642432 | 0.216591 |
import copy
import logging
import os
import pathlib
import re
import tempfile
import traceback
import urllib
import xml.sax # nosec
from io import StringIO
from typing import (
Any,
Callable,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Set,
Union,
cast,
)
import requests
from cachecontrol.caches import FileCache
from cachecontrol.wrapper import CacheControl
from rdflib.exceptions import ParserError
from rdflib.graph import Graph
from rdflib.namespace import OWL, RDF, RDFS
from rdflib.plugin import PluginException
from rdflib.plugins.parsers.notation3 import BadSyntax
from rdflib.util import guess_format
from ruamel.yaml.comments import CommentedMap, CommentedSeq, LineCol
from ruamel.yaml.error import MarkedYAMLError
from .exceptions import SchemaSaladException, ValidationException
from .fetcher import DefaultFetcher
from .sourceline import SourceLine, add_lc_filename, relname
from .utils import (
AttachmentsType,
CacheType,
ContextType,
FetcherCallableType,
IdxResultType,
IdxType,
ResolvedRefType,
ResolveType,
aslist,
onWindows,
yaml_no_ts,
)
_logger = logging.getLogger("salad")
def file_uri(path: str, split_frag: bool = False) -> str:
if path.startswith("file://"):
return path
if split_frag:
pathsp = path.split("#", 2)
if len(pathsp) == 2:
frag = "#" + urllib.parse.quote(str(pathsp[1]))
else:
frag = ""
urlpath = urllib.request.pathname2url(str(pathsp[0]))
else:
urlpath = urllib.request.pathname2url(path)
frag = ""
if urlpath.startswith("//"):
return f"file:{urlpath}{frag}"
return f"file://{urlpath}{frag}"
def uri_file_path(url: str) -> str:
split = urllib.parse.urlsplit(url)
if split.scheme == "file":
return urllib.request.url2pathname(str(split.path)) + (
"#" + urllib.parse.unquote(str(split.fragment)) if bool(split.fragment) else ""
)
raise ValidationException(f"Not a file URI: {url}")
def to_validation_exception(e: MarkedYAMLError) -> ValidationException:
"""Convert ruamel.yaml exception to our type."""
fname_regex = re.compile(r"^file://" + re.escape(os.getcwd()) + "/")
exc = ValidationException(e.problem)
mark = e.problem_mark
exc.file = re.sub(fname_regex, "", mark.name)
exc.start = (mark.line + 1, mark.column + 1)
exc.end = None
if e.context:
parent = ValidationException(e.context)
context_mark = e.context_mark
if context_mark:
parent.file = re.sub(fname_regex, "", context_mark.name)
parent.start = (context_mark.line + 1, context_mark.column + 1)
parent.end = None
parent.children = [exc]
return parent
return exc
class NormDict(Dict[str, Union[CommentedMap, CommentedSeq, str, None]]):
"""A Dict where all keys are normalized using the provided function."""
def __init__(self, normalize: Callable[[str], str] = str) -> None:
super().__init__()
self.normalize = normalize
def __eq__(self, other: Any) -> bool:
return super().__eq__(other)
def __getitem__(self, key: Any) -> Any:
return super().__getitem__(self.normalize(key))
def __setitem__(self, key: Any, value: Any) -> Any:
return super().__setitem__(self.normalize(key), value)
def __delitem__(self, key: Any) -> Any:
return super().__delitem__(self.normalize(key))
def __contains__(self, key: Any) -> bool:
return super().__contains__(self.normalize(key))
def __del__(self) -> None:
del self.normalize
def SubLoader(loader: "Loader") -> "Loader":
return Loader(
loader.ctx,
schemagraph=loader.graph,
foreign_properties=loader.foreign_properties,
idx=loader.idx,
cache=loader.cache,
fetcher_constructor=loader.fetcher_constructor,
skip_schemas=loader.skip_schemas,
url_fields=loader.url_fields,
allow_attachments=loader.allow_attachments,
session=loader.session,
salad_version=loader.salad_version,
)
class Loader:
def __init__(
self,
ctx: ContextType,
schemagraph: Optional[Graph] = None,
foreign_properties: Optional[Set[str]] = None,
idx: Optional[IdxType] = None,
cache: Optional[CacheType] = None,
session: Optional[requests.sessions.Session] = None,
fetcher_constructor: Optional[FetcherCallableType] = None,
skip_schemas: Optional[bool] = None,
url_fields: Optional[Set[str]] = None,
allow_attachments: Optional[AttachmentsType] = None,
doc_cache: Union[str, bool] = True,
salad_version: Optional[str] = None,
) -> None:
self.idx: IdxType = (
NormDict(lambda url: urllib.parse.urlsplit(url).geturl()) if idx is None else idx
)
self.ctx: ContextType = {}
self.graph = schemagraph if schemagraph is not None else Graph()
self.foreign_properties = (
set(foreign_properties) if foreign_properties is not None else set()
)
self.cache = cache if cache is not None else {}
self.skip_schemas = skip_schemas if skip_schemas is not None else False
if session is None:
if doc_cache is False:
self.session = requests.Session()
elif doc_cache is True:
root = pathlib.Path(os.environ.get("HOME", tempfile.gettempdir()))
self.session = CacheControl(
requests.Session(),
cache=FileCache(root / ".cache" / "salad"),
)
elif isinstance(doc_cache, str):
self.session = CacheControl(requests.Session(), cache=FileCache(doc_cache))
else:
self.session = session
self.fetcher_constructor = (
fetcher_constructor if fetcher_constructor is not None else DefaultFetcher
)
self.fetcher = self.fetcher_constructor(self.cache, self.session)
self.fetch_text = self.fetcher.fetch_text
self.check_exists = self.fetcher.check_exists
self.url_fields: Set[str] = set() if url_fields is None else set(url_fields)
self.scoped_ref_fields: Dict[str, int] = {}
self.vocab_fields: Set[str] = set()
self.identifiers: List[str] = []
self.identity_links: Set[str] = set()
self.standalone: Optional[Set[str]] = None
self.nolinkcheck: Set[str] = set()
self.vocab: Dict[str, str] = {}
self.rvocab: Dict[str, str] = {}
self.idmap: Dict[str, str] = {}
self.mapPredicate: Dict[str, str] = {}
self.type_dsl_fields: Set[str] = set()
self.subscopes: Dict[str, str] = {}
self.secondaryFile_dsl_fields: Set[str] = set()
self.allow_attachments = allow_attachments
if salad_version:
self.salad_version = salad_version
else:
self.salad_version = "v1.1"
self.add_context(ctx)
def expand_url(
self,
url: str,
base_url: str,
scoped_id: bool = False,
vocab_term: bool = False,
scoped_ref: Optional[int] = None,
) -> str:
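        # Illustrative behaviour (not part of the original source): with a
        # hypothetical namespace entry self.vocab == {"edam": "http://edamontology.org/"},
        # expand_url("edam:format_2330", base_url) returns
        # "http://edamontology.org/format_2330"; a plain relative reference such as
        # "other.cwl" is resolved against base_url via the fetcher's urljoin.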
if url in ("@id", "@type"):
return url
if vocab_term and url in self.vocab:
return url
if url.startswith("_:"):
return url
if bool(self.vocab) and ":" in url:
prefix = url.split(":")[0]
if not prefix:
pass
elif prefix in self.vocab:
url = self.vocab[prefix] + url[len(prefix) + 1 :]
elif (
prefix not in self.fetcher.supported_schemes()
and "/" not in prefix
and "#" not in prefix
):
_logger.warning(
"URI prefix '%s' of '%s' not recognized, are you missing a "
"$namespaces section?",
prefix,
url,
)
split = urllib.parse.urlsplit(url)
if (
(bool(split.scheme) and split.scheme in self.fetcher.supported_schemes())
or url.startswith("$(")
or url.startswith("${")
):
pass
elif scoped_id and not bool(split.fragment):
splitbase = urllib.parse.urlsplit(base_url)
frg = splitbase.fragment + "/" + split.path if bool(splitbase.fragment) else split.path
pt = splitbase.path if splitbase.path != "" else "/"
url = urllib.parse.urlunsplit(
(splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg)
)
elif scoped_ref is not None and not split.fragment:
pass
else:
url = self.fetcher.urljoin(base_url, url)
if vocab_term and url in self.rvocab:
return self.rvocab[url]
return url
def _add_properties(self, s: str) -> None:
for _, _, rng in self.graph.triples((s, RDFS.range, None)):
literal = (
str(rng).startswith("http://www.w3.org/2001/XMLSchema#")
and str(rng) != "http://www.w3.org/2001/XMLSchema#anyURI"
) or str(rng) == "http://www.w3.org/2000/01/rdf-schema#Literal"
if not literal:
self.url_fields.add(str(s))
self.foreign_properties.add(str(s))
def add_namespaces(self, ns: Dict[str, str]) -> None:
self.vocab.update(ns)
def add_schemas(self, ns: Union[List[str], str], base_url: str) -> None:
if self.skip_schemas:
return
for sch in aslist(ns):
fetchurl = self.fetcher.urljoin(base_url, sch)
if fetchurl not in self.cache or self.cache[fetchurl] is True:
_logger.debug("Getting external schema %s", fetchurl)
try:
content = self.fetch_text(fetchurl)
except Exception as e:
tb = traceback.format_exception(type(e), e, e.__traceback__)
_logger.warning("Could not load extension schema %s: %s", fetchurl, str(e))
_logger.debug(tb)
continue
newGraph = Graph()
err_msg = "unknown error"
for fmt in [
guess_format(sch),
"xml",
"turtle",
None,
guess_format(sch),
]:
try:
newGraph.parse(data=content, format=fmt, publicID=str(fetchurl))
self.cache[fetchurl] = newGraph
self.graph += newGraph
break
except (
xml.sax.SAXParseException,
TypeError,
BadSyntax,
ParserError,
PluginException,
) as e:
err_msg = str(e)
else:
_logger.warning("Could not load extension schema %s: %s", fetchurl, err_msg)
for s, _, _ in self.graph.triples((None, RDF.type, RDF.Property)):
self._add_properties(s)
for s, _, o in self.graph.triples((None, RDFS.subPropertyOf, None)):
self._add_properties(s)
self._add_properties(o)
for s, _, _ in self.graph.triples((None, RDFS.range, None)):
self._add_properties(s)
for s, _, _ in self.graph.triples((None, RDF.type, OWL.ObjectProperty)):
self._add_properties(s)
for s, _, _ in self.graph.triples((None, None, None)):
self.idx[str(s)] = None
def add_context(self, newcontext: ContextType) -> None:
if bool(self.vocab):
raise ValidationException("Refreshing context that already has stuff in it")
self.url_fields = {"$schemas"}
self.scoped_ref_fields.clear()
self.vocab_fields.clear()
self.identifiers.clear()
self.identity_links.clear()
self.standalone = set()
self.nolinkcheck.clear()
self.idmap.clear()
self.mapPredicate.clear()
self.vocab.clear()
self.rvocab.clear()
self.type_dsl_fields.clear()
self.secondaryFile_dsl_fields.clear()
self.subscopes.clear()
self.ctx.update(_copy_dict_without_key(newcontext, "@context"))
_logger.debug("ctx is %s", self.ctx)
for key, value in self.ctx.items():
if value == "@id":
self.identifiers.append(key)
self.identity_links.add(key)
elif isinstance(value, MutableMapping):
if value.get("@type") == "@id":
self.url_fields.add(key)
if "refScope" in value:
self.scoped_ref_fields[key] = value["refScope"]
if value.get("identity", False):
self.identity_links.add(key)
if value.get("@type") == "@vocab":
self.url_fields.add(key)
self.vocab_fields.add(key)
if "refScope" in value:
self.scoped_ref_fields[key] = value["refScope"]
if value.get("typeDSL"):
self.type_dsl_fields.add(key)
if value.get("secondaryFilesDSL"):
self.secondaryFile_dsl_fields.add(key)
if value.get("noLinkCheck"):
self.nolinkcheck.add(key)
if value.get("mapSubject"):
self.idmap[key] = value["mapSubject"]
if value.get("mapPredicate"):
self.mapPredicate[key] = value["mapPredicate"]
if value.get("@id"):
self.vocab[key] = value["@id"]
if value.get("subscope"):
self.subscopes[key] = value["subscope"]
elif isinstance(value, str):
self.vocab[key] = value
for k, v in self.vocab.items():
self.rvocab[self.expand_url(v, "", scoped_id=False)] = k
self.identifiers.sort()
_logger.debug("identifiers is %s", self.identifiers)
_logger.debug("identity_links is %s", self.identity_links)
_logger.debug("url_fields is %s", self.url_fields)
_logger.debug("vocab_fields is %s", self.vocab_fields)
_logger.debug("vocab is %s", self.vocab)
def resolve_ref(
self,
ref: ResolveType,
base_url: Optional[str] = None,
checklinks: bool = True,
strict_foreign_properties: bool = False,
content_types: Optional[List[str]] = None, # Expected content-types
) -> ResolvedRefType:
lref = ref
obj: Optional[CommentedMap] = None
resolved_obj: ResolveType = None
imp = False
inc = False
mixin: Optional[MutableMapping[str, str]] = None
if not base_url:
base_url = file_uri(os.getcwd()) + "/"
# If `ref` is a dict, look for special directives.
if isinstance(lref, CommentedMap):
obj = lref
if "$import" in obj:
if len(obj) == 1:
lref = obj["$import"]
imp = True
obj = None
else:
raise ValidationException(
f"'$import' must be the only field in {obj}",
SourceLine(obj, "$import"),
)
elif "$include" in obj:
if len(obj) == 1:
lref = obj["$include"]
inc = True
obj = None
else:
raise ValidationException(
f"'$include' must be the only field in {obj}",
SourceLine(obj, "$include"),
)
elif "$mixin" in obj:
lref = obj["$mixin"]
mixin = obj
obj = None
else:
lref = None
for identifier in self.identifiers:
if identifier in obj:
lref = obj[identifier]
break
if not lref:
raise ValidationException(
f"Object {obj!r} does not have identifier field in {self.identifiers}",
SourceLine(obj),
)
if not isinstance(lref, str):
raise ValidationException(
f"Expected CommentedMap or string, got {type(lref)}: {lref!r}"
)
if isinstance(lref, str) and os.sep == "\\":
# Convert Windows path separator in ref
lref = lref.replace("\\", "/")
url = self.expand_url(lref, base_url, scoped_id=(obj is not None))
# Has this reference been loaded already?
if url in self.idx and (not mixin):
resolved_obj = self.idx[url]
if isinstance(resolved_obj, MutableMapping):
metadata: Union[CommentedMap, CommentedSeq, str, None] = self.idx.get(
urllib.parse.urldefrag(url)[0], CommentedMap()
)
if isinstance(metadata, MutableMapping):
if "$graph" in resolved_obj:
metadata = _copy_dict_without_key(resolved_obj, "$graph")
return resolved_obj["$graph"], metadata
else:
return resolved_obj, metadata
else:
raise ValidationException(
f"Expected CommentedMap, got {type(metadata)}: {metadata!r}"
)
elif isinstance(resolved_obj, MutableSequence):
metadata = self.idx.get(urllib.parse.urldefrag(url)[0], CommentedMap())
if isinstance(metadata, MutableMapping):
return resolved_obj, metadata
else:
return resolved_obj, CommentedMap()
elif isinstance(resolved_obj, str):
return resolved_obj, CommentedMap()
else:
raise ValidationException(
f"Expected MutableMapping or MutableSequence, got "
f"{type(resolved_obj)}: {resolved_obj!r}"
)
# "$include" directive means load raw text
if inc:
# Make a note in the index that this was an included string
self.idx["include:" + url] = url
return self.fetch_text(url), CommentedMap()
doc = None
if isinstance(obj, MutableMapping):
for identifier in self.identifiers:
obj[identifier] = url
doc_url = url
else:
# Load structured document
doc_url, frg = urllib.parse.urldefrag(url)
if doc_url in self.idx and (not mixin):
# If the base document is in the index, it was already loaded,
# so if we didn't find the reference earlier then it must not
# exist.
raise SourceLine(self.idx, doc_url, ValidationException).makeError(
f"Reference '#{frg}' not found in file {doc_url!r}.",
)
doc = self.fetch(doc_url, inject_ids=(not mixin), content_types=content_types)
if imp:
# Make a note in the index that this was an imported fragment
self.idx["import:" + url] = url
# Recursively expand urls and resolve directives
if bool(mixin):
doc = copy.deepcopy(doc)
if isinstance(doc, CommentedMap) and mixin is not None:
doc.update(mixin)
del doc["$mixin"]
resolved_obj, metadata = self.resolve_all(
doc,
base_url,
file_base=doc_url,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
else:
resolved_obj, metadata = self.resolve_all(
doc or obj,
doc_url,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
# Requested reference should be in the index now, otherwise it's a bad
# reference
if not bool(mixin):
if url in self.idx:
resolved_obj = self.idx[url]
else:
raise ValidationException(
f"Reference {url!r} is not in the index. Index contains: {' '.join(self.idx)}"
)
if isinstance(resolved_obj, CommentedMap):
if "$graph" in resolved_obj:
metadata = _copy_dict_without_key(resolved_obj, "$graph")
return resolved_obj["$graph"], metadata
else:
return resolved_obj, metadata
else:
return resolved_obj, metadata
def _resolve_idmap(
self,
document: CommentedMap,
loader: "Loader",
) -> None:
# Convert fields with mapSubject into lists
# use mapPredicate if the mapped value isn't a dict.
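        # Illustrative transformation (not part of the original source), assuming a
        # hypothetical field configured with mapSubject == "class" and mapPredicate == "value":
        #   {"things": {"x": "y"}}  ->  {"things": [{"value": "y", "class": "x"}]}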
for idmapField in loader.idmap:
if idmapField in document:
idmapFieldValue = document[idmapField]
if (
isinstance(idmapFieldValue, MutableMapping)
and "$import" not in idmapFieldValue
and "$include" not in idmapFieldValue
):
ls = CommentedSeq()
for k in sorted(idmapFieldValue.keys()):
val = idmapFieldValue[k]
v: Optional[CommentedMap] = None
if not isinstance(val, CommentedMap):
if idmapField in loader.mapPredicate:
v = CommentedMap(((loader.mapPredicate[idmapField], val),))
v.lc.add_kv_line_col(
loader.mapPredicate[idmapField],
document[idmapField].lc.data[k],
)
v.lc.filename = document.lc.filename
else:
raise ValidationException(
f"mapSubject {k!r} value {v!r} is not a dict "
"and does not have a mapPredicate.",
SourceLine(document, idmapField),
)
else:
v = val
v[loader.idmap[idmapField]] = k
v.lc.add_kv_line_col(
loader.idmap[idmapField], document[idmapField].lc.data[k]
)
v.lc.filename = document.lc.filename
ls.lc.add_kv_line_col(len(ls), document[idmapField].lc.data[k])
ls.lc.filename = document.lc.filename
ls.append(v)
document[idmapField] = ls
def _type_dsl(
self,
t: Union[str, CommentedMap, CommentedSeq],
lc: LineCol,
filename: str,
) -> Union[str, CommentedMap, CommentedSeq]:
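        # Illustrative expansions for the type DSL (not part of the original source):
        #   "int?"      -> ["null", "int"]
        #   "string[]?" -> ["null", {"type": "array", "items": "string"}]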
if not isinstance(t, str):
return t
t_ = t
optional = False
if t_.endswith("?"):
optional = True
t_ = t_[0:-1]
if t_.endswith("[]"):
salad_versions = [int(v) for v in self.salad_version[1:].split(".")]
rest = t_[0:-2]
if salad_versions < [1, 3]:
if rest.endswith("[]"):
# To show the error message with the original type
return t
else:
cmap = CommentedMap((("type", "array"), ("items", rest)))
else:
items = self._type_dsl(rest, lc, filename)
cmap = CommentedMap((("type", "array"), ("items", items)))
cmap.lc.add_kv_line_col("type", lc)
cmap.lc.add_kv_line_col("items", lc)
cmap.lc.filename = filename
expanded: Union[str, CommentedMap, CommentedSeq] = cmap
else:
expanded = t_
if optional:
cs = CommentedSeq(["null", expanded])
cs.lc.add_kv_line_col(0, lc)
cs.lc.add_kv_line_col(1, lc)
cs.lc.filename = filename
ret: Union[str, CommentedMap, CommentedSeq] = cs
else:
ret = expanded
return ret
def _secondaryFile_dsl(
self,
t: Union[str, CommentedMap, CommentedSeq],
lc: LineCol,
filename: str,
) -> Union[str, CommentedMap, CommentedSeq]:
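        # Illustrative expansions (not part of the original source):
        #   ".bai"  -> {"pattern": ".bai", "required": None}
        #   ".bai?" -> {"pattern": ".bai", "required": False}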
if not isinstance(t, str):
return t
pat = t[0:-1] if t.endswith("?") else t
req: Optional[bool] = False if t.endswith("?") else None
second = CommentedMap((("pattern", pat), ("required", req)))
second.lc.add_kv_line_col("pattern", lc)
second.lc.add_kv_line_col("required", lc)
second.lc.filename = filename
return second
def _apply_dsl(
self,
datum: Union[str, CommentedMap, CommentedSeq],
d: str,
loader: "Loader",
lc: LineCol,
filename: str,
) -> Union[str, CommentedMap, CommentedSeq]:
if d in loader.type_dsl_fields:
return self._type_dsl(datum, lc, filename)
if d in loader.secondaryFile_dsl_fields:
return self._secondaryFile_dsl(datum, lc, filename)
return datum
def _resolve_dsl(
self,
document: CommentedMap,
loader: "Loader",
) -> None:
fields = list(loader.type_dsl_fields)
fields.extend(loader.secondaryFile_dsl_fields)
for d in fields:
if d in document:
datum2 = datum = document[d]
if isinstance(datum, str):
datum2 = self._apply_dsl(
datum,
d,
loader,
document.lc.data[d] if document.lc.data else document.lc,
getattr(document.lc, "filename", ""),
)
elif isinstance(datum, CommentedSeq):
datum2 = CommentedSeq()
for n, t in enumerate(datum):
if datum.lc and datum.lc.data:
datum2.lc.add_kv_line_col(len(datum2), datum.lc.data[n])
datum2.append(
self._apply_dsl(
t, d, loader, datum.lc.data[n], document.lc.filename
)
)
else:
datum2.append(self._apply_dsl(t, d, loader, LineCol(), ""))
if isinstance(datum2, CommentedSeq):
datum3 = CommentedSeq()
seen: List[str] = []
for i, item in enumerate(datum2):
if isinstance(item, CommentedSeq):
for j, v in enumerate(item):
if v not in seen:
datum3.lc.add_kv_line_col(len(datum3), item.lc.data[j])
datum3.append(v)
seen.append(v)
else:
if item not in seen:
if datum2.lc and datum2.lc.data:
datum3.lc.add_kv_line_col(len(datum3), datum2.lc.data[i])
datum3.append(item)
seen.append(item)
document[d] = datum3
else:
document[d] = datum2
def _resolve_identifier(self, document: CommentedMap, loader: "Loader", base_url: str) -> str:
# Expand identifier field (usually 'id') to resolve scope
for identifier in loader.identifiers:
if identifier in document:
if isinstance(document[identifier], str):
document[identifier] = loader.expand_url(
document[identifier], base_url, scoped_id=True
)
if document[identifier] not in loader.idx or isinstance(
loader.idx[document[identifier]], str
):
loader.idx[document[identifier]] = document
base_url = document[identifier]
else:
raise ValidationException(
f"identifier field {document[identifier]!r} must be a string"
)
return base_url
def _resolve_identity(
self,
document: Dict[str, Union[str, MutableSequence[Union[str, CommentedMap]]]],
loader: "Loader",
base_url: str,
) -> None:
# Resolve scope for identity fields (fields where the value is the
# identity of a standalone node, such as enum symbols)
for identifier in loader.identity_links:
if identifier in document and isinstance(document[identifier], MutableSequence):
for n, v in enumerate(document[identifier]):
if isinstance(v, str):
document[identifier][n] = loader.expand_url( # type: ignore
v, base_url, scoped_id=True
)
if document[identifier][n] not in loader.idx:
loader.idx[cast(str, document[identifier][n])] = v
def _normalize_fields(self, document: CommentedMap, loader: "Loader") -> None:
        # Normalize fields which are prefixed or full URIs to vocabulary terms
for d in list(document.keys()):
if isinstance(d, str):
d2 = loader.expand_url(d, "", scoped_id=False, vocab_term=True)
if d != d2:
document[d2] = document[d]
document.lc.add_kv_line_col(d2, document.lc.data[d])
del document[d]
def _resolve_uris(
self,
document: Dict[str, Union[str, MutableSequence[Union[str, CommentedMap]]]],
loader: "Loader",
base_url: str,
) -> None:
# Resolve remaining URLs based on document base
for d in loader.url_fields:
if d in document:
datum = document[d]
if isinstance(datum, str):
document[d] = loader.expand_url(
datum,
base_url,
scoped_id=False,
vocab_term=(d in loader.vocab_fields),
scoped_ref=loader.scoped_ref_fields.get(d),
)
elif isinstance(datum, MutableSequence):
for i, url in enumerate(datum):
if isinstance(url, str):
datum[i] = loader.expand_url(
url,
base_url,
scoped_id=False,
vocab_term=(d in loader.vocab_fields),
scoped_ref=loader.scoped_ref_fields.get(d),
)
def resolve_all(
self,
document: ResolveType,
base_url: str,
file_base: Optional[str] = None,
checklinks: bool = True,
strict_foreign_properties: bool = False,
) -> ResolvedRefType:
loader = self
metadata = CommentedMap()
if file_base is None:
file_base = base_url
if isinstance(document, CommentedMap):
# Handle $import and $include
if "$import" in document or "$include" in document:
return self.resolve_ref(
document,
base_url=file_base,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
if "$mixin" in document:
return self.resolve_ref(
document,
base_url=base_url,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
elif isinstance(document, CommentedSeq):
pass
elif isinstance(document, (list, dict)):
raise ValidationException(
f"Expected CommentedMap or CommentedSeq, got {type(document)}: {document!r}"
)
else:
return (document, metadata)
newctx: Optional["Loader"] = None
if isinstance(document, CommentedMap):
# Handle $base, $profile, $namespaces, $schemas and $graph
if "$base" in document:
base_url = document["$base"]
if "$profile" in document:
if newctx is None:
newctx = SubLoader(self)
newctx.add_namespaces(document.get("$namespaces", CommentedMap()))
newctx.add_schemas(document.get("$schemas", []), document["$profile"])
if "$namespaces" in document:
if newctx is None:
newctx = SubLoader(self)
namespaces = document["$namespaces"]
if isinstance(namespaces, dict):
newctx.add_namespaces(document["$namespaces"])
else:
raise ValidationException(
"$namespaces must be a dictionary",
SourceLine(document, "$namespaces"),
)
if "$schemas" in document:
if newctx is None:
newctx = SubLoader(self)
schemas = document["$schemas"]
if isinstance(schemas, (list, str)):
newctx.add_schemas(schemas, file_base)
else:
raise ValidationException(
"$schemas must be a string or a list of string",
SourceLine(document, "$schemas"),
)
if newctx is not None:
loader = newctx
for identifier in loader.identity_links:
if identifier in document:
if isinstance(document[identifier], str):
document[identifier] = loader.expand_url(
document[identifier], base_url, scoped_id=True
)
loader.idx[document[identifier]] = document
metadata = document
if "$graph" in document:
document = document["$graph"]
if isinstance(document, CommentedMap):
self._normalize_fields(document, loader)
self._resolve_idmap(document, loader)
self._resolve_dsl(document, loader)
base_url = self._resolve_identifier(document, loader, base_url)
self._resolve_identity(document, loader, base_url)
self._resolve_uris(document, loader, base_url)
try:
for key, val in document.items():
subscope: str = ""
if key in loader.subscopes:
subscope = "/" + loader.subscopes[key]
document[key], _ = loader.resolve_all(
val, base_url + subscope, file_base=file_base, checklinks=False
)
except ValidationException as v:
_logger.warning("loader is %s", id(loader), exc_info=True)
raise ValidationException(
f"({id(loader)}) ({file_base}) Validation error in field {key}:",
None,
[v],
) from v
elif isinstance(document, CommentedSeq):
i = 0
try:
while i < len(document):
val = document[i]
if isinstance(val, CommentedMap) and ("$import" in val or "$mixin" in val):
l, import_metadata = loader.resolve_ref(
val, base_url=file_base, checklinks=False
)
metadata.setdefault("$import_metadata", {})
for identifier in loader.identifiers:
if identifier in import_metadata:
metadata["$import_metadata"][
import_metadata[identifier]
] = import_metadata
if isinstance(l, CommentedSeq):
lc = document.lc.data[i]
del document[i]
llen = len(l)
for j in range(len(document) + llen, i + llen, -1):
document.lc.data[j - 1] = document.lc.data[j - llen]
for item in l:
document.insert(i, item)
document.lc.data[i] = lc
i += 1
else:
document[i] = l
i += 1
else:
document[i], _ = loader.resolve_all(
val, base_url, file_base=file_base, checklinks=False
)
i += 1
except ValidationException as v:
_logger.warning("failed", exc_info=True)
raise ValidationException(
f"({id(loader)}) ({file_base}) Validation error in position {i}:",
None,
[v],
) from v
if checklinks:
all_doc_ids: Dict[str, str] = {}
loader.validate_links(
document,
"",
all_doc_ids,
strict_foreign_properties=strict_foreign_properties,
)
return document, metadata
def fetch(
self,
url: str,
inject_ids: bool = True,
content_types: Optional[List[str]] = None,
) -> IdxResultType:
if url in self.idx:
return self.idx[url]
try:
text = self.fetch_text(url, content_types=content_types)
textIO = StringIO(text)
textIO.name = str(url)
yaml = yaml_no_ts()
attachments = yaml.load_all(textIO)
result = cast(Union[CommentedSeq, CommentedMap], next(attachments))
if self.allow_attachments is not None and self.allow_attachments(result):
i = 1
for a in attachments:
self.idx[f"{url}#attachment-{i}"] = a
i += 1
add_lc_filename(result, url)
except MarkedYAMLError as e:
raise to_validation_exception(e) from e
if isinstance(result, CommentedMap) and inject_ids and bool(self.identifiers):
missing_identifier = True
for identifier in self.identifiers:
if identifier in result:
missing_identifier = False
self.idx[self.expand_url(result[identifier], url, scoped_id=True)] = result
if missing_identifier:
result[self.identifiers[0]] = url
self.idx[url] = result
return result
def validate_scoped(self, field: str, link: str, docid: str) -> str:
split = urllib.parse.urlsplit(docid)
sp = split.fragment.split("/")
n = self.scoped_ref_fields[field]
while n > 0 and len(sp) > 0:
sp.pop()
n -= 1
tried = []
while True:
sp.append(link)
url = urllib.parse.urlunsplit(
(split.scheme, split.netloc, split.path, split.query, "/".join(sp))
)
tried.append(url)
if url in self.idx:
return url
sp.pop()
if len(sp) == 0:
break
sp.pop()
if onWindows() and link.startswith("file:"):
link = link.lower()
raise ValidationException(
f"Field {field!r} references unknown identifier {link!r}, tried {' '.join(tried)}"
)
def validate_link(
self,
field: str,
link: Union[str, CommentedSeq, CommentedMap],
# link also can be None, but that results in
# mypyc "error: Local variable "link" has inferred type None; add an annotation"
docid: str,
all_doc_ids: Dict[str, str],
) -> Union[str, CommentedSeq, CommentedMap]:
if field in self.nolinkcheck:
return link
if isinstance(link, str):
if field in self.vocab_fields:
if link not in self.vocab and link not in self.idx and link not in self.rvocab:
if field in self.scoped_ref_fields:
return self.validate_scoped(field, link, docid)
elif not self.check_exists(link):
raise ValidationException(
f"Field {field!r} contains undefined reference to {link!r}"
)
elif link not in self.idx and link not in self.rvocab:
if field in self.scoped_ref_fields:
return self.validate_scoped(field, link, docid)
elif not self.check_exists(link):
raise ValidationException(
f"Field {field!r} contains undefined reference to {link!r}"
)
elif isinstance(link, CommentedSeq):
errors = []
for n, i in enumerate(link):
try:
link[n] = self.validate_link(field, i, docid, all_doc_ids)
except ValidationException as v:
errors.append(v)
if bool(errors):
raise ValidationException("", None, errors)
elif isinstance(link, CommentedMap):
self.validate_links(link, docid, all_doc_ids)
elif link is None: # type: ignore[unreachable]
return None
else:
raise ValidationException(
f"{field!r} field is {type(link).__name__}, expected string, list, or a dict."
)
return link
def getid(self, d: Any) -> Optional[str]:
if isinstance(d, MutableMapping):
for i in self.identifiers:
if i in d:
idd = d[i]
if isinstance(idd, str):
return idd
return None
def validate_links(
self,
document: ResolveType,
base_url: str,
all_doc_ids: Dict[str, str],
strict_foreign_properties: bool = False,
) -> None:
docid = self.getid(document) or base_url
errors: List[SchemaSaladException] = []
iterator: Any = None
if isinstance(document, MutableSequence):
iterator = enumerate(document)
elif isinstance(document, MutableMapping):
for d in self.url_fields:
try:
if d in document and d not in self.identity_links:
document[d] = self.validate_link(d, document[d], docid, all_doc_ids)
except SchemaSaladException as v:
v = v.with_sourceline(SourceLine(document, d, str))
if d == "$schemas" or (
d in self.foreign_properties and not strict_foreign_properties
):
_logger.warning(v.as_warning())
else:
errors.append(v)
            # TODO: The validator should check the local scope only, in which
            # duplicated keys are prohibited.
            # See also https://github.com/common-workflow-language/common-workflow-language/issues/734 # noqa: B950
            # In the future, this should raise a
            # ValidationException instead of calling _logger.warning
try:
for identifier in self.identifiers: # validate that each id is defined uniquely
if identifier in document:
sl = SourceLine(document, identifier, str)
if (
document[identifier] in all_doc_ids
and sl.makeLead() != all_doc_ids[document[identifier]]
):
_logger.warning(
"%s object %s %r previously defined",
all_doc_ids[document[identifier]],
identifier,
relname(document[identifier]),
)
else:
all_doc_ids[document[identifier]] = sl.makeLead()
break
except ValidationException as v:
errors.append(v.with_sourceline(sl))
iterator = list(document.items())
else:
return
for key, val in iterator:
sl = SourceLine(document, key, str)
try:
self.validate_links(
val,
docid,
all_doc_ids,
strict_foreign_properties=strict_foreign_properties,
)
except ValidationException as v:
if key in self.nolinkcheck or (isinstance(key, str) and ":" in key):
_logger.warning(v.as_warning())
else:
docid2 = self.getid(val)
if docid2 is not None:
errors.append(
ValidationException(f"checking object {relname(docid2)!r}", sl, [v])
)
else:
if isinstance(key, str):
errors.append(ValidationException(f"checking field {key!r}", sl, [v]))
else:
errors.append(ValidationException("checking item", sl, [v]))
if bool(errors):
if len(errors) > 1:
raise ValidationException("", None, errors)
raise errors[0]
return
def _copy_dict_without_key(
from_dict: Union[CommentedMap, ContextType], filtered_key: str
) -> CommentedMap:
new_dict = CommentedMap(from_dict.items())
if filtered_key in new_dict:
del new_dict[filtered_key]
if isinstance(from_dict, CommentedMap):
new_dict.lc.data = copy.copy(from_dict.lc.data)
new_dict.lc.filename = from_dict.lc.filename
return new_dict
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/ref_resolver.py
|
ref_resolver.py
|
import copy
import logging
import os
import pathlib
import re
import tempfile
import traceback
import urllib
import xml.sax # nosec
from io import StringIO
from typing import (
Any,
Callable,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Set,
Union,
cast,
)
import requests
from cachecontrol.caches import FileCache
from cachecontrol.wrapper import CacheControl
from rdflib.exceptions import ParserError
from rdflib.graph import Graph
from rdflib.namespace import OWL, RDF, RDFS
from rdflib.plugin import PluginException
from rdflib.plugins.parsers.notation3 import BadSyntax
from rdflib.util import guess_format
from ruamel.yaml.comments import CommentedMap, CommentedSeq, LineCol
from ruamel.yaml.error import MarkedYAMLError
from .exceptions import SchemaSaladException, ValidationException
from .fetcher import DefaultFetcher
from .sourceline import SourceLine, add_lc_filename, relname
from .utils import (
AttachmentsType,
CacheType,
ContextType,
FetcherCallableType,
IdxResultType,
IdxType,
ResolvedRefType,
ResolveType,
aslist,
onWindows,
yaml_no_ts,
)
_logger = logging.getLogger("salad")
def file_uri(path: str, split_frag: bool = False) -> str:
if path.startswith("file://"):
return path
if split_frag:
pathsp = path.split("#", 2)
if len(pathsp) == 2:
frag = "#" + urllib.parse.quote(str(pathsp[1]))
else:
frag = ""
urlpath = urllib.request.pathname2url(str(pathsp[0]))
else:
urlpath = urllib.request.pathname2url(path)
frag = ""
if urlpath.startswith("//"):
return f"file:{urlpath}{frag}"
return f"file://{urlpath}{frag}"
def uri_file_path(url: str) -> str:
split = urllib.parse.urlsplit(url)
if split.scheme == "file":
return urllib.request.url2pathname(str(split.path)) + (
"#" + urllib.parse.unquote(str(split.fragment)) if bool(split.fragment) else ""
)
raise ValidationException(f"Not a file URI: {url}")
def to_validation_exception(e: MarkedYAMLError) -> ValidationException:
"""Convert ruamel.yaml exception to our type."""
fname_regex = re.compile(r"^file://" + re.escape(os.getcwd()) + "/")
exc = ValidationException(e.problem)
mark = e.problem_mark
exc.file = re.sub(fname_regex, "", mark.name)
exc.start = (mark.line + 1, mark.column + 1)
exc.end = None
if e.context:
parent = ValidationException(e.context)
context_mark = e.context_mark
if context_mark:
parent.file = re.sub(fname_regex, "", context_mark.name)
parent.start = (context_mark.line + 1, context_mark.column + 1)
parent.end = None
parent.children = [exc]
return parent
return exc
class NormDict(Dict[str, Union[CommentedMap, CommentedSeq, str, None]]):
"""A Dict where all keys are normalized using the provided function."""
def __init__(self, normalize: Callable[[str], str] = str) -> None:
super().__init__()
self.normalize = normalize
def __eq__(self, other: Any) -> bool:
return super().__eq__(other)
def __getitem__(self, key: Any) -> Any:
return super().__getitem__(self.normalize(key))
def __setitem__(self, key: Any, value: Any) -> Any:
return super().__setitem__(self.normalize(key), value)
def __delitem__(self, key: Any) -> Any:
return super().__delitem__(self.normalize(key))
def __contains__(self, key: Any) -> bool:
return super().__contains__(self.normalize(key))
def __del__(self) -> None:
del self.normalize
def SubLoader(loader: "Loader") -> "Loader":
return Loader(
loader.ctx,
schemagraph=loader.graph,
foreign_properties=loader.foreign_properties,
idx=loader.idx,
cache=loader.cache,
fetcher_constructor=loader.fetcher_constructor,
skip_schemas=loader.skip_schemas,
url_fields=loader.url_fields,
allow_attachments=loader.allow_attachments,
session=loader.session,
salad_version=loader.salad_version,
)
class Loader:
def __init__(
self,
ctx: ContextType,
schemagraph: Optional[Graph] = None,
foreign_properties: Optional[Set[str]] = None,
idx: Optional[IdxType] = None,
cache: Optional[CacheType] = None,
session: Optional[requests.sessions.Session] = None,
fetcher_constructor: Optional[FetcherCallableType] = None,
skip_schemas: Optional[bool] = None,
url_fields: Optional[Set[str]] = None,
allow_attachments: Optional[AttachmentsType] = None,
doc_cache: Union[str, bool] = True,
salad_version: Optional[str] = None,
) -> None:
self.idx: IdxType = (
NormDict(lambda url: urllib.parse.urlsplit(url).geturl()) if idx is None else idx
)
self.ctx: ContextType = {}
self.graph = schemagraph if schemagraph is not None else Graph()
self.foreign_properties = (
set(foreign_properties) if foreign_properties is not None else set()
)
self.cache = cache if cache is not None else {}
self.skip_schemas = skip_schemas if skip_schemas is not None else False
if session is None:
if doc_cache is False:
self.session = requests.Session()
elif doc_cache is True:
root = pathlib.Path(os.environ.get("HOME", tempfile.gettempdir()))
self.session = CacheControl(
requests.Session(),
cache=FileCache(root / ".cache" / "salad"),
)
elif isinstance(doc_cache, str):
self.session = CacheControl(requests.Session(), cache=FileCache(doc_cache))
else:
self.session = session
self.fetcher_constructor = (
fetcher_constructor if fetcher_constructor is not None else DefaultFetcher
)
self.fetcher = self.fetcher_constructor(self.cache, self.session)
self.fetch_text = self.fetcher.fetch_text
self.check_exists = self.fetcher.check_exists
self.url_fields: Set[str] = set() if url_fields is None else set(url_fields)
self.scoped_ref_fields: Dict[str, int] = {}
self.vocab_fields: Set[str] = set()
self.identifiers: List[str] = []
self.identity_links: Set[str] = set()
self.standalone: Optional[Set[str]] = None
self.nolinkcheck: Set[str] = set()
self.vocab: Dict[str, str] = {}
self.rvocab: Dict[str, str] = {}
self.idmap: Dict[str, str] = {}
self.mapPredicate: Dict[str, str] = {}
self.type_dsl_fields: Set[str] = set()
self.subscopes: Dict[str, str] = {}
self.secondaryFile_dsl_fields: Set[str] = set()
self.allow_attachments = allow_attachments
if salad_version:
self.salad_version = salad_version
else:
self.salad_version = "v1.1"
self.add_context(ctx)
def expand_url(
self,
url: str,
base_url: str,
scoped_id: bool = False,
vocab_term: bool = False,
scoped_ref: Optional[int] = None,
) -> str:
if url in ("@id", "@type"):
return url
if vocab_term and url in self.vocab:
return url
if url.startswith("_:"):
return url
if bool(self.vocab) and ":" in url:
prefix = url.split(":")[0]
if not prefix:
pass
elif prefix in self.vocab:
url = self.vocab[prefix] + url[len(prefix) + 1 :]
elif (
prefix not in self.fetcher.supported_schemes()
and "/" not in prefix
and "#" not in prefix
):
_logger.warning(
"URI prefix '%s' of '%s' not recognized, are you missing a "
"$namespaces section?",
prefix,
url,
)
split = urllib.parse.urlsplit(url)
if (
(bool(split.scheme) and split.scheme in self.fetcher.supported_schemes())
or url.startswith("$(")
or url.startswith("${")
):
pass
elif scoped_id and not bool(split.fragment):
splitbase = urllib.parse.urlsplit(base_url)
frg = splitbase.fragment + "/" + split.path if bool(splitbase.fragment) else split.path
pt = splitbase.path if splitbase.path != "" else "/"
url = urllib.parse.urlunsplit(
(splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg)
)
elif scoped_ref is not None and not split.fragment:
pass
else:
url = self.fetcher.urljoin(base_url, url)
if vocab_term and url in self.rvocab:
return self.rvocab[url]
return url
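    # Illustrative example (added comment, not in the original): with
    # self.vocab == {"edam": "http://edamontology.org/"}, the call
    # expand_url("edam:format_2330", "") returns
    # "http://edamontology.org/format_2330", while a plain fragment such as
    # "#step1" is joined against base_url via self.fetcher.urljoin().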
def _add_properties(self, s: str) -> None:
for _, _, rng in self.graph.triples((s, RDFS.range, None)):
literal = (
str(rng).startswith("http://www.w3.org/2001/XMLSchema#")
and str(rng) != "http://www.w3.org/2001/XMLSchema#anyURI"
) or str(rng) == "http://www.w3.org/2000/01/rdf-schema#Literal"
if not literal:
self.url_fields.add(str(s))
self.foreign_properties.add(str(s))
def add_namespaces(self, ns: Dict[str, str]) -> None:
self.vocab.update(ns)
def add_schemas(self, ns: Union[List[str], str], base_url: str) -> None:
if self.skip_schemas:
return
for sch in aslist(ns):
fetchurl = self.fetcher.urljoin(base_url, sch)
if fetchurl not in self.cache or self.cache[fetchurl] is True:
_logger.debug("Getting external schema %s", fetchurl)
try:
content = self.fetch_text(fetchurl)
except Exception as e:
tb = traceback.format_exception(type(e), e, e.__traceback__)
_logger.warning("Could not load extension schema %s: %s", fetchurl, str(e))
_logger.debug(tb)
continue
newGraph = Graph()
err_msg = "unknown error"
for fmt in [
guess_format(sch),
"xml",
"turtle",
None,
guess_format(sch),
]:
try:
newGraph.parse(data=content, format=fmt, publicID=str(fetchurl))
self.cache[fetchurl] = newGraph
self.graph += newGraph
break
except (
xml.sax.SAXParseException,
TypeError,
BadSyntax,
ParserError,
PluginException,
) as e:
err_msg = str(e)
else:
_logger.warning("Could not load extension schema %s: %s", fetchurl, err_msg)
for s, _, _ in self.graph.triples((None, RDF.type, RDF.Property)):
self._add_properties(s)
for s, _, o in self.graph.triples((None, RDFS.subPropertyOf, None)):
self._add_properties(s)
self._add_properties(o)
for s, _, _ in self.graph.triples((None, RDFS.range, None)):
self._add_properties(s)
for s, _, _ in self.graph.triples((None, RDF.type, OWL.ObjectProperty)):
self._add_properties(s)
for s, _, _ in self.graph.triples((None, None, None)):
self.idx[str(s)] = None
def add_context(self, newcontext: ContextType) -> None:
if bool(self.vocab):
raise ValidationException("Refreshing context that already has stuff in it")
self.url_fields = {"$schemas"}
self.scoped_ref_fields.clear()
self.vocab_fields.clear()
self.identifiers.clear()
self.identity_links.clear()
self.standalone = set()
self.nolinkcheck.clear()
self.idmap.clear()
self.mapPredicate.clear()
self.vocab.clear()
self.rvocab.clear()
self.type_dsl_fields.clear()
self.secondaryFile_dsl_fields.clear()
self.subscopes.clear()
self.ctx.update(_copy_dict_without_key(newcontext, "@context"))
_logger.debug("ctx is %s", self.ctx)
for key, value in self.ctx.items():
if value == "@id":
self.identifiers.append(key)
self.identity_links.add(key)
elif isinstance(value, MutableMapping):
if value.get("@type") == "@id":
self.url_fields.add(key)
if "refScope" in value:
self.scoped_ref_fields[key] = value["refScope"]
if value.get("identity", False):
self.identity_links.add(key)
if value.get("@type") == "@vocab":
self.url_fields.add(key)
self.vocab_fields.add(key)
if "refScope" in value:
self.scoped_ref_fields[key] = value["refScope"]
if value.get("typeDSL"):
self.type_dsl_fields.add(key)
if value.get("secondaryFilesDSL"):
self.secondaryFile_dsl_fields.add(key)
if value.get("noLinkCheck"):
self.nolinkcheck.add(key)
if value.get("mapSubject"):
self.idmap[key] = value["mapSubject"]
if value.get("mapPredicate"):
self.mapPredicate[key] = value["mapPredicate"]
if value.get("@id"):
self.vocab[key] = value["@id"]
if value.get("subscope"):
self.subscopes[key] = value["subscope"]
elif isinstance(value, str):
self.vocab[key] = value
for k, v in self.vocab.items():
self.rvocab[self.expand_url(v, "", scoped_id=False)] = k
self.identifiers.sort()
_logger.debug("identifiers is %s", self.identifiers)
_logger.debug("identity_links is %s", self.identity_links)
_logger.debug("url_fields is %s", self.url_fields)
_logger.debug("vocab_fields is %s", self.vocab_fields)
_logger.debug("vocab is %s", self.vocab)
def resolve_ref(
self,
ref: ResolveType,
base_url: Optional[str] = None,
checklinks: bool = True,
strict_foreign_properties: bool = False,
content_types: Optional[List[str]] = None, # Expected content-types
) -> ResolvedRefType:
lref = ref
obj: Optional[CommentedMap] = None
resolved_obj: ResolveType = None
imp = False
inc = False
mixin: Optional[MutableMapping[str, str]] = None
if not base_url:
base_url = file_uri(os.getcwd()) + "/"
# If `ref` is a dict, look for special directives.
if isinstance(lref, CommentedMap):
obj = lref
if "$import" in obj:
if len(obj) == 1:
lref = obj["$import"]
imp = True
obj = None
else:
raise ValidationException(
f"'$import' must be the only field in {obj}",
SourceLine(obj, "$import"),
)
elif "$include" in obj:
if len(obj) == 1:
lref = obj["$include"]
inc = True
obj = None
else:
raise ValidationException(
f"'$include' must be the only field in {obj}",
SourceLine(obj, "$include"),
)
elif "$mixin" in obj:
lref = obj["$mixin"]
mixin = obj
obj = None
else:
lref = None
for identifier in self.identifiers:
if identifier in obj:
lref = obj[identifier]
break
if not lref:
raise ValidationException(
f"Object {obj!r} does not have identifier field in {self.identifiers}",
SourceLine(obj),
)
if not isinstance(lref, str):
raise ValidationException(
f"Expected CommentedMap or string, got {type(lref)}: {lref!r}"
)
if isinstance(lref, str) and os.sep == "\\":
# Convert Windows path separator in ref
lref = lref.replace("\\", "/")
url = self.expand_url(lref, base_url, scoped_id=(obj is not None))
# Has this reference been loaded already?
if url in self.idx and (not mixin):
resolved_obj = self.idx[url]
if isinstance(resolved_obj, MutableMapping):
metadata: Union[CommentedMap, CommentedSeq, str, None] = self.idx.get(
urllib.parse.urldefrag(url)[0], CommentedMap()
)
if isinstance(metadata, MutableMapping):
if "$graph" in resolved_obj:
metadata = _copy_dict_without_key(resolved_obj, "$graph")
return resolved_obj["$graph"], metadata
else:
return resolved_obj, metadata
else:
raise ValidationException(
f"Expected CommentedMap, got {type(metadata)}: {metadata!r}"
)
elif isinstance(resolved_obj, MutableSequence):
metadata = self.idx.get(urllib.parse.urldefrag(url)[0], CommentedMap())
if isinstance(metadata, MutableMapping):
return resolved_obj, metadata
else:
return resolved_obj, CommentedMap()
elif isinstance(resolved_obj, str):
return resolved_obj, CommentedMap()
else:
raise ValidationException(
f"Expected MutableMapping or MutableSequence, got "
f"{type(resolved_obj)}: {resolved_obj!r}"
)
# "$include" directive means load raw text
if inc:
# Make a note in the index that this was an included string
self.idx["include:" + url] = url
return self.fetch_text(url), CommentedMap()
doc = None
if isinstance(obj, MutableMapping):
for identifier in self.identifiers:
obj[identifier] = url
doc_url = url
else:
# Load structured document
doc_url, frg = urllib.parse.urldefrag(url)
if doc_url in self.idx and (not mixin):
# If the base document is in the index, it was already loaded,
# so if we didn't find the reference earlier then it must not
# exist.
raise SourceLine(self.idx, doc_url, ValidationException).makeError(
f"Reference '#{frg}' not found in file {doc_url!r}.",
)
doc = self.fetch(doc_url, inject_ids=(not mixin), content_types=content_types)
if imp:
# Make a note in the index that this was an imported fragment
self.idx["import:" + url] = url
# Recursively expand urls and resolve directives
if bool(mixin):
doc = copy.deepcopy(doc)
if isinstance(doc, CommentedMap) and mixin is not None:
doc.update(mixin)
del doc["$mixin"]
resolved_obj, metadata = self.resolve_all(
doc,
base_url,
file_base=doc_url,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
else:
resolved_obj, metadata = self.resolve_all(
doc or obj,
doc_url,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
# Requested reference should be in the index now, otherwise it's a bad
# reference
if not bool(mixin):
if url in self.idx:
resolved_obj = self.idx[url]
else:
raise ValidationException(
f"Reference {url!r} is not in the index. Index contains: {' '.join(self.idx)}"
)
if isinstance(resolved_obj, CommentedMap):
if "$graph" in resolved_obj:
metadata = _copy_dict_without_key(resolved_obj, "$graph")
return resolved_obj["$graph"], metadata
else:
return resolved_obj, metadata
else:
return resolved_obj, metadata
def _resolve_idmap(
self,
document: CommentedMap,
loader: "Loader",
) -> None:
# Convert fields with mapSubject into lists
# use mapPredicate if the mapped value isn't a dict.
for idmapField in loader.idmap:
if idmapField in document:
idmapFieldValue = document[idmapField]
if (
isinstance(idmapFieldValue, MutableMapping)
and "$import" not in idmapFieldValue
and "$include" not in idmapFieldValue
):
ls = CommentedSeq()
for k in sorted(idmapFieldValue.keys()):
val = idmapFieldValue[k]
v: Optional[CommentedMap] = None
if not isinstance(val, CommentedMap):
if idmapField in loader.mapPredicate:
v = CommentedMap(((loader.mapPredicate[idmapField], val),))
v.lc.add_kv_line_col(
loader.mapPredicate[idmapField],
document[idmapField].lc.data[k],
)
v.lc.filename = document.lc.filename
else:
raise ValidationException(
f"mapSubject {k!r} value {v!r} is not a dict "
"and does not have a mapPredicate.",
SourceLine(document, idmapField),
)
else:
v = val
v[loader.idmap[idmapField]] = k
v.lc.add_kv_line_col(
loader.idmap[idmapField], document[idmapField].lc.data[k]
)
v.lc.filename = document.lc.filename
ls.lc.add_kv_line_col(len(ls), document[idmapField].lc.data[k])
ls.lc.filename = document.lc.filename
ls.append(v)
document[idmapField] = ls
def _type_dsl(
self,
t: Union[str, CommentedMap, CommentedSeq],
lc: LineCol,
filename: str,
) -> Union[str, CommentedMap, CommentedSeq]:
if not isinstance(t, str):
return t
t_ = t
optional = False
if t_.endswith("?"):
optional = True
t_ = t_[0:-1]
if t_.endswith("[]"):
salad_versions = [int(v) for v in self.salad_version[1:].split(".")]
rest = t_[0:-2]
if salad_versions < [1, 3]:
if rest.endswith("[]"):
# To show the error message with the original type
return t
else:
cmap = CommentedMap((("type", "array"), ("items", rest)))
else:
items = self._type_dsl(rest, lc, filename)
cmap = CommentedMap((("type", "array"), ("items", items)))
cmap.lc.add_kv_line_col("type", lc)
cmap.lc.add_kv_line_col("items", lc)
cmap.lc.filename = filename
expanded: Union[str, CommentedMap, CommentedSeq] = cmap
else:
expanded = t_
if optional:
cs = CommentedSeq(["null", expanded])
cs.lc.add_kv_line_col(0, lc)
cs.lc.add_kv_line_col(1, lc)
cs.lc.filename = filename
ret: Union[str, CommentedMap, CommentedSeq] = cs
else:
ret = expanded
return ret
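    # Illustrative example (added comment, not in the original): the type DSL
    # string "string[]?" expands to ["null", {"type": "array", "items": "string"}]
    # (an optional array of strings), and a bare "string?" expands to
    # ["null", "string"].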
def _secondaryFile_dsl(
self,
t: Union[str, CommentedMap, CommentedSeq],
lc: LineCol,
filename: str,
) -> Union[str, CommentedMap, CommentedSeq]:
if not isinstance(t, str):
return t
pat = t[0:-1] if t.endswith("?") else t
req: Optional[bool] = False if t.endswith("?") else None
second = CommentedMap((("pattern", pat), ("required", req)))
second.lc.add_kv_line_col("pattern", lc)
second.lc.add_kv_line_col("required", lc)
second.lc.filename = filename
return second
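    # Illustrative example (added comment, not in the original): ".bai?" becomes
    # {"pattern": ".bai", "required": False}, while ".bai" becomes
    # {"pattern": ".bai", "required": None}, leaving requiredness to the caller.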
def _apply_dsl(
self,
datum: Union[str, CommentedMap, CommentedSeq],
d: str,
loader: "Loader",
lc: LineCol,
filename: str,
) -> Union[str, CommentedMap, CommentedSeq]:
if d in loader.type_dsl_fields:
return self._type_dsl(datum, lc, filename)
if d in loader.secondaryFile_dsl_fields:
return self._secondaryFile_dsl(datum, lc, filename)
return datum
def _resolve_dsl(
self,
document: CommentedMap,
loader: "Loader",
) -> None:
fields = list(loader.type_dsl_fields)
fields.extend(loader.secondaryFile_dsl_fields)
for d in fields:
if d in document:
datum2 = datum = document[d]
if isinstance(datum, str):
datum2 = self._apply_dsl(
datum,
d,
loader,
document.lc.data[d] if document.lc.data else document.lc,
getattr(document.lc, "filename", ""),
)
elif isinstance(datum, CommentedSeq):
datum2 = CommentedSeq()
for n, t in enumerate(datum):
if datum.lc and datum.lc.data:
datum2.lc.add_kv_line_col(len(datum2), datum.lc.data[n])
datum2.append(
self._apply_dsl(
t, d, loader, datum.lc.data[n], document.lc.filename
)
)
else:
datum2.append(self._apply_dsl(t, d, loader, LineCol(), ""))
if isinstance(datum2, CommentedSeq):
datum3 = CommentedSeq()
seen: List[str] = []
for i, item in enumerate(datum2):
if isinstance(item, CommentedSeq):
for j, v in enumerate(item):
if v not in seen:
datum3.lc.add_kv_line_col(len(datum3), item.lc.data[j])
datum3.append(v)
seen.append(v)
else:
if item not in seen:
if datum2.lc and datum2.lc.data:
datum3.lc.add_kv_line_col(len(datum3), datum2.lc.data[i])
datum3.append(item)
seen.append(item)
document[d] = datum3
else:
document[d] = datum2
def _resolve_identifier(self, document: CommentedMap, loader: "Loader", base_url: str) -> str:
# Expand identifier field (usually 'id') to resolve scope
for identifier in loader.identifiers:
if identifier in document:
if isinstance(document[identifier], str):
document[identifier] = loader.expand_url(
document[identifier], base_url, scoped_id=True
)
if document[identifier] not in loader.idx or isinstance(
loader.idx[document[identifier]], str
):
loader.idx[document[identifier]] = document
base_url = document[identifier]
else:
raise ValidationException(
f"identifier field {document[identifier]!r} must be a string"
)
return base_url
def _resolve_identity(
self,
document: Dict[str, Union[str, MutableSequence[Union[str, CommentedMap]]]],
loader: "Loader",
base_url: str,
) -> None:
# Resolve scope for identity fields (fields where the value is the
# identity of a standalone node, such as enum symbols)
for identifier in loader.identity_links:
if identifier in document and isinstance(document[identifier], MutableSequence):
for n, v in enumerate(document[identifier]):
if isinstance(v, str):
document[identifier][n] = loader.expand_url( # type: ignore
v, base_url, scoped_id=True
)
if document[identifier][n] not in loader.idx:
loader.idx[cast(str, document[identifier][n])] = v
def _normalize_fields(self, document: CommentedMap, loader: "Loader") -> None:
        # Normalize fields which are prefixed or full URIs to vocabulary terms
for d in list(document.keys()):
if isinstance(d, str):
d2 = loader.expand_url(d, "", scoped_id=False, vocab_term=True)
if d != d2:
document[d2] = document[d]
document.lc.add_kv_line_col(d2, document.lc.data[d])
del document[d]
def _resolve_uris(
self,
document: Dict[str, Union[str, MutableSequence[Union[str, CommentedMap]]]],
loader: "Loader",
base_url: str,
) -> None:
# Resolve remaining URLs based on document base
for d in loader.url_fields:
if d in document:
datum = document[d]
if isinstance(datum, str):
document[d] = loader.expand_url(
datum,
base_url,
scoped_id=False,
vocab_term=(d in loader.vocab_fields),
scoped_ref=loader.scoped_ref_fields.get(d),
)
elif isinstance(datum, MutableSequence):
for i, url in enumerate(datum):
if isinstance(url, str):
datum[i] = loader.expand_url(
url,
base_url,
scoped_id=False,
vocab_term=(d in loader.vocab_fields),
scoped_ref=loader.scoped_ref_fields.get(d),
)
def resolve_all(
self,
document: ResolveType,
base_url: str,
file_base: Optional[str] = None,
checklinks: bool = True,
strict_foreign_properties: bool = False,
) -> ResolvedRefType:
loader = self
metadata = CommentedMap()
if file_base is None:
file_base = base_url
if isinstance(document, CommentedMap):
# Handle $import and $include
if "$import" in document or "$include" in document:
return self.resolve_ref(
document,
base_url=file_base,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
if "$mixin" in document:
return self.resolve_ref(
document,
base_url=base_url,
checklinks=checklinks,
strict_foreign_properties=strict_foreign_properties,
)
elif isinstance(document, CommentedSeq):
pass
elif isinstance(document, (list, dict)):
raise ValidationException(
f"Expected CommentedMap or CommentedSeq, got {type(document)}: {document!r}"
)
else:
return (document, metadata)
newctx: Optional["Loader"] = None
if isinstance(document, CommentedMap):
# Handle $base, $profile, $namespaces, $schemas and $graph
if "$base" in document:
base_url = document["$base"]
if "$profile" in document:
if newctx is None:
newctx = SubLoader(self)
newctx.add_namespaces(document.get("$namespaces", CommentedMap()))
newctx.add_schemas(document.get("$schemas", []), document["$profile"])
if "$namespaces" in document:
if newctx is None:
newctx = SubLoader(self)
namespaces = document["$namespaces"]
if isinstance(namespaces, dict):
newctx.add_namespaces(document["$namespaces"])
else:
raise ValidationException(
"$namespaces must be a dictionary",
SourceLine(document, "$namespaces"),
)
if "$schemas" in document:
if newctx is None:
newctx = SubLoader(self)
schemas = document["$schemas"]
if isinstance(schemas, (list, str)):
newctx.add_schemas(schemas, file_base)
else:
raise ValidationException(
"$schemas must be a string or a list of string",
SourceLine(document, "$schemas"),
)
if newctx is not None:
loader = newctx
for identifier in loader.identity_links:
if identifier in document:
if isinstance(document[identifier], str):
document[identifier] = loader.expand_url(
document[identifier], base_url, scoped_id=True
)
loader.idx[document[identifier]] = document
metadata = document
if "$graph" in document:
document = document["$graph"]
if isinstance(document, CommentedMap):
self._normalize_fields(document, loader)
self._resolve_idmap(document, loader)
self._resolve_dsl(document, loader)
base_url = self._resolve_identifier(document, loader, base_url)
self._resolve_identity(document, loader, base_url)
self._resolve_uris(document, loader, base_url)
try:
for key, val in document.items():
subscope: str = ""
if key in loader.subscopes:
subscope = "/" + loader.subscopes[key]
document[key], _ = loader.resolve_all(
val, base_url + subscope, file_base=file_base, checklinks=False
)
except ValidationException as v:
_logger.warning("loader is %s", id(loader), exc_info=True)
raise ValidationException(
f"({id(loader)}) ({file_base}) Validation error in field {key}:",
None,
[v],
) from v
elif isinstance(document, CommentedSeq):
i = 0
try:
while i < len(document):
val = document[i]
if isinstance(val, CommentedMap) and ("$import" in val or "$mixin" in val):
l, import_metadata = loader.resolve_ref(
val, base_url=file_base, checklinks=False
)
metadata.setdefault("$import_metadata", {})
for identifier in loader.identifiers:
if identifier in import_metadata:
metadata["$import_metadata"][
import_metadata[identifier]
] = import_metadata
if isinstance(l, CommentedSeq):
lc = document.lc.data[i]
del document[i]
llen = len(l)
for j in range(len(document) + llen, i + llen, -1):
document.lc.data[j - 1] = document.lc.data[j - llen]
for item in l:
document.insert(i, item)
document.lc.data[i] = lc
i += 1
else:
document[i] = l
i += 1
else:
document[i], _ = loader.resolve_all(
val, base_url, file_base=file_base, checklinks=False
)
i += 1
except ValidationException as v:
_logger.warning("failed", exc_info=True)
raise ValidationException(
f"({id(loader)}) ({file_base}) Validation error in position {i}:",
None,
[v],
) from v
if checklinks:
all_doc_ids: Dict[str, str] = {}
loader.validate_links(
document,
"",
all_doc_ids,
strict_foreign_properties=strict_foreign_properties,
)
return document, metadata
def fetch(
self,
url: str,
inject_ids: bool = True,
content_types: Optional[List[str]] = None,
) -> IdxResultType:
if url in self.idx:
return self.idx[url]
try:
text = self.fetch_text(url, content_types=content_types)
textIO = StringIO(text)
textIO.name = str(url)
yaml = yaml_no_ts()
attachments = yaml.load_all(textIO)
result = cast(Union[CommentedSeq, CommentedMap], next(attachments))
if self.allow_attachments is not None and self.allow_attachments(result):
i = 1
for a in attachments:
self.idx[f"{url}#attachment-{i}"] = a
i += 1
add_lc_filename(result, url)
except MarkedYAMLError as e:
raise to_validation_exception(e) from e
if isinstance(result, CommentedMap) and inject_ids and bool(self.identifiers):
missing_identifier = True
for identifier in self.identifiers:
if identifier in result:
missing_identifier = False
self.idx[self.expand_url(result[identifier], url, scoped_id=True)] = result
if missing_identifier:
result[self.identifiers[0]] = url
self.idx[url] = result
return result
def validate_scoped(self, field: str, link: str, docid: str) -> str:
split = urllib.parse.urlsplit(docid)
sp = split.fragment.split("/")
n = self.scoped_ref_fields[field]
while n > 0 and len(sp) > 0:
sp.pop()
n -= 1
tried = []
while True:
sp.append(link)
url = urllib.parse.urlunsplit(
(split.scheme, split.netloc, split.path, split.query, "/".join(sp))
)
tried.append(url)
if url in self.idx:
return url
sp.pop()
if len(sp) == 0:
break
sp.pop()
if onWindows() and link.startswith("file:"):
link = link.lower()
raise ValidationException(
f"Field {field!r} references unknown identifier {link!r}, tried {' '.join(tried)}"
)
def validate_link(
self,
field: str,
link: Union[str, CommentedSeq, CommentedMap],
# link also can be None, but that results in
# mypyc "error: Local variable "link" has inferred type None; add an annotation"
docid: str,
all_doc_ids: Dict[str, str],
) -> Union[str, CommentedSeq, CommentedMap]:
if field in self.nolinkcheck:
return link
if isinstance(link, str):
if field in self.vocab_fields:
if link not in self.vocab and link not in self.idx and link not in self.rvocab:
if field in self.scoped_ref_fields:
return self.validate_scoped(field, link, docid)
elif not self.check_exists(link):
raise ValidationException(
f"Field {field!r} contains undefined reference to {link!r}"
)
elif link not in self.idx and link not in self.rvocab:
if field in self.scoped_ref_fields:
return self.validate_scoped(field, link, docid)
elif not self.check_exists(link):
raise ValidationException(
f"Field {field!r} contains undefined reference to {link!r}"
)
elif isinstance(link, CommentedSeq):
errors = []
for n, i in enumerate(link):
try:
link[n] = self.validate_link(field, i, docid, all_doc_ids)
except ValidationException as v:
errors.append(v)
if bool(errors):
raise ValidationException("", None, errors)
elif isinstance(link, CommentedMap):
self.validate_links(link, docid, all_doc_ids)
elif link is None: # type: ignore[unreachable]
return None
else:
raise ValidationException(
f"{field!r} field is {type(link).__name__}, expected string, list, or a dict."
)
return link
def getid(self, d: Any) -> Optional[str]:
if isinstance(d, MutableMapping):
for i in self.identifiers:
if i in d:
idd = d[i]
if isinstance(idd, str):
return idd
return None
def validate_links(
self,
document: ResolveType,
base_url: str,
all_doc_ids: Dict[str, str],
strict_foreign_properties: bool = False,
) -> None:
docid = self.getid(document) or base_url
errors: List[SchemaSaladException] = []
iterator: Any = None
if isinstance(document, MutableSequence):
iterator = enumerate(document)
elif isinstance(document, MutableMapping):
for d in self.url_fields:
try:
if d in document and d not in self.identity_links:
document[d] = self.validate_link(d, document[d], docid, all_doc_ids)
except SchemaSaladException as v:
v = v.with_sourceline(SourceLine(document, d, str))
if d == "$schemas" or (
d in self.foreign_properties and not strict_foreign_properties
):
_logger.warning(v.as_warning())
else:
errors.append(v)
            # TODO: The validator should only consider the local scope, in which
            # duplicated keys are prohibited.
            # See also https://github.com/common-workflow-language/common-workflow-language/issues/734 # noqa: B950
            # In the future, this should raise a
            # ValidationException instead of calling _logger.warning.
try:
for identifier in self.identifiers: # validate that each id is defined uniquely
if identifier in document:
sl = SourceLine(document, identifier, str)
if (
document[identifier] in all_doc_ids
and sl.makeLead() != all_doc_ids[document[identifier]]
):
_logger.warning(
"%s object %s %r previously defined",
all_doc_ids[document[identifier]],
identifier,
relname(document[identifier]),
)
else:
all_doc_ids[document[identifier]] = sl.makeLead()
break
except ValidationException as v:
errors.append(v.with_sourceline(sl))
iterator = list(document.items())
else:
return
for key, val in iterator:
sl = SourceLine(document, key, str)
try:
self.validate_links(
val,
docid,
all_doc_ids,
strict_foreign_properties=strict_foreign_properties,
)
except ValidationException as v:
if key in self.nolinkcheck or (isinstance(key, str) and ":" in key):
_logger.warning(v.as_warning())
else:
docid2 = self.getid(val)
if docid2 is not None:
errors.append(
ValidationException(f"checking object {relname(docid2)!r}", sl, [v])
)
else:
if isinstance(key, str):
errors.append(ValidationException(f"checking field {key!r}", sl, [v]))
else:
errors.append(ValidationException("checking item", sl, [v]))
if bool(errors):
if len(errors) > 1:
raise ValidationException("", None, errors)
raise errors[0]
return
def _copy_dict_without_key(
from_dict: Union[CommentedMap, ContextType], filtered_key: str
) -> CommentedMap:
new_dict = CommentedMap(from_dict.items())
if filtered_key in new_dict:
del new_dict[filtered_key]
if isinstance(from_dict, CommentedMap):
new_dict.lc.data = copy.copy(from_dict.lc.data)
new_dict.lc.filename = from_dict.lc.filename
return new_dict
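# Illustrative sketch (added for clarity, not part of the original module): a
# minimal use of Loader with a hand-written context.  The context and the
# pre-seeded index entry are hypothetical; real callers use the context
# generated from a Schema Salad schema and let resolve_ref()/fetch() populate
# the index.
def _loader_example() -> None:
    loader = Loader({"id": "@id", "label": "http://example.org/vocab#label"})
    loader.idx["http://example.org/doc#thing"] = CommentedMap(
        (("id", "http://example.org/doc#thing"), ("label", "demo"))
    )
    resolved, _metadata = loader.resolve_ref(
        "http://example.org/doc#thing", base_url="http://example.org/doc"
    )
    assert resolved["label"] == "demo"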
| 0.692538 | 0.106041 |
import os
import shutil
import string
from io import StringIO
from pathlib import Path
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Set,
Union,
)
from importlib_resources import files
from . import _logger, schema
from .codegen_base import CodeGenBase, TypeDef
from .exceptions import SchemaException
from .java_codegen import _ensure_directory_and_write, _safe_makedirs
from .schema import shortname
def doc_to_doc_string(doc: Optional[str], indent_level: int = 0) -> str:
"""Generate a documentation string from a schema salad doc field."""
lead = " " + " " * indent_level + "* "
if doc:
doc_str = "\n".join([f"{lead}{line}" for line in doc.split("\n")])
else:
doc_str = ""
return doc_str
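# Illustrative sketch (added for clarity, not part of the original module):
# doc_to_doc_string turns a schema "doc" field into the body lines of a
# JSDoc-style comment block.
def _doc_to_doc_string_example() -> None:
    assert doc_to_doc_string("First line\nSecond line") == (
        " * First line\n * Second line"
    )
    assert doc_to_doc_string(None) == ""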
_string_type_def = TypeDef(
name="strtype",
init="new _PrimitiveLoader(TypeGuards.String)",
instance_type="string",
)
_int_type_def = TypeDef(
name="inttype", init="new _PrimitiveLoader(TypeGuards.Int)", instance_type="number"
)
_float_type_def = TypeDef(
name="floattype",
init="new _PrimitiveLoader(TypeGuards.Float)",
instance_type="number",
)
_bool_type_def = TypeDef(
name="booltype",
init="new _PrimitiveLoader(TypeGuards.Bool)",
instance_type="boolean",
)
_null_type_def = TypeDef(
name="undefinedtype",
init="new _PrimitiveLoader(TypeGuards.Undefined)",
instance_type="undefined",
)
_any_type_def = TypeDef(name="anyType", init="new _AnyLoader()", instance_type="any")
prims = {
"http://www.w3.org/2001/XMLSchema#string": _string_type_def,
"http://www.w3.org/2001/XMLSchema#int": _int_type_def,
"http://www.w3.org/2001/XMLSchema#long": _int_type_def,
"http://www.w3.org/2001/XMLSchema#float": _float_type_def,
"http://www.w3.org/2001/XMLSchema#double": _float_type_def,
"http://www.w3.org/2001/XMLSchema#boolean": _bool_type_def,
"https://w3id.org/cwl/salad#null": _null_type_def,
"https://w3id.org/cwl/salad#Any": _any_type_def,
"string": _string_type_def,
"int": _int_type_def,
"long": _int_type_def,
"float": _float_type_def,
"double": _float_type_def,
"boolean": _bool_type_def,
"null": _null_type_def,
"Any": _any_type_def,
}
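# Illustrative sketch (added for clarity, not part of the original module): the
# prims table maps both the short Salad/Avro names and the full XSD URIs onto
# the same TypeDef instances, so either spelling yields an identical loader.
def _prims_example() -> None:
    assert prims["string"] is prims["http://www.w3.org/2001/XMLSchema#string"]
    assert prims["int"].instance_type == "number"
    assert prims["null"].instance_type == "undefined"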
class TypeScriptCodeGen(CodeGenBase):
"""Generation of TypeScript code for a given Schema Salad definition."""
def __init__(
self, base: str, examples: Optional[str], target: Optional[str], package: str
) -> None:
"""Initialize the TypeScript codegen."""
super().__init__()
self.target_dir = Path(target or ".").resolve()
self.main_src_dir = self.target_dir / "src"
self.test_resources_dir = self.target_dir / "src" / "test" / "data"
self.package = package
self.base_uri = base
self.record_types: Set[str] = set()
self.modules: Set[str] = set()
self.id_field = ""
self.examples = examples
def prologue(self) -> None:
"""Trigger to generate the prolouge code."""
for src_dir in [self.main_src_dir]:
_safe_makedirs(src_dir)
for primitive in prims.values():
self.declare_type(primitive)
@staticmethod
def safe_name(name: str) -> str:
"""Generate a safe version of the given name."""
avn = schema.avro_field_name(name)
if avn.startswith("anon."):
avn = avn[5:]
if avn in (
"class",
"in",
"extends",
"abstract",
"default",
"package",
"arguments",
):
# reserved words
avn = avn + "_"
return avn
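    # Illustrative example (added comment, not in the original): names that
    # clash with the reserved words above get a trailing underscore (for
    # example, "default" would become "default_"), and an "anon." prefix
    # produced by schema.avro_field_name is stripped.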
def begin_class(
self, # pylint: disable=too-many-arguments
classname: str,
extends: MutableSequence[str],
doc: str,
abstract: bool,
field_names: MutableSequence[str],
idfield: str,
optional_fields: Set[str],
) -> None:
"""Produce the header for the given class."""
self.current_interface = self.safe_name(classname) + "Properties"
cls = self.safe_name(classname)
self.current_class = cls
self.current_class_is_abstract = abstract
interface_module_name = self.current_interface
self.current_interface_target_file = self.main_src_dir / f"{interface_module_name}.ts"
class_module_name = self.current_class
self.current_class_target_file = self.main_src_dir / f"{class_module_name}.ts"
self.current_constructor_signature = StringIO()
self.current_constructor_body = StringIO()
self.current_loader = StringIO()
self.current_serializer = StringIO()
self.current_fieldtypes: Dict[str, TypeDef] = {}
self.idfield = idfield
doc_string = f"""
/**
* Auto-generated interface for {classname}
"""
if doc:
doc_string += " *\n"
doc_string += doc_to_doc_string(doc)
doc_string += "\n"
doc_string += " */"
self.record_types.add(f"{self.current_interface}")
self.modules.add(interface_module_name)
with open(self.current_interface_target_file, "w") as f:
_logger.info("Writing file: %s", self.current_interface_target_file)
if extends:
ext = "extends Internal." + ", Internal.".join(
self.safe_name(e) + "Properties" for e in extends
)
else:
ext = ""
f.write(
"""
import * as Internal from './util/Internal'
{docstring}
export interface {cls} {ext} {{
""".format(
docstring=doc_string,
cls=f"{self.current_interface}",
ext=ext,
)
)
if self.current_class_is_abstract:
return
self.record_types.add(cls)
self.modules.add(class_module_name)
with open(self.current_interface_target_file, "a") as f:
f.write(
"""
extensionFields?: Internal.Dictionary<any>
"""
)
doc_string = f"""
/**
* Auto-generated class implementation for {classname}
"""
if doc:
doc_string += " *\n"
doc_string += doc_to_doc_string(doc)
doc_string += "\n"
doc_string += " */"
with open(self.current_class_target_file, "w") as f:
_logger.info("Writing file: %s", self.current_class_target_file)
f.write(
"""
import {{
Dictionary,
expandUrl,
loadField,
LoaderInstances,
LoadingOptions,
Saveable,
ValidationException,
prefixUrl,
save,
saveRelativeUri
}} from './util/Internal'
import {{ v4 as uuidv4 }} from 'uuid'
import * as Internal from './util/Internal'
{docstring}
export class {cls} extends Saveable implements Internal.{current_interface} {{
extensionFields?: Internal.Dictionary<any>
""".format(
cls=cls,
current_interface=self.current_interface,
docstring=doc_string,
)
)
self.current_constructor_signature.write(
"\n" + "\n" + " constructor ({loadingOptions, extensionFields"
)
self.current_constructor_body.write(
"""
super(loadingOptions)
this.extensionFields = extensionFields ?? {}
"""
)
self.current_loader.write(
"""
/**
* Used to construct instances of {{@link {cls} }}.
*
* @param __doc Document fragment to load this record object from.
* @param baseuri Base URI to generate child document IDs against.
* @param loadingOptions Context for loading URIs and populating objects.
* @param docRoot ID at this position in the document (if available)
* @returns An instance of {{@link {cls} }}
* @throws {{@link ValidationException}} If the document fragment is not a
* {{@link Dictionary}} or validation of fields fails.
*/
static override async fromDoc (__doc: any, baseuri: string, loadingOptions: LoadingOptions,
docRoot?: string): Promise<Saveable> {{
const _doc = Object.assign({{}}, __doc)
const __errors: ValidationException[] = []
""".format(
cls=cls
)
)
self.current_serializer.write(
"""
save (top: boolean = false, baseUrl: string = '', relativeUris: boolean = true)
: Dictionary<any> {
const r: Dictionary<any> = {}
for (const ef in this.extensionFields) {
r[prefixUrl(ef, this.loadingOptions.vocab)] = this.extensionFields.ef
}
"""
)
def end_class(self, classname: str, field_names: List[str]) -> None:
"""Signal that we are done with this class."""
with open(self.current_interface_target_file, "a") as f:
f.write("}")
if self.current_class_is_abstract:
return
self.current_constructor_signature.write(
f"}} : {{loadingOptions?: LoadingOptions}} & Internal.{self.current_interface}) {{"
)
self.current_constructor_body.write(" }\n")
self.current_loader.write(
"""
const extensionFields: Dictionary<any> = {{}}
for (const [key, value] of Object.entries(_doc)) {{
if (!{classname}.attr.has(key)) {{
if ((key as string).includes(':')) {{
const ex = expandUrl(key, '', loadingOptions, false, false)
extensionFields[ex] = value
}} else {{
__errors.push(
new ValidationException(`invalid field ${{key as string}}, \\
expected one of: {fields}`)
)
break
}}
}}
}}
if (__errors.length > 0) {{
throw new ValidationException("Trying '{classname}'", __errors)
}}
const schema = new {classname}({{
extensionFields: extensionFields,
loadingOptions: loadingOptions,
""".format(
classname=self.current_class,
fields=",".join(["\\`" + f + "\\`" for f in field_names]),
)
)
self.current_loader.write(
",\n ".join(self.safe_name(f) + ": " + self.safe_name(f) for f in field_names)
+ "\n })"
)
self.current_loader.write(
"""
return schema
}
"""
)
self.current_serializer.write(
"""
if (top) {
if (this.loadingOptions.namespaces != null) {
r.$namespaces = this.loadingOptions.namespaces
}
if (this.loadingOptions.schemas != null) {
r.$schemas = this.loadingOptions.schemas
}
}
return r
}
"""
)
with open(
self.current_class_target_file,
"a",
) as f:
f.write(self.current_constructor_signature.getvalue())
f.write(self.current_constructor_body.getvalue())
f.write(self.current_loader.getvalue())
f.write(self.current_serializer.getvalue())
f.write(
"\n"
+ " static attr: Set<string> = new Set(["
+ ",".join(["'" + shortname(f) + "'" for f in field_names])
+ "])"
)
f.write(
"""
}
"""
)
def type_loader(self, type_declaration: Union[List[Any], Dict[str, Any], str]) -> TypeDef:
"""Parse the given type declaration and declare its components."""
if isinstance(type_declaration, MutableSequence):
sub_types = [self.type_loader(i) for i in type_declaration]
sub_names: List[str] = list(dict.fromkeys([i.name for i in sub_types]))
sub_instance_types: List[str] = list(
dict.fromkeys([i.instance_type for i in sub_types if i.instance_type is not None])
)
return self.declare_type(
TypeDef(
"unionOf{}".format("Or".join(sub_names)),
"new _UnionLoader([{}])".format(", ".join(sub_names)),
instance_type=" | ".join(sub_instance_types),
)
)
if isinstance(type_declaration, MutableMapping):
if type_declaration["type"] in (
"array",
"https://w3id.org/cwl/salad#array",
):
i = self.type_loader(type_declaration["items"])
return self.declare_type(
TypeDef(
f"arrayOf{i.name}",
f"new _ArrayLoader([{i.name}])",
instance_type=f"Array<{i.instance_type}>",
)
)
if type_declaration["type"] in ("enum", "https://w3id.org/cwl/salad#enum"):
return self.type_loader_enum(type_declaration)
if type_declaration["type"] in (
"record",
"https://w3id.org/cwl/salad#record",
):
return self.declare_type(
TypeDef(
self.safe_name(type_declaration["name"]) + "Loader",
"new _RecordLoader({}.fromDoc)".format(
self.safe_name(type_declaration["name"]),
),
instance_type="Internal." + self.safe_name(type_declaration["name"]),
abstract=type_declaration.get("abstract", False),
)
)
raise SchemaException("wft {}".format(type_declaration["type"]))
if type_declaration in prims:
return prims[type_declaration]
if type_declaration in ("Expression", "https://w3id.org/cwl/cwl#Expression"):
return self.declare_type(
TypeDef(
self.safe_name(type_declaration) + "Loader",
"new _ExpressionLoader()",
instance_type="string",
)
)
return self.collected_types[self.safe_name(type_declaration) + "Loader"]
def type_loader_enum(self, type_declaration: Dict[str, Any]) -> TypeDef:
for sym in type_declaration["symbols"]:
self.add_vocab(shortname(sym), sym)
enum_name = self.safe_name(type_declaration["name"])
enum_module_name = enum_name
enum_path = self.main_src_dir / f"{enum_module_name}.ts"
self.modules.add(enum_module_name)
self.record_types.add(enum_name)
with open(enum_path, "w") as f:
_logger.info("Writing file: %s", enum_path)
f.write(
"""
export enum {enum_name} {{
""".format(
enum_name=enum_name
)
)
for sym in type_declaration["symbols"]:
val = self.safe_name(sym)
const = self.safe_name(sym).replace("-", "_").replace(".", "_").upper()
f.write(f""" {const}='{val}',\n""") # noqa: B907
f.write(
"""}
"""
)
return self.declare_type(
TypeDef(
instance_type="Internal." + enum_name,
name=self.safe_name(type_declaration["name"]) + "Loader",
init=f"new _EnumLoader((Object.keys({enum_name}) as Array<keyof typeof "
f"{enum_name}>).map(key => {enum_name}[key]))",
)
)
def declare_field(
self,
name: str,
fieldtype: TypeDef,
doc: Optional[str],
optional: bool,
subscope: str,
) -> None:
"""Output the code to load the given field."""
safename = self.safe_name(name)
fieldname = shortname(name)
self.current_fieldtypes[safename] = fieldtype
if fieldtype.instance_type is not None and "undefined" in fieldtype.instance_type:
optionalstring = "?"
else:
optionalstring = ""
with open(self.current_interface_target_file, "a") as f:
if doc:
f.write(
"""
/**
{doc_str}
*/
""".format(
doc_str=doc_to_doc_string(doc, indent_level=1)
)
)
if fieldname == "class":
f.write(
" {safename}{optionalstring}: {type}\n".format(
safename=safename,
type=fieldtype.instance_type,
optionalstring="?",
)
)
else:
f.write(
" {safename}{optionalstring}: {type}\n".format(
safename=safename,
type=fieldtype.instance_type,
optionalstring=optionalstring,
)
)
if self.current_class_is_abstract:
return
with open(self.current_class_target_file, "a") as f:
if doc:
f.write(
"""
/**
{doc_str}
*/
""".format(
doc_str=doc_to_doc_string(doc, indent_level=1)
)
)
f.write(
" {safename}{optionalstring}: {type}\n".format(
safename=safename,
type=fieldtype.instance_type,
optionalstring=optionalstring,
)
)
if fieldname == "class":
if fieldtype.instance_type == "string":
self.current_constructor_signature.write(
f", {safename} = '{self.current_class}'" # noqa: B907
)
else:
self.current_constructor_signature.write(
", {safename} = {type}.{val}".format(
safename=safename,
type=fieldtype.instance_type,
val=self.current_class.replace("-", "_").replace(".", "_").upper(),
)
)
else:
self.current_constructor_signature.write(
", {safename}".format(
safename=safename,
)
)
self.current_constructor_body.write(
" this.{safeName} = {safeName}\n".format(safeName=safename)
)
self.current_loader.write(
"""
let {safename}""".format(
safename=safename
)
)
if optional:
self.current_loader.write(
"""
if ('{fieldname}' in _doc) {{""".format(
fieldname=fieldname
)
)
spc = " "
else:
spc = ""
self.current_loader.write(
"""
{spc} try {{
{spc} {safename} = await loadField(_doc.{fieldname}, LoaderInstances.{fieldtype},
{spc} baseuri, loadingOptions)
{spc} }} catch (e) {{
{spc} if (e instanceof ValidationException) {{
{spc} __errors.push(
{spc} new ValidationException('the `{fieldname}` field is not valid because: ', [e])
{spc} )
{spc} }} else {{
{spc} throw e
{spc} }}
{spc} }}
""".format(
safename=safename,
fieldname=fieldname,
fieldtype=fieldtype.name,
spc=spc,
)
)
if optional:
self.current_loader.write(" }\n")
if name == self.idfield or not self.idfield:
baseurl = "baseUrl"
else:
baseurl = f"this.{self.safe_name(self.idfield)}"
if fieldtype.is_uri:
self.current_serializer.write(
"""
if (this.{safename} != null) {{
const u = saveRelativeUri(this.{safename}, {base_url}, {scoped_id},
relativeUris, {ref_scope})
if (u != null) {{
r.{fieldname} = u
}}
}}
""".format(
safename=self.safe_name(name),
fieldname=shortname(name).strip(),
base_url=baseurl,
scoped_id=self.to_typescript(fieldtype.scoped_id),
ref_scope=self.to_typescript(fieldtype.ref_scope),
)
)
else:
self.current_serializer.write(
"""
if (this.{safename} != null) {{
r.{fieldname} = save(this.{safename}, false, {base_url}, relativeUris)
}}
""".format(
safename=self.safe_name(name),
fieldname=shortname(name).strip(),
base_url=baseurl,
)
)
def declare_id_field(
self,
name: str,
fieldtype: TypeDef,
doc: str,
optional: bool,
) -> None:
"""Output the code to handle the given ID field."""
self.declare_field(name, fieldtype, doc, True, "")
if optional:
opt = f"""{self.safe_name(name)} = "_" + uuidv4()"""
else:
opt = """throw new ValidationException("Missing {fieldname}")""".format(
fieldname=shortname(name)
)
self.current_loader.write(
"""
const original{safename}IsUndefined = ({safename} === undefined)
if (original{safename}IsUndefined ) {{
if (docRoot != null) {{
{safename} = docRoot
}} else {{
{opt}
}}
}} else {{
baseuri = {safename} as string
}}
""".format(
safename=self.safe_name(name), opt=opt
)
)
def to_typescript(self, val: Any) -> Any:
"""Convert a Python keyword to a TypeScript keyword."""
if val is True:
return "true"
elif val is None:
return "undefined"
elif val is False:
return "false"
return val
def uri_loader(
self,
inner: TypeDef,
scoped_id: bool,
vocab_term: bool,
ref_scope: Optional[int],
) -> TypeDef:
"""Construct the TypeDef for the given URI loader."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"uri{inner.name}{scoped_id}{vocab_term}{ref_scope}",
"new _URILoader({}, {}, {}, {})".format(
inner.name,
self.to_typescript(scoped_id),
self.to_typescript(vocab_term),
self.to_typescript(ref_scope),
),
is_uri=True,
scoped_id=scoped_id,
ref_scope=ref_scope,
instance_type=instance_type,
)
)
def idmap_loader(
self, field: str, inner: TypeDef, map_subject: str, map_predicate: Optional[str]
) -> TypeDef:
"""Construct the TypeDef for the given mapped ID loader."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"idmap{self.safe_name(field)}{inner.name}",
f"new _IdMapLoader({inner.name}, '{map_subject}', '{map_predicate}')", # noqa: B907
instance_type=instance_type,
)
)
def typedsl_loader(self, inner: TypeDef, ref_scope: Optional[int]) -> TypeDef:
"""Construct the TypeDef for the given DSL loader."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"typedsl{self.safe_name(inner.name)}{ref_scope}",
f"new _TypeDSLLoader({self.safe_name(inner.name)}, {ref_scope})",
instance_type=instance_type,
)
)
def epilogue(self, root_loader: TypeDef) -> None:
"""Trigger to generate the epilouge code."""
pd = "This project contains TypeScript objects and utilities "
pd = pd + ' auto-generated by <a href=\\"https://github.com/'
pd = pd + 'common-workflow-language/schema_salad\\">Schema Salad</a>'
pd = pd + " for parsing documents corresponding to the "
pd = pd + str(self.base_uri) + " schema."
sorted_record_types = sorted(self.record_types)
generated_class_imports = ",\n ".join(sorted_record_types)
template_vars: MutableMapping[str, str] = dict(
project_name=self.package,
version="0.0.1-SNAPSHOT",
project_description=pd,
license_name="Apache License, Version 2.0",
generated_class_imports=generated_class_imports,
)
def template_from_resource(resource: Path) -> string.Template:
template_str = resource.read_text("utf-8")
template = string.Template(template_str)
return template
def expand_resource_template_to(resource: str, path: Path) -> None:
template = template_from_resource(
files("schema_salad").joinpath(f"typescript/{resource}")
)
src = template.safe_substitute(template_vars)
_ensure_directory_and_write(path, src)
expand_resource_template_to("package.json", self.target_dir / "package.json")
expand_resource_template_to(".gitignore", self.target_dir / ".gitignore")
expand_resource_template_to("LICENSE", self.target_dir / "LICENSE")
expand_resource_template_to("tsconfig.json", self.target_dir / "tsconfig.json")
expand_resource_template_to("index.ts", self.main_src_dir / "index.ts")
vocab = ",\n ".join(
f"""'{k}': '{self.vocab[k]}'""" for k in sorted(self.vocab.keys()) # noqa: B907
)
rvocab = ",\n ".join(
f"""'{self.vocab[k]}': '{k}'""" for k in sorted(self.vocab.keys()) # noqa: B907
)
loader_instances = ""
for _, collected_type in self.collected_types.items():
if not collected_type.abstract:
loader_instances += "export const {} = {};\n".format(
collected_type.name, collected_type.init
)
sorted_modules = sorted(self.modules)
internal_module_exports = "\n".join(f"export * from '../{f}'" for f in sorted_modules)
example_tests = ""
if self.examples:
_safe_makedirs(self.test_resources_dir)
utils_resources = self.test_resources_dir / "examples"
if os.path.exists(utils_resources):
shutil.rmtree(utils_resources)
shutil.copytree(self.examples, utils_resources)
for example_name in os.listdir(self.examples):
if example_name.startswith("valid"):
basename = os.path.basename(example_name).rsplit(".", 1)[0]
example_tests += """
it('{basename}', async () => {{
await loadDocument(__dirname + '/data/examples/{example_name}')
}})
it('{basename} by string', async () => {{
let doc = fs.readFileSync(__dirname + '/data/examples/{example_name}').toString()
await loadDocumentByString(doc, url.pathToFileURL(__dirname +
'/data/examples/').toString())
}})""".format(
basename=basename.replace("-", "_").replace(".", "_"),
example_name=example_name,
)
template_args: MutableMapping[str, str] = dict(
internal_module_exports=internal_module_exports,
loader_instances=loader_instances,
generated_class_imports=generated_class_imports,
vocab=vocab,
rvocab=rvocab,
root_loader=root_loader.name,
root_loader_type=root_loader.instance_type or "any",
tests=example_tests,
)
util_src_dirs = {
"util": self.main_src_dir / "util",
"test": self.main_src_dir / "test",
}
def copy_utils_recursive(util_src: str, util_target: Path) -> None:
for util in files("schema_salad").joinpath(f"typescript/{util_src}").iterdir():
if util.is_dir():
copy_utils_recursive(os.path.join(util_src, util.name), util_target / util.name)
continue
src_path = util_target / util.name
src_template = template_from_resource(util)
src = src_template.safe_substitute(template_args)
_ensure_directory_and_write(src_path, src)
for util_src, util_target in util_src_dirs.items():
copy_utils_recursive(util_src, util_target)
def secondaryfilesdsl_loader(self, inner: TypeDef) -> TypeDef:
"""Construct the TypeDef for secondary files."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"secondaryfilesdsl{inner.name}",
f"new _SecondaryDSLLoader({inner.name})",
instance_type=instance_type,
)
)
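# Illustrative sketch (added for clarity, not part of the original module):
# constructing the generator directly.  It is normally driven by schema-salad's
# codegen entry point; the target directory and package name are hypothetical.
def _typescript_codegen_example() -> None:
    gen = TypeScriptCodeGen(
        base="https://w3id.org/cwl/salad#",
        examples=None,
        target="/tmp/ts-model",
        package="example-package",
    )
    gen.prologue()  # creates <target>/src and registers the primitive loaders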
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript_codegen.py
|
typescript_codegen.py
|
import os
import shutil
import string
from io import StringIO
from pathlib import Path
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Set,
Union,
)
from importlib_resources import files
from . import _logger, schema
from .codegen_base import CodeGenBase, TypeDef
from .exceptions import SchemaException
from .java_codegen import _ensure_directory_and_write, _safe_makedirs
from .schema import shortname
def doc_to_doc_string(doc: Optional[str], indent_level: int = 0) -> str:
"""Generate a documentation string from a schema salad doc field."""
lead = " " + " " * indent_level + "* "
if doc:
doc_str = "\n".join([f"{lead}{line}" for line in doc.split("\n")])
else:
doc_str = ""
return doc_str
_string_type_def = TypeDef(
name="strtype",
init="new _PrimitiveLoader(TypeGuards.String)",
instance_type="string",
)
_int_type_def = TypeDef(
name="inttype", init="new _PrimitiveLoader(TypeGuards.Int)", instance_type="number"
)
_float_type_def = TypeDef(
name="floattype",
init="new _PrimitiveLoader(TypeGuards.Float)",
instance_type="number",
)
_bool_type_def = TypeDef(
name="booltype",
init="new _PrimitiveLoader(TypeGuards.Bool)",
instance_type="boolean",
)
_null_type_def = TypeDef(
name="undefinedtype",
init="new _PrimitiveLoader(TypeGuards.Undefined)",
instance_type="undefined",
)
_any_type_def = TypeDef(name="anyType", init="new _AnyLoader()", instance_type="any")
prims = {
"http://www.w3.org/2001/XMLSchema#string": _string_type_def,
"http://www.w3.org/2001/XMLSchema#int": _int_type_def,
"http://www.w3.org/2001/XMLSchema#long": _int_type_def,
"http://www.w3.org/2001/XMLSchema#float": _float_type_def,
"http://www.w3.org/2001/XMLSchema#double": _float_type_def,
"http://www.w3.org/2001/XMLSchema#boolean": _bool_type_def,
"https://w3id.org/cwl/salad#null": _null_type_def,
"https://w3id.org/cwl/salad#Any": _any_type_def,
"string": _string_type_def,
"int": _int_type_def,
"long": _int_type_def,
"float": _float_type_def,
"double": _float_type_def,
"boolean": _bool_type_def,
"null": _null_type_def,
"Any": _any_type_def,
}
class TypeScriptCodeGen(CodeGenBase):
"""Generation of TypeScript code for a given Schema Salad definition."""
def __init__(
self, base: str, examples: Optional[str], target: Optional[str], package: str
) -> None:
"""Initialize the TypeScript codegen."""
super().__init__()
self.target_dir = Path(target or ".").resolve()
self.main_src_dir = self.target_dir / "src"
self.test_resources_dir = self.target_dir / "src" / "test" / "data"
self.package = package
self.base_uri = base
self.record_types: Set[str] = set()
self.modules: Set[str] = set()
self.id_field = ""
self.examples = examples
def prologue(self) -> None:
"""Trigger to generate the prolouge code."""
for src_dir in [self.main_src_dir]:
_safe_makedirs(src_dir)
for primitive in prims.values():
self.declare_type(primitive)
@staticmethod
def safe_name(name: str) -> str:
"""Generate a safe version of the given name."""
avn = schema.avro_field_name(name)
if avn.startswith("anon."):
avn = avn[5:]
if avn in (
"class",
"in",
"extends",
"abstract",
"default",
"package",
"arguments",
):
# reserved words
avn = avn + "_"
return avn
def begin_class(
self, # pylint: disable=too-many-arguments
classname: str,
extends: MutableSequence[str],
doc: str,
abstract: bool,
field_names: MutableSequence[str],
idfield: str,
optional_fields: Set[str],
) -> None:
"""Produce the header for the given class."""
self.current_interface = self.safe_name(classname) + "Properties"
cls = self.safe_name(classname)
self.current_class = cls
self.current_class_is_abstract = abstract
interface_module_name = self.current_interface
self.current_interface_target_file = self.main_src_dir / f"{interface_module_name}.ts"
class_module_name = self.current_class
self.current_class_target_file = self.main_src_dir / f"{class_module_name}.ts"
self.current_constructor_signature = StringIO()
self.current_constructor_body = StringIO()
self.current_loader = StringIO()
self.current_serializer = StringIO()
self.current_fieldtypes: Dict[str, TypeDef] = {}
self.idfield = idfield
doc_string = f"""
/**
* Auto-generated interface for {classname}
"""
if doc:
doc_string += " *\n"
doc_string += doc_to_doc_string(doc)
doc_string += "\n"
doc_string += " */"
self.record_types.add(f"{self.current_interface}")
self.modules.add(interface_module_name)
with open(self.current_interface_target_file, "w") as f:
_logger.info("Writing file: %s", self.current_interface_target_file)
if extends:
ext = "extends Internal." + ", Internal.".join(
self.safe_name(e) + "Properties" for e in extends
)
else:
ext = ""
f.write(
"""
import * as Internal from './util/Internal'
{docstring}
export interface {cls} {ext} {{
""".format(
docstring=doc_string,
cls=f"{self.current_interface}",
ext=ext,
)
)
if self.current_class_is_abstract:
return
self.record_types.add(cls)
self.modules.add(class_module_name)
with open(self.current_interface_target_file, "a") as f:
f.write(
"""
extensionFields?: Internal.Dictionary<any>
"""
)
doc_string = f"""
/**
* Auto-generated class implementation for {classname}
"""
if doc:
doc_string += " *\n"
doc_string += doc_to_doc_string(doc)
doc_string += "\n"
doc_string += " */"
with open(self.current_class_target_file, "w") as f:
_logger.info("Writing file: %s", self.current_class_target_file)
f.write(
"""
import {{
Dictionary,
expandUrl,
loadField,
LoaderInstances,
LoadingOptions,
Saveable,
ValidationException,
prefixUrl,
save,
saveRelativeUri
}} from './util/Internal'
import {{ v4 as uuidv4 }} from 'uuid'
import * as Internal from './util/Internal'
{docstring}
export class {cls} extends Saveable implements Internal.{current_interface} {{
extensionFields?: Internal.Dictionary<any>
""".format(
cls=cls,
current_interface=self.current_interface,
docstring=doc_string,
)
)
self.current_constructor_signature.write(
"\n" + "\n" + " constructor ({loadingOptions, extensionFields"
)
self.current_constructor_body.write(
"""
super(loadingOptions)
this.extensionFields = extensionFields ?? {}
"""
)
self.current_loader.write(
"""
/**
* Used to construct instances of {{@link {cls} }}.
*
* @param __doc Document fragment to load this record object from.
* @param baseuri Base URI to generate child document IDs against.
* @param loadingOptions Context for loading URIs and populating objects.
* @param docRoot ID at this position in the document (if available)
* @returns An instance of {{@link {cls} }}
* @throws {{@link ValidationException}} If the document fragment is not a
* {{@link Dictionary}} or validation of fields fails.
*/
static override async fromDoc (__doc: any, baseuri: string, loadingOptions: LoadingOptions,
docRoot?: string): Promise<Saveable> {{
const _doc = Object.assign({{}}, __doc)
const __errors: ValidationException[] = []
""".format(
cls=cls
)
)
self.current_serializer.write(
"""
save (top: boolean = false, baseUrl: string = '', relativeUris: boolean = true)
: Dictionary<any> {
const r: Dictionary<any> = {}
for (const ef in this.extensionFields) {
r[prefixUrl(ef, this.loadingOptions.vocab)] = this.extensionFields.ef
}
"""
)
def end_class(self, classname: str, field_names: List[str]) -> None:
"""Signal that we are done with this class."""
with open(self.current_interface_target_file, "a") as f:
f.write("}")
if self.current_class_is_abstract:
return
self.current_constructor_signature.write(
f"}} : {{loadingOptions?: LoadingOptions}} & Internal.{self.current_interface}) {{"
)
self.current_constructor_body.write(" }\n")
self.current_loader.write(
"""
const extensionFields: Dictionary<any> = {{}}
for (const [key, value] of Object.entries(_doc)) {{
if (!{classname}.attr.has(key)) {{
if ((key as string).includes(':')) {{
const ex = expandUrl(key, '', loadingOptions, false, false)
extensionFields[ex] = value
}} else {{
__errors.push(
new ValidationException(`invalid field ${{key as string}}, \\
expected one of: {fields}`)
)
break
}}
}}
}}
if (__errors.length > 0) {{
throw new ValidationException("Trying '{classname}'", __errors)
}}
const schema = new {classname}({{
extensionFields: extensionFields,
loadingOptions: loadingOptions,
""".format(
classname=self.current_class,
fields=",".join(["\\`" + f + "\\`" for f in field_names]),
)
)
self.current_loader.write(
",\n ".join(self.safe_name(f) + ": " + self.safe_name(f) for f in field_names)
+ "\n })"
)
self.current_loader.write(
"""
return schema
}
"""
)
self.current_serializer.write(
"""
if (top) {
if (this.loadingOptions.namespaces != null) {
r.$namespaces = this.loadingOptions.namespaces
}
if (this.loadingOptions.schemas != null) {
r.$schemas = this.loadingOptions.schemas
}
}
return r
}
"""
)
with open(
self.current_class_target_file,
"a",
) as f:
f.write(self.current_constructor_signature.getvalue())
f.write(self.current_constructor_body.getvalue())
f.write(self.current_loader.getvalue())
f.write(self.current_serializer.getvalue())
f.write(
"\n"
+ " static attr: Set<string> = new Set(["
+ ",".join(["'" + shortname(f) + "'" for f in field_names])
+ "])"
)
f.write(
"""
}
"""
)
def type_loader(self, type_declaration: Union[List[Any], Dict[str, Any], str]) -> TypeDef:
"""Parse the given type declaration and declare its components."""
if isinstance(type_declaration, MutableSequence):
sub_types = [self.type_loader(i) for i in type_declaration]
sub_names: List[str] = list(dict.fromkeys([i.name for i in sub_types]))
sub_instance_types: List[str] = list(
dict.fromkeys([i.instance_type for i in sub_types if i.instance_type is not None])
)
return self.declare_type(
TypeDef(
"unionOf{}".format("Or".join(sub_names)),
"new _UnionLoader([{}])".format(", ".join(sub_names)),
instance_type=" | ".join(sub_instance_types),
)
)
if isinstance(type_declaration, MutableMapping):
if type_declaration["type"] in (
"array",
"https://w3id.org/cwl/salad#array",
):
i = self.type_loader(type_declaration["items"])
return self.declare_type(
TypeDef(
f"arrayOf{i.name}",
f"new _ArrayLoader([{i.name}])",
instance_type=f"Array<{i.instance_type}>",
)
)
if type_declaration["type"] in ("enum", "https://w3id.org/cwl/salad#enum"):
return self.type_loader_enum(type_declaration)
if type_declaration["type"] in (
"record",
"https://w3id.org/cwl/salad#record",
):
return self.declare_type(
TypeDef(
self.safe_name(type_declaration["name"]) + "Loader",
"new _RecordLoader({}.fromDoc)".format(
self.safe_name(type_declaration["name"]),
),
instance_type="Internal." + self.safe_name(type_declaration["name"]),
abstract=type_declaration.get("abstract", False),
)
)
            raise SchemaException("Unrecognized type {}".format(type_declaration["type"]))
if type_declaration in prims:
return prims[type_declaration]
if type_declaration in ("Expression", "https://w3id.org/cwl/cwl#Expression"):
return self.declare_type(
TypeDef(
self.safe_name(type_declaration) + "Loader",
"new _ExpressionLoader()",
instance_type="string",
)
)
return self.collected_types[self.safe_name(type_declaration) + "Loader"]
def type_loader_enum(self, type_declaration: Dict[str, Any]) -> TypeDef:
for sym in type_declaration["symbols"]:
self.add_vocab(shortname(sym), sym)
enum_name = self.safe_name(type_declaration["name"])
enum_module_name = enum_name
enum_path = self.main_src_dir / f"{enum_module_name}.ts"
self.modules.add(enum_module_name)
self.record_types.add(enum_name)
with open(enum_path, "w") as f:
_logger.info("Writing file: %s", enum_path)
f.write(
"""
export enum {enum_name} {{
""".format(
enum_name=enum_name
)
)
for sym in type_declaration["symbols"]:
val = self.safe_name(sym)
const = self.safe_name(sym).replace("-", "_").replace(".", "_").upper()
f.write(f""" {const}='{val}',\n""") # noqa: B907
f.write(
"""}
"""
)
return self.declare_type(
TypeDef(
instance_type="Internal." + enum_name,
name=self.safe_name(type_declaration["name"]) + "Loader",
init=f"new _EnumLoader((Object.keys({enum_name}) as Array<keyof typeof "
f"{enum_name}>).map(key => {enum_name}[key]))",
)
)
def declare_field(
self,
name: str,
fieldtype: TypeDef,
doc: Optional[str],
optional: bool,
subscope: str,
) -> None:
"""Output the code to load the given field."""
safename = self.safe_name(name)
fieldname = shortname(name)
self.current_fieldtypes[safename] = fieldtype
if fieldtype.instance_type is not None and "undefined" in fieldtype.instance_type:
optionalstring = "?"
else:
optionalstring = ""
with open(self.current_interface_target_file, "a") as f:
if doc:
f.write(
"""
/**
{doc_str}
*/
""".format(
doc_str=doc_to_doc_string(doc, indent_level=1)
)
)
if fieldname == "class":
f.write(
" {safename}{optionalstring}: {type}\n".format(
safename=safename,
type=fieldtype.instance_type,
optionalstring="?",
)
)
else:
f.write(
" {safename}{optionalstring}: {type}\n".format(
safename=safename,
type=fieldtype.instance_type,
optionalstring=optionalstring,
)
)
if self.current_class_is_abstract:
return
with open(self.current_class_target_file, "a") as f:
if doc:
f.write(
"""
/**
{doc_str}
*/
""".format(
doc_str=doc_to_doc_string(doc, indent_level=1)
)
)
f.write(
" {safename}{optionalstring}: {type}\n".format(
safename=safename,
type=fieldtype.instance_type,
optionalstring=optionalstring,
)
)
if fieldname == "class":
if fieldtype.instance_type == "string":
self.current_constructor_signature.write(
f", {safename} = '{self.current_class}'" # noqa: B907
)
else:
self.current_constructor_signature.write(
", {safename} = {type}.{val}".format(
safename=safename,
type=fieldtype.instance_type,
val=self.current_class.replace("-", "_").replace(".", "_").upper(),
)
)
else:
self.current_constructor_signature.write(
", {safename}".format(
safename=safename,
)
)
self.current_constructor_body.write(
" this.{safeName} = {safeName}\n".format(safeName=safename)
)
self.current_loader.write(
"""
let {safename}""".format(
safename=safename
)
)
if optional:
self.current_loader.write(
"""
if ('{fieldname}' in _doc) {{""".format(
fieldname=fieldname
)
)
spc = " "
else:
spc = ""
self.current_loader.write(
"""
{spc} try {{
{spc} {safename} = await loadField(_doc.{fieldname}, LoaderInstances.{fieldtype},
{spc} baseuri, loadingOptions)
{spc} }} catch (e) {{
{spc} if (e instanceof ValidationException) {{
{spc} __errors.push(
{spc} new ValidationException('the `{fieldname}` field is not valid because: ', [e])
{spc} )
{spc} }} else {{
{spc} throw e
{spc} }}
{spc} }}
""".format(
safename=safename,
fieldname=fieldname,
fieldtype=fieldtype.name,
spc=spc,
)
)
if optional:
self.current_loader.write(" }\n")
if name == self.idfield or not self.idfield:
baseurl = "baseUrl"
else:
baseurl = f"this.{self.safe_name(self.idfield)}"
if fieldtype.is_uri:
self.current_serializer.write(
"""
if (this.{safename} != null) {{
const u = saveRelativeUri(this.{safename}, {base_url}, {scoped_id},
relativeUris, {ref_scope})
if (u != null) {{
r.{fieldname} = u
}}
}}
""".format(
safename=self.safe_name(name),
fieldname=shortname(name).strip(),
base_url=baseurl,
scoped_id=self.to_typescript(fieldtype.scoped_id),
ref_scope=self.to_typescript(fieldtype.ref_scope),
)
)
else:
self.current_serializer.write(
"""
if (this.{safename} != null) {{
r.{fieldname} = save(this.{safename}, false, {base_url}, relativeUris)
}}
""".format(
safename=self.safe_name(name),
fieldname=shortname(name).strip(),
base_url=baseurl,
)
)
def declare_id_field(
self,
name: str,
fieldtype: TypeDef,
doc: str,
optional: bool,
) -> None:
"""Output the code to handle the given ID field."""
self.declare_field(name, fieldtype, doc, True, "")
if optional:
opt = f"""{self.safe_name(name)} = "_" + uuidv4()"""
else:
opt = """throw new ValidationException("Missing {fieldname}")""".format(
fieldname=shortname(name)
)
self.current_loader.write(
"""
const original{safename}IsUndefined = ({safename} === undefined)
if (original{safename}IsUndefined ) {{
if (docRoot != null) {{
{safename} = docRoot
}} else {{
{opt}
}}
}} else {{
baseuri = {safename} as string
}}
""".format(
safename=self.safe_name(name), opt=opt
)
)
def to_typescript(self, val: Any) -> Any:
"""Convert a Python keyword to a TypeScript keyword."""
if val is True:
return "true"
elif val is None:
return "undefined"
elif val is False:
return "false"
return val
def uri_loader(
self,
inner: TypeDef,
scoped_id: bool,
vocab_term: bool,
ref_scope: Optional[int],
) -> TypeDef:
"""Construct the TypeDef for the given URI loader."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"uri{inner.name}{scoped_id}{vocab_term}{ref_scope}",
"new _URILoader({}, {}, {}, {})".format(
inner.name,
self.to_typescript(scoped_id),
self.to_typescript(vocab_term),
self.to_typescript(ref_scope),
),
is_uri=True,
scoped_id=scoped_id,
ref_scope=ref_scope,
instance_type=instance_type,
)
)
def idmap_loader(
self, field: str, inner: TypeDef, map_subject: str, map_predicate: Optional[str]
) -> TypeDef:
"""Construct the TypeDef for the given mapped ID loader."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"idmap{self.safe_name(field)}{inner.name}",
f"new _IdMapLoader({inner.name}, '{map_subject}', '{map_predicate}')", # noqa: B907
instance_type=instance_type,
)
)
def typedsl_loader(self, inner: TypeDef, ref_scope: Optional[int]) -> TypeDef:
"""Construct the TypeDef for the given DSL loader."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"typedsl{self.safe_name(inner.name)}{ref_scope}",
f"new _TypeDSLLoader({self.safe_name(inner.name)}, {ref_scope})",
instance_type=instance_type,
)
)
def epilogue(self, root_loader: TypeDef) -> None:
"""Trigger to generate the epilouge code."""
pd = "This project contains TypeScript objects and utilities "
pd = pd + ' auto-generated by <a href=\\"https://github.com/'
pd = pd + 'common-workflow-language/schema_salad\\">Schema Salad</a>'
pd = pd + " for parsing documents corresponding to the "
pd = pd + str(self.base_uri) + " schema."
sorted_record_types = sorted(self.record_types)
generated_class_imports = ",\n ".join(sorted_record_types)
template_vars: MutableMapping[str, str] = dict(
project_name=self.package,
version="0.0.1-SNAPSHOT",
project_description=pd,
license_name="Apache License, Version 2.0",
generated_class_imports=generated_class_imports,
)
def template_from_resource(resource: Path) -> string.Template:
template_str = resource.read_text("utf-8")
template = string.Template(template_str)
return template
def expand_resource_template_to(resource: str, path: Path) -> None:
template = template_from_resource(
files("schema_salad").joinpath(f"typescript/{resource}")
)
src = template.safe_substitute(template_vars)
_ensure_directory_and_write(path, src)
expand_resource_template_to("package.json", self.target_dir / "package.json")
expand_resource_template_to(".gitignore", self.target_dir / ".gitignore")
expand_resource_template_to("LICENSE", self.target_dir / "LICENSE")
expand_resource_template_to("tsconfig.json", self.target_dir / "tsconfig.json")
expand_resource_template_to("index.ts", self.main_src_dir / "index.ts")
vocab = ",\n ".join(
f"""'{k}': '{self.vocab[k]}'""" for k in sorted(self.vocab.keys()) # noqa: B907
)
rvocab = ",\n ".join(
f"""'{self.vocab[k]}': '{k}'""" for k in sorted(self.vocab.keys()) # noqa: B907
)
loader_instances = ""
for _, collected_type in self.collected_types.items():
if not collected_type.abstract:
loader_instances += "export const {} = {};\n".format(
collected_type.name, collected_type.init
)
sorted_modules = sorted(self.modules)
internal_module_exports = "\n".join(f"export * from '../{f}'" for f in sorted_modules)
example_tests = ""
if self.examples:
_safe_makedirs(self.test_resources_dir)
utils_resources = self.test_resources_dir / "examples"
if os.path.exists(utils_resources):
shutil.rmtree(utils_resources)
shutil.copytree(self.examples, utils_resources)
for example_name in os.listdir(self.examples):
if example_name.startswith("valid"):
basename = os.path.basename(example_name).rsplit(".", 1)[0]
example_tests += """
it('{basename}', async () => {{
await loadDocument(__dirname + '/data/examples/{example_name}')
}})
it('{basename} by string', async () => {{
let doc = fs.readFileSync(__dirname + '/data/examples/{example_name}').toString()
await loadDocumentByString(doc, url.pathToFileURL(__dirname +
'/data/examples/').toString())
}})""".format(
basename=basename.replace("-", "_").replace(".", "_"),
example_name=example_name,
)
template_args: MutableMapping[str, str] = dict(
internal_module_exports=internal_module_exports,
loader_instances=loader_instances,
generated_class_imports=generated_class_imports,
vocab=vocab,
rvocab=rvocab,
root_loader=root_loader.name,
root_loader_type=root_loader.instance_type or "any",
tests=example_tests,
)
util_src_dirs = {
"util": self.main_src_dir / "util",
"test": self.main_src_dir / "test",
}
def copy_utils_recursive(util_src: str, util_target: Path) -> None:
for util in files("schema_salad").joinpath(f"typescript/{util_src}").iterdir():
if util.is_dir():
copy_utils_recursive(os.path.join(util_src, util.name), util_target / util.name)
continue
src_path = util_target / util.name
src_template = template_from_resource(util)
src = src_template.safe_substitute(template_args)
_ensure_directory_and_write(src_path, src)
for util_src, util_target in util_src_dirs.items():
copy_utils_recursive(util_src, util_target)
def secondaryfilesdsl_loader(self, inner: TypeDef) -> TypeDef:
"""Construct the TypeDef for secondary files."""
instance_type = inner.instance_type or "any"
return self.declare_type(
TypeDef(
f"secondaryfilesdsl{inner.name}",
f"new _SecondaryDSLLoader({inner.name})",
instance_type=instance_type,
)
)
from typing import List, Optional, Sequence, Tuple, Union
from .sourceline import SourceLine, reflow_all, strip_duplicated_lineno
class SchemaSaladException(Exception):
"""Base class for all schema-salad exceptions."""
def __init__(
self,
msg: str,
sl: Optional[SourceLine] = None,
children: Optional[Sequence["SchemaSaladException"]] = None,
bullet_for_children: str = "",
) -> None:
super().__init__(msg)
self.message = self.args[0]
self.file: Optional[str] = None
self.start: Optional[Tuple[int, int]] = None
self.end: Optional[Tuple[int, int]] = None
self.is_warning: bool = False
# It will be set by its parent
self.bullet: str = ""
def simplify(exc: "SchemaSaladException") -> List["SchemaSaladException"]:
return [exc] if len(exc.message) else exc.children
def with_bullet(exc: "SchemaSaladException", bullet: str) -> "SchemaSaladException":
if exc.bullet == "":
exc.bullet = bullet
return exc
if children is None:
self.children: List["SchemaSaladException"] = []
elif len(children) <= 1:
self.children = sum((simplify(c) for c in children), [])
else:
self.children = sum(
(simplify(with_bullet(c, bullet_for_children)) for c in children), []
)
self.with_sourceline(sl)
self.propagate_sourceline()
def propagate_sourceline(self) -> None:
if self.file is None:
return
for c in self.children:
if c.file is None:
c.file = self.file
c.start = self.start
c.end = self.end
c.propagate_sourceline()
def as_warning(self) -> "SchemaSaladException":
self.is_warning = True
for c in self.children:
c.as_warning()
return self
def with_sourceline(self, sl: Optional[SourceLine]) -> "SchemaSaladException":
if sl and sl.file():
self.file = sl.file()
self.start = sl.start()
self.end = sl.end()
else:
self.file = None
self.start = None
self.end = None
return self
def leaves(self) -> List["SchemaSaladException"]:
if len(self.children) > 0:
return sum((c.leaves() for c in self.children), [])
if len(self.message):
return [self]
return []
def prefix(self) -> str:
pre: str = ""
if self.file:
linecol0: Union[int, str] = ""
linecol1: Union[int, str] = ""
if self.start:
linecol0, linecol1 = self.start
pre = f"{self.file}:{linecol0}:{linecol1}: "
return pre + "Warning: " if self.is_warning else pre
def summary(self, level: int = 0, with_bullet: bool = False) -> str:
indent_per_level = 2
spaces = (level * indent_per_level) * " "
bullet = self.bullet + " " if len(self.bullet) > 0 and with_bullet else ""
return f"{self.prefix()}{spaces}{bullet}{self.message}"
def __str__(self) -> str:
"""Convert to a string using :py:meth:`pretty_str`."""
return str(self.pretty_str())
def pretty_str(self, level: int = 0) -> str:
messages = len(self.message)
my_summary = [self.summary(level, True)] if messages else []
next_level = level + 1 if messages else level
ret = "\n".join(e for e in my_summary + [c.pretty_str(next_level) for c in self.children])
if level == 0:
return strip_duplicated_lineno(reflow_all(ret))
return ret
class SchemaException(SchemaSaladException):
"""Indicates error with the provided schema definition."""
class ValidationException(SchemaSaladException):
"""Indicates error with document against the provided schema."""
class ClassValidationException(ValidationException):
pass
def to_one_line_messages(exc: SchemaSaladException) -> str:
return "\n".join(c.summary() for c in exc.leaves())
schema-salad | /schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/exceptions.py | exceptions.py
# Semantic Annotations for Linked Avro Data (SALAD)
Author:
* Peter Amstutz <[email protected]>, Curii Corporation
Contributors:
* The developers of Apache Avro
* The developers of JSON-LD
* Nebojša Tijanić <[email protected]>, Seven Bridges Genomics
* Michael R. Crusoe, ELIXIR-DE
# Abstract
Salad is a schema language for describing structured linked data documents
in JSON or YAML documents. A Salad schema provides rules for
preprocessing, structural validation, and link checking for documents
described by a Salad schema. Salad builds on JSON-LD and the Apache Avro
data serialization system, and extends Avro with features for rich data
modeling such as inheritance, template specialization, object identifiers,
and object references. Salad was developed to provide a bridge between the
record oriented data modeling supported by Apache Avro and the Semantic
Web.
# Status of This Document
This document is the product of the [Common Workflow Language working
group](https://groups.google.com/forum/#!forum/common-workflow-language). The
latest version of this document is available in the "schema_salad" repository at
https://github.com/common-workflow-language/schema_salad
The products of the CWL working group (including this document) are made available
under the terms of the Apache License, version 2.0.
<!--ToC-->
# Introduction
The JSON data model is an extremely popular way to represent structured
data. It is attractive because of its relative simplicity and is a
natural fit with the standard types of many programming languages.
However, this simplicity means that basic JSON lacks expressive features
useful for working with complex data structures and document formats, such
as schemas, object references, and namespaces.
JSON-LD is a W3C standard providing a way to describe how to interpret a
JSON document as Linked Data by means of a "context". JSON-LD provides a
powerful solution for representing object references and namespaces in JSON
based on standard web URIs, but is not itself a schema language. Without a
schema providing a well defined structure, it is difficult to process an
arbitrary JSON-LD document as idiomatic JSON because there are many ways to
express the same data that are logically equivalent but structurally
distinct.
Several schema languages exist for describing and validating JSON data,
such as the Apache Avro data serialization system; however, none of them
understand linked data. As a result, to fully take advantage of JSON-LD to build the
next generation of linked data applications, one must maintain separate
JSON schema, JSON-LD context, RDF schema, and human documentation, despite
significant overlap of content and obvious need for these documents to stay
synchronized.
Schema Salad is designed to address this gap. It provides a schema
language and processing rules for describing structured JSON content
permitting URI resolution and strict document validation. The schema
language supports linked data through annotations that describe the linked
data interpretation of the content, enables generation of JSON-LD context
and RDF schema, and production of RDF triples by applying the JSON-LD
context. The schema language also provides for robust support of inline
documentation.
## Introduction to v1.1
This is the third version of the Schema Salad specification. It is
developed concurrently with v1.1 of the Common Workflow Language for use in
specifying the Common Workflow Language; however, Schema Salad is intended to be
useful to a broader audience. Compared to the v1.0 schema salad
specification, the following changes have been made:
* Support for `default` values on record fields to specify default values
* Add subscoped fields (fields which introduce a new inner scope for identifiers)
* Add the *inVocab* flag (default true) to indicate if a type is added to the vocabulary of well known terms or must be prefixed
* Add *secondaryFilesDSL* micro DSL (domain specific language) to convert text strings to a secondaryFiles record type used in CWL
* The `$mixin` feature has been removed from the specification, as it
is poorly documented, not included in conformance testing,
and not widely supported.
## Introduction to v1.2
This is the fourth version of the Schema Salad specification. It was created to
ease the development of extensions to CWL v1.2. The only change is that
inherited records can narrow the types of fields if those fields are re-specified
with a matching jsonldPredicate.
## References to Other Specifications
**Javascript Object Notation (JSON)**: http://json.org
**JSON Linked Data (JSON-LD)**: http://json-ld.org
**YAML**: https://yaml.org/spec/1.2/spec.html
**Avro**: https://avro.apache.org/docs/current/spec.html
**Uniform Resource Identifier (URI) Generic Syntax**: https://tools.ietf.org/html/rfc3986
**Resource Description Framework (RDF)**: http://www.w3.org/RDF/
**UTF-8**: https://www.ietf.org/rfc/rfc2279.txt
## Scope
This document describes the syntax, data model, algorithms, and schema
language for working with Salad documents. It is not intended to document
a specific implementation of Salad; however, it may serve as a reference for
the behavior of conforming implementations.
## Terminology
The terminology used to describe Salad documents is defined in the Concepts
section of the specification. The terms defined in the following list are
used in building those definitions and in describing the actions of a
Salad implementation:
**may**: Conforming Salad documents and Salad implementations are permitted but
not required to be interpreted as described.
**must**: Conforming Salad documents and Salad implementations are required
to be interpreted as described; otherwise they are in error.
**error**: A violation of the rules of this specification; results are
undefined. Conforming implementations may detect and report an error and may
recover from it.
**fatal error**: A violation of the rules of this specification; results
are undefined. Conforming implementations must not continue to process the
document and may report an error.
**at user option**: Conforming software may or must (depending on the modal verb in
the sentence) behave as described; if it does, it must provide users a means to
enable or disable the behavior described.
# Document model
## Data concepts
An **object** is a data structure equivalent to the "object" type in JSON,
consisting of an unordered set of name/value pairs (referred to here as
**fields**), where each name is a string and each value is a string, number,
boolean, array, or object.
A **document** is a file containing a serialized object, or an array of
objects.
A **document type** is a class of files that share a common structure and
semantics.
A **document schema** is a formal description of the grammar of a document type.
A **base URI** is a context-dependent URI used to resolve relative references.
An **identifier** is a URI that designates a single document or single
object within a document.
A **vocabulary** is the set of symbolic field names and enumerated symbols defined
by a document schema, where each term maps to an absolute URI.
## Syntax
Conforming Salad v1.1 documents are serialized and loaded using a
subset of YAML 1.2 syntax and UTF-8 text encoding. Salad documents
are written using the [JSON-compatible subset of YAML described in
section 10.2](https://yaml.org/spec/1.2/spec.html#id2803231). The
following features of YAML must not be used in conforming Salad
documents:
* Use of explicit node tags with leading `!` or `!!`
* Use of anchors with leading `&` and aliases with leading `*`
* %YAML directives
* %TAG directives
It is a fatal error if the document is not valid YAML.
A Salad document must consist only of either a single root object or an
array of objects.
## Document context
### Implied context
The implicit context consists of the vocabulary defined by the schema and
the base URI. By default, the base URI must be the URI that was used to
load the document. It may be overridden by an explicit context.
### Explicit context
If a document consists of a root object, this object may contain the
fields `$base`, `$namespaces`, `$schemas`, and `$graph`:
* `$base`: Must be a string. Set the base URI for the document used to
resolve relative references.
* `$namespaces`: Must be an object with strings as values. The keys of
the object are namespace prefixes used in the document; the values of
the object are the prefix expansions.
* `$schemas`: Must be an array of strings. This field may list URI
references to documents in RDF-XML format which will be queried for RDF
schema data. The subjects and predicates described by the RDF schema
may provide additional semantic context for the document, and may be
used for validation of prefixed extension fields found in the document.
Other directives beginning with `$` must be ignored.
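As a non-normative illustration (the `acid` prefix and all URIs are invented
for this example), a root object using these directives might look like:

```
{
  "$base": "http://example.com/base",
  "$namespaces": {
    "acid": "http://example.com/acid#"
  },
  "$schemas": [
    "http://example.com/acid.ttl"
  ],
  "acid:value": "a field name resolved using the acid namespace prefix"
}
```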
## Document graph
If a document consists of a single root object, this object may contain the
field `$graph`. This field must be an array of objects. If present, this
field holds the primary content of the document. A document that consists
of an array of objects at the root is an implicit graph.
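For example (identifiers invented for illustration), the following root object
places its primary content in `$graph`:

```
{
  "$graph": [
    {
      "id": "#alice",
      "friend": "#bob"
    },
    {
      "id": "#bob"
    }
  ]
}
```

A document whose root is the bare array of the same two objects is an implicit
graph with equivalent content.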
## Document metadata
If a document consists of a single root object, metadata about the
document, such as authorship, may be declared in the root object.
## Document schema
Document preprocessing, link validation and schema validation require a
document schema. A schema may consist of:
* At least one record definition object which defines valid fields that
make up a record type. Record field definitions include the valid types
that may be assigned to each field and annotations to indicate fields
that represent identifiers and links, described below in "Semantic
Annotations".
* Any number of enumerated type objects which define a finite set of symbols that are
valid values of the type.
* Any number of documentation objects which allow in-line documentation of the schema.
The schema for defining a salad schema (the metaschema) is described in
detail in the [Schema](#Schema) section.
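As a minimal, non-normative sketch of these three kinds of schema objects (all
type and field names are invented for the example):

```
[
  {
    "name": "Colour",
    "type": "enum",
    "symbols": ["red", "green", "blue"],
    "doc": "An example enumerated type."
  },
  {
    "name": "Widget",
    "type": "record",
    "fields": [
      {"name": "label", "type": "string"},
      {"name": "colour", "type": "Colour"}
    ],
    "doc": "An example record type."
  },
  {
    "name": "About",
    "type": "documentation",
    "doc": "An example documentation object."
  }
]
```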
## Record field annotations
In a document schema, record field definitions may include the field
`jsonldPredicate`, which may be either a string or an object. Implementations
must preprocess fields according to the following rules (an illustrative
sketch follows this list):
* If the value of `jsonldPredicate` is `@id`, the field is an identifier
field.
* If the value of `jsonldPredicate` is an object, and that
object contains the field `_type` with the value `@id`, the
field is a link field. If the field `jsonldPredicate` also
has the field `identity` with the value `true`, the field is
resolved with [identifier resolution](#Identifier_resolution).
Otherwise it is resolved with [link resolution](#Link_resolution).
* If the value of `jsonldPredicate` is an object which contains the
field `_type` with the value `@vocab`, the field value is subject to
[vocabulary resolution](#Vocabulary_resolution).
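A non-normative sketch of a record definition using each annotation style
(the record and field names are invented):

```
{
  "name": "ExampleRecord",
  "type": "record",
  "fields": [
    {
      "name": "id",
      "type": "string",
      "jsonldPredicate": "@id"
    },
    {
      "name": "target",
      "type": "string",
      "jsonldPredicate": {
        "_type": "@id"
      }
    },
    {
      "name": "category",
      "type": "string",
      "jsonldPredicate": {
        "_type": "@vocab"
      }
    }
  ]
}
```

Here `id` is an identifier field, `target` is a link field resolved with link
resolution, and `category` is subject to vocabulary resolution.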
## Document traversal
To perform document preprocessing, link validation and schema
validation, the document must be traversed starting from the fields or
array items of the root object or array and recursively visiting each child
item which contains an object or an array.
## Short names
The "short name" of a fully qualified identifier is the portion of
the identifier following the final slash `/` of either the fragment
identifier following `#` or the path portion, if there is no fragment.
Some examples:
* the short name of `http://example.com/foo` is `foo`
* the short name of `http://example.com/#bar` is `bar`
* the short name of `http://example.com/foo/bar` is `bar`
* the short name of `http://example.com/foo#bar` is `bar`
* the short name of `http://example.com/#foo/bar` is `bar`
* the short name of `http://example.com/foo#bar/baz` is `baz`
## Inheritance and specialization
A record definition may inherit from one or more record definitions
with the `extends` field. This copies the fields defined in the
parent record(s) as the base for the new record. A record definition
may `specialize` type declarations of the fields inherited from the
base record. For each field inherited from the base record, any
instance of the type in `specializeFrom` is replaced with the type in
`specializeTo`. The type in `specializeTo` should extend from the
type in `specializeFrom`.
A record definition may be `abstract`. This means the record
definition is not used for validation on its own, but may be extended
by other definitions. If an abstract type appears in a field
definition, it is logically replaced with a union of all concrete
subtypes of the abstract type. In other words, the field value does
not validate as the abstract type, but must validate as some concrete
type that inherits from the abstract type.
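A non-normative sketch of inheritance and specialization (all type names are
invented, and the definitions of `OldPart` and `NewPart` are omitted):

```
[
  {
    "name": "Base",
    "type": "record",
    "abstract": true,
    "fields": [
      {"name": "part", "type": "OldPart"}
    ]
  },
  {
    "name": "Derived",
    "type": "record",
    "extends": "Base",
    "specialize": [
      {"specializeFrom": "OldPart", "specializeTo": "NewPart"}
    ],
    "fields": [
      {"name": "label", "type": "string"}
    ]
  }
]
```

`Derived` inherits the `part` field from `Base` with its type replaced by
`NewPart`; because `Base` is abstract, field values do not validate as `Base`
itself but must validate as a concrete subtype such as `Derived`.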
# Document preprocessing
After processing the explicit context (if any), document preprocessing
begins. Starting from the document root, object field values or array
items which contain objects or arrays are recursively traversed
depth-first. For each visited object, field names, identifier fields, link
fields, vocabulary fields, and `$import` and `$include` directives must be
processed as described in this section. The order of traversal of child
nodes within a parent node is undefined.
schema-salad | /schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/metaschema/salad.md | salad.md
## Import
During preprocessing traversal, an implementation must resolve `$import`
directives. An `$import` directive is an object consisting of exactly one
field `$import` specifying a resource by URI string. It is an error if there
are additional fields in the `$import` object; such additional fields must
be ignored.
The URI string must be resolved to an absolute URI using the link
resolution rules described previously. Implementations must support
loading from `file`, `http` and `https` resources. The URI referenced by
`$import` must be loaded and recursively preprocessed as a Salad document.
The external imported document does not inherit the context of the
importing document, and the default base URI for processing the imported
document must be the URI used to retrieve the imported document. If the
`$import` URI includes a document fragment, the fragment must be excluded
from the base URI used to preprocess the imported document.
If the `$import` node is in an array and the import operation yields an array,
it is flattened to the parent array. Otherwise the `$import` node is replaced
in the document structure by the object or array yielded from the import operation.
URIs may reference document fragments which refer to a specific object in
the target document. This indicates that the `$import` node must be
replaced by only the object with the appropriate fragment identifier.
It is a fatal error if an import directive refers to an external resource
or resource fragment which does not exist or is not accessible.
### Import example: replacing the `$import` node
import.json:
```
{
"hello": "world"
}
```
parent.json:
```
{
"form": {
"bar": {
"$import": "import.json"
}
}
}
```
This becomes:
```
{
"form": {
"bar": {
"hello": "world"
}
}
}
```
### Import example: flattening the `$import`ed array
import.json:
```
[ "hello", "world" ]
```
parent.json:
```
{
"form": [
"bar",
{
"$import": "import.json"
}
]
}
```
This becomes:
```
{
"form": [
"bar",
"hello",
"world"
]
}
```
## Include
During preprocessing traversal, an implementation must resolve `$include`
directives. An `$include` directive is an object consisting of exactly one
field `$include` specifying a URI string. It is an error if there are
additional fields in the `$include` object; such additional fields must be
ignored.
The URI string must be resolved to an absolute URI using the link
resolution rules described previously. The URI referenced by `$include` must
be loaded as text data. Implementations must support loading from
`file`, `http` and `https` resources. Implementations may transcode the
character encoding of the text data to match that of the parent document,
but must not interpret or parse the text document in any other way.
Once loaded, the `$include` node is replaced in the document structure by a
string containing the text data loaded from the resource.
It is a fatal error if an `$include` directive refers to an external resource
which does not exist or is not accessible.
### Include example
parent.json:
```
{
"form": {
"bar": {
"$include": "include.txt"
}
}
}
```
include.txt:
```
hello world
```
This becomes:
```
{
"form": {
"bar": "hello world"
}
}
```
schema-salad | /schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/metaschema/import_include.md | import_include.md
import { assert } from 'chai'
import { DefaultFetcher, ValidationException } from '../util/Internal'
import sinon from 'sinon'
import * as fetchModule from 'node-fetch'
import { Response } from 'node-fetch'
import path from 'path'
import URL from 'url'
describe('Test Fetcher', () => {
const fet = new DefaultFetcher()
describe('Test fetchText()', () => {
afterEach(function () {
sinon.restore()
})
it('Should fetch text from http urls', async () => {
sinon.stub(fetchModule, 'default').returns(new Promise((resolve) => resolve(new Response('test', { status: 200 }))))
assert.equal(await fet.fetchText('http://www.example.com'), 'test')
})
it('Should fetch text from https urls', async () => {
sinon.stub(fetchModule, 'default').returns(new Promise((resolve) => resolve(new Response('test', { status: 200 }))))
assert.equal(await fet.fetchText('https://www.example.com'), 'test')
})
it('Should fetch text from files', async () => {
const filepath = URL.pathToFileURL(path.resolve('./src/test/data/test.txt')).toString()
assert.equal(await fet.fetchText(filepath), 'test\n')
})
it('Throw a 404 exception', async () => {
sinon.stub(fetchModule, 'default').returns(new Promise((resolve) => resolve(new Response('test', { status: 404 }))))
let err
try {
await fet.fetchText('https://www.example.com')
} catch (e) {
err = e
}
assert.exists(err)
assert.isTrue(err instanceof ValidationException)
assert.equal((err as ValidationException).message, 'Error fetching https://www.example.com: HTTP Error Response: 404 Not Found')
})
    it('Throw an invalid scheme exception', async () => {
let err
try {
await fet.fetchText('invalidscheme://www.example.com')
} catch (e) {
err = e
}
assert.exists(err)
assert.isTrue(err instanceof ValidationException)
assert.equal((err as ValidationException).message, 'Unsupported scheme invalidscheme in url: invalidscheme://www.example.com')
})
})
describe('Test urlJoin()', () => {
it('Should correctly join urls', async () => {
assert.equal(fet.urljoin('http://example.com/base', 'one'), 'http://example.com/one')
assert.equal(fet.urljoin('http://example.com/base', 'two'), 'http://example.com/two')
assert.equal(fet.urljoin('http://example.com/base', '#three'), 'http://example.com/base#three')
assert.equal(fet.urljoin('http://example.com/base', 'four#five'), 'http://example.com/four#five')
assert.equal(fet.urljoin('http://example.com/base', '_:five'), '_:five')
})
it('Should throw a remote exploit exception', async () => {
let err
try {
fet.urljoin('http://example.com/base', 'file:///test/test.txt')
} catch (e) {
err = e
}
assert.exists(err)
assert.isTrue(err instanceof ValidationException)
assert.equal((err as ValidationException).message, 'Not resolving potential remote exploit file:///test/test.txt from base http://example.com/base')
})
})
})
schema-salad | /schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript/test/Fetcher.spec.ts | Fetcher.spec.ts
import { assert } from 'chai'
import { TypeGuards } from '../util/Internal'
describe('Test Typeguards', () => {
describe('Int', () => {
it('Should return true', () => {
assert.equal(TypeGuards.Int(2), true)
assert.equal(TypeGuards.Int(0), true)
})
it('Should return false', () => {
assert.equal(TypeGuards.Int(2.2), false)
assert.equal(TypeGuards.Int('2.2'), false)
assert.equal(TypeGuards.Int([2]), false)
assert.equal(TypeGuards.Int({}), false)
assert.equal(TypeGuards.Int(null), false)
assert.equal(TypeGuards.Int(undefined), false)
})
})
describe('Float', () => {
it('Should return true', () => {
assert.equal(TypeGuards.Float(2.0), true)
assert.equal(TypeGuards.Float(2), true)
assert.equal(TypeGuards.Float(0), true)
})
it('Should return false', () => {
assert.equal(TypeGuards.Float([2]), false)
assert.equal(TypeGuards.Float('2.2'), false)
assert.equal(TypeGuards.Float({}), false)
assert.equal(TypeGuards.Float(null), false)
assert.equal(TypeGuards.Float(undefined), false)
})
})
describe('Bool', () => {
it('Should return true', () => {
assert.equal(TypeGuards.Bool(true), true)
assert.equal(TypeGuards.Bool(false), true)
})
it('Should return false', () => {
assert.equal(TypeGuards.Bool([1]), false)
assert.equal(TypeGuards.Bool('1'), false)
assert.equal(TypeGuards.Bool(1), false)
assert.equal(TypeGuards.Bool({}), false)
assert.equal(TypeGuards.Bool(null), false)
assert.equal(TypeGuards.Bool(undefined), false)
})
})
describe('String', () => {
it('Should return true', () => {
assert.equal(TypeGuards.String('2.2'), true)
assert.equal(TypeGuards.String(''), true)
assert.equal(TypeGuards.String('test'), true)
})
it('Should return false', () => {
assert.equal(TypeGuards.String([2]), false)
assert.equal(TypeGuards.String(2), false)
assert.equal(TypeGuards.String({}), false)
assert.equal(TypeGuards.String(null), false)
assert.equal(TypeGuards.String(undefined), false)
})
})
describe('Undefined', () => {
it('Should return true', () => {
assert.equal(TypeGuards.Undefined(undefined), true)
assert.equal(TypeGuards.Undefined(null), true)
})
it('Should return false', () => {
assert.equal(TypeGuards.Undefined([1]), false)
assert.equal(TypeGuards.Undefined('1'), false)
assert.equal(TypeGuards.Undefined(1), false)
assert.equal(TypeGuards.Undefined(1.1), false)
assert.equal(TypeGuards.Undefined({}), false)
})
})
describe('Dictionary', () => {
it('Should return true', () => {
assert.equal(TypeGuards.isDictionary({}), true)
assert.equal(TypeGuards.isDictionary({ test: 'test' }), true)
})
it('Should return false', () => {
assert.equal(TypeGuards.isDictionary([]), false)
assert.equal(TypeGuards.isDictionary('1'), false)
assert.equal(TypeGuards.isDictionary(1), false)
assert.equal(TypeGuards.isDictionary(1.1), false)
assert.equal(TypeGuards.isDictionary(undefined), false)
assert.equal(TypeGuards.isDictionary(null), false)
})
})
})
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript/test/Typeguards.spec.ts
|
Typeguards.spec.ts
|
| 0.748352 | 0.911101 |
import { LoadingOptions, Dictionary, TypeGuards } from './Internal'
import * as URI from 'uri-js'
import path from 'path'
// eslint-disable-next-line @typescript-eslint/no-extraneous-class
export abstract class Saveable {
loadingOptions: LoadingOptions
constructor(loadingOptions?: LoadingOptions) {
this.loadingOptions = loadingOptions ?? new LoadingOptions({})
}
static async fromDoc (doc: any, baseuri: string, loadingOptions: LoadingOptions, docRoot?: string): Promise<Saveable> {
throw new Error('Not Implemented')
}
abstract save (top: boolean, baseUrl: string, relativeUris: boolean): Dictionary<any>
}
export function save (val: any, top: boolean = true, baseUrl: string = '', relativeUris: boolean = true): any {
if (val instanceof Saveable) {
return val.save(top, baseUrl, relativeUris)
}
if (Array.isArray(val)) {
const r = []
for (const v of val) {
r.push(save(v, false, baseUrl, relativeUris))
}
return r
}
if (TypeGuards.isDictionary(val)) {
const newDict: Dictionary<any> = {}
for (const key in val) {
newDict[key] = save(val[key], false, baseUrl, relativeUris)
}
return newDict
}
return val
}
export function saveRelativeUri (uri: any, baseUrl: string='', scopedId: boolean, relativeUris: boolean, refScope?: number): any {
if (relativeUris === false || uri === baseUrl) {
return uri
}
if (Array.isArray(uri)) {
const r = []
for (const v of uri) {
r.push(saveRelativeUri(v, baseUrl, scopedId, relativeUris, refScope))
}
return r
} else if (typeof uri === 'string') {
const uriSplit = URI.parse(uri)
const baseSplit = URI.parse(baseUrl)
if (uriSplit.path == null || baseSplit.path == null) {
throw new Error('uri or baseurl need to contain a path.')
}
if (uriSplit.scheme === baseSplit.scheme && uriSplit.host === baseSplit.host) {
if (uriSplit.path !== baseSplit.path) {
let p = path.relative(path.dirname(baseSplit.path), uriSplit.path)
if (uriSplit.fragment != null) {
p = p + '#' + uriSplit.fragment
}
return p
}
if (baseSplit.fragment == null) {
baseSplit.fragment = ''
}
let basefrag = baseSplit.fragment + '/'
if (refScope != null) {
const sp = basefrag.split('/')
let i = 0
while (i < refScope) {
sp.pop()
i += 1
}
basefrag = sp.join('/')
}
if (uriSplit.fragment == null) {
uriSplit.fragment = ''
}
if (uriSplit.fragment.startsWith(basefrag)) {
return uriSplit.fragment.slice(basefrag.length)
} else {
return uriSplit.fragment
}
} else {
return save(uri, false, baseUrl)
}
}
}
export function prefixUrl (url: string, namespaces: Dictionary<string>): string {
for (const k in namespaces) {
    if (url.startsWith(namespaces[k])) {
      return k + ':' + url.slice(namespaces[k].length)
}
}
return url
}
/**
* Compute the shortname of a fully qualified identifier.
* See https://w3id.org/cwl/v1.2/SchemaSalad.html#Short_names.
*
*/
export function shortname (inputId: string): string {
const parsedId = URI.parse(inputId)
if (parsedId.fragment != null) {
const fragmentSplit = parsedId.fragment.split('/')
return fragmentSplit[fragmentSplit.length - 1]
} else if (parsedId.path != null) {
const pathSplit = parsedId.path.split('/')
return pathSplit[pathSplit.length - 1]
} else {
return inputId
}
}
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript/util/Saveable.ts
|
Saveable.ts
|
| 0.558568 | 0.161122 |
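The shortname() helper in Saveable.ts above implements the Schema Salad short-name rule: take the last segment of the URI fragment, or of the path when no fragment is present. A minimal Python sketch of the same rule, for illustration only (it is not the library's Python API):

from urllib.parse import urlsplit

def shortname(input_id: str) -> str:
    # Last segment of the fragment if present, otherwise of the path.
    parts = urlsplit(input_id)
    if parts.fragment:
        return parts.fragment.split("/")[-1]
    if parts.path:
        return parts.path.split("/")[-1]
    return input_id

print(shortname("https://w3id.org/cwl/v1.2/SchemaSalad.html#Short_names"))  # Short_names
print(shortname("http://example.com/foo#bar/baz"))                          # baz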
import { ValidationException } from './Internal'
import fetch from 'node-fetch'
import * as fs from 'fs'
import * as URI from 'uri-js'
// Code implemented after https://github.com/common-workflow-language/schema_salad/blob/main/schema_salad/fetcher.py
export abstract class Fetcher {
abstract fetchText (url: string, contentTypes?: string[]): Promise<string>
abstract checkExists (url: string): boolean
abstract urljoin (baseUrl: string, url: string): string
static schemes = ['file', 'http', 'https', 'mailto']
}
export class DefaultFetcher extends Fetcher {
async fetchText (urlString: string): Promise<string> {
// TODO: cache
const split = URI.parse(urlString)
const scheme = split.scheme ?? ''
if (Fetcher.schemes.includes(scheme)) {
if (['http', 'https'].includes(scheme)) {
try {
// TODO: content types
const result = await fetch(new URL(urlString))
if (!result.ok) {
throw Error(`HTTP Error Response: ${result.status} ${result.statusText}`)
}
return await result.text()
} catch (e) {
if (e instanceof Error) {
throw new ValidationException(`Error fetching ${urlString}: ${e.message}`)
} else {
throw e
}
}
} else if (scheme === 'file') {
try {
return fs.readFileSync(split.path ?? '', { encoding: 'utf8' })
} catch (e) {
if (e instanceof Error) {
throw new ValidationException(`Error reading file ${urlString}: ${e.message}`)
} else {
throw e
}
}
}
}
throw new ValidationException(`Unsupported scheme ${scheme} in url: ${urlString}`)
}
checkExists (url: string): boolean {
throw new Error('Not implemented.')
}
urljoin (baseUrlString: string, urlString: string): string {
if (urlString.startsWith('_:')) {
return urlString
}
const baseUrl = URI.parse(baseUrlString)
const url = URI.parse(urlString)
if (baseUrl.scheme != null && baseUrl.scheme !== 'file' && url.scheme === 'file') {
throw new ValidationException(`Not resolving potential remote exploit ${urlString} from base ${baseUrlString}`)
}
// TODO: Windows specific join?
return new URL(urlString, baseUrlString).toString()
}
}
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript/util/Fetcher.ts
|
Fetcher.ts
|
| 0.503418 | 0.090977 |
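DefaultFetcher.fetchText() above dispatches on the URL scheme: http/https go through node-fetch, file URLs are read from disk, and anything else raises a ValidationException. A rough, self-contained Python sketch of the same dispatch (illustrative only; the real Python implementation lives in schema_salad/fetcher.py and differs in detail):

from urllib.parse import urlsplit
from urllib.request import url2pathname, urlopen

def fetch_text(url: str) -> str:
    scheme = urlsplit(url).scheme
    if scheme in ("http", "https"):
        with urlopen(url) as response:  # raises urllib.error.HTTPError on 4xx/5xx
            return response.read().decode("utf-8")
    if scheme == "file":
        with open(url2pathname(urlsplit(url).path), encoding="utf-8") as handle:
            return handle.read()
    raise ValueError(f"Unsupported scheme {scheme} in url: {url}")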
import { LoadingOptions, documentLoadByUrl, TypeGuards, ValidationException } from '../Internal'
import * as URI from 'uri-js'
export interface Loader {
load: (doc: any, baseuri: string, loadingOptions: LoadingOptions, docRoot?: string) => Promise<any>
}
export async function loadField (val: any, fieldType: Loader, baseuri: string, loadingOptions: LoadingOptions): Promise<any> {
if (TypeGuards.isDictionary(val)) {
if ('$import' in val) {
if (loadingOptions.fileUri == null) {
throw Error('Cannot load $import without fileuri')
}
return await documentLoadByUrl(fieldType, loadingOptions.fetcher.urljoin(loadingOptions.fileUri, val.$import), loadingOptions)
} else if ('$include' in val) {
if (loadingOptions.fileUri == null) {
        throw Error('Cannot load $include without fileuri')
}
val = await loadingOptions.fetcher.fetchText(loadingOptions.fetcher.urljoin(loadingOptions.fileUri, val.$include))
}
}
return await fieldType.load(val, baseuri, loadingOptions)
}
export function expandUrl (url: string, baseUrl: string, loadingOptions: LoadingOptions, scopedId = false, vocabTerm = false, scopedRef?: number): string {
if (['@id', '@type'].includes(url)) {
return url
}
if (vocabTerm && url in loadingOptions.vocab) {
return url
}
if (loadingOptions.vocab != null && url.includes(':')) {
const prefix = url.split(':')[0]
if (prefix in loadingOptions.vocab) {
url = loadingOptions.vocab[prefix] + url.slice(prefix.length + 1)
}
}
const split = URI.parse(url)
  if ((split.scheme != null && ['http', 'https', 'file'].includes(split.scheme)) || url.startsWith('$(') || url.startsWith('${')) {
    // Absolute URLs and expression-like values ($(...) / ${...}) are left untouched.
  } else if (scopedId && split.fragment === undefined) {
const splitbase = URI.parse(baseUrl)
let frg = ''
if (splitbase.fragment != null) {
frg = splitbase.fragment + '/' + (split.path ?? '')
} else {
frg = split.path ?? ''
}
const pt = splitbase.path ?? '/'
const parts = {
scheme: splitbase.scheme,
userinfo: undefined,
host: splitbase.host,
port: undefined,
path: pt,
query: splitbase.query,
fragment: frg,
reference: undefined,
error: undefined
}
url = URI.serialize(parts)
} else if (scopedRef != null && split.fragment === undefined) {
const splitbase = URI.parse(baseUrl)
const sp = splitbase.fragment?.split('/') ?? []
let n = scopedRef
while (n > 0 && sp?.length > 0) {
sp.pop()
n -= 1
}
sp.push(url)
const parts = {
scheme: splitbase.scheme,
userinfo: undefined,
host: splitbase.host,
port: undefined,
path: splitbase.path,
query: splitbase.query,
fragment: sp.join('/'),
reference: undefined,
error: undefined
}
url = URI.serialize(parts)
} else {
url = loadingOptions.fetcher.urljoin(baseUrl, url)
}
if (vocabTerm) {
const split = URI.parse(url)
if (split.scheme !== undefined) {
if (url in loadingOptions.rvocab) {
return loadingOptions.rvocab[url]
}
} else {
throw new ValidationException(`Term '${url}' not in vocabulary`)
}
}
return url
}
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript/util/loaders/Loader.ts
|
Loader.ts
|
| 0.547464 | 0.14253 |
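Before any relative-reference handling, expandUrl() above rewrites a vocabulary prefix (the part before the first ':') into its full namespace IRI. A small Python sketch of just that prefix-expansion step, using a made-up vocabulary for illustration:

def expand_prefix(url: str, vocab: dict) -> str:
    if ":" not in url:
        return url
    prefix = url.split(":", 1)[0]
    if prefix in vocab:
        return vocab[prefix] + url[len(prefix) + 1:]
    return url

vocab = {"sld": "https://w3id.org/cwl/salad#"}   # hypothetical prefix mapping
print(expand_prefix("sld:RecordSchema", vocab))  # https://w3id.org/cwl/salad#RecordSchema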
import { Loader, LoadingOptions, Dictionary, TypeGuards, ValidationException } from '../Internal'
export class _SecondaryDSLLoader implements Loader {
inner: Loader
constructor (inner: Loader) {
this.inner = inner
}
async load (doc: any, baseuri: string, loadingOptions: LoadingOptions, docRoot?: string): Promise<any> {
const r: Array<Dictionary<any>> = []
if (Array.isArray(doc)) {
for (const d of doc) {
if (typeof d === 'string') {
if (d.endsWith('?')) {
r.push({ pattern: d.slice(0, -1), required: false })
} else {
r.push({ pattern: d })
}
} else if (TypeGuards.isDictionary(d)) {
const newDict: Dictionary<any> = {}
if ('pattern' in d) {
newDict.pattern = d.pattern
delete d.pattern
} else {
throw new ValidationException(`Missing pattern in secondaryFiles specification entry: ${JSON.stringify(d)}`)
}
if ('required' in d) {
newDict.required = d.required
delete d.required
}
if (Object.keys(d).length > 0) {
throw new ValidationException(`Unallowed values in secondaryFiles specification entry: ${JSON.stringify(d)}`)
}
r.push(newDict)
} else {
throw new ValidationException('Expected a string or sequence of (strings or mappings).')
}
}
} else if (TypeGuards.isDictionary(doc)) {
const newDict: Dictionary<any> = {}
if ('pattern' in doc) {
newDict.pattern = doc.pattern
delete doc.pattern
} else {
throw new ValidationException(`Missing pattern in secondaryFiles specification entry: ${JSON.stringify(doc)}`)
}
if ('required' in doc) {
newDict.required = doc.required
delete doc.required
}
if (Object.keys(doc).length > 0) {
throw new ValidationException(`Unallowed values in secondaryFiles specification entry: ${JSON.stringify(doc)}`)
}
r.push(newDict)
} else if (typeof doc === 'string') {
if (doc.endsWith('?')) {
r.push({ pattern: doc.slice(0, -1), required: false })
} else {
r.push({ pattern: doc })
}
} else {
throw new ValidationException('Expected str or sequence of str')
}
return await this.inner.load(r, baseuri, loadingOptions, docRoot)
}
}
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript/util/loaders/SecondaryDSLLoader.ts
|
SecondaryDSLLoader.ts
|
| 0.631253 | 0.214157 |
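_SecondaryDSLLoader above normalizes the secondaryFiles shorthand: a bare string becomes {pattern: ...}, a trailing '?' marks the entry optional, and mappings must carry a pattern and nothing besides pattern/required. A compact Python sketch of that normalization, for illustration:

def normalize_secondary(entry):
    if isinstance(entry, str):
        if entry.endswith("?"):
            return {"pattern": entry[:-1], "required": False}
        return {"pattern": entry}
    if isinstance(entry, dict):
        known = {k: entry[k] for k in ("pattern", "required") if k in entry}
        if "pattern" not in known:
            raise ValueError(f"Missing pattern in secondaryFiles entry: {entry}")
        if set(entry) - set(known):
            raise ValueError(f"Unallowed values in secondaryFiles entry: {entry}")
        return known
    raise ValueError("Expected a string or a mapping")

print(normalize_secondary(".bai?"))  # {'pattern': '.bai', 'required': False}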
import { LoadingOptions, Loader, TypeGuards, LoaderInstances } from '../Internal'
import * as Internal from '../Internal'
import * as yaml from 'js-yaml'
import * as URL from 'url'
export async function documentLoad (loader: Loader, doc: unknown, baseuri: string, loadingOptions: LoadingOptions): Promise<any> {
if (typeof doc === 'string') {
return await documentLoadByUrl(loader, loadingOptions.fetcher.urljoin(baseuri, doc), loadingOptions)
}
if (Array.isArray(doc)) {
return await loader.load(doc, baseuri, loadingOptions)
}
if (TypeGuards.isDictionary(doc)) {
if (doc != null) {
if ('$namespaces' in doc || '$schemas' in doc) {
loadingOptions = new LoadingOptions({ copyFrom: loadingOptions, namespaces: doc.$namespaces ?? undefined, schemas: doc.$schemas ?? undefined })
delete doc.$schemas
delete doc.$namespaces
}
if ('$base' in doc) {
baseuri = doc.$base
}
if ('$graph' in doc) {
return await loader.load(doc.$graph, baseuri, loadingOptions)
} else {
return await loader.load(doc, baseuri, loadingOptions, baseuri)
}
}
}
throw new Error('Reached unexpected path')
}
export async function documentLoadByUrl (loader: Loader, url: string, loadingOptions: LoadingOptions): Promise<any> {
if (url in loadingOptions.idx) {
return await documentLoad(loader, loadingOptions.idx[url], url, loadingOptions)
}
const text = await loadingOptions.fetcher.fetchText(url)
const result = yaml.load(text)
loadingOptions.idx[url] = result
loadingOptions = new LoadingOptions({ copyFrom: loadingOptions, fileUri: url })
return await documentLoad(loader, result, url, loadingOptions)
}
export async function loadDocument (doc: any, baseuri?: string, loadingOptions?: LoadingOptions): Promise<${root_loader_type}> {
if (baseuri == null) {
baseuri = URL.pathToFileURL(process.cwd() + '/').toString()
}
if (loadingOptions == null) {
loadingOptions = new LoadingOptions({})
}
return await documentLoad(LoaderInstances.${root_loader}, doc, baseuri, loadingOptions)
}
export async function loadDocumentByString (doc: string, uri: string, loadingOptions?: LoadingOptions): Promise<${root_loader_type}> {
const result = yaml.load(doc)
if (loadingOptions == null) {
loadingOptions = new LoadingOptions({ fileUri: uri })
}
loadingOptions.idx[uri] = result
return await documentLoad(LoaderInstances.${root_loader}, result, uri, loadingOptions)
}
|
schema-salad
|
/schema-salad-8.4.20230808163024.tar.gz/schema-salad-8.4.20230808163024/schema_salad/typescript/util/loaders/RootLoader.ts
|
RootLoader.ts
|
| 0.50952 | 0.150778 |
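documentLoad() above special-cases a few directive keys before delegating to the concrete loader: $namespaces/$schemas extend the LoadingOptions, $base overrides the base URI, and $graph replaces the document body. A tiny Python sketch of just that preprocessing step, for illustration only:

def preprocess(doc: dict, baseuri: str):
    namespaces = doc.pop("$namespaces", None)  # would feed a new LoadingOptions
    schemas = doc.pop("$schemas", None)
    baseuri = doc.get("$base", baseuri)
    body = doc.get("$graph", doc)              # load the graph if present, else the doc itself
    return body, baseuri, namespaces, schemas

body, base, ns, sch = preprocess({"$base": "http://example.com/", "$graph": []}, "file:///tmp/doc.yml")
print(base, body)  # http://example.com/ []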
from typing import List, Dict, Optional, Union
from dataclasses import dataclass
import xml.etree.ElementTree as ET
import os
@dataclass(frozen=True)
class St4Entry():
type:str
label:str
node_id:str
link_id:str
titles:Dict[str,str]
content:Dict[str,str]
thumbnail:Optional[str]=None
data_web:Optional[Dict[str,str]]=None
data_web_data:Optional[Dict[str,str]]=None
@property
def languages(self)->List[str]:
if len(self.content) == 0:
return list(self.titles.keys())
return list(self.content.keys())
def get_namespaces(xml_file:Union[str, os.PathLike])->Dict[str,str]:
"""
Extracts the namespaces from a schema st4 xml file
"""
namespaces = {}
for event, elem in ET.iterparse(xml_file, events=("start", "start-ns")):
if event == "start-ns":
prefix, url = elem
namespaces[prefix] = url
return namespaces
def parse(xml_file:Union[str, os.PathLike])->List[St4Entry]:
"""
Parses a schema st4 xml file and returns a list of St4Entry objects
"""
namespaces = get_namespaces(xml_file)
assert "n" in namespaces and "l" in namespaces , "No namespaces found! Is this a valid ST4 file?"
extracted_entries=[]
def extract_language_and_values(element:ET.Element,with_entry=False)->Dict[str,str]:
extracted={}
value_elements = element.findall("./n:Value",namespaces)
for value_element in value_elements:
language = value_element.attrib[(f"{'{'+namespaces['n']+'}'}Aspect")]
if with_entry:
entry_element = value_element.find(".//n:Entry",namespaces)
if entry_element is not None:
extracted[language]=entry_element.text
else:
extracted[language]=value_element.text
return extracted
tree = ET.parse(xml_file)
root = tree.getroot()
# Find all 'n:SystemFolder' elements
system_folder_elements = root.findall(".//n:SystemFolder",namespaces)
for system_folder_element in system_folder_elements:
#get info elements
        info_elements = system_folder_element.findall(".//n:Data-Title/..", namespaces)  # select the parent of each Data-Title element rather than hardcoding the InfoType02 tag
if info_elements is None:
continue
for info_element in info_elements:
#extract label and ids
type=info_element.tag
label = info_element.attrib[(f"{'{'+namespaces['l']+'}'}Label")]
node_id = info_element.attrib[(f"{'{'+namespaces['n']+'}'}Id")]
link_id = info_element.attrib[(f"{'{'+namespaces['l']+'}'}Id")]
#extract the titles in all languages
title_element = info_element.find(".//n:Data-Title",namespaces)
titles=extract_language_and_values(title_element,with_entry=True)
#get the content in all languages
data_content_element = info_element.find(".//n:Data-Content",namespaces)
content={}
if data_content_element is not None:
value_elements = data_content_element.findall("./n:Value",namespaces)
for value_element in value_elements:
language = value_element.attrib[(f"{'{'+namespaces['n']+'}'}Aspect")]
content_element = value_element.find(".//n:Entry//content",namespaces)
content[language]= ET.tostring(content_element, encoding='unicode')
#check if we got content or titles, if not, skip this entry
if len(titles)==0 and len(content)==0:
continue
#get thumbnail if it exists
thumbnail=None
thumbnail_element = info_element.find(".//n:Data-Thumbnail",namespaces)
if thumbnail_element is not None:
thumbnail = thumbnail_element.text
#get data web if it exists
data_web = None
data_web_element = info_element.find(".//n:Data-Web",namespaces)
if data_web_element is not None:
data_web = extract_language_and_values(data_web_element)
            # get Data-Web.Data if it exists (the element really is named "Data-Web.Data" in ST4 exports)
data_web_data = None
data_web_data_element = info_element.find(".//n:Data-Web.Data",namespaces)
if data_web_data_element is not None:
data_web_data = extract_language_and_values(data_web_data_element)
extracted_entries.append(St4Entry(type,label,node_id,link_id,titles,content,thumbnail,data_web,data_web_data))
return extracted_entries
|
schema-st4-parser
|
/schema_st4_parser-1.0.1.tar.gz/schema_st4_parser-1.0.1/src/schema_st4_parser/__init__.py
|
__init__.py
|
| 0.507324 | 0.214229 |
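Typical usage of the parser above is to run parse() on an ST4 XML export and walk the returned St4Entry objects; the file name and the "de-DE" language aspect below are assumptions for illustration:

entries = parse("st4_export.xml")  # hypothetical export file
for entry in entries:
    print(entry.label, entry.node_id, entry.languages)
    title_de = entry.titles.get("de-DE")  # keys come from the n:Aspect attribute of each Value element
    if title_de:
        print(" ", title_de)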
from schema_tools.schema.json import ValueSchema, Enum, StringSchema
class ValidationIssue(object):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "{}: {}".format(self.__class__.__name__, self.msg)
def __repr__(self):
return str(self)
class Warning(ValidationIssue): pass
class Error(ValidationIssue): pass
class Mapping(object):
def __init__(self, source, target):
self.source = source
self.target = target
self.validation = []
@property
def is_valid(self):
return self._validate()
@property
def issues(self):
self._validate()
return self.validation
@property
def errors(self):
self._validate()
return [ e for e in self.validation if isinstance(e, Error) ]
@property
def warnings(self):
self._validate()
return [ w for w in self.validation if isinstance(w, Warning) ]
def warn(self, msg, *args):
self.validation.append(Warning(msg.format(*args)))
return True
def error(self, msg, *args):
self.validation.append(Error(msg.format(*args)))
return False
def __str__(self):
return "source:{}\ntarget:{}".format(repr(self.source), repr(self.target))
# TODO make this more generic and easier to simply add checks
def _validate(self):
self.validation = []
if isinstance(self.source, ValueSchema) and isinstance(self.target, ValueSchema):
return self._validate_value_schemas()
if isinstance(self.source, Enum) and isinstance(self.target, Enum):
return self._validate_enum_schemas()
if isinstance(self.source, Enum) and isinstance(self.target, StringSchema):
return self.warn(
"target type, 'StringSchema', accepts '{}' with cast",
self.source.__class__.__name__
)
return self.error(
"can't compare source '{}' with target '{}'",
self.source.__class__.__name__, self.target.__class__.__name__
)
def _validate_value_schemas(self):
if self.source.__class__ is self.target.__class__: return True
if self.target.__class__ is StringSchema:
return self.warn(
"target type, 'StringSchema', accepts '{}' with cast",
self.source.__class__.__name__
)
else:
return self.error(
"source type '{}' doesn't match target type '{}'",
self.source.__class__.__name__, self.target.__class__.__name__
)
def _validate_enum_schemas(self):
    if self.source.__class__ is not self.target.__class__:
      return self.error(
        "source type '{}' doesn't match target type '{}'",
        self.source.__class__.__name__, self.target.__class__.__name__
      )
    if self.source.values != self.target.values:
      return self.error(
        "source enum values ({}) don't match target enum values ({})",
        ", ".join(self.source.values), ", ".join(self.target.values)
      )
return True
|
schema-tools
|
/schema_tools-0.0.19-py3-none-any.whl/schema_tools/mapping.py
|
mapping.py
|
| 0.512937 | 0.105579 |
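A quick way to exercise the issue-reporting API of Mapping above without constructing real schema classes is to pass plain placeholder objects; they fail every isinstance() check and land in the final "can't compare" branch:

class Placeholder: pass

m = Mapping(Placeholder(), Placeholder())
print(m.is_valid)  # False
for issue in m.errors:
    print(issue)   # Error: can't compare source 'Placeholder' with target 'Placeholder'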
class SchemaNode(object):
def __init__(self, line, column):
self._line = line
self._column = column
class ValueNode(SchemaNode):
def __init__(self, value, line, column):
super().__init__(line, column)
self._value = value
def replace(self, find, replace_by):
return self._value.replace(find, replace_by)
def __repr__(self):
return "ValueNode(value={}, line={}, column={})".format(
self._value, self._line, self._column
)
def __call__(self):
return self._value
def __eq__(self, other):
if not isinstance(other, self.__class__): return NotImplemented
return self._value == other._value
def __hash__(self):
return hash( (self._value, self._line, self._column) )
class ListNode(SchemaNode):
def __init__(self, items, line, column):
super().__init__(line, column)
self._items = items
self.current = -1
def __iter__(self):
return iter(self._items)
def __setitem__(self, key, item):
self._items[key] = item
def __getitem__(self, key):
return self._items[key]
def __repr__(self):
return "ListNode(len={}, line={}, column={})".format(
len(self._items), self._line, self._column
)
def __call__(self):
return [ v() for v in self ]
  def __eq__(self, other):
    if not isinstance(other, self.__class__): return NotImplemented
    if len(self._items) != len(other._items): return False
    for index, _ in enumerate(self._items):
      if self._items[index] != other._items[index]: return False
    return True
class ObjectNode(SchemaNode):
def __init__(self, items, line, column):
super().__init__(line, column)
self._items = items
def __iter__(self):
return iter(self._items.items())
def __setitem__(self, key, item):
self._items[key] = item
def __getitem__(self, key):
return self._items[key]
def __getattr__(self, key):
return self._items[key]
def __repr__(self):
return "ObjectNode(len={}, line={}, column={})".format(
len(self._items), self._line, self._column
)
def __len__(self):
return len(self._items)
def __delitem__(self, key):
del self._items[key]
def clear(self):
return self._items.clear()
def copy(self):
return self._items.copy()
def has_key(self, k):
return k in self._items
def __contains__(self, k):
return k in self._items
def update(self, *args, **kwargs):
return self._items.update(*args, **kwargs)
def keys(self):
return self._items.keys()
def values(self):
return self._items.values()
def items(self):
return self._items.items()
def __call__(self):
return {
k : v() for k, v in self.items()
}
  def __eq__(self, other):
    if not isinstance(other, self.__class__): return NotImplemented
    if self._items.keys() != other._items.keys(): return False
    for key in self._items.keys():
      if self._items[key] != other._items[key]: return False
    return True
|
schema-tools
|
/schema_tools-0.0.19-py3-none-any.whl/schema_tools/ast.py
|
ast.py
|
| 0.760828 | 0.196884 |
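The node classes above wrap parsed values together with their source location; calling a node (node()) unwraps it back to plain Python values. A small usage sketch:

from schema_tools.ast import ValueNode, ListNode, ObjectNode

leaf = ValueNode("hello", line=3, column=5)
numbers = ListNode([ValueNode(1, 4, 3), ValueNode(2, 4, 6)], line=4, column=1)
doc = ObjectNode({"greeting": leaf, "numbers": numbers}, line=1, column=1)

print(doc())                # {'greeting': 'hello', 'numbers': [1, 2]}
print(doc["numbers"][0]())  # 1
print(doc.greeting())       # attribute access is forwarded to the wrapped dict -> 'hello'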
from collections import namedtuple
from schema_tools.ast import ValueNode, ListNode, ObjectNode
location = namedtuple("NodeLocation", "line column")
def node_location(node):
try:
return location(node._line, node._column)
except AttributeError:
raise TypeError("Expected a config node but received a {}.".format(
node.__class__.__name__
))
class VisitorException(Exception): pass
class Visitor(object):
def __init__(self, value_class, list_class, object_class):
self.value_class = value_class
self.list_class = list_class
self.object_class = object_class
def visit(self, obj):
try:
if isinstance(obj, self.object_class):
return self.visit_object(obj)
elif isinstance(obj, self.list_class):
return self.visit_list(obj)
elif isinstance(obj, self.value_class):
return self.visit_value(obj)
else:
raise TypeError("Node type '{}' is not supported by '{}'".format(
obj.__class__.__name__, self.__class__.__name__
))
except VisitorException as e:
raise e
except Exception as e:
raise VisitorException("Failed to visit '{}', due to '{}'".format(
repr(obj),
str(e)
))
def visit_value(self, value_node):
raise NotImplementedError
def visit_list(self, list_node):
raise NotImplementedError
def visit_object(self, object_node):
raise NotImplementedError
class ASTVisitor(Visitor):
def __init__(self):
super().__init__(ValueNode, ListNode, ObjectNode)
self.level = 0
def location(self, node):
return node_location(node)
def visit_value(self, value_node):
raise NotImplementedError
def visit_list(self, list_node):
self.level += 1
children = [ self.visit(item) for item in list_node ]
self.level -= 1
return children
def visit_object(self, object_node):
self.level += 1
children = { str(key) : self.visit(child) for key, child in object_node.items() }
self.level -= 1
return children
class ASTDumper(ASTVisitor):
def dump(self, node):
return "\n".join(self.visit(node))
def indent(self):
return " " * self.level
def location(self, node):
location = super().location(node)
# don't take into account column ;-)
return "[{},{}]{} ".format(*location, self.indent())
def visit_value(self, value_node):
return "{}{}".format(self.location(value_node), value_node())
def visit_object(self, object_node):
children = []
for key, child in super().visit_object(object_node).items():
# reuse location of child for key
children.append("{}{}".format(self.location(object_node[key]), key))
if isinstance(child, list):
children.extend(child)
else:
children.append(child)
return children
|
schema-tools
|
/schema_tools-0.0.19-py3-none-any.whl/schema_tools/utils.py
|
utils.py
|
| 0.675978 | 0.283381 |
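ASTDumper above walks an AST built from the schema_tools.ast node classes and renders one line per node, prefixed with its [line,column] location and indented by nesting depth. A brief usage sketch:

from schema_tools.ast import ValueNode, ObjectNode
from schema_tools.utils import ASTDumper

tree = ObjectNode({"title": ValueNode("Example", 2, 10)}, line=1, column=1)
print(ASTDumper().dump(tree))
# Each emitted line carries the [line,column] of the corresponding value node.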
import collections
import inspect
from schema_tools import json
from schema_tools.utils import ASTVisitor
UnknownProperty = collections.namedtuple("UnknownProperty", "name definition")
def load(path, parser=json):
return build(parser.load(path), origin=path)
def loads(src, parser=json, origin=None):
return build(parser.loads(src), origin=origin)
def build(nodes, origin=None):
from schema_tools.schema.json import SchemaMapper
schema = NodesMapper(SchemaMapper()).visit(nodes)
schema._origin = origin
return schema
class NodesMapper(ASTVisitor):
def __init__(self, *mappers):
super().__init__()
self.mappers = [
func \
for mapper in mappers \
for func in inspect.getmembers(mapper, predicate=callable) \
if func[0].startswith("map_")
]
def visit_value(self, value_node):
return ConstantValueSchema(value=value_node(), _location=self.location(value_node))
def visit_object(self, object_node):
properties = super().visit_object(object_node)
properties["_location"] = self.location(object_node)
for name, mapper in self.mappers:
result = mapper(properties)
if result: return result
return Schema(**properties)
class Mapper(object):
def has(self, properties, name, of_type=None, containing=None):
if not name in properties: return False
value = properties[name]
if isinstance(value, ConstantValueSchema):
value = value.value
if of_type:
if isinstance(of_type, str):
return value == of_type
elif isinstance(of_type, dict):
return value in of_type
else:
if isinstance(value, of_type):
if not containing or containing in value:
return True
return False
return bool(value)
class Schema(object):
args = {}
_location = None
_origin = None
def __init__(self, **kwargs):
self.parent = None
self._location = None
self._origin = None
if "_location" in kwargs:
self._location = kwargs.pop("_location")
self.args = kwargs # catchall properties
# drop examples
if "examples" in kwargs and not isinstance(kwargs["examples"], IdentifiedSchema):
kwargs.pop("examples")
def __getattr__(self, key):
try:
return self.args[key]
except KeyError:
return None
def select(self, *path, stack=None):
path = self._clean(path)
if not path: return None
# print("select", path)
return self._select(*path, stack=stack)
def trace(self, *path):
path = self._clean(path)
if not path: return []
# print("trace", path)
stack = []
self.select(*path, stack=stack)
# add UnknownProperties for not returned items in stack
for missing in path[len(stack):]:
stack.append(UnknownProperty(missing, None))
return stack
def _clean(self, path):
if not path or path[0] is None: return None
# ensure all parts in the path are strings
for step in path:
if not isinstance(step, str):
raise ValueError("only string paths are selectable")
# single path can be dotted string
if len(path) == 1: path = path[0].split(".")
return path
def _select(self, *path, stack=None):
# print(stack, "schema", path)
return None # default
def __repr__(self):
props = { k: v for k, v in self.args.items() } # TODO not "if v" ?
props.update(self._more_repr())
props["<location>"] = self._location
return "{}({})".format(
self.__class__.__name__,
", ".join( [ "{}={}".format(k, v) for k, v in props.items() ] )
)
def _more_repr(self):
return {}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
items = {}
for k, v in self.args.items():
if isinstance(v, Schema):
v = v.to_dict(deref=deref, prefix=prefix, stack=stack+[k])
elif isinstance(v, list):
vv = []
for i in v:
vv.append(i.to_dict(deref=deref, prefix=prefix, stack=stack+[k]) if isinstance(i, Schema) else i)
v = vv
elif v is None or isinstance(v, (str, int, float)):
pass
else:
print(v.__class__.__name__, v)
raise NotImplementedError
items[k] = v
return items
def items(self):
return self.args.items()
def dependencies(self, external=False, visited=None):
return []
@property
def root(self):
if not self.parent: return self
p = self.parent
while not p.parent is None:
p = p.parent
return p
@property
def origin(self):
return self.root._origin
class IdentifiedSchema(Schema): pass
class ConstantValueSchema(IdentifiedSchema):
def to_dict(self, deref=False, prefix=None, stack=None):
return self.value
|
schema-tools
|
/schema_tools-0.0.19-py3-none-any.whl/schema_tools/schema/__init__.py
|
__init__.py
|
import collections
import inspect
UnknownProperty = collections.namedtuple("UnknownProperty", "name definition")
from schema_tools import json
from schema_tools.utils import ASTVisitor
def load(path, parser=json):
return build(parser.load(path), origin=path)
def loads(src, parser=json, origin=None):
return build(parser.loads(src), origin=origin)
def build(nodes, origin=None):
from schema_tools.schema.json import SchemaMapper
schema = NodesMapper(SchemaMapper()).visit(nodes)
schema._origin = origin
return schema
class NodesMapper(ASTVisitor):
def __init__(self, *mappers):
super().__init__()
self.mappers = [
func \
for mapper in mappers \
for func in inspect.getmembers(mapper, predicate=callable) \
if func[0].startswith("map_")
]
def visit_value(self, value_node):
return ConstantValueSchema(value=value_node(), _location=self.location(value_node))
def visit_object(self, object_node):
properties = super().visit_object(object_node)
properties["_location"] = self.location(object_node)
for name, mapper in self.mappers:
result = mapper(properties)
if result: return result
return Schema(**properties)
class Mapper(object):
def has(self, properties, name, of_type=None, containing=None):
if not name in properties: return False
value = properties[name]
if isinstance(value, ConstantValueSchema):
value = value.value
if of_type:
if isinstance(of_type, str):
return value == of_type
elif isinstance(of_type, dict):
return value in of_type
else:
if isinstance(value, of_type):
if not containing or containing in value:
return True
return False
return bool(value)
class Schema(object):
args = {}
_location = None
_origin = None
def __init__(self, **kwargs):
self.parent = None
self._location = None
self._origin = None
if "_location" in kwargs:
self._location = kwargs.pop("_location")
self.args = kwargs # catchall properties
# drop examples
if "examples" in kwargs and not isinstance(kwargs["examples"], IdentifiedSchema):
kwargs.pop("examples")
def __getattr__(self, key):
try:
return self.args[key]
except KeyError:
return None
def select(self, *path, stack=None):
path = self._clean(path)
if not path: return None
# print("select", path)
return self._select(*path, stack=stack)
def trace(self, *path):
path = self._clean(path)
if not path: return []
# print("trace", path)
stack = []
self.select(*path, stack=stack)
# add UnknownProperties for not returned items in stack
for missing in path[len(stack):]:
stack.append(UnknownProperty(missing, None))
return stack
def _clean(self, path):
if not path or path[0] is None: return None
# ensure all parts in the path are strings
for step in path:
if not isinstance(step, str):
raise ValueError("only string paths are selectable")
# single path can be dotted string
if len(path) == 1: path = path[0].split(".")
return path
def _select(self, *path, stack=None):
# print(stack, "schema", path)
return None # default
def __repr__(self):
props = { k: v for k, v in self.args.items() } # TODO not "if v" ?
props.update(self._more_repr())
props["<location>"] = self._location
return "{}({})".format(
self.__class__.__name__,
", ".join( [ "{}={}".format(k, v) for k, v in props.items() ] )
)
def _more_repr(self):
return {}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
items = {}
for k, v in self.args.items():
if isinstance(v, Schema):
v = v.to_dict(deref=deref, prefix=prefix, stack=stack+[k])
elif isinstance(v, list):
vv = []
for i in v:
vv.append(i.to_dict(deref=deref, prefix=prefix, stack=stack+[k]) if isinstance(i, Schema) else i)
v = vv
elif v is None or isinstance(v, (str, int, float)):
pass
else:
print(v.__class__.__name__, v)
raise NotImplementedError
items[k] = v
return items
def items(self):
return self.args.items()
def dependencies(self, external=False, visited=None):
return []
@property
def root(self):
if not self.parent: return self
p = self.parent
while not p.parent is None:
p = p.parent
return p
@property
def origin(self):
return self.root._origin
class IdentifiedSchema(Schema): pass
class ConstantValueSchema(IdentifiedSchema):
def to_dict(self, deref=False, prefix=None, stack=None):
return self.value
| 0.399109 | 0.206114 |
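As a quick illustration of the loader in the entry above, here is a minimal, hypothetical usage sketch. The inline JSON document and the `origin` label are invented for the example; the `select`/`trace`/`to_dict` behaviour is read directly from the code above and assumes the bundled `schema_tools.json` parser produces the AST nodes that `NodesMapper` expects.

```python
# Hypothetical usage sketch of the loader above (the inline document is invented).
from schema_tools.schema import loads

src = '{"type": "object", "properties": {"name": {"type": "string"}}}'

schema = loads(src, origin="<inline>")  # parses with schema_tools.json by default
print(schema.origin)                    # "<inline>", carried by the root schema
print(schema.to_dict())                 # round-trips to a plain dict
print(schema.trace("name.unknown"))     # unresolved steps come back as UnknownProperty
```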
import requests
from requests_file import FileAdapter
from urllib.parse import urldefrag, urlparse
from pathlib import Path
from schema_tools import json, yaml
from schema_tools.schema import Schema, Mapper, loads, IdentifiedSchema, ConstantValueSchema
def log(*args):
if False: print(*args)
class ObjectSchema(IdentifiedSchema):
def __init__(self, properties=None, definitions=None,
allOf=None, anyOf=None, oneOf=None,
**kwargs):
super().__init__(**kwargs)
if properties is None: properties = []
self.properties = properties
if isinstance(self.properties, list):
for prop in self.properties:
prop.parent = self
elif isinstance(self.properties, ConstantValueSchema):
if self.properties.value is None:
self.properties = []
else:
raise ValueError("can't handle properties", self.properties)
self.definitions = []
if definitions:
for definition in definitions:
self.add_definition(definition)
self.allOf = allOf
if self.allOf: self.allOf.parent = self
self.anyOf = anyOf
if self.anyOf: self.anyOf.parent = self
self.oneOf = oneOf
if self.oneOf: self.oneOf.parent = self
def add_definition(self, definition):
definition.parent = self
self.definitions.append(definition)
def definition(self, key, return_definition=True):
for definition in self.definitions:
if definition.name == key:
return definition.definition if return_definition else definition
raise KeyError("'{}' is not a known definition".format(key))
def _combinations(self):
for combination in [ self.allOf, self.anyOf, self.oneOf ]:
if isinstance(combination, Combination):
for option in combination.options:
yield option
def property(self, key, return_definition=True):
# local properties
for prop in self.properties:
if prop.name == key:
return prop.definition if return_definition else prop
# collected/combinations properties
for candidate in self._combinations():
if isinstance(candidate, Reference):
candidate = candidate.resolve()
try:
return candidate.property(key, return_definition=return_definition)
except:
pass
raise KeyError("'{}' is not a known property".format(key))
def _select(self, name, *remainder, stack=None):
if stack is None: stack = []
log(stack, "object", name, remainder)
result = None
# TODO generalize this at schema level
if name == "components" and remainder[0] == "schemas":
try:
remainder = list(remainder)
stack.append("components")
stack.append(remainder.pop(0))
name = remainder.pop(0)
result = self.definition(name, return_definition=False)
stack.append(result)
if remainder:
result = result._select(*remainder, stack=stack)
except KeyError:
pass
else:
try:
result = self.property(name, return_definition=False)
stack.append(result)
if remainder:
result = result._select(*remainder, stack=stack)
except KeyError:
pass
return result
def _more_repr(self):
return {
"properties" : [ prop.name for prop in self.properties ],
"definitions" : [ definition.name for definition in self.definitions ],
# "allOf" : [ repr(candidate) for candidate in self.allOf.options ],
# "oneOf" : [ repr(candidate) for candidate in self.oneOf.options ],
# "anyOf" : [ repr(candidate) for candidate in self.anyOf.options ]
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
if self.properties:
out["properties"] = {
p.name : p.to_dict(deref=deref, prefix=prefix, stack=stack+["properties"]) for p in self.properties
}
if self.definitions:
out["definitions"] = {
d.name : d.to_dict(deref=deref, prefix=prefix, stack=stack+["definitions"]) for d in self.definitions
}
if self.allOf:
out["allOf"] = [
a.to_dict(deref=deref, prefix=prefix, stack=stack) for a in self.allOf.options
]
if self.oneOf:
out["oneOf"] = [
a.to_dict(deref=deref, prefix=prefix, stack=stack) for a in self.oneOf.options
]
if self.anyOf:
out["anyOf"] = [
a.to_dict(deref=deref, prefix=prefix, stack=stack) for a in self.anyOf.options
]
return out
def dependencies(self, external=False, visited=None):
return list({
dependency \
for prop in self.properties + list(self._combinations()) \
for dependency in prop.dependencies(external=external, visited=visited)
})
class Definition(IdentifiedSchema):
def __init__(self, name, definition):
self.name = name
self._definition = definition
if isinstance(self._definition, Schema):
self._definition.parent = self
self._location = self._definition._location
else:
raise ValueError("unsupported items type: '{}'".format(
self.items.__class__.__type__)
)
def is_ref(self):
return isinstance(self._definition, Reference)
@property
def definition(self):
d = self._definition
while isinstance(d, Reference):
d = d.resolve()
return d
def _more_repr(self):
return {
"name" : self.name,
"definition" : repr(self._definition)
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
if isinstance(self._definition, Schema):
return self._definition.to_dict(deref=deref, prefix=prefix, stack=stack + [self.name])
else:
return self._definition
def _select(self, *path, stack=None):
log(stack, "definition/property", path)
return self.definition._select(*path, stack=stack)
def dependencies(self, external=False, visited=None):
return self._definition.dependencies(external=external, visited=visited)
class Property(Definition): pass
class ValueSchema(IdentifiedSchema): pass
class StringSchema(ValueSchema): pass
class IntegerSchema(ValueSchema): pass
class NullSchema(ValueSchema): pass
class NumberSchema(ValueSchema): pass
class BooleanSchema(ValueSchema): pass
class ArraySchema(IdentifiedSchema):
def __init__(self, items=None, **kwargs):
super().__init__(**kwargs)
self.items = items
if isinstance(self.items, Schema):
self.items.parent = self
elif self.items is None:
self.items = []
else:
raise ValueError("unsupported items type: '{}'".format(
self.items.__class__.__name__)
)
def _more_repr(self):
return {
"items" : repr(self.items)
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
if isinstance(self.items, Schema):
out["items"] = self.items.to_dict(deref=deref, prefix=prefix, stack=stack+["items"])
else:
out["items"] = self.items
return out
def _select(self, index, *path, stack=None):
# TODO in case of (None)
log(stack, "array", index, path)
if isinstance(self.items, Schema):
return self.items._select(index, *path, stack=stack)
def dependencies(self, external=False, visited=None):
if isinstance(self.items, Schema):
return self.items.dependencies(external=external, visited=visited)
else:
return list({
dependency \
for item in self.items \
for dependency in item.dependencies(external=external, visited=visited)
})
class TupleItem(Definition):
def _more_repr(self):
return {
"index" : self.name,
"definition" : repr(self._definition)
}
class TupleSchema(IdentifiedSchema):
def __init__(self, items=None, **kwargs):
super().__init__(**kwargs)
self.items = items
if not isinstance(self.items, list):
raise ValueError("tuple items should be list, not: '{}'".format(
self.items.__class__.__name__)
)
for item in self.items:
item.parent = self
def _more_repr(self):
return {
"items" : repr(self.items)
}
def item(self, index):
return self[index].definition
def __getitem__(self, index):
if not isinstance(index, int):
raise TypeError("tuple access only with numeric indices")
return self.items[index]
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
out["items"] = [ item.to_dict(deref=deref, prefix=prefix, stack=stack) for item in self.items ]
return out
def _select(self, index, *path, stack=None):
log(stack, "tuple", index, path)
if path:
return self[int(index)]._select(*path, stack=stack)
else:
return self[int(index)]
def dependencies(self, external=False, visited=None):
return list({
dependency \
for item in self.items \
for dependency in item.dependencies(external=external, visited=visited)
})
class Combination(IdentifiedSchema):
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options if options else []
for option in self.options:
option.parent = self
def _more_repr(self):
return {
"options" : len(self.options)
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
name = self.__class__.__name__
name = name[0].lower() + name[1:]
out[name] = [
o.to_dict(deref=deref, prefix=prefix, stack=stack+[name]+[str(index)]) \
for index, o in enumerate(self.options)
]
return out
def _select(self, *path, stack=None):
log(stack, "combination", path)
best_stack = []
result = None
for option in self.options:
local_stack = []
result = option._select(*path, stack=local_stack)
if len(local_stack) > len(best_stack):
best_stack = local_stack
if result: break
stack.extend(local_stack)
return result
def dependencies(self, external=False, visited=None):
return list({
dependency \
for option in self.options \
for dependency in option.dependencies(external=external, visited=visited)
})
class AllOf(Combination): pass
class AnyOf(Combination): pass
class OneOf(Combination): pass
class Reference(IdentifiedSchema):
def __init__(self, ref=None, **kwargs):
super().__init__(**kwargs)
self.ref = ref.value
def __repr__(self):
return "Reference(ref={})".format( self.ref )
def _more_repr(self):
return {
"$ref" : self.ref
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
if prefix is None: prefix = "#"
if deref:
if self.is_remote:
prefix = "#/" + "/".join(stack)
return self.resolve(strip_id=True).to_dict(deref=deref, prefix=prefix, stack=stack)
else:
return { "$ref" : prefix + self.ref[1:] }
return { "$ref" : self.ref }
def resolve(self, return_definition=True, strip_id=False):
url = ""
fragment = ""
parts = self.ref.split("#")
if len(parts) == 1:
url = self.ref
else:
url = parts[0]
fragment = parts[1]
if url:
doc = self._fetch(url)
if strip_id:
try:
del doc.args["$id"]
except KeyError:
pass
else:
doc = self.root
if not fragment:
return doc
name = None
fragment_schema = None
if fragment.startswith("/definitions/"):
name = fragment.replace("/definitions/", "")
      if not doc.definitions:
raise ValueError("doc " + repr(doc) + " has no definitions ?!")
fragment_schema = doc.definition(name, return_definition=return_definition)
elif fragment.startswith("/properties/"):
name = fragment.replace("/properties/", "")
fragment_schema = doc.property(name, return_definition=return_definition)
elif fragment.startswith("/components/schemas/"):
name = fragment.replace("/components/schemas/", "")
fragment_schema = doc.definition(name, return_definition=return_definition)
else:
raise NotImplementedError
    # FIXME: when referring to a non-local fragment, the fragment can refer to
    #        something else in its own file. A partial solution here includes
    #        all other definitions. Referring to properties or the whole schema
    #        remains problematic.
if url and isinstance(fragment_schema, ObjectSchema) and doc.definition:
for definition in doc.definitions:
if definition.name != name:
fragment_schema.add_definition(Definition(definition.name, definition._definition))
return fragment_schema
def _fetch(self, url):
s = requests.Session()
s.mount("file:", FileAdapter())
# make sure file url is absolute
u = urlparse(url)
if u.scheme == "file":
u = u._replace(path=str(Path(u.path).absolute()))
url = u.geturl()
try:
doc = s.get(url)
except Exception as e:
raise ValueError("unable to fetch '{}', due to '{}'".format(url, str(e)))
src = doc.text
try:
return loads(src, origin=url)
except:
try:
return loads(src, parser=yaml)
except Exception as e:
print(src)
raise ValueError("unable to parse '{}', due to '{}'".format(url, str(e)))
def _select(self, *path, stack=None):
    log(stack, "ref", path)
return self.resolve()._select(*path, stack=stack)
@property
def is_remote(self):
return not self.ref.startswith("#")
def dependencies(self, external=False, visited=None):
if not visited: visited = []
if self in visited:
return []
visited.append(self)
if self.is_remote:
if external:
return list(set( self.resolve(return_definition=False).dependencies(external=external, visited=visited) + [ self ] ))
else:
return [ self ]
else:
return list(set( self.resolve(return_definition=False).dependencies(external=external, visited=visited) ))
def __hash__(self):
return hash(self.ref)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.ref == other.ref
else:
return False
class Enum(IdentifiedSchema):
def __init__(self, enum=None, **kwargs):
super().__init__(**kwargs)
self.values = []
if enum:
for e in enum:
if not isinstance(e, ConstantValueSchema):
raise ValueError("not constant value", e)
else:
self.values.append(e.value)
def _more_repr(self):
return {
"enum" : self.values
}
def to_dict(self, deref=False, prefix=None, stack=None):
return { "enum" : self.values }
class SchemaMapper(Mapper):
def map_object(self, properties):
if self.has( properties, "type", "object" ) or \
self.has( properties, "type", list, containing="object") or \
( self.has(properties, "properties") and \
not isinstance(properties["properties"], IdentifiedSchema) ) or \
( self.has(properties, "components") and \
not isinstance(properties["components"], IdentifiedSchema) ):
# properties and definitions bubble up as Generic Schemas
if self.has(properties, "properties"):
properties["properties"] = [
Property(name, definition) \
for name, definition in properties["properties"].items()
]
if self.has(properties, "definitions"):
properties["definitions"] = [
Definition(name, definition) \
for name, definition in properties["definitions"].items()
]
if self.has(properties, "components") and properties["components"].schemas:
components = properties.pop("components")
if not "definitions" in properties: properties["definitions"] = []
properties["definitions"] += [
Definition(name, definition) \
for name, definition in components.schemas.items()
]
# extract combinations
for combination, cls in { "allOf" : AllOf, "oneOf" : OneOf, "anyOf": AnyOf }.items():
options = self._combine_options(properties, combination, combination.lower())
if options:
properties[combination] = cls(options=options)
return ObjectSchema(**properties)
def map_value(self, properties):
value_mapping = {
"boolean": BooleanSchema,
"integer": IntegerSchema,
"null": NullSchema,
"number": NumberSchema,
"string": StringSchema
}
if self.has(properties, "type", value_mapping):
return value_mapping[properties["type"].value](**properties)
def map_array(self, properties):
if not self.has(properties, "type", "array"): return
if self.has(properties, "items", list):
properties["items"] = [
TupleItem(index, value) \
for index, value in enumerate(properties["items"])
]
return TupleSchema(**properties)
return ArraySchema(**properties)
def _combine_options(self, properties, *keys):
combined = []
for key in keys:
if self.has(properties, key):
combined += properties.pop(key, {})
return combined
def map_all_of(self, properties):
if "type" in properties: return
options = self._combine_options(properties, "allOf", "allof")
if options:
properties["options"] = options
return AllOf(**properties)
def map_any_of(self, properties):
if "type" in properties: return
options = self._combine_options(properties, "anyOf", "anyof")
if options:
properties["options"] = options
return AnyOf(**properties)
def map_one_of(self, properties):
if "type" in properties: return
options = self._combine_options(properties, "oneOf", "oneof")
if options:
properties["options"] = options
return OneOf(**properties)
def map_reference(self, properties):
if self.has(properties, "$ref", str):
properties["ref"] = properties.pop("$ref")
return Reference(**properties)
def map_enum(self, properties):
if self.has(properties, "enum", list):
return Enum(**properties)
|
schema-tools
|
/schema_tools-0.0.19-py3-none-any.whl/schema_tools/schema/json.py
|
json.py
|
import requests
from requests_file import FileAdapter
from urllib.parse import urldefrag, urlparse
from pathlib import Path
from schema_tools import json, yaml
from schema_tools.schema import Schema, Mapper, loads, IdentifiedSchema, ConstantValueSchema
def log(*args):
if False: print(*args)
class ObjectSchema(IdentifiedSchema):
def __init__(self, properties=None, definitions=None,
allOf=None, anyOf=None, oneOf=None,
**kwargs):
super().__init__(**kwargs)
if properties is None: properties = []
self.properties = properties
if isinstance(self.properties, list):
for prop in self.properties:
prop.parent = self
elif isinstance(self.properties, ConstantValueSchema):
if self.properties.value is None:
self.properties = []
else:
raise ValueError("can't handle properties", self.properties)
self.definitions = []
if definitions:
for definition in definitions:
self.add_definition(definition)
self.allOf = allOf
if self.allOf: self.allOf.parent = self
self.anyOf = anyOf
if self.anyOf: self.anyOf.parent = self
self.oneOf = oneOf
if self.oneOf: self.oneOf.parent = self
def add_definition(self, definition):
definition.parent = self
self.definitions.append(definition)
def definition(self, key, return_definition=True):
for definition in self.definitions:
if definition.name == key:
return definition.definition if return_definition else definition
raise KeyError("'{}' is not a known definition".format(key))
def _combinations(self):
for combination in [ self.allOf, self.anyOf, self.oneOf ]:
if isinstance(combination, Combination):
for option in combination.options:
yield option
def property(self, key, return_definition=True):
# local properties
for prop in self.properties:
if prop.name == key:
return prop.definition if return_definition else prop
# collected/combinations properties
for candidate in self._combinations():
if isinstance(candidate, Reference):
candidate = candidate.resolve()
try:
return candidate.property(key, return_definition=return_definition)
except:
pass
raise KeyError("'{}' is not a known property".format(key))
def _select(self, name, *remainder, stack=None):
if stack is None: stack = []
log(stack, "object", name, remainder)
result = None
# TODO generalize this at schema level
if name == "components" and remainder[0] == "schemas":
try:
remainder = list(remainder)
stack.append("components")
stack.append(remainder.pop(0))
name = remainder.pop(0)
result = self.definition(name, return_definition=False)
stack.append(result)
if remainder:
result = result._select(*remainder, stack=stack)
except KeyError:
pass
else:
try:
result = self.property(name, return_definition=False)
stack.append(result)
if remainder:
result = result._select(*remainder, stack=stack)
except KeyError:
pass
return result
def _more_repr(self):
return {
"properties" : [ prop.name for prop in self.properties ],
"definitions" : [ definition.name for definition in self.definitions ],
# "allOf" : [ repr(candidate) for candidate in self.allOf.options ],
# "oneOf" : [ repr(candidate) for candidate in self.oneOf.options ],
# "anyOf" : [ repr(candidate) for candidate in self.anyOf.options ]
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
if self.properties:
out["properties"] = {
p.name : p.to_dict(deref=deref, prefix=prefix, stack=stack+["properties"]) for p in self.properties
}
if self.definitions:
out["definitions"] = {
d.name : d.to_dict(deref=deref, prefix=prefix, stack=stack+["definitions"]) for d in self.definitions
}
if self.allOf:
out["allOf"] = [
a.to_dict(deref=deref, prefix=prefix, stack=stack) for a in self.allOf.options
]
if self.oneOf:
out["oneOf"] = [
a.to_dict(deref=deref, prefix=prefix, stack=stack) for a in self.oneOf.options
]
if self.anyOf:
out["anyOf"] = [
a.to_dict(deref=deref, prefix=prefix, stack=stack) for a in self.anyOf.options
]
return out
def dependencies(self, external=False, visited=None):
return list({
dependency \
for prop in self.properties + list(self._combinations()) \
for dependency in prop.dependencies(external=external, visited=visited)
})
class Definition(IdentifiedSchema):
def __init__(self, name, definition):
self.name = name
self._definition = definition
if isinstance(self._definition, Schema):
self._definition.parent = self
self._location = self._definition._location
else:
raise ValueError("unsupported items type: '{}'".format(
self.items.__class__.__type__)
)
def is_ref(self):
return isinstance(self._definition, Reference)
@property
def definition(self):
d = self._definition
while isinstance(d, Reference):
d = d.resolve()
return d
def _more_repr(self):
return {
"name" : self.name,
"definition" : repr(self._definition)
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
if isinstance(self._definition, Schema):
return self._definition.to_dict(deref=deref, prefix=prefix, stack=stack + [self.name])
else:
return self._definition
def _select(self, *path, stack=None):
log(stack, "definition/property", path)
return self.definition._select(*path, stack=stack)
def dependencies(self, external=False, visited=None):
return self._definition.dependencies(external=external, visited=visited)
class Property(Definition): pass
class ValueSchema(IdentifiedSchema): pass
class StringSchema(ValueSchema): pass
class IntegerSchema(ValueSchema): pass
class NullSchema(ValueSchema): pass
class NumberSchema(ValueSchema): pass
class BooleanSchema(ValueSchema): pass
class ArraySchema(IdentifiedSchema):
def __init__(self, items=None, **kwargs):
super().__init__(**kwargs)
self.items = items
if isinstance(self.items, Schema):
self.items.parent = self
elif self.items is None:
self.items = []
else:
raise ValueError("unsupported items type: '{}'".format(
self.items.__class__.__name__)
)
def _more_repr(self):
return {
"items" : repr(self.items)
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
if isinstance(self.items, Schema):
out["items"] = self.items.to_dict(deref=deref, prefix=prefix, stack=stack+["items"])
else:
out["items"] = self.items
return out
def _select(self, index, *path, stack=None):
# TODO in case of (None)
log(stack, "array", index, path)
if isinstance(self.items, Schema):
return self.items._select(index, *path, stack=stack)
def dependencies(self, external=False, visited=None):
if isinstance(self.items, Schema):
return self.items.dependencies(external=external, visited=visited)
else:
return list({
dependency \
for item in self.items \
for dependency in item.dependencies(external=external, visited=visited)
})
class TupleItem(Definition):
def _more_repr(self):
return {
"index" : self.name,
"definition" : repr(self._definition)
}
class TupleSchema(IdentifiedSchema):
def __init__(self, items=None, **kwargs):
super().__init__(**kwargs)
self.items = items
if not isinstance(self.items, list):
raise ValueError("tuple items should be list, not: '{}'".format(
self.items.__class__.__name__)
)
for item in self.items:
item.parent = self
def _more_repr(self):
return {
"items" : repr(self.items)
}
def item(self, index):
return self[index].definition
def __getitem__(self, index):
if not isinstance(index, int):
raise TypeError("tuple access only with numeric indices")
return self.items[index]
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
out["items"] = [ item.to_dict(deref=deref, prefix=prefix, stack=stack) for item in self.items ]
return out
def _select(self, index, *path, stack=None):
log(stack, "tuple", index, path)
if path:
return self[int(index)]._select(*path, stack=stack)
else:
return self[int(index)]
def dependencies(self, external=False, visited=None):
return list({
dependency \
for item in self.items \
for dependency in item.dependencies(external=external, visited=visited)
})
class Combination(IdentifiedSchema):
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options if options else []
for option in self.options:
option.parent = self
def _more_repr(self):
return {
"options" : len(self.options)
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
out = super().to_dict(deref=deref, prefix=prefix, stack=stack)
name = self.__class__.__name__
name = name[0].lower() + name[1:]
out[name] = [
o.to_dict(deref=deref, prefix=prefix, stack=stack+[name]+[str(index)]) \
for index, o in enumerate(self.options)
]
return out
def _select(self, *path, stack=None):
log(stack, "combination", path)
best_stack = []
result = None
for option in self.options:
local_stack = []
result = option._select(*path, stack=local_stack)
if len(local_stack) > len(best_stack):
best_stack = local_stack
if result: break
stack.extend(local_stack)
return result
def dependencies(self, external=False, visited=None):
return list({
dependency \
for option in self.options \
for dependency in option.dependencies(external=external, visited=visited)
})
class AllOf(Combination): pass
class AnyOf(Combination): pass
class OneOf(Combination): pass
class Reference(IdentifiedSchema):
def __init__(self, ref=None, **kwargs):
super().__init__(**kwargs)
self.ref = ref.value
def __repr__(self):
return "Reference(ref={})".format( self.ref )
def _more_repr(self):
return {
"$ref" : self.ref
}
def to_dict(self, deref=False, prefix=None, stack=None):
if stack is None: stack = []
if prefix is None: prefix = "#"
if deref:
if self.is_remote:
prefix = "#/" + "/".join(stack)
return self.resolve(strip_id=True).to_dict(deref=deref, prefix=prefix, stack=stack)
else:
return { "$ref" : prefix + self.ref[1:] }
return { "$ref" : self.ref }
def resolve(self, return_definition=True, strip_id=False):
url = ""
fragment = ""
parts = self.ref.split("#")
if len(parts) == 1:
url = self.ref
else:
url = parts[0]
fragment = parts[1]
if url:
doc = self._fetch(url)
if strip_id:
try:
del doc.args["$id"]
except KeyError:
pass
else:
doc = self.root
if not fragment:
return doc
name = None
fragment_schema = None
if fragment.startswith("/definitions/"):
name = fragment.replace("/definitions/", "")
      if not doc.definitions:
raise ValueError("doc " + repr(doc) + " has no definitions ?!")
fragment_schema = doc.definition(name, return_definition=return_definition)
elif fragment.startswith("/properties/"):
name = fragment.replace("/properties/", "")
fragment_schema = doc.property(name, return_definition=return_definition)
elif fragment.startswith("/components/schemas/"):
name = fragment.replace("/components/schemas/", "")
fragment_schema = doc.definition(name, return_definition=return_definition)
else:
raise NotImplementedError
    # FIXME: when referring to a non-local fragment, the fragment can refer to
    #        something else in its own file. A partial solution here includes
    #        all other definitions. Referring to properties or the whole schema
    #        remains problematic.
if url and isinstance(fragment_schema, ObjectSchema) and doc.definition:
for definition in doc.definitions:
if definition.name != name:
fragment_schema.add_definition(Definition(definition.name, definition._definition))
return fragment_schema
def _fetch(self, url):
s = requests.Session()
s.mount("file:", FileAdapter())
# make sure file url is absolute
u = urlparse(url)
if u.scheme == "file":
u = u._replace(path=str(Path(u.path).absolute()))
url = u.geturl()
try:
doc = s.get(url)
except Exception as e:
raise ValueError("unable to fetch '{}', due to '{}'".format(url, str(e)))
src = doc.text
try:
return loads(src, origin=url)
except:
try:
return loads(src, parser=yaml)
except Exception as e:
print(src)
raise ValueError("unable to parse '{}', due to '{}'".format(url, str(e)))
def _select(self, *path, stack=None):
    log(stack, "ref", path)
return self.resolve()._select(*path, stack=stack)
@property
def is_remote(self):
return not self.ref.startswith("#")
def dependencies(self, external=False, visited=None):
if not visited: visited = []
if self in visited:
return []
visited.append(self)
if self.is_remote:
if external:
return list(set( self.resolve(return_definition=False).dependencies(external=external, visited=visited) + [ self ] ))
else:
return [ self ]
else:
return list(set( self.resolve(return_definition=False).dependencies(external=external, visited=visited) ))
def __hash__(self):
return hash(self.ref)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.ref == other.ref
else:
return False
class Enum(IdentifiedSchema):
def __init__(self, enum=None, **kwargs):
super().__init__(**kwargs)
self.values = []
if enum:
for e in enum:
if not isinstance(e, ConstantValueSchema):
raise ValueError("not constant value", e)
else:
self.values.append(e.value)
def _more_repr(self):
return {
"enum" : self.values
}
def to_dict(self, deref=False, prefix=None, stack=None):
return { "enum" : self.values }
class SchemaMapper(Mapper):
def map_object(self, properties):
if self.has( properties, "type", "object" ) or \
self.has( properties, "type", list, containing="object") or \
( self.has(properties, "properties") and \
not isinstance(properties["properties"], IdentifiedSchema) ) or \
( self.has(properties, "components") and \
not isinstance(properties["components"], IdentifiedSchema) ):
# properties and definitions bubble up as Generic Schemas
if self.has(properties, "properties"):
properties["properties"] = [
Property(name, definition) \
for name, definition in properties["properties"].items()
]
if self.has(properties, "definitions"):
properties["definitions"] = [
Definition(name, definition) \
for name, definition in properties["definitions"].items()
]
if self.has(properties, "components") and properties["components"].schemas:
components = properties.pop("components")
if not "definitions" in properties: properties["definitions"] = []
properties["definitions"] += [
Definition(name, definition) \
for name, definition in components.schemas.items()
]
# extract combinations
for combination, cls in { "allOf" : AllOf, "oneOf" : OneOf, "anyOf": AnyOf }.items():
options = self._combine_options(properties, combination, combination.lower())
if options:
properties[combination] = cls(options=options)
return ObjectSchema(**properties)
def map_value(self, properties):
value_mapping = {
"boolean": BooleanSchema,
"integer": IntegerSchema,
"null": NullSchema,
"number": NumberSchema,
"string": StringSchema
}
if self.has(properties, "type", value_mapping):
return value_mapping[properties["type"].value](**properties)
def map_array(self, properties):
if not self.has(properties, "type", "array"): return
if self.has(properties, "items", list):
properties["items"] = [
TupleItem(index, value) \
for index, value in enumerate(properties["items"])
]
return TupleSchema(**properties)
return ArraySchema(**properties)
def _combine_options(self, properties, *keys):
combined = []
for key in keys:
if self.has(properties, key):
combined += properties.pop(key, {})
return combined
def map_all_of(self, properties):
if "type" in properties: return
options = self._combine_options(properties, "allOf", "allof")
if options:
properties["options"] = options
return AllOf(**properties)
def map_any_of(self, properties):
if "type" in properties: return
options = self._combine_options(properties, "anyOf", "anyof")
if options:
properties["options"] = options
return AnyOf(**properties)
def map_one_of(self, properties):
if "type" in properties: return
options = self._combine_options(properties, "oneOf", "oneof")
if options:
properties["options"] = options
return OneOf(**properties)
def map_reference(self, properties):
if self.has(properties, "$ref", str):
properties["ref"] = properties.pop("$ref")
return Reference(**properties)
def map_enum(self, properties):
if self.has(properties, "enum", list):
return Enum(**properties)
| 0.443841 | 0.180504 |
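To make the mapper and reference classes above concrete, the following is a small, hypothetical sketch. The inline document is invented and the import path follows the file listing above; local `#/definitions/...` references are resolved through `Reference.resolve()` exactly as implemented in this entry.

```python
# Hypothetical sketch: local "$ref" resolution with the classes above.
from schema_tools.schema import loads

src = '''{
  "type": "object",
  "properties": { "address": { "$ref": "#/definitions/address" } },
  "definitions": {
    "address": { "type": "object",
                 "properties": { "street": { "type": "string" } } }
  }
}'''

schema = loads(src)
address = schema.property("address")        # Definition.definition follows the Reference
print(address.property("street"))           # StringSchema of the nested property
print(schema.select("address.street"))      # dotted selection walks through the ref too
print(schema.to_dict()["properties"]["address"])  # {'$ref': '#/definitions/address'}
print(schema.dependencies())                # [] - the only reference is local
```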
from __future__ import unicode_literals
import six
import abc
import json
import logging
from jsonpointer import resolve_pointer, JsonPointerException
logger = logging.getLogger(__name__)
try:
from lxml import etree
except ImportError:
logger.info('You will need to install lxml to use the XML transformer')
@six.add_metaclass(abc.ABCMeta)
class BaseTransformer(object):
def __init__(self, schema):
self.schema = schema
@abc.abstractmethod
def _transform_string(self, string, doc):
raise NotImplementedError
@abc.abstractmethod
def load(self, doc):
raise NotImplementedError
def transform(self, doc, fail=False, load=True):
doc = self.load(doc) if load else doc
return self._transform_dict(self.schema, doc, fail=fail)
def _transform_dict(self, d, doc, fail=False):
return {
key: self._maybe_transform_value(value, doc, fail=fail)
for key, value in d.items()
}
def _transform_list(self, l, doc, fail=False):
return [
self._maybe_transform_value(item, doc, fail=fail)
for item in l
]
def _maybe_transform_value(self, value, doc, fail=False):
try:
return self._transform_value(value, doc, fail=fail)
except Exception as e:
if fail:
raise
logger.exception(e)
return None
def _transform_value(self, value, doc, fail=False):
if isinstance(value, dict):
return self._transform_dict(value, doc, fail=fail)
elif isinstance(value, list):
return self._transform_list(value, doc, fail=fail)
elif isinstance(value, tuple) and len(value) == 2 and isinstance(value[0], tuple):
return self._transform_args_kwargs(value, doc)
elif isinstance(value, tuple):
return self._transform_tuple(value, doc)
elif isinstance(value, six.string_types):
return self._transform_string(value, doc)
elif callable(value):
return value(doc)
def _transform_tuple(self, l, doc):
fn, values = l[-1], l[:-1]
args = []
for value in values:
if isinstance(value, six.string_types):
args.append(self._transform_string(value, doc))
elif callable(value):
args.append(value(doc))
return fn(*args)
def _transform_args_kwargs(self, l, doc):
fn = l[1]
return fn(
*self._transform_args(l[0], doc),
**self._transform_kwargs(l[0], doc)
)
def _transform_args(self, t, doc):
return [self._transform_string(arg, doc) for arg in t[0]]
def _transform_kwargs(self, t, doc):
return {
k: self._transform_string(v, doc) for k, v in t[1].items()
} if len(t) == 2 else {}
@six.add_metaclass(abc.ABCMeta)
class XMLTransformer(BaseTransformer):
def __init__(self, schema, namespaces=None):
BaseTransformer.__init__(self, schema)
self.namespaces = namespaces or {}
def load(self, doc):
return etree.XML(doc)
def _transform_string(self, string, doc):
return doc.xpath(string, namespaces=self.namespaces)
@six.add_metaclass(abc.ABCMeta)
class JSONTransformer(BaseTransformer):
def load(self, doc):
return json.loads(doc)
def _transform_string(self, val, doc):
try:
return resolve_pointer(doc, val)
except JsonPointerException as e:
# This is because of jsonpointer's exception structure
if 'not found in' in e.args[0] or 'is not a valid list index' in e.args[0]:
return None
raise e
@six.add_metaclass(abc.ABCMeta)
class CSVTransformer(BaseTransformer):
def __init__(self, schema, keys):
BaseTransformer.__init__(self, schema)
self.keys = dict((val, key) for key, val in enumerate(keys))
def load(self, doc):
raise NotImplementedError
def _transform_string(self, val, doc):
return doc[self.keys[val]]
|
schema-transformer
|
/schema-transformer-0.0.2.tar.gz/schema-transformer-0.0.2/schema_transformer/transformer.py
|
transformer.py
|
from __future__ import unicode_literals
import six
import abc
import json
import logging
from jsonpointer import resolve_pointer, JsonPointerException
logger = logging.getLogger(__name__)
try:
from lxml import etree
except ImportError:
logger.info('You will need to install lxml to use the XML transformer')
@six.add_metaclass(abc.ABCMeta)
class BaseTransformer(object):
def __init__(self, schema):
self.schema = schema
@abc.abstractmethod
def _transform_string(self, string, doc):
raise NotImplementedError
@abc.abstractmethod
def load(self, doc):
raise NotImplementedError
def transform(self, doc, fail=False, load=True):
doc = self.load(doc) if load else doc
return self._transform_dict(self.schema, doc, fail=fail)
def _transform_dict(self, d, doc, fail=False):
return {
key: self._maybe_transform_value(value, doc, fail=fail)
for key, value in d.items()
}
def _transform_list(self, l, doc, fail=False):
return [
self._maybe_transform_value(item, doc, fail=fail)
for item in l
]
def _maybe_transform_value(self, value, doc, fail=False):
try:
return self._transform_value(value, doc, fail=fail)
except Exception as e:
if fail:
raise
logger.exception(e)
return None
def _transform_value(self, value, doc, fail=False):
if isinstance(value, dict):
return self._transform_dict(value, doc, fail=fail)
elif isinstance(value, list):
return self._transform_list(value, doc, fail=fail)
elif isinstance(value, tuple) and len(value) == 2 and isinstance(value[0], tuple):
return self._transform_args_kwargs(value, doc)
elif isinstance(value, tuple):
return self._transform_tuple(value, doc)
elif isinstance(value, six.string_types):
return self._transform_string(value, doc)
elif callable(value):
return value(doc)
def _transform_tuple(self, l, doc):
fn, values = l[-1], l[:-1]
args = []
for value in values:
if isinstance(value, six.string_types):
args.append(self._transform_string(value, doc))
elif callable(value):
args.append(value(doc))
return fn(*args)
def _transform_args_kwargs(self, l, doc):
fn = l[1]
return fn(
*self._transform_args(l[0], doc),
**self._transform_kwargs(l[0], doc)
)
def _transform_args(self, t, doc):
return [self._transform_string(arg, doc) for arg in t[0]]
def _transform_kwargs(self, t, doc):
return {
k: self._transform_string(v, doc) for k, v in t[1].items()
} if len(t) == 2 else {}
@six.add_metaclass(abc.ABCMeta)
class XMLTransformer(BaseTransformer):
def __init__(self, schema, namespaces=None):
BaseTransformer.__init__(self, schema)
self.namespaces = namespaces or {}
def load(self, doc):
return etree.XML(doc)
def _transform_string(self, string, doc):
return doc.xpath(string, namespaces=self.namespaces)
@six.add_metaclass(abc.ABCMeta)
class JSONTransformer(BaseTransformer):
def load(self, doc):
return json.loads(doc)
def _transform_string(self, val, doc):
try:
return resolve_pointer(doc, val)
except JsonPointerException as e:
# This is because of jsonpointer's exception structure
if 'not found in' in e.args[0] or 'is not a valid list index' in e.args[0]:
return None
raise e
@six.add_metaclass(abc.ABCMeta)
class CSVTransformer(BaseTransformer):
def __init__(self, schema, keys):
BaseTransformer.__init__(self, schema)
self.keys = dict((val, key) for key, val in enumerate(keys))
def load(self, doc):
raise NotImplementedError
def _transform_string(self, val, doc):
return doc[self.keys[val]]
| 0.584745 | 0.139133 |
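A brief, hypothetical usage sketch of the transformers above; the schema and document are invented, and the import path is taken from the file listing. It relies only on behaviour visible in the code: JSON pointers are resolved with `resolve_pointer`, plain tuples end in a callable applied to the resolved values, and unresolvable pointers fall back to `None`.

```python
# Hypothetical sketch of JSONTransformer usage (schema and document invented).
from schema_transformer.transformer import JSONTransformer

schema = {
    "title": "/message/title",                         # JSON pointer into the document
    "upper": ("/message/title", lambda s: s.upper()),  # tuple: pointer(s) + trailing callable
    "missing": "/does/not/exist",                      # unresolvable pointers become None
}

print(JSONTransformer(schema).transform('{"message": {"title": "hello"}}'))
# {'title': 'hello', 'upper': 'HELLO', 'missing': None}
```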
import functools
from copy import deepcopy
def CONSTANT(x):
''' Takes a value, returns a function that always returns that value
Useful inside schemas for defining constants
>>> CONSTANT(7)('my', 'name', verb='is')
7
>>> CONSTANT([123, 456])()
[123, 456]
'''
def inner(*y, **z):
return x
return inner
def compose(*functions):
''' evaluates functions from right to left.
>>> add = lambda x, y: x + y
>>> add3 = lambda x: x + 3
>>> divide2 = lambda x: x/2
>>> subtract4 = lambda x: x - 4
>>> subtract1 = compose(add3, subtract4)
>>> subtract1(1)
0
>>> compose(subtract1, add3)(4)
6
>>> compose(int, add3, add3, divide2)(4)
8
>>> compose(int, divide2, add3, add3)(4)
5
>>> compose(int, divide2, compose(add3, add3), add)(7, 3)
8
'''
def inner(func1, func2):
return lambda *x, **y: func1(func2(*x, **y))
return functools.reduce(inner, functions)
def updated_schema(old, new):
''' Creates a dictionary resulting from adding all keys/values of the second to the first
The second dictionary will overwrite the first.
>>> old, new = {'name': 'ric', 'job': None}, {'name': 'Rick'}
>>> updated = updated_schema(old, new)
>>> len(updated.keys())
2
>>> print(updated['name'])
Rick
>>> updated['job'] is None
True
'''
d = deepcopy(old)
for key, value in new.items():
if isinstance(value, dict) and old.get(key) and isinstance(old[key], dict):
d[key] = updated_schema(old[key], new[key])
else:
d[key] = value
return d
def single_result(l, default=''):
''' A function that will return the first element of a list if it exists
>>> print(single_result(['hello', None]))
hello
>>> print(single_result([], default='hello'))
hello
>>> print(single_result([]))
<BLANKLINE>
'''
return l[0] if l else default
|
schema-transformer
|
/schema-transformer-0.0.2.tar.gz/schema-transformer-0.0.2/schema_transformer/helpers.py
|
helpers.py
|
import functools
from copy import deepcopy
def CONSTANT(x):
''' Takes a value, returns a function that always returns that value
Useful inside schemas for defining constants
>>> CONSTANT(7)('my', 'name', verb='is')
7
>>> CONSTANT([123, 456])()
[123, 456]
'''
def inner(*y, **z):
return x
return inner
def compose(*functions):
''' evaluates functions from right to left.
>>> add = lambda x, y: x + y
>>> add3 = lambda x: x + 3
>>> divide2 = lambda x: x/2
>>> subtract4 = lambda x: x - 4
>>> subtract1 = compose(add3, subtract4)
>>> subtract1(1)
0
>>> compose(subtract1, add3)(4)
6
>>> compose(int, add3, add3, divide2)(4)
8
>>> compose(int, divide2, add3, add3)(4)
5
>>> compose(int, divide2, compose(add3, add3), add)(7, 3)
8
'''
def inner(func1, func2):
return lambda *x, **y: func1(func2(*x, **y))
return functools.reduce(inner, functions)
def updated_schema(old, new):
''' Creates a dictionary resulting from adding all keys/values of the second to the first
The second dictionary will overwrite the first.
>>> old, new = {'name': 'ric', 'job': None}, {'name': 'Rick'}
>>> updated = updated_schema(old, new)
>>> len(updated.keys())
2
>>> print(updated['name'])
Rick
>>> updated['job'] is None
True
'''
d = deepcopy(old)
for key, value in new.items():
if isinstance(value, dict) and old.get(key) and isinstance(old[key], dict):
d[key] = updated_schema(old[key], new[key])
else:
d[key] = value
return d
def single_result(l, default=''):
''' A function that will return the first element of a list if it exists
>>> print(single_result(['hello', None]))
hello
>>> print(single_result([], default='hello'))
hello
>>> print(single_result([]))
<BLANKLINE>
'''
return l[0] if l else default
| 0.732592 | 0.454654 |
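The helpers above are intended to be used inside transformer schemas; the following hypothetical sketch combines them with the `JSONTransformer` from the previous entry (the document is invented).

```python
# Hypothetical sketch combining the helpers with JSONTransformer (document invented).
from schema_transformer.helpers import CONSTANT, compose, single_result
from schema_transformer.transformer import JSONTransformer

schema = {
    "source": CONSTANT("example"),  # callables receive the parsed document
    "first_author": compose(single_result, lambda doc: doc.get("authors", [])),
}

print(JSONTransformer(schema).transform('{"authors": ["Ada", "Grace"]}'))
# {'source': 'example', 'first_author': 'Ada'}
```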
# Schema Transpose
This library and CLI convert JSON Schema documents into other languages. They can work with JSON Schema or Pydantic files as a source.
## Usage
This needs to be installed in the same virtual environment as the models that it is converting. Once installed, a CLI tool, `schema_transpose`, will be available in the environment.
For example, to generate a variables file for the RDS ParameterValidation class:
```shell
robert@Roberts-MacBook-Pro terraform-aws-core % schema_transpose modules.rds.validation.parameters:ParameterValidation
```
The command will output the following to stdout:
```hcl
variable "type" {
type = string
default = null
}
variable "pit_identifier" {
type = string
default = null
}
variable "tags" {
type = map(any)
default = {}
}
variable "name" {
type = string
validation {
# Automatically Generated from Rule: minlength
condition = length(var.name) >= 1
error_message = "Field should not be less than 1 characters"
}
validation {
# Automatically Generated from Rule: maxlength
condition = length(var.name) <= 63
error_message = "Field should not be larger than 63 characters"
}
validation {
# Automatically Generated from Rule: pattern
condition = length(regexall("^(?!.*--)[a-zA-Z][A-Za-z0-9.-]+(?<!-)$", var.name)) > 0
error_message = "Field does not match regex pattern ^(?!.*--)[a-zA-Z][A-Za-z0-9.-]+(?<!-)$"
}
}
variable "vpc_name" {
type = string
}
variable "engine" {
type = string
validation {
# Automatically Generated from Rule: minlength
condition = length(var.engine) >= 1
error_message = "Field should not be less than 1 characters"
}
}
variable "engine_version" {
type = string
validation {
# Automatically Generated from Rule: minlength
condition = length(var.engine_version) >= 1
error_message = "Field should not be less than 1 characters"
}
}
variable "is_public" {
type = bool
default = false
}
```
|
schema-transpose
|
/schema_transpose-0.1.0.tar.gz/schema_transpose-0.1.0/README.md
|
README.md
|
robert@Roberts-MacBook-Pro terraform-aws-core % schema_transpose modules.rds.validation.parameters:ParameterValidation
variable "type" {
type = string
default = null
}
variable "pit_identifier" {
type = string
default = null
}
variable "tags" {
type = map(any)
default = {}
}
variable "name" {
type = string
validation {
# Automatically Generated from Rule: minlength
condition = length(var.name) >= 1
error_message = "Field should not be less than 1 characters"
}
validation {
# Automatically Generated from Rule: maxlength
condition = length(var.name) <= 63
error_message = "Field should not be larger than 63 characters"
}
validation {
# Automatically Generated from Rule: pattern
condition = length(regexall("^(?!.*--)[a-zA-Z][A-Za-z0-9.-]+(?<!-)$", var.name)) > 0
error_message = "Field does not match regex pattern ^(?!.*--)[a-zA-Z][A-Za-z0-9.-]+(?<!-)$"
}
}
variable "vpc_name" {
type = string
}
variable "engine" {
type = string
validation {
# Automatically Generated from Rule: minlength
condition = length(var.engine) >= 1
error_message = "Field should not be less than 1 characters"
}
}
variable "engine_version" {
type = string
validation {
# Automatically Generated from Rule: minlength
condition = length(var.engine_version) >= 1
error_message = "Field should not be less than 1 characters"
}
}
variable "is_public" {
type = bool
default = false
}
| 0.396302 | 0.774413 |
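Besides the CLI shown above, the `JinjaConverter` in the next entry can also be called from Python. The sketch below is hypothetical: the Pydantic model is invented, the import path is inferred from the file listing that follows, and it assumes a Pydantic version whose `.schema()` output matches what the converter reads.

```python
# Hypothetical programmatic sketch of schema_transpose's JinjaConverter.
from pydantic import BaseModel, Field
from schema_transpose.converters.jinja import JinjaConverter

class Bucket(BaseModel):
    name: str = Field(..., min_length=3)   # becomes a minlength validation rule
    versioning: bool = False               # optional field with a default

print(JinjaConverter(format="hcl").convert(Bucket))  # one `variable` block per field
```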
import json
import os
import sys
import textwrap
from pathlib import Path
from typing import Any, Dict, List, cast
from glom import glom # type: ignore
from jinja2 import Environment, FileSystemLoader, Template
from pydantic import BaseModel
from schema_transpose import validators
env = Environment(loader=FileSystemLoader(Path(__file__).parent.parent / "templates"))
SETTINGS = {
"hcl": {
"template": cast(Template, env.get_template("hcl.tf")),
"delimiter": "\n\n",
},
"tfvars": {
"template": cast(Template, env.get_template("variable.tfvars")),
"delimiter": "\n",
},
"module": {
"template": cast(Template, env.get_template("module_params.tf")),
"delimiter": "\n",
"wrapper": cast(Template, env.get_template("module_wrapper.tf")),
},
}
class JinjaConverter:
numbers: List[str] = ["int", "float", "integer"]
objects: List[str] = ["dict", "object"]
pass_through_fields: List[str] = ["string"]
def __init__(self, format="hcl") -> None:
if format not in SETTINGS:
raise ValueError(f"Format must be hcl or tfvars")
self.template = SETTINGS[format]["template"]
self.delimiter = SETTINGS[format]["delimiter"]
self.wrapper = cast(Template, SETTINGS[format]["wrapper"]) if "wrapper" in SETTINGS[format] else None
def convert(self, model: BaseModel) -> str:
schema = model.schema()
output = ""
for field_name, field_data in schema["properties"].items():
output += self.convert_field(schema, field_name, field_data)
output += self.delimiter # type: ignore
if self.wrapper:
output = self.wrapper.render(parameters=output)
return output.strip()
def _get_reference(self, schema, reference):
# Convert reference into glom format.
path = reference.strip("#/").replace("/", ".")
return glom(schema, path)
def convert_field(self, schema, field_name, field_data) -> Any:
field_data = self.get_field_data(schema, field_name, field_data)
return self.template.render(**field_data) # type: ignore
def get_field_data(self, schema, field_name, field_data) -> Dict[str, Any]:
# This field references another field. Swap the field_data out.
if "$ref" in field_data:
field_data = self._get_reference(schema, field_data["$ref"])
if "allOf" in field_data:
for parent_model in field_data["allOf"]:
# Attempt to keep any existing descriptions instead of using the enum descriptions.
field_description = False
if "description" in field_data:
field_description = field_data["description"]
if "$ref" in parent_model:
parent_model = self._get_reference(schema, parent_model["$ref"])
field_data |= parent_model
# If there was a field description add it back.
if field_description:
field_data["description"] = field_description
type, type_rules = self.get_type(field_name, field_data)
# Since "null" and "false" are valid defaults we can't rely on default being defined.
# To get around that we have `has_default`.
has_default = False
default = None
required = field_name in schema.get("required", {})
if "default" in field_data or not required:
has_default = True
default = self.get_default(type, field_data.get("default"))
rules = self.get_validation_list(field_name, field_data, allow_none=not required)
if type_rules:
rules += type_rules
return {
"name": field_name,
"type": type,
"description": field_data.get("description"),
"has_default": has_default,
"default": default,
"sensitive": "writeOnly" in field_data,
"required": required,
"validation_rules": rules,
}
def get_type(self, field_name, field_data):
type_validators = []
if "enum" in field_data:
if "type" not in field_data:
field_data["type"] = "string"
type_validators.append(validators.oneof(field_name, field_data["enum"], allow_none=False))
if "type" not in field_data:
print(field_data)
raise ValueError(f"Unknown type for {field_name}")
field_type = field_data["type"].lower()
if field_type in self.pass_through_fields:
return field_type, type_validators
if field_type in self.numbers:
return "number", type_validators
if field_type == "array":
subtype = self.get_subtype(field_name, field_data)
return f"list({subtype})", None
if field_type in self.objects:
subtype = self.get_subtype(field_name, field_data)
return f"map({subtype})", None
if field_type == "boolean":
return "bool", None
return "string", None
def get_subtype(self, field_name, field_data):
subtype = "any"
if "items" in field_data:
if "type" in field_data["items"]:
subtype, rules = self.get_type(field_name, field_data["items"])
if "additionalProperties" in field_data and "type" in field_data["additionalProperties"]:
subtype = field_data["additionalProperties"]["type"]
return subtype
def get_default(self, type, value):
if type.startswith("list"):
return textwrap.indent(json.dumps(value, indent=2), " ").strip() if value else "[]"
if type.startswith("map"):
return textwrap.indent(json.dumps(value, indent=2), " ").strip() if value else "{}"
# Custom Nulls above this line
        if value is None:
return "null"
if type == "number":
return value
if type == "string":
escaped_value = str(value).replace('"', '\\"')
return f'"{escaped_value}"'
if type == "bool":
return "true" if value else "false"
if value:
return value
return "null"
def get_validation_list(self, field_name, field_data, allow_none):
tf_rules = []
for key, value in field_data.items():
if not hasattr(validators, key.lower()):
continue
rule = getattr(validators, key.lower())(field_name, value, allow_none)
if rule:
tf_rules.append(rule)
return tf_rules
|
schema-transpose
|
/schema_transpose-0.1.0.tar.gz/schema_transpose-0.1.0/schema_transpose/converters/jinja.py
|
jinja.py
|
import json
import os
import sys
import textwrap
from pathlib import Path
from typing import Any, Dict, List, cast
from glom import glom # type: ignore
from jinja2 import Environment, FileSystemLoader, Template
from pydantic import BaseModel
from schema_transpose import validators
env = Environment(loader=FileSystemLoader(Path(__file__).parent.parent / "templates"))
SETTINGS = {
"hcl": {
"template": cast(Template, env.get_template("hcl.tf")),
"delimiter": "\n\n",
},
"tfvars": {
"template": cast(Template, env.get_template("variable.tfvars")),
"delimiter": "\n",
},
"module": {
"template": cast(Template, env.get_template("module_params.tf")),
"delimiter": "\n",
"wrapper": cast(Template, env.get_template("module_wrapper.tf")),
},
}
class JinjaConverter:
numbers: List[str] = ["int", "float", "integer"]
objects: List[str] = ["dict", "object"]
pass_through_fields: List[str] = ["string"]
def __init__(self, format="hcl") -> None:
if format not in SETTINGS:
raise ValueError(f"Format must be hcl or tfvars")
self.template = SETTINGS[format]["template"]
self.delimiter = SETTINGS[format]["delimiter"]
self.wrapper = cast(Template, SETTINGS[format]["wrapper"]) if "wrapper" in SETTINGS[format] else None
def convert(self, model: BaseModel) -> str:
schema = model.schema()
output = ""
for field_name, field_data in schema["properties"].items():
output += self.convert_field(schema, field_name, field_data)
output += self.delimiter # type: ignore
if self.wrapper:
output = self.wrapper.render(parameters=output)
return output.strip()
def _get_reference(self, schema, reference):
# Convert reference into glom format.
path = reference.strip("#/").replace("/", ".")
return glom(schema, path)
def convert_field(self, schema, field_name, field_data) -> Any:
field_data = self.get_field_data(schema, field_name, field_data)
return self.template.render(**field_data) # type: ignore
def get_field_data(self, schema, field_name, field_data) -> Dict[str, Any]:
# This field references another field. Swap the field_data out.
if "$ref" in field_data:
field_data = self._get_reference(schema, field_data["$ref"])
if "allOf" in field_data:
for parent_model in field_data["allOf"]:
# Attempt to keep any existing descriptions instead of using the enum descriptions.
field_description = False
if "description" in field_data:
field_description = field_data["description"]
if "$ref" in parent_model:
parent_model = self._get_reference(schema, parent_model["$ref"])
field_data |= parent_model
# If there was a field description add it back.
if field_description:
field_data["description"] = field_description
type, type_rules = self.get_type(field_name, field_data)
# Since "null" and "false" are valid defaults we can't rely on default being defined.
# To get around that we have `has_default`.
has_default = False
default = None
required = field_name in schema.get("required", {})
if "default" in field_data or not required:
has_default = True
default = self.get_default(type, field_data.get("default"))
rules = self.get_validation_list(field_name, field_data, allow_none=not required)
if type_rules:
rules += type_rules
return {
"name": field_name,
"type": type,
"description": field_data.get("description"),
"has_default": has_default,
"default": default,
"sensitive": "writeOnly" in field_data,
"required": required,
"validation_rules": rules,
}
def get_type(self, field_name, field_data):
type_validators = []
if "enum" in field_data:
if "type" not in field_data:
field_data["type"] = "string"
type_validators.append(validators.oneof(field_name, field_data["enum"], allow_none=False))
if "type" not in field_data:
print(field_data)
raise ValueError(f"Unknown type for {field_name}")
field_type = field_data["type"].lower()
if field_type in self.pass_through_fields:
return field_type, type_validators
if field_type in self.numbers:
return "number", type_validators
if field_type == "array":
subtype = self.get_subtype(field_name, field_data)
return f"list({subtype})", None
if field_type in self.objects:
subtype = self.get_subtype(field_name, field_data)
return f"map({subtype})", None
if field_type == "boolean":
return "bool", None
return "string", None
def get_subtype(self, field_name, field_data):
subtype = "any"
if "items" in field_data:
if "type" in field_data["items"]:
subtype, rules = self.get_type(field_name, field_data["items"])
if "additionalProperties" in field_data and "type" in field_data["additionalProperties"]:
subtype = field_data["additionalProperties"]["type"]
return subtype
def get_default(self, type, value):
if type.startswith("list"):
return textwrap.indent(json.dumps(value, indent=2), " ").strip() if value else "[]"
if type.startswith("map"):
return textwrap.indent(json.dumps(value, indent=2), " ").strip() if value else "{}"
# Custom Nulls above this line
        if value is None:
return "null"
if type == "number":
return value
if type == "string":
escaped_value = str(value).replace('"', '\\"')
return f'"{escaped_value}"'
if type == "bool":
return "true" if value else "false"
if value:
return value
return "null"
def get_validation_list(self, field_name, field_data, allow_none):
tf_rules = []
for key, value in field_data.items():
if not hasattr(validators, key.lower()):
continue
rule = getattr(validators, key.lower())(field_name, value, allow_none)
if rule:
tf_rules.append(rule)
return tf_rules
| 0.575349 | 0.142649 |
from typing import Any, List
try:
# This works on MacOS but not Ubuntu
from markdowntable import markdownTable # type: ignore
except ImportError:
# This works on Ubuntu but not MacOS
from markdownTable import markdownTable # type: ignore
from pydantic import BaseModel
from schema_transpose.converters.jinja import JinjaConverter
class MarkdownTableBuilder(markdownTable):
def getHeader(self):
# The only change in this function from the parent is the `title` casing
header = ""
if self.row_sep in ("topbottom", "always"):
header += self.newline_char + self.var_row_sep_last + self.newline_char
for key in self.data[0].keys():
margin = self.var_padding[key] - len(key)
right = self.getMargin(margin)
header += "|" + key.title().rjust(self.var_padding[key] - right, self.padding_char).ljust(
self.var_padding[key], self.padding_char
)
header += "|" + self.newline_char
if self.row_sep == "always":
header += self.var_row_sep + self.newline_char
if self.row_sep == "markdown":
header += self.var_row_sep.replace("+", "|") + self.newline_char
return header
class MarkdownConverter(JinjaConverter):
numbers: List[str] = [] # ["int", "float", "integer"]
objects: List[str] = ["dict", "object"]
pass_through_fields: List[str] = ["string", "int", "float", "integer"]
def __init__(self, format="table") -> None:
self.format = format
def convert(self, model: BaseModel) -> str:
schema = model.schema()
output = ""
fields = []
for field_name, field_data in schema["properties"].items():
if field_name == "type":
continue
fields.append(self.convert_field(schema, field_name, field_data))
# We need at least one field to make a table.
if len(fields) < 1:
return ""
# Sort required fields to the top, then sort by alphabet.
fields.sort(key=lambda t: (0 if t["Required"] == "Yes" else 1, t["Name"]))
return MarkdownTableBuilder(fields).setParams(quote=False, row_sep="markdown").getMarkdown()
def convert_field(self, schema, field_name, field_data) -> Any:
field_data = self.get_field_data(schema, field_name, field_data)
description = ""
if "description" in field_data and field_data["description"]:
description = field_data["description"]
return {
"Name": f"`{field_name}`",
"Type": self.get_display_type(field_name, field_data),
"Description": description,
"Default": field_data["default"] if field_data["has_default"] else "",
"Required": "Yes" if field_data["required"] else "No",
}
def get_display_type(self, field_name, field_data):
if field_data["type"] == "bool":
return "boolean"
if "list" in field_data["type"]:
return field_data["type"].replace("list", "array")
return field_data["type"]
|
schema-transpose
|
/schema_transpose-0.1.0.tar.gz/schema_transpose-0.1.0/schema_transpose/converters/markdown.py
|
markdown.py
|
from typing import Any, List
try:
# This works on MacOS but not Ubuntu
from markdowntable import markdownTable # type: ignore
except ImportError:
# This works on Ubuntu but not MacOS
from markdownTable import markdownTable # type: ignore
from pydantic import BaseModel
from schema_transpose.converters.jinja import JinjaConverter
class MarkdownTableBuilder(markdownTable):
def getHeader(self):
# The only change in this function from the parent is the `title` casing
header = ""
if self.row_sep in ("topbottom", "always"):
header += self.newline_char + self.var_row_sep_last + self.newline_char
for key in self.data[0].keys():
margin = self.var_padding[key] - len(key)
right = self.getMargin(margin)
header += "|" + key.title().rjust(self.var_padding[key] - right, self.padding_char).ljust(
self.var_padding[key], self.padding_char
)
header += "|" + self.newline_char
if self.row_sep == "always":
header += self.var_row_sep + self.newline_char
if self.row_sep == "markdown":
header += self.var_row_sep.replace("+", "|") + self.newline_char
return header
class MarkdownConverter(JinjaConverter):
numbers: List[str] = [] # ["int", "float", "integer"]
objects: List[str] = ["dict", "object"]
pass_through_fields: List[str] = ["string", "int", "float", "integer"]
def __init__(self, format="table") -> None:
self.format = format
def convert(self, model: BaseModel) -> str:
schema = model.schema()
output = ""
fields = []
for field_name, field_data in schema["properties"].items():
if field_name == "type":
continue
fields.append(self.convert_field(schema, field_name, field_data))
# We need at least one field to make a table.
if len(fields) < 1:
return ""
# Sort required fields to the top, then sort by alphabet.
fields.sort(key=lambda t: (0 if t["Required"] == "Yes" else 1, t["Name"]))
return MarkdownTableBuilder(fields).setParams(quote=False, row_sep="markdown").getMarkdown()
def convert_field(self, schema, field_name, field_data) -> Any:
field_data = self.get_field_data(schema, field_name, field_data)
description = ""
if "description" in field_data and field_data["description"]:
description = field_data["description"]
return {
"Name": f"`{field_name}`",
"Type": self.get_display_type(field_name, field_data),
"Description": description,
"Default": field_data["default"] if field_data["has_default"] else "",
"Required": "Yes" if field_data["required"] else "No",
}
def get_display_type(self, field_name, field_data):
if field_data["type"] == "bool":
return "boolean"
if "list" in field_data["type"]:
return field_data["type"].replace("list", "array")
return field_data["type"]
| 0.660501 | 0.135976 |
# Package
This package is a wrapper around jsonschema and simplejson that simplifies JSON schema validation as specified by [JSON Schema Draft 7](https://json-schema.org/specification-links.html#draft-7) (link to [IETF](https://tools.ietf.org/html/draft-handrews-json-schema-01)).
## Example
```python
from validator import validate
# Define the validation schema
schema = {
"type": "object",
"required": [
"name",
"age",
],
"properties": {
"name": { "type": "string" },
"age": { "type": "number" },
}
}
# Data to be validated
data = {
"name": "Daniel",
"age": 30,
}
# Validate and run
validation = validate(schema, data)
if validation == True:
    pass  # do something with data, e.g. create a new friend
else:
    print(validation)  # will show a well formatted dict with errors
```
> Note: More examples can be found in the tests
## Contribute
This package is intended for use in private projects, but feel free to leave comments and open pull requests and I might look into them.
### Install the package
```sh
python -m venv venv
source venv/bin/activate
pip install -r requirements.txt
```
### Run tests
```sh
python -m pytest -m validator -s
```
### Upload package
```sh
# Set your user with python keyring
python3 -m keyring set https://upload.pypi.org/legacy/ $username
# substitute $username with your actual username
# Update packaging tools
python3 -m pip install --user --upgrade setuptools wheel twine
# Remove dist folder
rm -rf dist/*
# Create a new dist
python3 setup.py sdist bdist_wheel
# Above command creates
# dist/
# schema-validator-halpa-0.0.5-py3-none-any.whl
# schema-validator-halpa-0.0.5.tar.gz
# where "0.0.1" is equivalent to value in "version" from setup.py
# Upload the package
python3 -m twine upload dist/*
```
|
schema-validator-halpa
|
/schema-validator-halpa-0.0.7.tar.gz/schema-validator-halpa-0.0.7/README.md
|
README.md
|
from validator import validate
# Define the validation schema
schema = {
"type": "object",
"required": [
"name",
"age",
],
"properties": {
"name": { "type": "string" },
"age": { "type": "number" },
}
}
# Data to be validated
data = {
"name": "Daniel",
"age": 30,
}
# Validate and run
validation = validate(schema, data)
if validation == True:
    pass  # do something with data, e.g. create a new friend
else:
    print(validation)  # will show a well formatted dict with errors
python -m venv venv
source venv/bin/activate
pip install -r requirements.txt
python -m pytest -m validator -s
# Set your user with python keyring
python3 -m keyring set https://upload.pypi.org/legacy/ $username
# substitute $username with your actual username
# Update packaging tools
python3 -m pip install --user --upgrade setuptools wheel twine
# Remove dist folder
rm -rf dist/*
# Create a new dist
python3 setup.py sdist bdist_wheel
# Above command creates
# dist/
# schema-validator-halpa-0.0.5-py3-none-any.whl
# schema-validator-halpa-0.0.5.tar.gz
# where "0.0.1" is equivalent to value in "version" from setup.py
# Upload the package
python3 -m twine upload dist/*
| 0.496094 | 0.811489 |
import re
import logging
from collections.abc import Mapping
from typing import Any, Dict, List, Optional, Tuple
from humps import camelize, decamelize
from pydantic.json import pydantic_encoder
from pydantic.schema import model_schema
from schema_validator.constants import (
IGNORE_METHODS, REF_PREFIX, SCHEMA_QUERYSTRING_ATTRIBUTE,
SCHEMA_REQUEST_ATTRIBUTE, SCHEMA_RESPONSE_ATTRIBUTE, SCHEMA_TAG_ATTRIBUTE,
SWAGGER_CSS_URL, SWAGGER_JS_URL
)
from schema_validator.types import ServerObject
from schema_validator.utils import DataSource
try:
from quart import current_app, render_template_string
from quart.json import JSONDecoder, JSONEncoder
IS_FLASK = False
except ImportError:
from flask import current_app, render_template_string, Flask
from flask.json import JSONDecoder, JSONEncoder
IS_FLASK = True
PATH_RE = re.compile("<(?:[^:]*:)?([^>]+)>")
logger = logging.getLogger(__name__)
class PydanticJSONEncoder(JSONEncoder):
def default(self, object_: Any) -> Any:
return pydantic_encoder(object_)
class CasingJSONEncoder(PydanticJSONEncoder):
def encode(self, object_: Any) -> Any:
if isinstance(object_, (list, Mapping)):
object_ = camelize(object_)
return super().encode(camelize(object_))
class CasingJSONDecoder(JSONDecoder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, object_hook=self.object_hook, **kwargs)
@staticmethod
def object_hook(object_: dict) -> Any:
return decamelize(object_)
class SchemaValidator:
"""A Flask-Schema instance.
app = Flask(__name__)
FlaskSchema(app)
flask_schema = FlaskSchema()
or
def create_app():
app = Flask(__name__)
flask_schema.init_app(app)
return app
Arguments:
swagger_ui_path: The path used to serve the documentation UI using
swagger or None to disable swagger documentation.
title: The publishable title for the app.
version: The publishable version for the app.
"""
def __init__(
self,
app=None,
*,
swagger_ui_path: Optional[str] = "/swagger/docs",
title: Optional[str] = None,
version: str = "0.1.0",
convert_casing: bool = False,
servers: Optional[List[ServerObject]] = None
) -> None:
self.openapi_path = "/swagger/openapi.json"
self.openapi_tag_path = "/swagger/openapi-<tag>.json"
self.swagger_ui_path = swagger_ui_path
self.title = title
self.version = version
self.convert_casing = convert_casing
self.servers = servers or []
if app is not None:
self.init_app(app)
def init_app(self, app) -> None:
app.extensions["SCHEMA_VALIDATOR"] = self
self.title = app.name if self.title is None else self.title
if self.convert_casing:
app.json_decoder = CasingJSONDecoder
app.json_encoder = CasingJSONEncoder
else:
app.json_encoder = PydanticJSONEncoder
app.config.setdefault(
"SCHEMA_SWAGGER_JS_URL",
SWAGGER_JS_URL
)
app.config.setdefault(
"SCHEMA_SWAGGER_CSS_URL",
SWAGGER_CSS_URL
)
        try:
            # Flask is only importable when Quart is unavailable (see the
            # module-level import fallback), so a NameError here means Quart.
            IS_FLASK = isinstance(app, Flask)
        except NameError:
            IS_FLASK = False
if self.openapi_path is not None and app.config.get("SWAGGER_ROUTE"):
if IS_FLASK:
from .flask import openapi, swagger_ui
app_name = "FLASK"
else:
from .quart import openapi, swagger_ui, convert_model_result
app.make_response = convert_model_result(app.make_response)
app_name = "QUART"
logger.info(f"start validator by {app_name}")
app.add_url_rule(
self.openapi_path, "openapi",
lambda: openapi(validator=self)
)
app.add_url_rule(
self.openapi_tag_path, "openapi_tag",
lambda tag: openapi(self, tag)
)
if self.swagger_ui_path is not None:
app.add_url_rule(
self.swagger_ui_path, "swagger_ui",
lambda: swagger_ui(validator=self)
)
app.add_url_rule(
f"{self.swagger_ui_path}/<tag>", "swagger_ui_tag",
lambda tag: swagger_ui(self, tag)
)
def _split_definitions(schema: dict) -> Tuple[dict, dict]:
new_schema = schema.copy()
definitions = new_schema.pop("definitions", {})
return definitions, new_schema
def _build_openapi_schema(
app,
extension: SchemaValidator,
    expected_tag: Optional[str] = None
) -> dict:
"""
params:
expected_tag: str
"""
paths: Dict[str, dict] = {}
components = {"schemas": {}}
for rule in app.url_map.iter_rules():
if rule.endpoint in [
"static", "openapi", "swagger_ui",
"swagger_ui_tag", "openapi_tag"
]:
continue
func = app.view_functions[rule.endpoint]
for method in rule.methods - IGNORE_METHODS:
view_func = None
view_class = getattr(func, "view_class", None)
if view_class is not None:
view_func = getattr(view_class, method.lower(), None)
path_object = {
"parameters": [], "responses": {},
}
function = view_func or func
if function.__doc__ is not None:
summary, *description = function.__doc__.splitlines()
path_object["description"] = "\n".join(description)
path_object["summary"] = summary
if view_class:
tags = getattr(view_class, SCHEMA_TAG_ATTRIBUTE, [])
else:
tags = getattr(func, SCHEMA_TAG_ATTRIBUTE, [])
if tags:
path_object["tags"] = tags
if expected_tag and expected_tag not in tags:
continue
response_models = getattr(function, SCHEMA_RESPONSE_ATTRIBUTE, {})
for status_code, model_class in response_models.items():
schema = model_schema(model_class, ref_prefix=REF_PREFIX)
if extension.convert_casing:
schema = camelize(schema)
definitions, schema = _split_definitions(schema)
components["schemas"].update(definitions)
path_object["responses"][status_code] = { # type: ignore
"content": {
"application/json": {
"schema": schema,
},
},
"description": model_class.__doc__,
}
request_data = getattr(function, SCHEMA_REQUEST_ATTRIBUTE, None)
if request_data is not None:
schema = model_schema(request_data[0], ref_prefix=REF_PREFIX)
if extension.convert_casing:
schema = camelize(schema)
definitions, schema = _split_definitions(schema)
components["schemas"].update(definitions)
if request_data[1] == DataSource.JSON:
encoding = "application/json"
else:
encoding = "application/x-www-form-urlencoded"
path_object["requestBody"] = {
"content": {
encoding: {
"schema": schema,
},
},
}
querystring_model = getattr(
function, SCHEMA_QUERYSTRING_ATTRIBUTE, None)
if querystring_model is not None:
schema = model_schema(querystring_model, ref_prefix=REF_PREFIX)
if extension.convert_casing:
schema = camelize(schema)
definitions, schema = _split_definitions(schema)
components["schemas"].update(definitions)
for name, type_ in schema["properties"].items():
path_object["parameters"].append(
{
"name": name,
"in": "query",
"schema": type_,
}
)
for name, converter in rule._converters.items():
path_object["parameters"].append(
{
"name": name,
"in": "path",
}
)
path = re.sub(PATH_RE, r"{\1}", rule.rule)
paths.setdefault(path, {})
paths[path][method.lower()] = path_object
return {
"openapi": "3.0.3",
"info": {
"title": extension.title,
"version": extension.version,
},
"components": components,
"paths": paths,
"tags": [],
"servers": extension.servers,
}
|
schema-validator
|
/schema_validator-0.2.5.tar.gz/schema_validator-0.2.5/schema_validator/core.py
|
core.py
|
import re
import logging
from collections.abc import Mapping
from typing import Any, Dict, List, Optional, Tuple
from humps import camelize, decamelize
from pydantic.json import pydantic_encoder
from pydantic.schema import model_schema
from schema_validator.constants import (
IGNORE_METHODS, REF_PREFIX, SCHEMA_QUERYSTRING_ATTRIBUTE,
SCHEMA_REQUEST_ATTRIBUTE, SCHEMA_RESPONSE_ATTRIBUTE, SCHEMA_TAG_ATTRIBUTE,
SWAGGER_CSS_URL, SWAGGER_JS_URL
)
from schema_validator.types import ServerObject
from schema_validator.utils import DataSource
try:
from quart import current_app, render_template_string
from quart.json import JSONDecoder, JSONEncoder
IS_FLASK = False
except ImportError:
from flask import current_app, render_template_string, Flask
from flask.json import JSONDecoder, JSONEncoder
IS_FLASK = True
PATH_RE = re.compile("<(?:[^:]*:)?([^>]+)>")
logger = logging.getLogger(__name__)
class PydanticJSONEncoder(JSONEncoder):
def default(self, object_: Any) -> Any:
return pydantic_encoder(object_)
class CasingJSONEncoder(PydanticJSONEncoder):
def encode(self, object_: Any) -> Any:
if isinstance(object_, (list, Mapping)):
object_ = camelize(object_)
return super().encode(camelize(object_))
class CasingJSONDecoder(JSONDecoder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, object_hook=self.object_hook, **kwargs)
@staticmethod
def object_hook(object_: dict) -> Any:
return decamelize(object_)
class SchemaValidator:
"""A Flask-Schema instance.
app = Flask(__name__)
FlaskSchema(app)
flask_schema = FlaskSchema()
or
def create_app():
app = Flask(__name__)
flask_schema.init_app(app)
return app
Arguments:
swagger_ui_path: The path used to serve the documentation UI using
swagger or None to disable swagger documentation.
title: The publishable title for the app.
version: The publishable version for the app.
"""
def __init__(
self,
app=None,
*,
swagger_ui_path: Optional[str] = "/swagger/docs",
title: Optional[str] = None,
version: str = "0.1.0",
convert_casing: bool = False,
servers: Optional[List[ServerObject]] = None
) -> None:
self.openapi_path = "/swagger/openapi.json"
self.openapi_tag_path = "/swagger/openapi-<tag>.json"
self.swagger_ui_path = swagger_ui_path
self.title = title
self.version = version
self.convert_casing = convert_casing
self.servers = servers or []
if app is not None:
self.init_app(app)
def init_app(self, app) -> None:
app.extensions["SCHEMA_VALIDATOR"] = self
self.title = app.name if self.title is None else self.title
if self.convert_casing:
app.json_decoder = CasingJSONDecoder
app.json_encoder = CasingJSONEncoder
else:
app.json_encoder = PydanticJSONEncoder
app.config.setdefault(
"SCHEMA_SWAGGER_JS_URL",
SWAGGER_JS_URL
)
app.config.setdefault(
"SCHEMA_SWAGGER_CSS_URL",
SWAGGER_CSS_URL
)
        try:
            # Flask is only importable when Quart is unavailable (see the
            # module-level import fallback), so a NameError here means Quart.
            IS_FLASK = isinstance(app, Flask)
        except NameError:
            IS_FLASK = False
if self.openapi_path is not None and app.config.get("SWAGGER_ROUTE"):
if IS_FLASK:
from .flask import openapi, swagger_ui
app_name = "FLASK"
else:
from .quart import openapi, swagger_ui, convert_model_result
app.make_response = convert_model_result(app.make_response)
app_name = "QUART"
logger.info(f"start validator by {app_name}")
app.add_url_rule(
self.openapi_path, "openapi",
lambda: openapi(validator=self)
)
app.add_url_rule(
self.openapi_tag_path, "openapi_tag",
lambda tag: openapi(self, tag)
)
if self.swagger_ui_path is not None:
app.add_url_rule(
self.swagger_ui_path, "swagger_ui",
lambda: swagger_ui(validator=self)
)
app.add_url_rule(
f"{self.swagger_ui_path}/<tag>", "swagger_ui_tag",
lambda tag: swagger_ui(self, tag)
)
def _split_definitions(schema: dict) -> Tuple[dict, dict]:
new_schema = schema.copy()
definitions = new_schema.pop("definitions", {})
return definitions, new_schema
def _build_openapi_schema(
app,
extension: SchemaValidator,
    expected_tag: Optional[str] = None
) -> dict:
"""
params:
expected_tag: str
"""
paths: Dict[str, dict] = {}
components = {"schemas": {}}
for rule in app.url_map.iter_rules():
if rule.endpoint in [
"static", "openapi", "swagger_ui",
"swagger_ui_tag", "openapi_tag"
]:
continue
func = app.view_functions[rule.endpoint]
for method in rule.methods - IGNORE_METHODS:
view_func = None
view_class = getattr(func, "view_class", None)
if view_class is not None:
view_func = getattr(view_class, method.lower(), None)
path_object = {
"parameters": [], "responses": {},
}
function = view_func or func
if function.__doc__ is not None:
summary, *description = function.__doc__.splitlines()
path_object["description"] = "\n".join(description)
path_object["summary"] = summary
if view_class:
tags = getattr(view_class, SCHEMA_TAG_ATTRIBUTE, [])
else:
tags = getattr(func, SCHEMA_TAG_ATTRIBUTE, [])
if tags:
path_object["tags"] = tags
if expected_tag and expected_tag not in tags:
continue
response_models = getattr(function, SCHEMA_RESPONSE_ATTRIBUTE, {})
for status_code, model_class in response_models.items():
schema = model_schema(model_class, ref_prefix=REF_PREFIX)
if extension.convert_casing:
schema = camelize(schema)
definitions, schema = _split_definitions(schema)
components["schemas"].update(definitions)
path_object["responses"][status_code] = { # type: ignore
"content": {
"application/json": {
"schema": schema,
},
},
"description": model_class.__doc__,
}
request_data = getattr(function, SCHEMA_REQUEST_ATTRIBUTE, None)
if request_data is not None:
schema = model_schema(request_data[0], ref_prefix=REF_PREFIX)
if extension.convert_casing:
schema = camelize(schema)
definitions, schema = _split_definitions(schema)
components["schemas"].update(definitions)
if request_data[1] == DataSource.JSON:
encoding = "application/json"
else:
encoding = "application/x-www-form-urlencoded"
path_object["requestBody"] = {
"content": {
encoding: {
"schema": schema,
},
},
}
querystring_model = getattr(
function, SCHEMA_QUERYSTRING_ATTRIBUTE, None)
if querystring_model is not None:
schema = model_schema(querystring_model, ref_prefix=REF_PREFIX)
if extension.convert_casing:
schema = camelize(schema)
definitions, schema = _split_definitions(schema)
components["schemas"].update(definitions)
for name, type_ in schema["properties"].items():
path_object["parameters"].append(
{
"name": name,
"in": "query",
"schema": type_,
}
)
for name, converter in rule._converters.items():
path_object["parameters"].append(
{
"name": name,
"in": "path",
}
)
path = re.sub(PATH_RE, r"{\1}", rule.rule)
paths.setdefault(path, {})
paths[path][method.lower()] = path_object
return {
"openapi": "3.0.3",
"info": {
"title": extension.title,
"version": extension.version,
},
"components": components,
"paths": paths,
"tags": [],
"servers": extension.servers,
}
| 0.763351 | 0.104569 |
from dataclasses import asdict, is_dataclass
from functools import wraps
from typing import (Any, Callable, Dict, Iterable, List, Optional, Union, cast)
from pydantic import BaseModel, ValidationError
from pydantic.dataclasses import is_builtin_dataclass
from flask import Response, current_app, g, jsonify, request
from werkzeug.datastructures import Headers
from werkzeug.exceptions import BadRequest
from schema_validator.constants import (
SCHEMA_QUERYSTRING_ATTRIBUTE, SCHEMA_REQUEST_ATTRIBUTE,
SCHEMA_RESPONSE_ATTRIBUTE, SCHEMA_TAG_ATTRIBUTE
)
from schema_validator.types import PydanticModel
from schema_validator.utils import DataSource, check_body_schema, \
check_query_string_schema, check_response_schema
def check_response(result, response_model: Dict[int, PydanticModel]):
status_or_headers: Union[None, int, str, Dict, List] = None
headers: Optional[Headers] = None
if isinstance(result, tuple):
value, status_or_headers, headers = result + (None,) * (
3 - len(result))
else:
value = result
if isinstance(value, Response):
value = value.get_json()
status = 200
if status_or_headers is not None and not isinstance(
status_or_headers, (Headers, dict, list)
) and str(status_or_headers).isdigit():
status = int(status_or_headers)
bad_status = BadRequest.code
for status_code, model_cls in response_model.items():
if status_code != status:
continue
if isinstance(value, dict):
try:
model_value = model_cls(**value)
except (TypeError, ValidationError) as ve:
return jsonify(validation_error=str(ve)), bad_status
elif type(value) == model_cls:
model_value = value
elif is_builtin_dataclass(value):
model_value = model_cls(**asdict(value))
else:
return jsonify(validation_error="invalid response"), bad_status
if is_dataclass(model_value):
return asdict(model_value), status_or_headers, headers
else:
model_value = cast(BaseModel, model_value)
return model_value.dict(), status_or_headers, headers
return result
def validate(
query_string: Optional[PydanticModel] = None,
body: Optional[PydanticModel] = None,
source: DataSource = DataSource.JSON,
validate_path_args: bool = False,
responses: Union[PydanticModel, Dict[int, PydanticModel], None] = None,
headers: Optional[PydanticModel] = None,
tags: Optional[Iterable[str]] = None
) -> Callable:
"""
params:
query_string:
the params in query
body:
json body or form
source:
the body source
        responses:
            the response model definition
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
from flask import Flask
from schema_validator import FlaskSchema, validate
app = Flask(__name__)
FlaskSchema(app)
OR
schema = FlaskSchema()
schema.init_app(app)
@dataclass
class Todo:
task: str
due: Optional[datetime]
class TodoResponse(BaseModel):
id: int
name: str
@app.post("/")
@validate(body=Todo, responses=TodoResponse)
def create_todo():
... # Do something with data, e.g. save to the DB
return dict(id=1, name="2")
@app.put("/")
@validate(
body=Todo,
responses={200: TodoResponse, 400: TodoResponse},
tags=["SOME-TAG"]
)
def update_todo():
... # Do something with data, e.g. save to the DB
return TodoResponse(id=1, name="123")
@tags("SOME-TAG", "OTHER-TAG")
class View(MethodView):
@validate(...)
def get(self):
return {}
"""
# TODO
if validate_path_args:
pass
# TODO
if headers is not None:
pass
if query_string is not None:
query_string = check_query_string_schema(query_string)
if body is not None:
body = check_body_schema(body, source)
if responses is not None:
responses = check_response_schema(responses)
def decorator(func: Callable) -> Callable:
if query_string:
setattr(func, SCHEMA_QUERYSTRING_ATTRIBUTE, query_string)
if body:
setattr(func, SCHEMA_REQUEST_ATTRIBUTE, (body, source))
if responses:
setattr(func, SCHEMA_RESPONSE_ATTRIBUTE, responses)
if tags:
setattr(func, SCHEMA_TAG_ATTRIBUTE, list(set(tags)))
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
err = {}
if body:
if source == DataSource.JSON:
data = request.get_json()
else:
data = request.form
try:
body_model = body(**data)
except (TypeError, ValidationError) as ve:
err["body_params"] = str(ve)
else:
g.body_params = body_model
if query_string:
try:
query_params = query_string(**request.args)
except (TypeError, ValidationError) as ve:
err["query_params"] = str(ve)
else:
g.query_params = query_params
if err:
return jsonify(validation_error=err), BadRequest.code
result = current_app.ensure_sync(func)(*args, **kwargs)
if responses:
return check_response(result, responses)
return result
return wrapper
return decorator
|
schema-validator
|
/schema_validator-0.2.5.tar.gz/schema_validator-0.2.5/schema_validator/flask/validation.py
|
validation.py
|
from dataclasses import asdict, is_dataclass
from functools import wraps
from typing import (Any, Callable, Dict, Iterable, List, Optional, Union, cast)
from pydantic import BaseModel, ValidationError
from pydantic.dataclasses import is_builtin_dataclass
from flask import Response, current_app, g, jsonify, request
from werkzeug.datastructures import Headers
from werkzeug.exceptions import BadRequest
from schema_validator.constants import (
SCHEMA_QUERYSTRING_ATTRIBUTE, SCHEMA_REQUEST_ATTRIBUTE,
SCHEMA_RESPONSE_ATTRIBUTE, SCHEMA_TAG_ATTRIBUTE
)
from schema_validator.types import PydanticModel
from schema_validator.utils import DataSource, check_body_schema, \
check_query_string_schema, check_response_schema
def check_response(result, response_model: Dict[int, PydanticModel]):
status_or_headers: Union[None, int, str, Dict, List] = None
headers: Optional[Headers] = None
if isinstance(result, tuple):
value, status_or_headers, headers = result + (None,) * (
3 - len(result))
else:
value = result
if isinstance(value, Response):
value = value.get_json()
status = 200
if status_or_headers is not None and not isinstance(
status_or_headers, (Headers, dict, list)
) and str(status_or_headers).isdigit():
status = int(status_or_headers)
bad_status = BadRequest.code
for status_code, model_cls in response_model.items():
if status_code != status:
continue
if isinstance(value, dict):
try:
model_value = model_cls(**value)
except (TypeError, ValidationError) as ve:
return jsonify(validation_error=str(ve)), bad_status
elif type(value) == model_cls:
model_value = value
elif is_builtin_dataclass(value):
model_value = model_cls(**asdict(value))
else:
return jsonify(validation_error="invalid response"), bad_status
if is_dataclass(model_value):
return asdict(model_value), status_or_headers, headers
else:
model_value = cast(BaseModel, model_value)
return model_value.dict(), status_or_headers, headers
return result
def validate(
query_string: Optional[PydanticModel] = None,
body: Optional[PydanticModel] = None,
source: DataSource = DataSource.JSON,
validate_path_args: bool = False,
responses: Union[PydanticModel, Dict[int, PydanticModel], None] = None,
headers: Optional[PydanticModel] = None,
tags: Optional[Iterable[str]] = None
) -> Callable:
"""
params:
query_string:
the params in query
body:
json body or form
source:
the body source
        responses:
            the response model definition
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
from flask import Flask
from schema_validator import FlaskSchema, validate
app = Flask(__name__)
FlaskSchema(app)
OR
schema = FlaskSchema()
schema.init_app(app)
@dataclass
class Todo:
task: str
due: Optional[datetime]
class TodoResponse(BaseModel):
id: int
name: str
@app.post("/")
@validate(body=Todo, responses=TodoResponse)
def create_todo():
... # Do something with data, e.g. save to the DB
return dict(id=1, name="2")
@app.put("/")
@validate(
body=Todo,
responses={200: TodoResponse, 400: TodoResponse},
tags=["SOME-TAG"]
)
def update_todo():
... # Do something with data, e.g. save to the DB
return TodoResponse(id=1, name="123")
@tags("SOME-TAG", "OTHER-TAG")
class View(MethodView):
@validate(...)
def get(self):
return {}
"""
# TODO
if validate_path_args:
pass
# TODO
if headers is not None:
pass
if query_string is not None:
query_string = check_query_string_schema(query_string)
if body is not None:
body = check_body_schema(body, source)
if responses is not None:
responses = check_response_schema(responses)
def decorator(func: Callable) -> Callable:
if query_string:
setattr(func, SCHEMA_QUERYSTRING_ATTRIBUTE, query_string)
if body:
setattr(func, SCHEMA_REQUEST_ATTRIBUTE, (body, source))
if responses:
setattr(func, SCHEMA_RESPONSE_ATTRIBUTE, responses)
if tags:
setattr(func, SCHEMA_TAG_ATTRIBUTE, list(set(tags)))
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
err = {}
if body:
if source == DataSource.JSON:
data = request.get_json()
else:
data = request.form
try:
body_model = body(**data)
except (TypeError, ValidationError) as ve:
err["body_params"] = str(ve)
else:
g.body_params = body_model
if query_string:
try:
query_params = query_string(**request.args)
except (TypeError, ValidationError) as ve:
err["query_params"] = str(ve)
else:
g.query_params = query_params
if err:
return jsonify(validation_error=err), BadRequest.code
result = current_app.ensure_sync(func)(*args, **kwargs)
if responses:
return check_response(result, responses)
return result
return wrapper
return decorator
| 0.72662 | 0.152473 |
from dataclasses import asdict, is_dataclass
from functools import wraps
from typing import (Any, Callable, Dict, Iterable, List, Optional, Union, cast)
from pydantic import BaseModel, ValidationError
from pydantic.dataclasses import is_builtin_dataclass
from quart import Response, current_app, g, jsonify, request
from werkzeug.datastructures import Headers
from werkzeug.exceptions import BadRequest
from schema_validator.constants import (
SCHEMA_QUERYSTRING_ATTRIBUTE, SCHEMA_REQUEST_ATTRIBUTE,
SCHEMA_RESPONSE_ATTRIBUTE, SCHEMA_TAG_ATTRIBUTE
)
from schema_validator.types import PydanticModel
from schema_validator.utils import DataSource, check_body_schema, \
check_query_string_schema, check_response_schema
async def check_response(result, response_model: Dict[int, PydanticModel]):
status_or_headers: Union[None, int, str, Dict, List] = None
headers: Optional[Headers] = None
if isinstance(result, tuple):
value, status_or_headers, headers = result + (None,) * (
3 - len(result))
else:
value = result
if isinstance(value, Response):
value = await value.get_json()
status = 200
if status_or_headers is not None and not isinstance(
status_or_headers, (Headers, dict, list)
) and str(status_or_headers).isdigit():
status = int(status_or_headers)
bad_status = BadRequest.code
for status_code, model_cls in response_model.items():
if status_code != status:
continue
if isinstance(value, dict):
try:
model_value = model_cls(**value)
except (TypeError, ValidationError) as ve:
return jsonify(validation_error=str(ve)), bad_status
elif type(value) == model_cls:
model_value = value
elif is_builtin_dataclass(value):
model_value = model_cls(**asdict(value))
else:
return jsonify(validation_error="invalid response"), bad_status
if is_dataclass(model_value):
return asdict(model_value), status_or_headers, headers
else:
model_value = cast(BaseModel, model_value)
return model_value.dict(), status_or_headers, headers
return result
def validate(
query_string: Optional[PydanticModel] = None,
body: Optional[PydanticModel] = None,
source: DataSource = DataSource.JSON,
validate_path_args: bool = False,
responses: Union[PydanticModel, Dict[int, PydanticModel], None] = None,
headers: Optional[PydanticModel] = None,
tags: Optional[Iterable[str]] = None
) -> Callable:
"""
params:
query_string:
the params in query
body:
json body or form
source:
the body source
        responses:
            the response model definition
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
from flask import Flask
from schema_validator import FlaskSchema, validate
app = Flask(__name__)
FlaskSchema(app)
OR
schema = FlaskSchema()
schema.init_app(app)
@dataclass
class Todo:
task: str
due: Optional[datetime]
class TodoResponse(BaseModel):
id: int
name: str
@app.post("/")
@validate(body=Todo, responses=TodoResponse)
def create_todo():
... # Do something with data, e.g. save to the DB
return dict(id=1, name="2")
@app.put("/")
@validate(
body=Todo,
responses={200: TodoResponse, 400: TodoResponse},
tags=["SOME-TAG"]
)
def update_todo():
... # Do something with data, e.g. save to the DB
return TodoResponse(id=1, name="123")
@tags("SOME-TAG", "OTHER-TAG")
class View(MethodView):
@validate(...)
def get(self):
return {}
"""
# TODO
if validate_path_args:
pass
# TODO
if headers is not None:
pass
if query_string is not None:
query_string = check_query_string_schema(query_string)
if body is not None:
body = check_body_schema(body, source)
if responses is not None:
responses = check_response_schema(responses)
def decorator(func: Callable) -> Callable:
if query_string:
setattr(func, SCHEMA_QUERYSTRING_ATTRIBUTE, query_string)
if body:
setattr(func, SCHEMA_REQUEST_ATTRIBUTE, (body, source))
if responses:
setattr(func, SCHEMA_RESPONSE_ATTRIBUTE, responses)
if tags:
setattr(func, SCHEMA_TAG_ATTRIBUTE, list(set(tags)))
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
err = {}
if body:
if source == DataSource.JSON:
data = await request.get_json()
else:
data = await request.form
try:
body_model = body(**data)
except (TypeError, ValidationError) as ve:
err["body_params"] = str(ve)
else:
g.body_params = body_model
if query_string:
try:
query_params = query_string(**request.args)
except (TypeError, ValidationError) as ve:
err["query_params"] = str(ve)
else:
g.query_params = query_params
if err:
return jsonify(validation_error=err), BadRequest.code
result = await current_app.ensure_async(func)(*args, **kwargs)
if responses:
return await check_response(result, responses)
return result
return wrapper
return decorator
|
schema-validator
|
/schema_validator-0.2.5.tar.gz/schema_validator-0.2.5/schema_validator/quart/validation.py
|
validation.py
|
from dataclasses import asdict, is_dataclass
from functools import wraps
from typing import (Any, Callable, Dict, Iterable, List, Optional, Union, cast)
from pydantic import BaseModel, ValidationError
from pydantic.dataclasses import is_builtin_dataclass
from quart import Response, current_app, g, jsonify, request
from werkzeug.datastructures import Headers
from werkzeug.exceptions import BadRequest
from schema_validator.constants import (
SCHEMA_QUERYSTRING_ATTRIBUTE, SCHEMA_REQUEST_ATTRIBUTE,
SCHEMA_RESPONSE_ATTRIBUTE, SCHEMA_TAG_ATTRIBUTE
)
from schema_validator.types import PydanticModel
from schema_validator.utils import DataSource, check_body_schema, \
check_query_string_schema, check_response_schema
async def check_response(result, response_model: Dict[int, PydanticModel]):
status_or_headers: Union[None, int, str, Dict, List] = None
headers: Optional[Headers] = None
if isinstance(result, tuple):
value, status_or_headers, headers = result + (None,) * (
3 - len(result))
else:
value = result
if isinstance(value, Response):
value = await value.get_json()
status = 200
if status_or_headers is not None and not isinstance(
status_or_headers, (Headers, dict, list)
) and str(status_or_headers).isdigit():
status = int(status_or_headers)
bad_status = BadRequest.code
for status_code, model_cls in response_model.items():
if status_code != status:
continue
if isinstance(value, dict):
try:
model_value = model_cls(**value)
except (TypeError, ValidationError) as ve:
return jsonify(validation_error=str(ve)), bad_status
elif type(value) == model_cls:
model_value = value
elif is_builtin_dataclass(value):
model_value = model_cls(**asdict(value))
else:
return jsonify(validation_error="invalid response"), bad_status
if is_dataclass(model_value):
return asdict(model_value), status_or_headers, headers
else:
model_value = cast(BaseModel, model_value)
return model_value.dict(), status_or_headers, headers
return result
def validate(
query_string: Optional[PydanticModel] = None,
body: Optional[PydanticModel] = None,
source: DataSource = DataSource.JSON,
validate_path_args: bool = False,
responses: Union[PydanticModel, Dict[int, PydanticModel], None] = None,
headers: Optional[PydanticModel] = None,
tags: Optional[Iterable[str]] = None
) -> Callable:
"""
params:
query_string:
the params in query
body:
json body or form
source:
the body source
        responses:
            the response model definition
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
from flask import Flask
from schema_validator import FlaskSchema, validate
app = Flask(__name__)
FlaskSchema(app)
OR
schema = FlaskSchema()
schema.init_app(app)
@dataclass
class Todo:
task: str
due: Optional[datetime]
class TodoResponse(BaseModel):
id: int
name: str
@app.post("/")
@validate(body=Todo, responses=TodoResponse)
def create_todo():
... # Do something with data, e.g. save to the DB
return dict(id=1, name="2")
@app.put("/")
@validate(
body=Todo,
responses={200: TodoResponse, 400: TodoResponse},
tags=["SOME-TAG"]
)
def update_todo():
... # Do something with data, e.g. save to the DB
return TodoResponse(id=1, name="123")
@tags("SOME-TAG", "OTHER-TAG")
class View(MethodView):
@validate(...)
def get(self):
return {}
"""
# TODO
if validate_path_args:
pass
# TODO
if headers is not None:
pass
if query_string is not None:
query_string = check_query_string_schema(query_string)
if body is not None:
body = check_body_schema(body, source)
if responses is not None:
responses = check_response_schema(responses)
def decorator(func: Callable) -> Callable:
if query_string:
setattr(func, SCHEMA_QUERYSTRING_ATTRIBUTE, query_string)
if body:
setattr(func, SCHEMA_REQUEST_ATTRIBUTE, (body, source))
if responses:
setattr(func, SCHEMA_RESPONSE_ATTRIBUTE, responses)
if tags:
setattr(func, SCHEMA_TAG_ATTRIBUTE, list(set(tags)))
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
err = {}
if body:
if source == DataSource.JSON:
data = await request.get_json()
else:
data = await request.form
try:
body_model = body(**data)
except (TypeError, ValidationError) as ve:
err["body_params"] = str(ve)
else:
g.body_params = body_model
if query_string:
try:
query_params = query_string(**request.args)
except (TypeError, ValidationError) as ve:
err["query_params"] = str(ve)
else:
g.query_params = query_params
if err:
return jsonify(validation_error=err), BadRequest.code
result = await current_app.ensure_async(func)(*args, **kwargs)
if responses:
return await check_response(result, responses)
return result
return wrapper
return decorator
| 0.732209 | 0.157266 |
Schema validation just got Pythonic
===============================================================================
**schema** is a library for validating Python data structures, such as those
obtained from config-files, forms, external services or command-line
parsing, converted from JSON/YAML (or something else) to Python data-types.
.. image:: https://secure.travis-ci.org/keleshev/schema.svg?branch=master
:target: https://travis-ci.org/keleshev/schema
.. image:: https://img.shields.io/codecov/c/github/keleshev/schema.svg
:target: http://codecov.io/github/keleshev/schema
Example
----------------------------------------------------------------------------
Here is a quick example to get a feeling of **schema**, validating a list of
entries with personal information:
.. code:: python
>>> from schema import Schema, And, Use, Optional, SchemaError
>>> schema = Schema([{'name': And(str, len),
... 'age': And(Use(int), lambda n: 18 <= n <= 99),
... Optional('gender'): And(str, Use(str.lower),
... lambda s: s in ('squid', 'kid'))}])
>>> data = [{'name': 'Sue', 'age': '28', 'gender': 'Squid'},
... {'name': 'Sam', 'age': '42'},
... {'name': 'Sacha', 'age': '20', 'gender': 'KID'}]
>>> validated = schema.validate(data)
>>> assert validated == [{'name': 'Sue', 'age': 28, 'gender': 'squid'},
... {'name': 'Sam', 'age': 42},
... {'name': 'Sacha', 'age' : 20, 'gender': 'kid'}]
If data is valid, ``Schema.validate`` will return the validated data
(optionally converted with `Use` calls, see below).
If data is invalid, ``Schema`` will raise ``SchemaError`` exception.
If you just want to check that the data is valid, ``schema.is_valid(data)`` will
return ``True`` or ``False``.
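For instance, reusing the schema and data above, ``is_valid`` gives a quick boolean check without raising (a small illustrative sketch):
.. code:: python
>>> schema.is_valid(data)
True
>>> schema.is_valid([{'name': '', 'age': '28'}])
False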
Installation
-------------------------------------------------------------------------------
Use `pip <http://pip-installer.org>`_ or easy_install::
pip install schema
Alternatively, you can just drop ``schema.py`` file into your project—it is
self-contained.
- **schema** is tested with Python 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9 and PyPy.
- **schema** follows `semantic versioning <http://semver.org>`_.
How ``Schema`` validates data
-------------------------------------------------------------------------------
Types
~~~~~
If ``Schema(...)`` encounters a type (such as ``int``, ``str``, ``object``,
etc.), it will check if the corresponding piece of data is an instance of that type,
otherwise it will raise ``SchemaError``.
.. code:: python
>>> from schema import Schema
>>> Schema(int).validate(123)
123
>>> Schema(int).validate('123')
Traceback (most recent call last):
...
schema.SchemaUnexpectedTypeError: '123' should be instance of 'int'
>>> Schema(object).validate('hai')
'hai'
Callables
~~~~~~~~~
If ``Schema(...)`` encounters a callable (function, class, or object with
``__call__`` method) it will call it, and if its return value evaluates to
``True`` it will continue validating, else—it will raise ``SchemaError``.
.. code:: python
>>> import os
>>> Schema(os.path.exists).validate('./')
'./'
>>> Schema(os.path.exists).validate('./non-existent/')
Traceback (most recent call last):
...
schema.SchemaError: exists('./non-existent/') should evaluate to True
>>> Schema(lambda n: n > 0).validate(123)
123
>>> Schema(lambda n: n > 0).validate(-12)
Traceback (most recent call last):
...
schema.SchemaError: <lambda>(-12) should evaluate to True
"Validatables"
~~~~~~~~~~~~~~
If ``Schema(...)`` encounters an object with method ``validate`` it will run
this method on corresponding data as ``data = obj.validate(data)``. This method
may raise ``SchemaError`` exception, which will tell ``Schema`` that that piece
of data is invalid, otherwise—it will continue validating.
An example of "validatable" is ``Regex``, that tries to match a string or a
buffer with the given regular expression (itself as a string, buffer or
compiled regex ``SRE_Pattern``):
.. code:: python
>>> from schema import Regex
>>> import re
>>> Regex(r'^foo').validate('foobar')
'foobar'
>>> Regex(r'^[A-Z]+$', flags=re.I).validate('those-dashes-dont-match')
Traceback (most recent call last):
...
schema.SchemaError: Regex('^[A-Z]+$', flags=re.IGNORECASE) does not match 'those-dashes-dont-match'
For a more general case, you can use ``Use`` for creating such objects.
``Use`` helps to use a function or type to convert a value while validating it:
.. code:: python
>>> from schema import Use
>>> Schema(Use(int)).validate('123')
123
>>> Schema(Use(lambda f: open(f, 'a'))).validate('LICENSE-MIT')
<_io.TextIOWrapper name='LICENSE-MIT' mode='a' encoding='UTF-8'>
Dropping the details, ``Use`` is basically:
.. code:: python
class Use(object):
def __init__(self, callable_):
self._callable = callable_
def validate(self, data):
try:
return self._callable(data)
except Exception as e:
raise SchemaError('%r raised %r' % (self._callable.__name__, e))
Sometimes you need to transform and validate part of data, but keep original data unchanged.
``Const`` helps to keep your data safe:
.. code:: python
>> from schema import Use, Const, And, Schema
>> from datetime import datetime
>> is_future = lambda date: datetime.now() > date
>> to_json = lambda v: {"timestamp": v}
>> Schema(And(Const(And(Use(datetime.fromtimestamp), is_future)), Use(to_json))).validate(1234567890)
{"timestamp": 1234567890}
Now you can write your own validation-aware classes and data types.
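As a minimal sketch (not part of the library itself), a custom validatable only needs a ``validate`` method that returns the data or raises ``SchemaError``; the ``NonEmpty`` class below is a hypothetical example:
.. code:: python
from schema import Schema, SchemaError
class NonEmpty(object):
    """A tiny custom validatable: any object with a ``validate`` method."""
    def validate(self, data):
        if not data:
            raise SchemaError('%r is empty' % (data,))
        return data
Schema({'name': NonEmpty()}).validate({'name': 'Sam'})  # -> {'name': 'Sam'}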
Lists, similar containers
~~~~~~~~~~~~~~~~~~~~~~~~~
If ``Schema(...)`` encounters an instance of ``list``, ``tuple``, ``set``
or ``frozenset``, it will validate contents of corresponding data container
against all schemas listed inside that container and aggregate all errors:
.. code:: python
>>> Schema([1, 0]).validate([1, 1, 0, 1])
[1, 1, 0, 1]
>>> Schema((int, float)).validate((5, 7, 8, 'not int or float here'))
Traceback (most recent call last):
...
schema.SchemaError: Or(<class 'int'>, <class 'float'>) did not validate 'not int or float here'
'not int or float here' should be instance of 'int'
'not int or float here' should be instance of 'float'
Dictionaries
~~~~~~~~~~~~
If ``Schema(...)`` encounters an instance of ``dict``, it will validate data
key-value pairs:
.. code:: python
>>> d = Schema({'name': str,
... 'age': lambda n: 18 <= n <= 99}).validate({'name': 'Sue', 'age': 28})
>>> assert d == {'name': 'Sue', 'age': 28}
You can specify keys as schemas too:
.. code:: python
>>> schema = Schema({str: int, # string keys should have integer values
... int: None}) # int keys should be always None
>>> data = schema.validate({'key1': 1, 'key2': 2,
... 10: None, 20: None})
>>> schema.validate({'key1': 1,
... 10: 'not None here'})
Traceback (most recent call last):
...
schema.SchemaError: Key '10' error:
None does not match 'not None here'
This is useful if you want to check certain key-values, but don't care
about others:
.. code:: python
>>> schema = Schema({'<id>': int,
... '<file>': Use(open),
... str: object}) # don't care about other str keys
>>> data = schema.validate({'<id>': 10,
... '<file>': 'README.rst',
... '--verbose': True})
You can mark a key as optional as follows:
.. code:: python
>>> from schema import Optional
>>> Schema({'name': str,
... Optional('occupation'): str}).validate({'name': 'Sam'})
{'name': 'Sam'}
``Optional`` keys can also carry a ``default``, to be used when no key in the
data matches:
.. code:: python
>>> from schema import Optional
>>> Schema({Optional('color', default='blue'): str,
... str: str}).validate({'texture': 'furry'}
... ) == {'color': 'blue', 'texture': 'furry'}
True
Defaults are used verbatim, not passed through any validators specified in the
value.
default can also be a callable:
.. code:: python
>>> from schema import Schema, Optional
>>> Schema({Optional('data', default=dict): {}}).validate({}) == {'data': {}}
True
Also, a caveat: If you specify types, **schema** won't validate the empty dict:
.. code:: python
>>> Schema({int:int}).is_valid({})
False
To do that, you need ``Schema(Or({int:int}, {}))``. This is unlike what happens with
lists, where ``Schema([int]).is_valid([])`` will return True.
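A short sketch of that workaround, so an empty mapping is also accepted:
.. code:: python
>>> from schema import Or
>>> Schema(Or({int: int}, {})).is_valid({})
True
>>> Schema(Or({int: int}, {})).is_valid({1: 1})
True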
**schema** has classes ``And`` and ``Or`` that help validating several schemas
for the same data:
.. code:: python
>>> from schema import And, Or
>>> Schema({'age': And(int, lambda n: 0 < n < 99)}).validate({'age': 7})
{'age': 7}
>>> Schema({'password': And(str, lambda s: len(s) > 6)}).validate({'password': 'hai'})
Traceback (most recent call last):
...
schema.SchemaError: Key 'password' error:
<lambda>('hai') should evaluate to True
>>> Schema(And(Or(int, float), lambda x: x > 0)).validate(3.1415)
3.1415
In a dictionary, you can also combine two keys in a "one or the other" manner. To do
so, use the `Or` class as a key:
.. code:: python
>>> from schema import Or, Schema
>>> schema = Schema({
... Or("key1", "key2", only_one=True): str
... })
>>> schema.validate({"key1": "test"}) # Ok
{'key1': 'test'}
>>> schema.validate({"key1": "test", "key2": "test"}) # SchemaError
Traceback (most recent call last):
...
schema.SchemaOnlyOneAllowedError: There are multiple keys present from the Or('key1', 'key2') condition
Hooks
~~~~~~~~~~
You can define hooks which are functions that are executed whenever a valid key:value is found.
The `Forbidden` class is an example of this.
You can mark a key as forbidden as follows:
.. code:: python
>>> from schema import Forbidden
>>> Schema({Forbidden('age'): object}).validate({'age': 50})
Traceback (most recent call last):
...
schema.SchemaForbiddenKeyError: Forbidden key encountered: 'age' in {'age': 50}
A few things are worth noting. First, the value paired with the forbidden
key determines whether it will be rejected:
.. code:: python
>>> Schema({Forbidden('age'): str, 'age': int}).validate({'age': 50})
{'age': 50}
Note: if we hadn't supplied the 'age' key here, the call would have failed too, but with
SchemaWrongKeyError, not SchemaForbiddenKeyError.
Second, Forbidden has a higher priority than standard keys, and consequently than Optional.
This means we can do that:
.. code:: python
>>> Schema({Forbidden('age'): object, Optional(str): object}).validate({'age': 50})
Traceback (most recent call last):
...
schema.SchemaForbiddenKeyError: Forbidden key encountered: 'age' in {'age': 50}
You can also define your own hooks. The following hook will call `_my_function` if `key` is encountered.
.. code:: python
from schema import Hook
def _my_function(key, scope, error):
print(key, scope, error)
Hook("key", handler=_my_function)
Here's an example where a `Deprecated` class is added to log warnings whenever a key is encountered:
.. code:: python
from schema import Hook, Schema
class Deprecated(Hook):
def __init__(self, *args, **kwargs):
kwargs["handler"] = lambda key, *args: logging.warn(f"`{key}` is deprecated. " + (self._error or ""))
super(Deprecated, self).__init__(*args, **kwargs)
Schema({Deprecated("test", "custom error message."): object}, ignore_extra_keys=True).validate({"test": "value"})
...
WARNING: `test` is deprecated. custom error message.
Extra Keys
~~~~~~~~~~
The ``Schema(...)`` parameter ``ignore_extra_keys`` causes validation to ignore extra keys in a dictionary, and also to not return them after validating.
.. code:: python
>>> schema = Schema({'name': str}, ignore_extra_keys=True)
>>> schema.validate({'name': 'Sam', 'age': '42'})
{'name': 'Sam'}
If you would like any extra keys returned, use ``object: object`` as one of the key/value pairs, which will match any key and any value.
Otherwise, extra keys will raise a ``SchemaError``.
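A brief sketch of the ``object: object`` catch-all mentioned above, which keeps the extra keys in the validated result:
.. code:: python
>>> schema = Schema({'name': str, object: object})
>>> schema.validate({'name': 'Sam', 'age': '42'}) == {'name': 'Sam', 'age': '42'}
True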
Customized Validation
~~~~~~~~~~~~~~~~~~~~~~~
The ``Schema.validate`` method accepts additional keyword arguments. The
keyword arguments will be propagated to the ``validate`` method of any
child validatables (including any ad-hoc ``Schema`` objects), or the default
value callable (if a callable is specified) for ``Optional`` keys.
This feature can be used together with inheritance of the ``Schema`` class
for customized validation.
Here is an example of a "post-validation" hook that runs after validation
against a sub-schema in a larger schema:
.. code:: python
class EventSchema(schema.Schema):
def validate(self, data, _is_event_schema=True):
data = super(EventSchema, self).validate(data, _is_event_schema=False)
if _is_event_schema and data.get("minimum", None) is None:
data["minimum"] = data["capacity"]
return data
events_schema = schema.Schema(
{
str: EventSchema({
"capacity": int,
schema.Optional("minimum"): int, # default to capacity
})
}
)
data = {'event1': {'capacity': 1}, 'event2': {'capacity': 2, 'minimum': 3}}
events = events_schema.validate(data)
assert events['event1']['minimum'] == 1 # == capacity
assert events['event2']['minimum'] == 3
Note that the additional keyword argument ``_is_event_schema`` is necessary to
limit the customized behavior to the ``EventSchema`` object itself so that it
won't affect any recursive invocation of ``self.__class__.validate`` for the
child schemas (e.g., the call to ``Schema("capacity").validate("capacity")``).
User-friendly error reporting
-------------------------------------------------------------------------------
You can pass a keyword argument ``error`` to any of the validatable classes
(such as ``Schema``, ``And``, ``Or``, ``Regex``, ``Use``) to report this error
instead of a built-in one.
.. code:: python
>>> Schema(Use(int, error='Invalid year')).validate('XVII')
Traceback (most recent call last):
...
schema.SchemaError: Invalid year
You can see all errors that occurred by accessing the exception's ``exc.autos``
for auto-generated error messages, and ``exc.errors`` for errors
which had ``error`` text passed to them.
You can exit with ``sys.exit(exc.code)`` if you want to show the messages
to the user without traceback. ``error`` messages are given precedence in that
case.
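If you want to inspect those messages programmatically, a minimal sketch using only the attributes mentioned above looks like this:
.. code:: python
>>> from schema import Schema, Use, SchemaError
>>> try:
...     Schema(Use(int, error='Invalid year')).validate('XVII')
... except SchemaError as exc:
...     autos = exc.autos      # auto-generated messages
...     errors = exc.errors    # messages passed via error=...
...     message = exc.code     # what sys.exit(exc.code) would display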
A JSON API example
-------------------------------------------------------------------------------
Here is a quick example: validating a
`create a gist <http://developer.github.com/v3/gists/>`_
request from the GitHub API.
.. code:: python
>>> gist = '''{"description": "the description for this gist",
... "public": true,
... "files": {
... "file1.txt": {"content": "String file contents"},
... "other.txt": {"content": "Another file contents"}}}'''
>>> from schema import Schema, And, Use, Optional
>>> import json
>>> gist_schema = Schema(And(Use(json.loads), # first convert from JSON
... # use str since json returns unicode
... {Optional('description'): str,
... 'public': bool,
... 'files': {str: {'content': str}}}))
>>> gist = gist_schema.validate(gist)
# gist:
{u'description': u'the description for this gist',
u'files': {u'file1.txt': {u'content': u'String file contents'},
u'other.txt': {u'content': u'Another file contents'}},
u'public': True}
Using **schema** with `docopt <http://github.com/docopt/docopt>`_
-------------------------------------------------------------------------------
Assume you are using **docopt** with the following usage pattern::
Usage: my_program.py [--count=N] <path> <files>...
and you would like to validate that ``<files>`` are readable, and that
``<path>`` exists, and that ``--count`` is either an integer from 0 to 5, or
``None``.
Assuming **docopt** returns the following dict:
.. code:: python
>>> args = {'<files>': ['LICENSE-MIT', 'setup.py'],
... '<path>': '../',
... '--count': '3'}
this is how you validate it using ``schema``:
.. code:: python
>>> from schema import Schema, And, Or, Use
>>> import os
>>> s = Schema({'<files>': [Use(open)],
... '<path>': os.path.exists,
... '--count': Or(None, And(Use(int), lambda n: 0 < n < 5))})
>>> args = s.validate(args)
>>> args['<files>']
[<_io.TextIOWrapper name='LICENSE-MIT' ...>, <_io.TextIOWrapper name='setup.py' ...]
>>> args['<path>']
'../'
>>> args['--count']
3
As you can see, **schema** validated data successfully, opened files and
converted ``'3'`` to ``int``.
JSON schema
-----------
You can also generate standard `draft-07 JSON schema <https://json-schema.org/>`_ from a dict ``Schema``.
This can be used to add word completion, validation, and documentation directly in code editors.
The output schema can also be used with JSON schema compatible libraries.
JSON: Generating
~~~~~~~~~~~~~~~~
Just define your schema normally and call ``.json_schema()`` on it. The output is a Python dict; you need to dump it to JSON yourself.
.. code:: python
>>> from schema import Optional, Schema
>>> import json
>>> s = Schema({"test": str,
... "nested": {Optional("other"): str}
... })
>>> json_schema = json.dumps(s.json_schema("https://example.com/my-schema.json"))
# json_schema
{
"type":"object",
"properties": {
"test": {"type": "string"},
"nested": {
"type":"object",
"properties": {
"other": {"type": "string"}
},
"required": [],
"additionalProperties": false
}
},
"required":[
"test",
"nested"
],
"additionalProperties":false,
"$id":"https://example.com/my-schema.json",
"$schema":"http://json-schema.org/draft-07/schema#"
}
You can add descriptions for the schema elements using the ``Literal`` object instead of a string. The main schema can also have a description.
These will appear in IDEs to help your users write a configuration.
.. code:: python
>>> from schema import Literal, Schema
>>> import json
>>> s = Schema({Literal("project_name", description="Names must be unique"): str}, description="Project schema")
>>> json_schema = json.dumps(s.json_schema("https://example.com/my-schema.json"), indent=4)
# json_schema
{
"type": "object",
"properties": {
"project_name": {
"description": "Names must be unique",
"type": "string"
}
},
"required": [
"project_name"
],
"additionalProperties": false,
"$id": "https://example.com/my-schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Project schema"
}
JSON: Supported validations
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The resulting JSON schema is not guaranteed to accept the same objects as the library would accept, since some validations are not implemented or
have no JSON schema equivalent. This is the case for the ``Use`` and ``Hook`` objects, for example.
Implemented
'''''''''''
`Object properties <https://json-schema.org/understanding-json-schema/reference/object.html#properties>`_
Use a dict literal. The dict keys are the JSON schema properties.
Example:
``Schema({"test": str})``
becomes
``{'type': 'object', 'properties': {'test': {'type': 'string'}}, 'required': ['test'], 'additionalProperties': False}``.
Please note that attributes are required by default. To create optional attributes use ``Optional``, like so:
``Schema({Optional("test"): str})``
becomes
``{'type': 'object', 'properties': {'test': {'type': 'string'}}, 'required': [], 'additionalProperties': False}``
additionalProperties is set to true when at least one of the conditions is met:
- ignore_extra_keys is True
- at least one key is `str` or `object`
For example:
``Schema({str: str})`` and ``Schema({}, ignore_extra_keys=True)``
both become
``{'type': 'object', 'properties' : {}, 'required': [], 'additionalProperties': True}``
and
``Schema({})``
becomes
``{'type': 'object', 'properties' : {}, 'required': [], 'additionalProperties': False}``
Types
Use the Python type name directly. It will be converted to the JSON name:
- ``str`` -> `string <https://json-schema.org/understanding-json-schema/reference/string.html>`_
- ``int`` -> `integer <https://json-schema.org/understanding-json-schema/reference/numeric.html#integer>`_
- ``float`` -> `number <https://json-schema.org/understanding-json-schema/reference/numeric.html#number>`_
- ``bool`` -> `boolean <https://json-schema.org/understanding-json-schema/reference/boolean.html>`_
- ``list`` -> `array <https://json-schema.org/understanding-json-schema/reference/array.html>`_
- ``dict`` -> `object <https://json-schema.org/understanding-json-schema/reference/object.html>`_
Example:
``Schema(float)``
becomes
``{"type": "number"}``
`Array items <https://json-schema.org/understanding-json-schema/reference/array.html#items>`_
Surround a schema with ``[]``.
Example:
``Schema([str])`` means an array of strings and becomes:
``{'type': 'array', 'items': {'type': 'string'}}``
`Enumerated values <https://json-schema.org/understanding-json-schema/reference/generic.html#enumerated-values>`_
Use `Or`.
Example:
``Schema(Or(1, 2, 3))`` becomes
``{"enum": [1, 2, 3]}``
`Constant values <https://json-schema.org/understanding-json-schema/reference/generic.html#constant-values>`_
Use the value itself.
Example:
``Schema("name")`` becomes
``{"const": "name"}``
`Regular expressions <https://json-schema.org/understanding-json-schema/reference/regular_expressions.html>`_
Use ``Regex``.
Example:
``Schema(Regex("^v\d+"))`` becomes
``{'type': 'string', 'pattern': '^v\\d+'}``
`Annotations (title and description) <https://json-schema.org/understanding-json-schema/reference/generic.html#annotations>`_
You can use the ``name`` and ``description`` parameters of the ``Schema`` object init method.
To add description to keys, replace a str with a ``Literal`` object.
Example:
``Schema({Literal("test", description="A description"): str})``
is equivalent to
``Schema({"test": str})``
with the description added to the resulting JSON schema.
`Combining schemas with allOf <https://json-schema.org/understanding-json-schema/reference/combining.html#allof>`_
Use ``And``
Example:
``Schema(And(str, "value"))``
becomes
``{"allOf": [{"type": "string"}, {"const": "value"}]}``
Note that this example is not really useful in the real world, since ``const`` already implies the type.
`Combining schemas with anyOf <https://json-schema.org/understanding-json-schema/reference/combining.html#anyof>`_
Use ``Or``
Example:
``Schema(Or(str, int))``
becomes
``{"anyOf": [{"type": "string"}, {"type": "integer"}]}``
Not implemented
'''''''''''''''
The following JSON schema validations cannot be generated by this library.
- `String length <https://json-schema.org/understanding-json-schema/reference/string.html#length>`_
However, those can be implemented using ``Regex``
- `String format <https://json-schema.org/understanding-json-schema/reference/string.html#format>`_
However, those can be implemented using ``Regex``
- `Object dependencies <https://json-schema.org/understanding-json-schema/reference/object.html#dependencies>`_
- `Array length <https://json-schema.org/understanding-json-schema/reference/array.html#length>`_
- `Array uniqueness <https://json-schema.org/understanding-json-schema/reference/array.html#uniqueness>`_
- `Numeric multiples <https://json-schema.org/understanding-json-schema/reference/numeric.html#multiples>`_
- `Numeric ranges <https://json-schema.org/understanding-json-schema/reference/numeric.html#range>`_
- `Property Names <https://json-schema.org/understanding-json-schema/reference/object.html#property-names>`_
Not implemented. We suggest listing the possible keys instead. As a tip, you can use ``Or`` as a dict key.
Example:
``Schema({Or("name1", "name2"): str})``
- `Annotations (default and examples) <https://json-schema.org/understanding-json-schema/reference/generic.html#annotations>`_
- `Combining schemas with oneOf <https://json-schema.org/understanding-json-schema/reference/combining.html#oneof>`_
- `Not <https://json-schema.org/understanding-json-schema/reference/combining.html#not>`_
- `Object size <https://json-schema.org/understanding-json-schema/reference/object.html#size>`_
- `additionalProperties having a different schema (true and false is supported)`
JSON: Minimizing output size
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Explicit Reuse
''''''''''''''
If your JSON schema is big and has a lot of repetition, it can be made simpler and smaller by defining Schema objects as references.
These references will be placed in a "definitions" section in the main schema.
`You can look at the JSON schema documentation for more information <https://json-schema.org/understanding-json-schema/structuring.html#reuse>`_
.. code:: python
>>> from schema import Optional, Schema
>>> import json
>>> s = Schema({"test": str,
... "nested": Schema({Optional("other"): str}, name="nested", as_reference=True)
... })
>>> json_schema = json.dumps(s.json_schema("https://example.com/my-schema.json"), indent=4)
# json_schema
{
"type": "object",
"properties": {
"test": {
"type": "string"
},
"nested": {
"$ref": "#/definitions/nested"
}
},
"required": [
"test",
"nested"
],
"additionalProperties": false,
"$id": "https://example.com/my-schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"nested": {
"type": "object",
"properties": {
"other": {
"type": "string"
}
},
"required": [],
"additionalProperties": false
}
}
}
This becomes really useful when using the same object several times:
.. code:: python
>>> from schema import Optional, Or, Schema
>>> import json
>>> language_configuration = Schema({"autocomplete": bool, "stop_words": [str]}, name="language", as_reference=True)
>>> s = Schema({Or("ar", "cs", "de", "el", "eu", "en", "es", "fr"): language_configuration})
>>> json_schema = json.dumps(s.json_schema("https://example.com/my-schema.json"), indent=4)
# json_schema
{
"type": "object",
"properties": {
"ar": {
"$ref": "#/definitions/language"
},
"cs": {
"$ref": "#/definitions/language"
},
"de": {
"$ref": "#/definitions/language"
},
"el": {
"$ref": "#/definitions/language"
},
"eu": {
"$ref": "#/definitions/language"
},
"en": {
"$ref": "#/definitions/language"
},
"es": {
"$ref": "#/definitions/language"
},
"fr": {
"$ref": "#/definitions/language"
}
},
"required": [],
"additionalProperties": false,
"$id": "https://example.com/my-schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"language": {
"type": "object",
"properties": {
"autocomplete": {
"type": "boolean"
},
"stop_words": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": [
"autocomplete",
"stop_words"
],
"additionalProperties": false
}
}
}
Automatic reuse
'''''''''''''''
If you want to minimize the output size without using names explicitly, you can have the library generate hashes of parts of the output JSON
schema and use them as references throughout.
Enable this behaviour by passing the ``use_refs`` parameter to the ``json_schema`` method.
Be aware that this method is less widely compatible with IDEs and JSON schema libraries,
and it produces a JSON schema that is harder for humans to read.
.. code:: python
>>> from schema import Optional, Or, Schema
>>> import json
>>> language_configuration = Schema({"autocomplete": bool, "stop_words": [str]})
>>> s = Schema({Or("ar", "cs", "de", "el", "eu", "en", "es", "fr"): language_configuration})
>>> json_schema = json.dumps(s.json_schema("https://example.com/my-schema.json", use_refs=True), indent=4)
# json_schema
{
"type": "object",
"properties": {
"ar": {
"type": "object",
"properties": {
"autocomplete": {
"type": "boolean",
"$id": "#6456104181059880193"
},
"stop_words": {
"type": "array",
"items": {
"type": "string",
"$id": "#1856069563381977338"
}
}
},
"required": [
"autocomplete",
"stop_words"
],
"additionalProperties": false
},
"cs": {
"type": "object",
"properties": {
"autocomplete": {
"$ref": "#6456104181059880193"
},
"stop_words": {
"type": "array",
"items": {
"$ref": "#1856069563381977338"
},
"$id": "#-5377945144312515805"
}
},
"required": [
"autocomplete",
"stop_words"
],
"additionalProperties": false
},
"de": {
"type": "object",
"properties": {
"autocomplete": {
"$ref": "#6456104181059880193"
},
"stop_words": {
"$ref": "#-5377945144312515805"
}
},
"required": [
"autocomplete",
"stop_words"
],
"additionalProperties": false,
"$id": "#-8142886105174600858"
},
"el": {
"$ref": "#-8142886105174600858"
},
"eu": {
"$ref": "#-8142886105174600858"
},
"en": {
"$ref": "#-8142886105174600858"
},
"es": {
"$ref": "#-8142886105174600858"
},
"fr": {
"$ref": "#-8142886105174600858"
}
},
"required": [],
"additionalProperties": false,
"$id": "https://example.com/my-schema.json",
"$schema": "http://json-schema.org/draft-07/schema#"
}
schema | /schema-0.7.5.tar.gz/schema-0.7.5/README.rst | README.rst
| 0.928547 | 0.667039 |
# Changelog
## v0.7.4 (2021-02-01)
### Fixes
* Don't double-format errors. fixes #240 (#247) [Leif Ryge]
* Fix "Unknown format code" in Python 3.8 (#245) [Denis Blanchette]
* JSON Schema: Allow using $ref when schema is not a dict (#244) [Denis Blanchette]
* JSON Schema: Set additionalProperties true when dict contains str as key (#243) [Denis Blanchette]
## v0.7.3 (2020-07-31)
### Fixes
* JSON Schema: Support schemas where the root is not a dict. [Stavros Korokithakis]
* Do not drop previous errors within an Or criterion. [Stavros Korokithakis]
## v0.7.1 (2019-09-09)
### Features
* JSON Schema: Include default values. [Stavros Korokithakis]
* JSON schema with common definitions + Update README. [Stavros Korokithakis]
* Add references to JSON schema rendering. [Stavros Korokithakis]
* Add the "Literal" type for JSONSchema. [Stavros Korokithakis]
* Improve JSON schema generation (#206) [Denis Blanchette]
### Fixes
* JSON Schema: Fix allOf and oneOf with only one condition. [Stavros Korokithakis]
* Fix readme code block typo. [Stavros Korokithakis]
* JSON Schema: Don't add a description in a ref. [Stavros Korokithakis]
* JSON Schema: Fix using `dict` as type. [Stavros Korokithakis]
* Fix using Literal in enum in JSON Schema. [Stavros Korokithakis]
## v0.7.0 (2019-02-25)
### Features
* Add Hook class, which allows introducing custom handlers (#175) [Julien Duchesne]
### Fixes
* Add pre-commit to CI (#187) [Stavros Korokithakis]
* Use correct singular/plural form of “key(s)” in error messages (#184) [Joel Rosdahl]
* When ignoring extra keys, Or's only_one should still be handled (#181) [Julien Duchesne]
* Fix Or reset() when Or is Optional (#178) [Julien Duchesne]
* Don't accept booleans as instances of ints (#176) [Brandon Skari]
* Remove assert statements (#170) [Ryan Morshead]
## v0.6.8 (2018-06-14)
### Features
* Add an is_valid method to the schema (as in #134) (#150) [Shailyn Ortiz]
### Fixes
* Fix typo in schema.py: vaidated->validated (#151) [drootnar]
* Fix callable check under PyPy2 (#149) [cfs-pure]
## v0.6.6 (2017-04-26)
### Fixes
* Schema can be inherited (#127) [Hiroyuki Ishii]
* Show a key error if a dict error happens. [Stavros Korokithakis]
## v0.6.4 (2016-09-19)
### Fixes
* Revert the optional error commit. [Stavros Korokithakis]
## v0.6.3 (2016-09-19)
### Fixes
* Sort missing keys. [Stavros Korokithakis]
## v0.6.2 (2016-07-27)
### Fixes
* Add SchemaError SubClasses: SchemaWrongKey, SchemaMissingKeyError (#111) [Stavros Korokithakis]
## v0.6.1 (2016-07-27)
### Fixes
* Handle None as the error message properly. [Stavros Korokithakis]
## v0.6.0 (2016-07-18)
### Features
* Add the "Regex" class. [Stavros Korokithakis]
schema | /schema-0.7.5.tar.gz/schema-0.7.5/CHANGELOG.md | CHANGELOG.md
`schema2rst` generates reST documentation from a database schema
Features
========
Generates a table-definitions document from a database schema.
`schema2rst` recognizes the comments on your database tables and columns,
and reflects them in the generated docs.
Setup
=====
Use easy_install (or pip)::
$ sudo easy_install schema2rst
Configuration
=============
Create a config.yaml that describes how to connect to your database server.
This is an example for a MySQL server::
type: mysql
db: sample
host: localhost
user: username
passwd: passw0rd
The `type` parameter accepts these values: mysql, mysql+pymysql, postgresql
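If you connect to PostgreSQL instead, presumably only the `type` value changes; the remaining keys stay the same as above (the values below are placeholders)::
type: postgresql
db: sample
host: localhost
user: username
passwd: passw0rd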
Usage
=====
Execute the schema2rst command::
$ schema2rst -c config.yaml -o database.rst
Examples
========
You can see an example at http://tk0miya.bitbucket.org/schema2rst/build/html/ .
Requirements
============
* Python 2.6, 2.7, 3.2, 3.3
* SQLAlchemy
* PyYAML
* Six
* pymysql or MySQL-python (optional)
* psycopg2 (optional)
License
=======
Apache License 2.0
schema2rst | /schema2rst-0.9.0.tar.gz/schema2rst-0.9.0/README.rst | README.rst
===================
JSON-Schema builder
===================
Helpers to help you define JSON schemas for either validation or publication.
Requirements
============
It requires Python 2.7 and ``jsonschema``. ``jsonschema`` or ``setuptools``
should be installed with Python.
Install
=======
Using pip::
pip install schemabuilder
Or easy_install::
easy_install schemabuilder
You may install it manually::
git clone https://github.com/dinoboff/schemabuilder.git
cd schemabuilder
python setup.py install
Usage
=====
Primitives
----------
JSON schema primitives are represented by objects of the following types:
* ``schemabuilder.Str``
* ``schemabuilder.Bool``
* ``schemabuilder.Number``
* ``schemabuilder.Int``
* ``schemabuilder.Object``
* ``schemabuilder.Array``
.. code-block:: python
>>> import schemabuilder as jsb
>>> import pprint
>>>
>>> name = jsb.Str(pattern="^[a-zA-Z][- 'a-zA-Z0-9]+")
>>> email = jsb.Str(format="email")
>>> user = jsb.Object(properties={
... 'name': name(required=True),
... 'email': email(),
... 'home': jsb.Str(format='uri'),
... })
>>> pprint.pprint(user.to_dict())
{'properties': {'email': {'type': 'string'},
'home': {'format': 'uri', 'type': 'string'},
'name': {'type': 'string'}},
'required': ['name'],
'type': 'object'}
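Because ``to_dict()`` returns a plain JSON-schema dict, it can also be handed directly to ``jsonschema`` if you prefer to run validation yourself. This is only a sketch of that idea, assuming the standard ``jsonschema.validate(instance, schema)`` entry point:
.. code-block:: python
>>> import jsonschema
>>> jsonschema.validate({'name': 'bob'}, user.to_dict())
>>> jsonschema.validate({}, user.to_dict())
Traceback (most recent call last):
...
jsonschema.exceptions.ValidationError: 'name' is a required property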
Schema
------
Schema collects those definitions for validation (using ``jsonschema``) or
publication.
.. code-block:: python
>>> import schemabuilder as jsb
>>> import pprint
>>>
>>> my_schemas = jsb.Schema(id='http://example.com/schemas.json#')
>>> name = my_schemas.define(
... 'name', jsb.Str(pattern="^[a-zA-Z][- 'a-zA-Z0-9]+")
... )
>>> email = my_schemas.define('email', jsb.Str(format="email"))
>>> user = my_schemas.define('user', jsb.Object(properties={
... 'name': name(required=True),
... 'email': email(required=True),
... }))
>>>
>>> user.validate({'name': 'bob'})
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "schemabuilder/schema.py", line 50, in validate
validator.validate(data)
File "/Users/bob/pyenv/lib/python2.7/site-packages/jsonschema/validators.py", line 117, in validate
raise error
jsonschema.exceptions.ValidationError: 'email' is a required property
Failed validating 'required' in schema:
{'properties': {'email': {'$ref': '#/definitions/email'},
'name': {'$ref': '#/definitions/name'}},
'required': ['name', 'email'],
'type': 'object'}
On instance:
{'name': 'bob'}
>>>
>>> user.validate({'name': 'bob', 'email': '[email protected]'})
>>>
>>> import json
>>> print json.dumps(my_schemas.to_dict(), indent=4)
{
"definitions": {
"email": {
"type": "string",
"format": "email"
},
"user": {
"required": [
"name",
"email"
],
"type": "object",
"properties": {
"name": {
"$ref": "#/definitions/name"
},
"email": {
"$ref": "#/definitions/email"
}
}
},
"name": {
"pattern": "^[a-zA-Z][- 'a-zA-Z0-9]+",
"type": "string"
}
},
"id": "http://example.com/schemas.json#",
"$schema": "http://json-schema.org/draft-04/schema#"
}
schemabuilder | /schemabuilder-0.3.0.tar.gz/schemabuilder-0.3.0/README.rst | README.rst
schemaconvertor
===============
**schemaconvertor** provides a way to convert objects using a schema: the schema specifies which parts of the object are serialized and which type each part is converted to, and the result can then be further serialized to JSON.
Install: ``pip install schemaconvertor``
Project: `github <https://github.com/MrLYC/schemaconvertor>`__
`pypi <https://pypi.python.org/pypi/schemaconvertor/>`__
Version: 0.3
Demo
----
Suppose we have a simple data type ``User``:
.. code:: py
from collections import namedtuple
User = namedtuple("User", ["name", "password", "age"])
An object can be converted by specifying a schema:
.. code:: py
schema = {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "integer"
}
}
}
user = User(name="lyc", password="schemaconvertor", age="24")
from schemaconvertor.convertor import convert_by_schema
print convert_by_schema(user, schema)
Output: > {'age': 24, 'name': 'lyc'}
More examples: `demo
0.3 <https://github.com/MrLYC/schemaconvertor/blob/master/schemaconvertor/tests/test_demo.py>`__
Reference
---------
Basic fields
~~~~~~~~~~~~
version
^^^^^^^
The **version** field identifies the schema version.
description
^^^^^^^^^^^
The **description** field gives a human-readable description of the schema.
encoding
^^^^^^^^
**encoding** specifies the character encoding of the schema's **string** fields; the default is *utf-8*.
decoderrors
^^^^^^^^^^^
**decoderrors** specifies how decoding failures of **string** fields are handled. It is passed as the second argument to ``str.decode``; the main options are *strict*, *ignore* and *replace*, and the default is ``strict``.
type
^^^^
The **type** field specifies the final type of the converted data. The main type mappings are shown in the table below:
+-----------+-------------+
| type | Python |
+===========+=============+
| string | unicode |
+-----------+-------------+
| object | dict |
+-----------+-------------+
| integer | int |
+-----------+-------------+
| float | float |
+-----------+-------------+
| number | int/float |
+-----------+-------------+
| boolean | bool |
+-----------+-------------+
| dict | dict |
+-----------+-------------+
| array | list |
+-----------+-------------+
| null | NoneType |
+-----------+-------------+
| raw | object |
+-----------+-------------+
Because **type** directly controls the conversion behaviour, almost every schema needs to specify it. As a shorthand, a schema that consists of nothing but a **type** entry may be written simply as the **type** value itself.
typeOf
^^^^^^
Currently **type** may be omitted only when **typeOf** is declared. **typeOf** tells the convertor how to choose the schema according to the type of the data; real Python types or tuples of types can be used as keys (they are passed as the second argument to ``isinstance``).
default
^^^^^^^
The **default** field is only used inside a **typeOf** field; it gives the schema used for types that are not matched explicitly.
items
^^^^^
The **items** field only takes effect when **type** is array; it describes the schema applied to each element of the sequence.
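A minimal sketch based on the description above (the element-wise coercion is assumed to mirror the demo):
.. code:: py

    from schemaconvertor.convertor import convert_by_schema

    # every element of the array is converted using the "items" schema
    schema = {"type": "array", "items": {"type": "integer"}}
    converted = convert_by_schema(["1", "2", "3"], schema)
    # expected result (assumed): [1, 2, 3]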
properties
^^^^^^^^^^
The **properties** field only takes effect when **type** is dict or object; it specifies the schemas of the listed keys (keys that are not listed are not processed).
patternProperties
^^^^^^^^^^^^^^^^^
The **patternProperties** field only takes effect when **type** is dict or object; it specifies the schemas of keys that match the given regular expressions (matched with ``re.search``).
Additional notes
~~~~~~~~~~~~~~~~
1. Schemas are compiled lazily: a schema is compiled automatically the first time it is used for a conversion, so initialization cost is minimal.
2. Unless declared explicitly, a sub-schema inherits *version*, *description*, *encoding* and *decoderrors* from its parent schema.
3. **typeOf** understands inheritance, but lookups by the data's exact type are optimized.
4. When **typeOf** lists several types, do not use a non-hashable container such as ``list`` as the key.
5. For *object* data, ``ObjAsDictAdapter`` is used to wrap the data in a dict-like object before conversion.
|
schemaconvertor
|
/schemaconvertor-0.3.1.0.tar.gz/schemaconvertor-0.3.1.0/README.rst
|
README.rst
|
schemaconvertor
===============
**schemaconvertor** provides a way to convert objects using a schema. The schema specifies which parts of the object are serialized and the target type of each part; the result can then be serialized further, for example to JSON.
Install: ``pip install schemaconvertor``
Project: `github <https://github.com/MrLYC/schemaconvertor>`__
`pypi <https://pypi.python.org/pypi/schemaconvertor/>`__
Version: 0.3
Demo
----
Suppose we have a simple data type ``User``:
.. code:: py
from collections import namedtuple
User = namedtuple("User", ["name", "password", "age"])
An object can be converted by specifying a schema:
.. code:: py
schema = {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "integer"
}
}
}
user = User(name="lyc", password="schemaconvertor", age="24")
from schemaconvertor.convertor import convert_by_schema
print convert_by_schema(user, schema)
Output: > {'age': 24, 'name': 'lyc'}
More examples: `demo
0.3 <https://github.com/MrLYC/schemaconvertor/blob/master/schemaconvertor/tests/test_demo.py>`__
Reference
---------
Basic fields
~~~~~~~~~~~~
version
^^^^^^^
The **version** field identifies the schema version.
description
^^^^^^^^^^^
The **description** field gives a human-readable description of the schema.
encoding
^^^^^^^^
**encoding** specifies the character encoding of the schema's **string** fields; the default is *utf-8*.
decoderrors
^^^^^^^^^^^
**decoderrors** specifies how decoding failures of **string** fields are handled. It is passed as the second argument to ``str.decode``; the main options are *strict*, *ignore* and *replace*, and the default is ``strict``.
type
^^^^
The **type** field specifies the final type of the converted data. The main type mappings are shown in the table below:
+-----------+-------------+
| type | Python |
+===========+=============+
| string | unicode |
+-----------+-------------+
| object | dict |
+-----------+-------------+
| integer | int |
+-----------+-------------+
| float | float |
+-----------+-------------+
| number | int/float |
+-----------+-------------+
| boolean | bool |
+-----------+-------------+
| dict | dict |
+-----------+-------------+
| array | list |
+-----------+-------------+
| null | NoneType |
+-----------+-------------+
| raw | object |
+-----------+-------------+
Because **type** directly controls the conversion behaviour, almost every schema needs to specify it. As a shorthand, a schema that consists of nothing but a **type** entry may be written simply as the **type** value itself.
typeOf
^^^^^^
Currently **type** may be omitted only when **typeOf** is declared. **typeOf** tells the convertor how to choose the schema according to the type of the data; real Python types or tuples of types can be used as keys (they are passed as the second argument to ``isinstance``).
default
^^^^^^^
The **default** field is only used inside a **typeOf** field; it gives the schema used for types that are not matched explicitly.
items
^^^^^
The **items** field only takes effect when **type** is array; it describes the schema applied to each element of the sequence.
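A minimal sketch based on the description above (the element-wise coercion is assumed to mirror the demo):
.. code:: py

    from schemaconvertor.convertor import convert_by_schema

    # every element of the array is converted using the "items" schema
    schema = {"type": "array", "items": {"type": "integer"}}
    converted = convert_by_schema(["1", "2", "3"], schema)
    # expected result (assumed): [1, 2, 3]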
properties
^^^^^^^^^^
The **properties** field only takes effect when **type** is dict or object; it specifies the schemas of the listed keys (keys that are not listed are not processed).
patternProperties
^^^^^^^^^^^^^^^^^
The **patternProperties** field only takes effect when **type** is dict or object; it specifies the schemas of keys that match the given regular expressions (matched with ``re.search``).
Additional notes
~~~~~~~~~~~~~~~~
1. Schemas are compiled lazily: a schema is compiled automatically the first time it is used for a conversion, so initialization cost is minimal.
2. Unless declared explicitly, a sub-schema inherits *version*, *description*, *encoding* and *decoderrors* from its parent schema.
3. **typeOf** understands inheritance, but lookups by the data's exact type are optimized.
4. When **typeOf** lists several types, do not use a non-hashable container such as ``list`` as the key.
5. For *object* data, ``ObjAsDictAdapter`` is used to wrap the data in a dict-like object before conversion.
| 0.551091 | 0.55658 |
.. image:: https://img.shields.io/pypi/v/schemadict.svg?style=flat
:target: https://pypi.org/project/schemadict/
:alt: Latest PyPI version
.. image:: https://readthedocs.org/projects/schemadict/badge/?version=latest
:target: https://schemadict.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://img.shields.io/badge/license-Apache%202-blue.svg
:target: https://github.com/airinnova/schemadict/blob/master/LICENSE.txt
:alt: License
.. image:: https://travis-ci.org/airinnova/schemadict.svg?branch=master
:target: https://travis-ci.org/airinnova/schemadict
:alt: Build status
.. image:: https://codecov.io/gh/airinnova/schemadict/branch/master/graph/badge.svg
:target: https://codecov.io/gh/airinnova/schemadict
:alt: Coverage
|
.. image:: https://raw.githubusercontent.com/airinnova/schemadict/master/docs/source/_static/images/logo.png
:target: https://github.com/airinnova/schemadict/
:alt: logo
A *schemadict* is a regular Python dictionary which specifies the type and format of the values for some given key. To check whether a test dictionary conforms to the expected schema, *schemadict* provides the `validate()` method. If the test dictionary is ill-defined, an error is raised; otherwise `None` is returned.
Examples
========
**Basic usage**
.. code:: python
>>> from schemadict import schemadict
>>> schema = schemadict({
... 'name': {
... 'type': str,
... 'min_len': 3,
... 'max_len': 12,
... },
... 'age': {
... 'type': int,
... '>=': 0,
... '<': 150,
... },
... })
>>>
>>> testdict = {'name': 'Neil', 'age': 55}
>>> schema.validate(testdict)
>>>
>>> testdict = {'name': 'Neil', 'age': -12}
>>> schema.validate(testdict)
Traceback (most recent call last):
...
ValueError: 'age' too small: expected >= 0, but was -12
>>>
>>> testdict = {'name': 'Neil', 'age': '55'}
>>> schema.validate(testdict)
Traceback (most recent call last):
...
TypeError: unexpected type for 'age': expected <class 'int'>, but was <class 'str'>
>>>
**Nested schemadict**
It is possible to check individual items in a list. For instance, in the following example we check whether each item (of type ``str``) looks like a valid IPv4 address. What each item should look like can be specified with the ``item_schema`` keyword.
.. code:: python
>>> schema = schemadict({
... 'ip_addrs': {
... 'type': list,
... 'item_schema': {
... 'type': str,
... 'regex': r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$',
... },
... },
... })
>>>
>>>
>>> schema.validate({'ip_addrs': ['127.0.0.1', '192.168.1.1']}) # Valid
>>> schema.validate({'ip_addrs': ['127.0.0.1', '192.168.1.1', '1234.5678']}) # Last item invalid
Traceback (most recent call last):
...
ValueError: regex mismatch for 'ip_addrs': expected pattern '^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$', got '1234.5678'
>>>
Items in a ``list`` (or ``tuple``) may themselves be dictionaries which can be described with *schemadicts*. In this case, we use the keyword ``item_schemadict`` as illustrated in the following example.
.. code:: python
>>> schema_city = schemadict({
... 'name': {
... 'type': str
... },
... 'population': {
... 'type': int,
... '>=': 0,
... },
... })
>>>
>>> schema_country = schemadict({
... 'name': {'type': str},
... 'cities': {
... 'type': list,
... 'item_type': dict,
... 'item_schemadict': schema_city,
... },
... })
>>>
>>> test_country = {
... 'name': 'Neverland',
... 'cities': [
... {'name': 'Faketown', 'population': 3},
... {'name': 'Evergreen', 'population': True},
... ],
... }
>>>
>>> schema_country.validate(test_country)
Traceback (most recent call last):
...
TypeError: unexpected type for 'population': expected <class 'int'>, but was <class 'bool'>
>>>
**Custom validation functions**
Each *type* (``int``, ``bool``, ``str``, etc.) defines its own set of validation keywords and corresponding test functions. The dictionary ``STANDARD_VALIDATORS`` provided by the ``schemadict`` module contains the default validation functions for Python's built-in types. However, it is also possible to modify or extend this dictionary with custom validation functions.
.. code:: python
>>> from schemadict import schemadict, STANDARD_VALIDATORS
>>> # Add a custom validation function
>>> def is_divisible(key, value, comp_value, _):
... if value % comp_value != 0:
... raise ValueError(f"{key!r} is not divisible by {comp_value}")
...
...
...
>>>
>>> # Update the standard validator dictionary
>>> my_validators = STANDARD_VALIDATORS
>>> my_validators[int]['%'] = is_divisible
>>> # Register the updated validator dictionary in the new schemadict instance
>>> s = schemadict({'my_num': {'type': int, '%': 3}}, validators=my_validators)
>>> s.validate({'my_num': 33})
>>> s.validate({'my_num': 4})
Traceback (most recent call last):
...
ValueError: 'my_num' is not divisible by 3
>>>
It is also possible to define *custom types* and *custom test functions* as shown in the following example.
.. code:: python
>>> from schemadict import schemadict, STANDARD_VALIDATORS
>>> class MyOcean:
... has_dolphins = True
... has_plastic = False
...
>>>
>>> def has_dolphins(key, value, comp_value, _):
... if getattr(value, 'has_dolphins') is not comp_value:
... raise ValueError(f"{key!r} does not have dolphins")
...
>>>
>>> my_validators = STANDARD_VALIDATORS
>>> my_validators.update({MyOcean: {'has_dolphins': has_dolphins}})
>>>
>>> schema_ocean = schemadict(
... {'ocean': {'type': MyOcean, 'has_dolphins': True}},
... validators=my_validators,
... )
>>>
>>> ocean1 = MyOcean()
>>> schema_ocean.validate({'ocean': ocean1})
>>>
>>> ocean2 = MyOcean()
>>> ocean2.has_dolphins = False
>>> schema_ocean.validate({'ocean': ocean2})
Traceback (most recent call last):
...
ValueError: 'ocean' does not have dolphins
Full documentation: https://schemadict.readthedocs.io/
Features
========
What *schemadict* offers:
* Built-in support for Python's primitive types
* Specify *required* and *optional* keys
* Validate *nested* schemas
* Add custom validation functions to built-in types
* Add custom validation functions to custom types
* Support for Regex checks of strings
Features currently in development
* Metaschema validation
* Lazy validation and summary of all errors
* Allow schema variations: schema 1 OR schema 2
* Add support for validation of type `numbers.Number`
Installation
============
*Schemadict* is available on `PyPI <https://pypi.org/project/schemadict/>`_ and may simply be installed with
.. code::
pip install schemadict
Idea
====
*Schemadict* is loosely inspired by `JSON schema <https://json-schema.org/>`_ and `jsonschema <https://github.com/Julian/jsonschema>`_, a JSON schema validator for Python.
License
=======
**License:** Apache-2.0
|
schemadict
|
/schemadict-0.0.9.tar.gz/schemadict-0.0.9/README.rst
|
README.rst
|
.. image:: https://img.shields.io/pypi/v/schemadict.svg?style=flat
:target: https://pypi.org/project/schemadict/
:alt: Latest PyPI version
.. image:: https://readthedocs.org/projects/schemadict/badge/?version=latest
:target: https://schemadict.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://img.shields.io/badge/license-Apache%202-blue.svg
:target: https://github.com/airinnova/schemadict/blob/master/LICENSE.txt
:alt: License
.. image:: https://travis-ci.org/airinnova/schemadict.svg?branch=master
:target: https://travis-ci.org/airinnova/schemadict
:alt: Build status
.. image:: https://codecov.io/gh/airinnova/schemadict/branch/master/graph/badge.svg
:target: https://codecov.io/gh/airinnova/schemadict
:alt: Coverage
|
.. image:: https://raw.githubusercontent.com/airinnova/schemadict/master/docs/source/_static/images/logo.png
:target: https://github.com/airinnova/schemadict/
:alt: logo
A *schemadict* is a regular Python dictionary which specifies the type and format of the values for some given key. To check whether a test dictionary conforms to the expected schema, *schemadict* provides the `validate()` method. If the test dictionary is ill-defined, an error is raised; otherwise `None` is returned.
Examples
========
**Basic usage**
.. code:: python
>>> from schemadict import schemadict
>>> schema = schemadict({
... 'name': {
... 'type': str,
... 'min_len': 3,
... 'max_len': 12,
... },
... 'age': {
... 'type': int,
... '>=': 0,
... '<': 150,
... },
... })
>>>
>>> testdict = {'name': 'Neil', 'age': 55}
>>> schema.validate(testdict)
>>>
>>> testdict = {'name': 'Neil', 'age': -12}
>>> schema.validate(testdict)
Traceback (most recent call last):
...
ValueError: 'age' too small: expected >= 0, but was -12
>>>
>>> testdict = {'name': 'Neil', 'age': '55'}
>>> schema.validate(testdict)
Traceback (most recent call last):
...
TypeError: unexpected type for 'age': expected <class 'int'>, but was <class 'str'>
>>>
**Nested schemadict**
It is possible to check individual items in a list. For instance, in the following example we check whether each item (of type ``str``) looks like a valid IPv4 address. What each item should look like can be specified with the ``item_schema`` keyword.
.. code:: python
>>> schema = schemadict({
... 'ip_addrs': {
... 'type': list,
... 'item_schema': {
... 'type': str,
... 'regex': r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$',
... },
... },
... })
>>>
>>>
>>> schema.validate({'ip_addrs': ['127.0.0.1', '192.168.1.1']}) # Valid
>>> schema.validate({'ip_addrs': ['127.0.0.1', '192.168.1.1', '1234.5678']}) # Last item invalid
Traceback (most recent call last):
...
ValueError: regex mismatch for 'ip_addrs': expected pattern '^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$', got '1234.5678'
>>>
Items in a ``list`` (or ``tuple``) may themselves be dictionaries which can be described with *schemadicts*. In this case, we use the keyword ``item_schemadict`` as illustrated in the following example.
.. code:: python
>>> schema_city = schemadict({
... 'name': {
... 'type': str
... },
... 'population': {
... 'type': int,
... '>=': 0,
... },
... })
>>>
>>> schema_country = schemadict({
... 'name': {'type': str},
... 'cities': {
... 'type': list,
... 'item_type': dict,
... 'item_schemadict': schema_city,
... },
... })
>>>
>>> test_country = {
... 'name': 'Neverland',
... 'cities': [
... {'name': 'Faketown', 'population': 3},
... {'name': 'Evergreen', 'population': True},
... ],
... }
>>>
>>> schema_country.validate(test_country)
Traceback (most recent call last):
...
TypeError: unexpected type for 'population': expected <class 'int'>, but was <class 'bool'>
>>>
**Custom validation functions**
Each *type* (``int``, ``bool``, ``str``, etc.) defines its own set of validation keywords and corresponding test functions. The dictionary ``STANDARD_VALIDATORS`` provided by the ``schemadict`` module contains the default validation functions for Python's built-in types. However, it is also possible to modify or extend this dictionary with custom validation functions.
.. code:: python
>>> from schemadict import schemadict, STANDARD_VALIDATORS
>>> # Add a custom validation function
>>> def is_divisible(key, value, comp_value, _):
... if value % comp_value != 0:
... raise ValueError(f"{key!r} is not divisible by {comp_value}")
...
...
...
>>>
>>> # Update the standard validator dictionary
>>> my_validators = STANDARD_VALIDATORS
>>> my_validators[int]['%'] = is_divisible
>>> # Register the updated validator dictionary in the new schemadict instance
>>> s = schemadict({'my_num': {'type': int, '%': 3}}, validators=my_validators)
>>> s.validate({'my_num': 33})
>>> s.validate({'my_num': 4})
Traceback (most recent call last):
...
ValueError: 'my_num' is not divisible by 3
>>>
It is also possible to define *custom types* and *custom test functions* as shown in the following example.
.. code:: python
>>> from schemadict import schemadict, STANDARD_VALIDATORS
>>> class MyOcean:
... has_dolphins = True
... has_plastic = False
...
>>>
>>> def has_dolphins(key, value, comp_value, _):
... if getattr(value, 'has_dolphins') is not comp_value:
... raise ValueError(f"{key!r} does not have dolphins")
...
>>>
>>> my_validators = STANDARD_VALIDATORS
>>> my_validators.update({MyOcean: {'has_dolphins': has_dolphins}})
>>>
>>> schema_ocean = schemadict(
... {'ocean': {'type': MyOcean, 'has_dolphins': True}},
... validators=my_validators,
... )
>>>
>>> ocean1 = MyOcean()
>>> schema_ocean.validate({'ocean': ocean1})
>>>
>>> ocean2 = MyOcean()
>>> ocean2.has_dolphins = False
>>> schema_ocean.validate({'ocean': ocean2})
Traceback (most recent call last):
...
ValueError: 'ocean' does not have dolphins
Full documentation: https://schemadict.readthedocs.io/
Features
========
What *schemadict* offers:
* Built-in support for Python's primitive types
* Specify *required* and *optional* keys
* Validate *nested* schemas
* Add custom validation functions to built-in types
* Add custom validation functions to custom types
* Support for Regex checks of strings
Features currently in development
* Metaschema validation
* Lazy validation and summary of all errors
* Allow schema variations: schema 1 OR schema 2
* Add support for validation of type `numbers.Number`
Installation
============
*Schemadict* is available on `PyPI <https://pypi.org/project/schemadict/>`_ and may simply be installed with
.. code::
pip install schemadict
Idea
====
*Schemadict* is loosely inspired by `JSON schema <https://json-schema.org/>`_ and `jsonschema <https://github.com/Julian/jsonschema>`_, a JSON schema validator for Python.
License
=======
**License:** Apache-2.0
| 0.954658 | 0.652961 |
# schemadiff
schemadiff is a niche package designed for situations where a large number of files on a filesystem are expected to have identical schemas, but they don't. This can present a challenge when working with distributed computing systems like `Apache Spark` or `Google BigQuery`, as unexpected schema differences can disrupt data loading and processing.
Consider a scenario where you are processing thousands of files, and a subset of them have schemas that are almost identical but not completely matching. This can lead to errors such as:
- BigQuery: `Error while reading data, error message: Parquet column '<COLUMN_NAME>' has type INT32 which does not match the target cpp_type DOUBLE File: gs://bucket/file.parquet`
- Spark: `Error: java.lang.UnsupportedOperationException: org.apache.parquet.column.values.dictionary.PlainValuesDictionary$PlainDoubleDictionary`
schemadiff addresses these issues by efficiently identifying the files with schema inconsistencies through reading file metadata.
## Installation
Install the package with pip:
```bash
pip install schemadiffed # schemadiff taken :p
```
## Usage
The package can be used as a Python library or as a command-line tool.
### Python Library
Here's an example of using schemadiff to group files by their schema:
```python
import os
from schemadiff import compare_schemas
os.environ['GOOGLE_CLOUD_CREDENTIALS'] = 'key.json'
grouped_files = compare_schemas('path/to/parquet_files', report_path='/desired/path/to/report.json')
```
In this example, `compare_schemas` groups the Parquet files in the directory `path/to/parquet_files` by their schema. It saves the results to `report.json` and also returns the grouped files as a list for potential downstream use.
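Because the default `return_type` is `'as_list'`, the returned value is a list of file groups, where each group shares one schema. A small, hypothetical follow-up showing how the returned groups might be inspected:
```python
# Inspect the groups returned by compare_schemas (paths printed are whatever was scanned).
for i, group in enumerate(grouped_files, start=1):
    print(f"Schema group {i}: {len(group)} file(s)")
    for path in group:
        print(f"  {path}")
```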
### Command-Line Interface
schemadiff can also be used as a command-line tool. After installation, the command `compare-schemas` is available in your shell:
```bash
python schemadiff --dir_path 'gs://<bucket>/yellow/*_2020*.parquet' --fs_type 'gcs' --report_path 'report.json' --return_type 'as_list'
```
## Features
- Efficient processing by reading the metadata of Parquet files.
- Supports local, GCS, S3 filesystems (you must be authenticated to your cloud service first).
- Supports wildcard characters for flexible file selection.
|
schemadiffed
|
/schemadiffed-0.1.0.1.tar.gz/schemadiffed-0.1.0.1/README.md
|
README.md
|
pip install schemadiffed # schemadiff taken :p
import os
from schemadiff import compare_schemas
os.environ['GOOGLE_CLOUD_CREDENTIALS'] = 'key.json'
grouped_files = compare_schemas('path/to/parquet_files', report_path='/desired/path/to/report.json')
python schemadiff --dir_path 'gs://<bucket>/yellow/*_2020*.parquet' --fs_type 'gcs' --report_path 'report.json' --return_type 'as_list'
| 0.324021 | 0.871092 |
from collections import defaultdict
from typing import Union
import pyarrow.parquet as pq
from schemadiff.filesystem import FileSystem
class SchemaExtractor:
"""A class for extracting schema from Parquet files."""
@staticmethod
def get_schema_from_parquet(parquet_file: pq.ParquetFile) -> list[tuple[str, str]]:
"""Returns a sorted list of tuples, where each tuple represents a field in the
schema.
Args:
parquet_file (pq.ParquetFile): The Parquet file to extract the schema from.
Returns:
list[Tuple[str, str]]: A sorted list of tuples, where each tuple represents a
field in the schema.
"""
arrow_schema = parquet_file.schema_arrow
return sorted((field.name, str(field.type)) for field in arrow_schema)
class SchemaComparer:
"""A class for comparing schemas of Parquet files."""
@staticmethod
def group_files_by_schema(
file_handler: FileSystem, dir_path: str, return_type: str = "as_dict"
) -> Union[dict[str, list[str]], list[list[str]]]:
"""Returns a dictionary or list that groups files by their schema.
Args:
file_handler (FileSystem): The file system handler.
dir_path (str): The directory path.
return_type (str, optional): The return type. Can be 'as_dict' or 'as_list'.
Defaults to 'as_dict'.
Returns:
Union[dict[str, list[str]], list[list[str]]]: A dictionary or list that groups
files by their schema.
"""
files = file_handler.list_files(dir_path)
schema_to_files = defaultdict(list)
for file in files:
parquet_file = file_handler.get_parquet_file(file)
schema = SchemaExtractor.get_schema_from_parquet(parquet_file)
schema_to_files[str(schema)].append(file)
if return_type == "as_list":
return list(schema_to_files.values())
else:
return dict(schema_to_files)
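# --- Hypothetical usage sketch (not part of the original module) ---
# Group local Parquet files by schema using the LocalFileSystem handler from
# schemadiff.filesystem; the "data/" directory below is an assumption.
if __name__ == "__main__":
    from schemadiff.filesystem import LocalFileSystem

    groups = SchemaComparer.group_files_by_schema(
        LocalFileSystem(), "data/", return_type="as_list"
    )
    for group in groups:
        print(group)  # each group is a list of file paths sharing one schema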
|
schemadiffed
|
/schemadiffed-0.1.0.1.tar.gz/schemadiffed-0.1.0.1/schemadiff/schema_comparer.py
|
schema_comparer.py
|
from collections import defaultdict
from typing import Union
import pyarrow.parquet as pq
from schemadiff.filesystem import FileSystem
class SchemaExtractor:
"""A class for extracting schema from Parquet files."""
@staticmethod
def get_schema_from_parquet(parquet_file: pq.ParquetFile) -> list[tuple[str, str]]:
"""Returns a sorted list of tuples, where each tuple represents a field in the
schema.
Args:
parquet_file (pq.ParquetFile): The Parquet file to extract the schema from.
Returns:
list[Tuple[str, str]]: A sorted list of tuples, where each tuple represents a
field in the schema.
"""
arrow_schema = parquet_file.schema_arrow
return sorted((field.name, str(field.type)) for field in arrow_schema)
class SchemaComparer:
"""A class for comparing schemas of Parquet files."""
@staticmethod
def group_files_by_schema(
file_handler: FileSystem, dir_path: str, return_type: str = "as_dict"
) -> Union[dict[str, list[str]], list[list[str]]]:
"""Returns a dictionary or list that groups files by their schema.
Args:
file_handler (FileSystem): The file system handler.
dir_path (str): The directory path.
return_type (str, optional): The return type. Can be 'as_dict' or 'as_list'.
Defaults to 'as_dict'.
Returns:
Union[dict[str, list[str]], list[list[str]]]: A dictionary or list that groups
files by their schema.
"""
files = file_handler.list_files(dir_path)
schema_to_files = defaultdict(list)
for file in files:
parquet_file = file_handler.get_parquet_file(file)
schema = SchemaExtractor.get_schema_from_parquet(parquet_file)
schema_to_files[str(schema)].append(file)
if return_type == "as_list":
return list(schema_to_files.values())
else:
return dict(schema_to_files)
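# --- Hypothetical usage sketch (not part of the original module) ---
# Group local Parquet files by schema using the LocalFileSystem handler from
# schemadiff.filesystem; the "data/" directory below is an assumption.
if __name__ == "__main__":
    from schemadiff.filesystem import LocalFileSystem

    groups = SchemaComparer.group_files_by_schema(
        LocalFileSystem(), "data/", return_type="as_list"
    )
    for group in groups:
        print(group)  # each group is a list of file paths sharing one schema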
| 0.935236 | 0.320609 |
import importlib.util
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional, Union
import pyarrow.parquet as pq
from schemadiff.m_exceptions import FileSystemError
class FileSystem(ABC):
"""An abstract base class for file system interactions.
This class defines the interface for a file system, including methods to
list files and retrieve Parquet files.
"""
@abstractmethod
def list_files(self, dir_path: Union[str, Path]) -> list[str]:
"""Abstract method that should return a list of file paths."""
pass
@abstractmethod
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Abstract method that should return a ParquetFile object."""
pass
class LocalFileSystem(FileSystem):
"""A class to interact with a local filesystem. Inherits from the abstract base class
FileSystem.
Methods:
list_files(dir_path: str) -> list[str]:
Returns a list of Parquet file paths in the directory.
get_parquet_file(file_path: str) -> pq.ParquetFile:
Returns a ParquetFile object.
"""
def list_files(self, dir_path: Path) -> list[str]:
"""Lists all Parquet files in the provided directory."""
dir_path = Path(dir_path)
if "*" in dir_path.name: # If the last part of the path contains a wildcard
file_pattern = dir_path.name
dir_path = dir_path.parent
else:
file_pattern = "*.parquet"
if not dir_path.is_dir():
raise FileSystemError(f"{dir_path} is not a directory.")
return sorted(str(path) for path in dir_path.glob(file_pattern))
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Loads a Parquet file from the local filesystem."""
file_path_obj = Path(file_path)
if not file_path_obj.is_file():
raise FileSystemError(f"{file_path} is not a file.")
if file_path_obj.suffix != ".parquet":
raise FileSystemError(f"{file_path} is not a Parquet file.")
try:
return pq.ParquetFile(file_path)
except Exception as e:
raise FileSystemError(
f"Error opening {file_path} as a Parquet file: {str(e)}"
)
class S3FileSystem(FileSystem):
"""A class to interact with Amazon S3. Inherits from the abstract base class
FileSystem."""
def __init__(self, **kwargs):
if importlib.util.find_spec("s3fs") is None:
raise ImportError(
"The s3fs library is required to use the S3FileSystem class."
)
import s3fs
self.fs = s3fs.S3FileSystem(**kwargs)  # s3fs exposes S3FileSystem; forward any extra options
def list_files(self, dir_path: str) -> list[str]:
"""Lists all files in the provided S3 directory."""
return ["s3://" + path for path in sorted(self.fs.glob(dir_path))]
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Loads a Parquet file from Amazon S3."""
try:
with self.fs.open(file_path) as f:
return pq.ParquetFile(f)
except Exception as e:
raise FileSystemError(
f"Error opening {file_path} as a Parquet file: {str(e)}"
)
class GCSFileSystem(FileSystem):
"""A class to interact with Google Cloud Storage. Inherits from the abstract base
class FileSystem."""
def __init__(self):
if importlib.util.find_spec("gcsfs") is None:
raise ImportError(
"The gcsfs library is required to use the GCSFileSystem class."
)
import gcsfs
self.fs = gcsfs.GCSFileSystem()
def list_files(self, dir_path: str) -> list[str]:
"""Lists all files in the provided GCS directory."""
return ["gs://" + path for path in sorted(self.fs.glob(dir_path))] # type: ignore
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Loads a Parquet file from Google Cloud Storage."""
try:
with self.fs.open(file_path) as f:
return pq.ParquetFile(f)
except Exception as e:
raise FileSystemError(
f"Error opening {file_path} as a Parquet file: {str(e)}"
)
class FileSystemFactory:
"""A factory class for creating FileSystem instances.
Methods:
create_filesystem(type: Optional[str] = None, path: Optional[str] = None) -> Union[LocalFileSystem, GCSFileSystem, S3FileSystem]:
Returns a FileSystem object of the specified type.
"""
@staticmethod
def create_filesystem(
type: Optional[str] = None, path: Optional[str] = None
) -> Union[LocalFileSystem, GCSFileSystem, S3FileSystem]:
"""
Returns a FileSystem object of the specified type.
Args:
type (str, optional): The type of filesystem. Can be 'local', 'gcs', or 's3'.
path (str, optional): The path from which to infer the filesystem type if no type is provided.
Returns:
Union[LocalFileSystem, GCSFileSystem, S3FileSystem]: A FileSystem object of the specified type.
Raises:
ValueError: If an unsupported filesystem type is provided.
"""
if type is None:
if path.startswith("gs://"): # type: ignore
type = "gcs"
elif path.startswith("s3://"): # type: ignore
type = "s3"
else:
type = "local"
if type == "local":
return LocalFileSystem()
elif type == "gcs":
return GCSFileSystem()
elif type == "s3":
return S3FileSystem()
else:
raise ValueError(f"Unsupported filesystem type: {type}")
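# --- Hypothetical usage sketch (not part of the original module) ---
# Infer the filesystem type from the path prefix (no prefix means local) and
# list the matching Parquet files; the glob below is an assumption.
if __name__ == "__main__":
    fs = FileSystemFactory.create_filesystem(path="data/*.parquet")  # LocalFileSystem
    print(fs.list_files("data/*.parquet"))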
|
schemadiffed
|
/schemadiffed-0.1.0.1.tar.gz/schemadiffed-0.1.0.1/schemadiff/filesystem.py
|
filesystem.py
|
import importlib.util
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional, Union
import pyarrow.parquet as pq
from schemadiff.m_exceptions import FileSystemError
class FileSystem(ABC):
"""An abstract base class for file system interactions.
This class defines the interface for a file system, including methods to
list files and retrieve Parquet files.
"""
@abstractmethod
def list_files(self, dir_path: Union[str, Path]) -> list[str]:
"""Abstract method that should return a list of file paths."""
pass
@abstractmethod
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Abstract method that should return a ParquetFile object."""
pass
class LocalFileSystem(FileSystem):
"""A class to interact with a local filesystem. Inherits from the abstract base class
FileSystem.
Methods:
list_files(dir_path: str) -> list[str]:
Returns a list of Parquet file paths in the directory.
get_parquet_file(file_path: str) -> pq.ParquetFile:
Returns a ParquetFile object.
"""
def list_files(self, dir_path: Path) -> list[str]:
"""Lists all Parquet files in the provided directory."""
dir_path = Path(dir_path)
if "*" in dir_path.name: # If the last part of the path contains a wildcard
file_pattern = dir_path.name
dir_path = dir_path.parent
else:
file_pattern = "*.parquet"
if not dir_path.is_dir():
raise FileSystemError(f"{dir_path} is not a directory.")
return sorted(str(path) for path in dir_path.glob(file_pattern))
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Loads a Parquet file from the local filesystem."""
file_path_obj = Path(file_path)
if not file_path_obj.is_file():
raise FileSystemError(f"{file_path} is not a file.")
if file_path_obj.suffix != ".parquet":
raise FileSystemError(f"{file_path} is not a Parquet file.")
try:
return pq.ParquetFile(file_path)
except Exception as e:
raise FileSystemError(
f"Error opening {file_path} as a Parquet file: {str(e)}"
)
class S3FileSystem(FileSystem):
"""A class to interact with Amazon S3. Inherits from the abstract base class
FileSystem."""
def __init__(self, **kwargs):
if importlib.util.find_spec("s3fs") is None:
raise ImportError(
"The s3fs library is required to use the S3FileSystem class."
)
import s3fs
self.fs = s3fs.S3FileSystem(**kwargs)  # s3fs exposes S3FileSystem; forward any extra options
def list_files(self, dir_path: str) -> list[str]:
"""Lists all files in the provided S3 directory."""
return ["s3://" + path for path in sorted(self.fs.glob(dir_path))]
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Loads a Parquet file from Amazon S3."""
try:
with self.fs.open(file_path) as f:
return pq.ParquetFile(f)
except Exception as e:
raise FileSystemError(
f"Error opening {file_path} as a Parquet file: {str(e)}"
)
class GCSFileSystem(FileSystem):
"""A class to interact with Google Cloud Storage. Inherits from the abstract base
class FileSystem."""
def __init__(self):
if importlib.util.find_spec("gcsfs") is None:
raise ImportError(
"The gcsfs library is required to use the GCSFileSystem class."
)
import gcsfs
self.fs = gcsfs.GCSFileSystem()
def list_files(self, dir_path: str) -> list[str]:
"""Lists all files in the provided GCS directory."""
return ["gs://" + path for path in sorted(self.fs.glob(dir_path))] # type: ignore
def get_parquet_file(self, file_path: str) -> pq.ParquetFile:
"""Loads a Parquet file from Google Cloud Storage."""
try:
with self.fs.open(file_path) as f:
return pq.ParquetFile(f)
except Exception as e:
raise FileSystemError(
f"Error opening {file_path} as a Parquet file: {str(e)}"
)
class FileSystemFactory:
"""A factory class for creating FileSystem instances.
Methods:
create_filesystem(type: Optional[str] = None, path: Optional[str] = None) -> Union[LocalFileSystem, GCSFileSystem, S3FileSystem]:
Returns a FileSystem object of the specified type.
"""
@staticmethod
def create_filesystem(
type: Optional[str] = None, path: Optional[str] = None
) -> Union[LocalFileSystem, GCSFileSystem, S3FileSystem]:
"""
Returns a FileSystem object of the specified type.
Args:
type (str, optional): The type of filesystem. Can be 'local', 'gcs', or 's3'.
path (str, optional): The path from which to infer the filesystem type if no type is provided.
Returns:
Union[LocalFileSystem, GCSFileSystem, S3FileSystem]: A FileSystem object of the specified type.
Raises:
ValueError: If an unsupported filesystem type is provided.
"""
if type is None:
if path.startswith("gs://"): # type: ignore
type = "gcs"
elif path.startswith("s3://"): # type: ignore
type = "s3"
else:
type = "local"
if type == "local":
return LocalFileSystem()
elif type == "gcs":
return GCSFileSystem()
elif type == "s3":
return S3FileSystem()
else:
raise ValueError(f"Unsupported filesystem type: {type}")
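# --- Hypothetical usage sketch (not part of the original module) ---
# Infer the filesystem type from the path prefix (no prefix means local) and
# list the matching Parquet files; the glob below is an assumption.
if __name__ == "__main__":
    fs = FileSystemFactory.create_filesystem(path="data/*.parquet")  # LocalFileSystem
    print(fs.list_files("data/*.parquet"))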
| 0.889075 | 0.34392 |
from typing import Optional, Union
from schemadiff.filesystem import FileSystemFactory
from schemadiff.report_generator import ReportGenerator
from schemadiff.schema_comparer import SchemaComparer, SchemaExtractor
def compare_schemas(
dir_path: str,
fs_type: Optional[str] = None,
report_path: Union[str, None] = None,
return_type: str = "as_list",
) -> Union[dict[str, list[str]], list[list[str]]]:
"""Compares schemas of Parquet files in a directory and optionally generates a report.
Args:
dir_path (str): The directory path.
fs_type (str, optional): The type of filesystem. Can be 'local', 'gcs', or 's3'.
Defaults to 'local'.
report_path (Union[str, None], optional): The file path where the report will be
saved. If None, no report is generated. Defaults to None.
return_type (str, optional): The return type. Can be 'as_dict' or 'as_list'.
Defaults to 'as_list'.
Returns:
Union[dict[str, list[str]], list[list[str]]]: A dictionary or list that groups
files by their schema.
"""
fs = FileSystemFactory.create_filesystem(fs_type, dir_path)
grouped_files = SchemaComparer.group_files_by_schema(fs, dir_path, return_type)
if report_path is not None:
all_schemas = [
SchemaExtractor.get_schema_from_parquet(fs.get_parquet_file(file_group[0]))
for file_group in grouped_files
]
# Finding differences between all schemas
common_schema = set(all_schemas[0])
for schema in all_schemas[1:]:
common_schema.intersection_update(schema)
differences = [list(set(schema) - common_schema) for schema in all_schemas]
schema_to_files = {
str(difference): file_group
for difference, file_group in zip(differences, grouped_files)
}
report = ReportGenerator.generate_report(schema_to_files) # type: ignore
ReportGenerator.save_report(report, report_path)
return grouped_files
|
schemadiffed
|
/schemadiffed-0.1.0.1.tar.gz/schemadiffed-0.1.0.1/schemadiff/__init__.py
|
__init__.py
|
from typing import Optional, Union
from schemadiff.filesystem import FileSystemFactory
from schemadiff.report_generator import ReportGenerator
from schemadiff.schema_comparer import SchemaComparer, SchemaExtractor
def compare_schemas(
dir_path: str,
fs_type: Optional[str] = None,
report_path: Union[str, None] = None,
return_type: str = "as_list",
) -> Union[dict[str, list[str]], list[list[str]]]:
"""Compares schemas of Parquet files in a directory and optionally generates a report.
Args:
dir_path (str): The directory path.
fs_type (str, optional): The type of filesystem. Can be 'local', 'gcs', or 's3'.
Defaults to 'local'.
report_path (Union[str, None], optional): The file path where the report will be
saved. If None, no report is generated. Defaults to None.
return_type (str, optional): The return type. Can be 'as_dict' or 'as_list'.
Defaults to 'as_list'.
Returns:
Union[dict[str, list[str]], list[list[str]]]: A dictionary or list that groups
files by their schema.
"""
fs = FileSystemFactory.create_filesystem(fs_type, dir_path)
grouped_files = SchemaComparer.group_files_by_schema(fs, dir_path, return_type)
if report_path is not None:
all_schemas = [
SchemaExtractor.get_schema_from_parquet(fs.get_parquet_file(file_group[0]))
for file_group in grouped_files
]
# Finding differences between all schemas
common_schema = set(all_schemas[0])
for schema in all_schemas[1:]:
common_schema.intersection_update(schema)
differences = [list(set(schema) - common_schema) for schema in all_schemas]
schema_to_files = {
str(difference): file_group
for difference, file_group in zip(differences, grouped_files)
}
report = ReportGenerator.generate_report(schema_to_files) # type: ignore
ReportGenerator.save_report(report, report_path)
return grouped_files
| 0.916507 | 0.257042 |
# ipython_magic_sqlalchemy_schemadisplay
Magic for [sqlalchemy_schemadisplay](https://github.com/fschulze/sqlalchemy_schemadisplay)
[](https://mybinder.org/v2/gh/innovationOUtside/ipython_magic_sqlalchemy_schemadisplay/master?filepath=notebooks/SchemaDemo.ipynb)
Magic originally created for the Open University module [TM351 Data Management and Analysis](http://www.open.ac.uk/courses/modules/tm351). This magic complements the [`ipython-sql`](https://github.com/catherinedevlin/ipython-sql), which provides magics for connecting to a SQL database, with a magic that uses [sqlalchemy_schemadisplay](https://github.com/fschulze/sqlalchemy_schemadisplay) to generate ERDs over a SQL database. *The code for generating the ER diagram can be found in the third party `sqlalchemy_schemadisplay` package.*
At the moment, the schema display and `ipython-sql` magics are independent, but they really should be combined into a single package.
## Usage
Install from PyPi: `pip install schemadisplay-magic`
Install from this repo:
`pip install git+https://github.com/innovationOUtside/ipython_magic_sqlalchemy_schemadisplay.git`
Note there are several other dependencies:
- Python: see *requirements.txt*
- O/S: see *apt.txt*
Set up a database. For example, load the SQL magic:
```python
%load_ext sql
```
Create a database connection string — we can use a SQLite database for demo purposes — and connect the SQL magic to the database:
```
DB_CONNECTION = 'sqlite:///./test.db'
%sql $DB_CONNECTION
```
Populate the database with a couple of foreign key related tables:
```
%%sql
DROP TABLE IF EXISTS doctor;
CREATE TABLE doctor (
doctor_id CHAR(4),
doctor_name VARCHAR(20),
PRIMARY KEY (doctor_id)
);
DROP TABLE IF EXISTS patient;
CREATE TABLE patient (
patient_id CHAR(4),
patient_name VARCHAR(20),
date_of_birth DATE,
gender CHAR(6),
height_cm DECIMAL(4,1),
weight_kg DECIMAL(4,1),
doctor_id CHAR(4),
PRIMARY KEY (patient_id),
FOREIGN KEY (doctor_id) REFERENCES doctor
);
```
Load the schema display magic, and render the schema from the connected database:
```python
%load_ext schemadisplay_magic
%schema --connection_string $DB_CONNECTION
```
<img src='example_erd.png' width=500/>
At the moment, the database connection string needs to be provided to the schema magic for each diagram. [TO DO - fix this to use a single persistent connection for the life of the notebook session, once connected.]
## Using the Magic in a Teaching and Learning Context
The magic was developed to support teaching and learning around the topic of *relational databases*. Students were working with a PostgreSQL database, creating, editing and deleting tables, and creating foreign key relationships between tables. The magic provided an easy way to visualise the current state of the tables available in the database, and any foreign key relationships between them.
In this way, students could run database and table modifying statements in a notebook. A single line magic invocation could then be used to generate a visual representation of the current state of the database to check that their intended changes had worked correctly.
|
schemadisplay-magic
|
/schemadisplay-magic-0.0.6.tar.gz/schemadisplay-magic-0.0.6/README.md
|
README.md
|
%load_ext sql
DB_CONNECTION = 'sqlite:///./test.db'
%sql $DB_CONNECTION
%%sql
DROP TABLE IF EXISTS doctor;
CREATE TABLE doctor (
doctor_id CHAR(4),
doctor_name VARCHAR(20),
PRIMARY KEY (doctor_id)
);
DROP TABLE IF EXISTS patient;
CREATE TABLE patient (
patient_id CHAR(4),
patient_name VARCHAR(20),
date_of_birth DATE,
gender CHAR(6),
height_cm DECIMAL(4,1),
weight_kg DECIMAL(4,1),
doctor_id CHAR(4),
PRIMARY KEY (patient_id),
FOREIGN KEY (doctor_id) REFERENCES doctor
);
%load_ext schemadisplay_magic
%schema --connection_string $DB_CONNECTION
| 0.221687 | 0.833392 |
[](https://travis-ci.org/jorgecarleitao/schemaflow)
[](https://coveralls.io/github/jorgecarleitao/schemaflow)
[](https://schemaflow.readthedocs.io/en/latest/?badge=latest)
# SchemaFlow
This is a package for writing data pipelines for data science systematically in Python.
Thanks for checking it out.
Check out the very comprehensive documentation [here](https://schemaflow.readthedocs.io/en/latest/).
## The problem that this package solves
A major challenge in creating a robust data pipeline is guaranteeing interoperability between
pipes: how do we guarantee that the pipe that someone wrote is compatible
with others' pipe *without* running the whole pipeline multiple times until we get it right?
## The solution that this package adopts
This package declares an API to define a stateful data transformation that gives
the developer the opportunity to declare what comes in, what comes out, and what states are modified
on each pipe and therefore the whole pipeline. Check out
[`tests/test_pipeline.py`](https://github.com/jorgecarleitao/schemaflow/blob/master/tests/test_pipeline.py) or
[`examples/end_to_end_kaggle.py`](https://github.com/jorgecarleitao/schemaflow/blob/master/examples/end_to_end_kaggle.py)
## Install
pip install schemaflow
or, install the latest (recommended for now):
git clone https://github.com/jorgecarleitao/schemaflow
cd schemaflow && pip install -e .
## Run examples
We provide one example that demonstrates the usage of SchemaFlow's API
on developing an end-to-end pipeline applied to
[one of Kaggle's exercises](https://www.kaggle.com/c/house-prices-advanced-regression-techniques).
To run it, download the data in that exercise to `examples/all/` and run
pip install -r examples/requirements.txt
python examples/end_to_end_kaggle.py
You should see some prints to the console as well as the generation of 3 files at
`examples/`: two plots and one `submission.txt`.
## Run tests
pip install -r tests/requirements.txt
python -m unittest discover
## Build documentation
pip install -r docs/requirements.txt
cd docs && make html && cd ..
open docs/build/html/index.html
|
schemaflow
|
/schemaflow-0.2.0.tar.gz/schemaflow-0.2.0/README.md
|
README.md
|
[](https://travis-ci.org/jorgecarleitao/schemaflow)
[](https://coveralls.io/github/jorgecarleitao/schemaflow)
[](https://schemaflow.readthedocs.io/en/latest/?badge=latest)
# SchemaFlow
This is a package for writing data pipelines for data science systematically in Python.
Thanks for checking it out.
Check out the very comprehensive documentation [here](https://schemaflow.readthedocs.io/en/latest/).
## The problem that this package solves
A major challenge in creating a robust data pipeline is guaranteeing interoperability between
pipes: how do we guarantee that the pipe that someone wrote is compatible
with others' pipe *without* running the whole pipeline multiple times until we get it right?
## The solution that this package adopts
This package declares an API to define a stateful data transformation that gives
the developer the opportunity to declare what comes in, what comes out, and what states are modified
on each pipe and therefore the whole pipeline. Check out
[`tests/test_pipeline.py`](https://github.com/jorgecarleitao/schemaflow/blob/master/tests/test_pipeline.py) or
[`examples/end_to_end_kaggle.py`](https://github.com/jorgecarleitao/schemaflow/blob/master/examples/end_to_end_kaggle.py)
## Install
pip install schemaflow
or, install the latest (recommended for now):
git clone https://github.com/jorgecarleitao/schemaflow
cd schemaflow && pip install -e .
## Run examples
We provide one example that demonstrates the usage of SchemaFlow's API
on developing an end-to-end pipeline applied to
[one of Kaggle's exercises](https://www.kaggle.com/c/house-prices-advanced-regression-techniques).
To run it, download the data in that exercise to `examples/all/` and run
pip install -r examples/requirements.txt
python examples/end_to_end_kaggle.py
You should see some prints to the console as well as the generation of 3 files at
`examples/`: two plots and one `submission.txt`.
## Run tests
pip install -r tests/requirements.txt
python -m unittest discover
## Build documentation
pip install -r docs/requirements.txt
cd docs && make html && cd ..
open docs/build/html/index.html
| 0.809841 | 0.801237 |
# SchemaGen
Schema Gen is a simple CLI tool that generates Python GraphQL implementations(using Graphene) from a Graphql Schema file.
# Installation
```shell
pip install schemagen
```
# Usage
Here's the content of a sample input file (we will call it *test.graphql*):
```graphql
type User {
id: ID!
username: String
first_name: String
last_name: String
full_name: String
name: String
}
```
Now let's use SchemaGen to generate python code from it.
As a CLI tool:
```shell
schemagen parse test.graphql -o test.py
```
As a Python Package:
```python
from schemagen import SchemaGen
gen = SchemaGen(
input_file='test.graphql',
output_file='test.py'
)
# parse input file
gen()
```
Output(*test.py*):
```python
# This file was generated by CodegenTool
from graphene import *
class User(ObjectType):
id = Field(ID, required=True)
username = Field(String)
first_name = Field(String)
last_name = Field(String)
full_name = Field(String)
name = Field(String)
```
# Notes
Here are some things you should know about using SchemaGen:
* SchemaGen is not guaranteed to catch errors in your GraphQL schema file.
SchemaGen will only catch a very small percentage of errors that might occur in defining a GraphQL schema.
It is the developer's responsibility to ensure the GraphQL schema file is error-free.
* SchemaGen will not install the graphene package on your local machine; however, it will import it in the generated Python file.
You can easily install the package by running:
```shell
pip install graphene
```
* GraphQL type declarations in your schema file **must be ordered**.
Because of the way Python and SchemaGen work, you cannot use a GraphQL type
before declaring it. For example, the following graphql schema definition would be invalid because we are using the **Url** scalar in our **User** type before declaring it:
```graphql
type User {
id: ID!
username: String
avatar_url: Url
}
scalar Url
```
The correct version of the above code is:
```graphql
scalar Url
type User {
id: ID!
username: String
avatar_url: Url
}
```
* Using a GraphQL SDL keyword as an object field name in your schema will throw an error.
For example, doing this:
```graphql
enum UserType {
Example
}
type User{
name: String
type: UserType
}
```
will throw an error.
Do this instead:
```graphql
enum UserType {
Example
}
type User{
name: String
user_type: UserType
}
```
I plan to fix the last two issues stated above in the future. Pull requests are welcome!
|
schemagen
|
/schemagen-1.0.5.tar.gz/schemagen-1.0.5/README.md
|
README.md
|
pip install schemagen
type User {
id: ID!
username: String
first_name: String
last_name: String
full_name: String
name: String
}
schemagen parse test.graphql -o test.py
from schemagen import SchemaGen
gen = SchemaGen(
input_file='test.graphql',
output_file='test.py'
)
# parse input file
gen()
# This file was generated by CodegenTool
from graphene import *
class User(ObjectType):
id = Field(ID, required=True)
username = Field(String)
first_name = Field(String)
last_name = Field(String)
full_name = Field(String)
name = Field(String)
| 0.500488 | 0.910346 |
=========================
Schemagic / Schemagic.web
=========================
.. image:: https://img.shields.io/badge/pypi-v0.9.1-blue.svg
:target: https://pypi.python.org/pypi/schemagic
.. image:: https://img.shields.io/badge/ReadTheDocs-latest-red.svg
:target: http://schemagic.readthedocs.io/en/latest/schemagic.html
.. image:: https://travis-ci.org/Mechrophile/schemagic.svg?branch=master
:target: https://travis-ci.org/Mechrophile/schemagic/
Remove the Guesswork from Data Processing
=========================================
Schemagic is a rather utilitarian re-imagining of the wonderful and powerful clojure library `Schema <https://github.com/plumatic/schema>`_!
Schemagic.web is what programmers do when they hate web programming, but want to make their programs accessible to the web.
Installation
------------
It's a wheel on PyPI, and it's Python 2 and 3 compatible.
To install Schemagic, simply:
.. code-block:: bash
$ pip install schemagic
What is schemagic?
------------------
One of the difficulties with large-scale, multi-team Python efforts is the overhead of understanding the kind of data
(e.g., list of strings, nested map from long to string to double) that a function or a webservice expects and returns.
Python lacks static typing and, moreover, static typing is insufficient to capture and validate custom business types,
which ultimately is what holds back teams from rapidly iterating on each other's work.[1]
To you, the programmer, schemagic is all about three things:
* data **description** using the simplest python data structures and an easily extensible syntax
* data **communication** between teams, enhancing documentation, giving feedback when something went wrong.
* data **validation** based on descriptions of data that have been documented and communicated.
Comments describing the shape of data are insufficient in real world applications.
Unless the documentation is backed up by programmatic verification, the documentation gets initially ignored,
and ultimately falls behind the actual program behavior.
In other words, **schemagic is all about data**.
Getting Acquainted with Schemagic
---------------------------------
Let's build a schema and start using it.
.. code-block:: python
>>> import schemagic
>>> list_of_ints = [int]
>>> schemagic.validate_against_schema(list_of_ints, [1, 2, 3])
[1, 2, 3]
>>> schemagic.validate_against_schema(list_of_ints, ["hello", "my friends"])
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'hello'
The error you see here (customizable) is the error you get when you try to call:
.. code-block:: python
>>> int("hello")
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'hello'
And it occurred because list_of_ints specified that the function to check every member of the list against was int()
Basic Schemagic Usage
---------------------
Schema checking is quite flexible, and all checks are done recursively. Let's go through some more examples:
**Map Template**:
*if you only provide a schema with one (callable) key and one value*
.. code-block:: python
>>> string_to_int_map = {str:int}
>>> schemagic.validate_against_schema(string_to_int_map, {"hello": 5, "friends": 6})
{'friends': 6, 'hello': 5}
**Map with Specific Keys**
*if you provide a schema with strings as keys*
.. code-block:: python
>>> friend_record = {"name":str, "age": int}
>>> schemagic.validate_against_schema(friend_record, {"name": "Tyler", "age": 400})
{'name': 'Tyler', 'age': 400}
**Sequence Template**:
*if you provide a sequence containing only one item as a schema*
.. code-block:: python
>>> list_of_ints = [int]
>>> schemagic.validate_against_schema(list_of_ints, [1, 2, 3, 4])
[1, 2, 3, 4]
**Strict Sequence**:
*if you provide a sequence with multiple items as a schema*
.. code-block:: python
>>> list_with_3_items_int_str_and_intstrmap = [int, str, {int: str}]
>>> schemagic.validate_against_schema(list_with_3_items_int_str_and_intstrmap, [1, "hello", {5: "friends", 12: "and", 90: "world"}])
[1, "hello", {5: "friends", 12: "and", 90: "world"}]
**Validation Function**:
*if you provide a function as a schema*
.. code-block:: python
>>> def null(data):
... if data is not None:
... raise TypeError("expected Nonetype, got {0}".format(data))
>>> schemagic.validate_against_schema(null, None)
>>> schemagic.validate_against_schema(null, "hello!")
Traceback (most recent call last):
...
TypeError: expected Nonetype, got hello
**Compose Schema Definitions Recursively Ad Nauseam**:
*this is where the real value lies*
.. code-block:: python
>>> def enum(*possible_values):
... def _validator(data):
... if not data in possible_values:
... raise ValueError()
... return data
... return _validator
>>> event = {
... "event_type": enum("PRODUCTION", "DEVELOPMENT"),
... "event_name": str
...}
>>> dispatch_request = {
... "events": [event],
... "requested_by": str
...}
>>> schemagic.validate_against_schema(dispatch_request,
... {"events": [{"event_type": "DEVELOPMENT",
... "event_name": "demo_business_process"},
... {"event_type": "DEVELOPMENT",
... "event_name": "demo_other_business_process"}],
... "requested_by": "Tyler Tolton"})
{"events": [{"event_type": "DEVELOPMENT", "event_name": "demo_business_process"}, {"event_type": "DEVELOPMENT", "event_name": "demo_other_business_process"}], "requested_by": "Tyler Tolton"}
Schemagic.validator Usage
-------------------------
**Use the Schemagic.validator for increased message clarity and control**:
.. code-block:: python
>>> list_of_ints_validator = schemagic.validator([int], "Business Type: list of integers")
>>> list_of_ints_validator([1, "not an int", 3])
Traceback (most recent call last):
...
ValueError: Bad value provided for Business Type: list of integers. - error: ValueError: invalid literal for int() with base 10: 'not an int' schema: [<type 'int'>] value: [1, 'not an int', 3]
**Supply predicate to prevent/enable validation conditionally**:
.. code-block:: python
>>> from functools import partial
>>> __env__ = None
>>> WHEN_IN_DEV_ENV = lambda: __env__ == "DEV"
>>> validate_in_dev = partial(schemagic.validator, validation_predicate=WHEN_IN_DEV_ENV)
>>> list_of_ints_validator = validate_in_dev([int], "integer list")
>>> __env__ = "DEV"
>>> list_of_ints_validator([1, "not an int", 3])
Traceback (most recent call last):
...
ValueError: Bad value provided for integer list. - error: ValueError: invalid literal for int() with base 10: 'not an int' schema: [<type 'int'>] value: [1, 'not an int', 3]
>>> __env__ = "PROD"
>>> list_of_ints_validator([1, "not an int", 3])
[1, "not an int", 3]
**Coerce data as it is validated**:
*note: validate_against_schema will do this automatically. see docs on validator.*
.. code-block:: python
>>> validate_and_coerce = partial(schemagic.validator, coerce_data=True)
>>> list_of_ints_validator_and_coercer = validate_and_coerce([int], "integer list")
>>> list_of_ints_validator_only = schemagic.validator([int], "integer_list")
>>> list_of_ints_validator_only(["1", "2", "3"])
["1", "2", "3"]
>>> # Note that if you pass an integer string to int(), it returns an integer.
>>> # This makes it a dual-purpose validator and coercer.
>>> list_of_ints_validator_and_coercer(["1", "2", "3"])
[1, 2, 3]
Schemagic.web
-------------
Schemagic.web is where rubber meets the road in practical usage. It provides an easy way to communicate between
services, between developers, and between development teams in an agile environment. The webservice business world was
the furnace in which schemagic was forged. Get ready to outsource yourself.
To demo the schemagic.web workflow, let's assume the roles of the first people in the world to discover a way
to (gasp) compute the fibonacci sequence in python.
*note: this code is all pulled from Peter Norvig's excellent* `Design of Computer Programs <https://www.udacity.com/course/design-of-computer-programs--cs212>`_ *Udacity class.*
.. code-block:: python
def memo(fn):
_cache = {}
def _f(*args):
try:
return _cache[args]
except KeyError:
_cache[args] = result = fn(*args)
return result
except TypeError:
return fn(*args)
_f.cache = _cache
return _f
@memo
def fib(n):
if n == 0 or n == 1:
return 1
else:
return fib(n - 1) + fib(n - 2)
>>> fib(30)
1346269
Brilliant! Well, now we'll of course want to share this discovery with the world in the form of a microservice, so that
others need not know the inner workings of this complex and dangerous algorithm.
Let's walk through how we might set up this webservice in Flask:
.. code-block:: python
from flask import Flask, Response, json
from fibonacci import fib # assuming we implemented the function in fibonacci.py
app = Flask(__name__)
@app.route("/fibonacci/<index>")
def web_fib_endpoint(index):
try:
index = int(index)
except ValueError:
return Response(
status=400,
response="Argument to /fibonacci/ must be an integer"
)
return Response(
status=200,
response=json.dumps(fib(index))
)
if __name__ == '__main__':
app.run(port=5000)
While this pattern is certainly serviceable, it is rather heavyweight for simply exposing a function to the web.
Additionally, the code doesn't lend itself well to documenting its input and output.
Let's see an adapted version of this code using the schemagic.web utilities.
.. code-block:: python
from flask.app import Flask
from fibonacci import fib # assuming we implemented the function in fibonacci.py
from schemagic.web import service_registry
app = Flask(__name__)
register_fibonacci_services = service_registry(app)
register_fibonacci_services(
dict(rule="/fibonacci",
input_schema=int,
output_schema=int,
fn=fib))
if __name__ == '__main__':
app.run(port=5000)
There, now we simply *describe* our service with data:
what the service endpoint is, what the input and output look like,
and which implementation delivers the contract defined therein.
#. The webservices all uniformly use POST requests to transmit data. The data supplied to each endpoint comes from the payload of the request; a sketch of a client call follows below.
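For illustration only, such a call might look something like this sketch. It assumes the service defined above is
running locally on port 5000, that the endpoint accepts its input as a JSON request body, and that it replies with a
JSON body; the exact wire format is determined by schemagic.web, so treat this as an approximation rather than the
library's documented behaviour.
.. code-block:: python
    import requests  # third-party HTTP client, used here purely for illustration
    # Hypothetical call against the /fibonacci endpoint registered above.
    # The POST payload carries the input; with the memoized fib() shown earlier,
    # an input of 10 should come back as 89.
    response = requests.post("http://localhost:5000/fibonacci", json=10)
    print(response.json())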
How to Contribute
-----------------
#. This codebase uses the popular `git flow <http://nvie.com/posts/a-successful-git-branching-model/>`_ model for version control
#. Fork `the repository`_ and make a branch off of develop (ideally using the naming convention ``feature/your-feature``).
#. When you've finished your feature, make a pull request back into develop.
#. Once you've made your pull request, email `the maintainer`_ and let me know!
#. Finally, if you ever have any questions about how or what to contribute, feel free to send an email!
.. _`the repository`: https://github.com/TJTolton/schemagic
.. _`the maintainer`: [email protected]
Documentation
=============
This project autogenerates its documentation using Sphinx and hosts it on Read the Docs. It can be viewed `here <http://schemagic.readthedocs.io/en/latest/schemagic.html>`_
.. [1] Please note: this description is adapted from the excellently phrased introduction to the `prismatic/schema <https://github.com/plumatic/schema>`_ Clojure library on which this project is based.
|
schemagic
|
/schemagic-0.9.1.tar.gz/schemagic-0.9.1/README.rst
|
README.rst
|
| 0.881513 | 0.689547 |
# migra: Like diff but for Postgres schemas
- ## compare schemas
- ## autogenerate migration scripts
- ## autosync your development database from your application models
- ## make your schema changes testable, robust, and (mostly) automatic
`migra` is a schema diff tool for PostgreSQL, written in Python. Use it in your Python scripts, or from the command line like this:
$ migra postgresql:///a postgresql:///b
alter table "public"."products" add column newcolumn text;
alter table "public"."products" add constraint "x" CHECK ((price > (0)::numeric));
`migra` magically figures out all the statements required to get from A to B.
Most features of PostgreSQL are supported.
**Migra supports PostgreSQL >= 10 only.** Known issues exist with earlier versions. Development resources are limited, and feature support rather than backwards compatibility is prioritised.
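If you would rather drive `migra` from a Python script than from the shell, a minimal sketch looks roughly like the
following. It leans on the `Migration` class shown in `migra/migra.py` later in this document and on `sqlbag` for the
database connections; consider it an illustration of the moving parts rather than the canonical API reference.
    from sqlbag import S
    from migra import Migration
    # Compare two live databases and print the statements needed to turn the first into the second.
    with S("postgresql:///a") as s_from, S("postgresql:///b") as s_target:
        m = Migration(s_from, s_target)
        m.set_safety(False)  # allow destructive (drop) statements to be generated
        m.add_all_changes()
        print(m.sql)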
## THE DOCS
Documentation is at [djrobstep.com/docs/migra](https://djrobstep.com/docs/migra).
## Folks, schemas are good
Schema migrations are without doubt the most cumbersome and annoying part of working with SQL databases. So much so that some people think that schemas themselves are bad!
But schemas are actually good. Enforcing data consistency and structure is a good thing. It’s the migration tooling that is bad, because it’s harder to use than it should be. ``migra`` is an attempt to change that, and make migrations easy, safe, and reliable instead of something to dread.
## Contributing
Contributing is easy. [Jump into the issues](https://github.com/djrobstep/migra/issues), find a feature or fix you'd like to work on, and get involved. Or create a new issue and suggest something completely different. If you're unsure about any aspect of the process, just ask.
## Credits
- [djrobstep](https://github.com/djrobstep): initial development, maintenance
- [alvarogzp](https://github.com/alvarogzp): privileges support
- [seblucas](https://github.com/seblucas): docker improvements
- [MOZGIII](https://github.com/MOZGIII): docker support
- [mshahbazi](https://github.com/mshahbazi): misc fixes and enhancements
|
schemahq-migra
|
/schemahq-migra-1.0.11.tar.gz/schemahq-migra-1.0.11/README.md
|
README.md
|
| 0.605333 | 0.312147 |
from __future__ import unicode_literals
from schemainspect import DBInspector, get_inspector
from sqlbag import raw_execute
from .changes import Changes
from .statements import Statements
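# Migration pairs an inspected "from" schema with an inspected "target" schema (via Changes)
# and accumulates, in self.statements, the SQL needed to turn "from" into "target".
# apply() runs the accumulated statements against the "from" database and then re-inspects it.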
class Migration(object):
"""
The main class of migra
"""
def __init__(self, x_from, x_target, schema=None):
self.statements = Statements()
self.changes = Changes(None, None)
self.schema = schema
if isinstance(x_from, DBInspector):
self.changes.i_from = x_from
else:
self.changes.i_from = get_inspector(x_from, schema=schema)
if x_from:
self.s_from = x_from
if isinstance(x_target, DBInspector):
self.changes.i_target = x_target
else:
self.changes.i_target = get_inspector(x_target, schema=schema)
if x_target:
self.s_target = x_target
def inspect_from(self):
self.changes.i_from = get_inspector(self.s_from, schema=self.schema)
def inspect_target(self):
self.changes.i_target = get_inspector(self.s_target, schema=self.schema)
def clear(self):
self.statements = Statements()
def apply(self):
for stmt in self.statements:
raw_execute(self.s_from, stmt)
self.changes.i_from = get_inspector(self.s_from, schema=self.schema)
safety_on = self.statements.safe
self.clear()
self.set_safety(safety_on)
def add(self, statements):
self.statements += [statement for statement in statements if statement != ""]
def add_sql(self, sql):
self.statements += Statements([sql])
def set_safety(self, safety_on):
self.statements.safe = safety_on
def add_extension_changes(self, creates=True, drops=True):
if creates:
self.add(self.changes.extensions(creations_only=True))
if drops:
self.add(self.changes.extensions(drops_only=True))
def add_all_changes(self, privileges=False, roles=False):
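        # Emit changes in a dependency-safe order: create roles/schemas/extensions/enums/sequences
        # first, drop dependent objects (triggers, RLS policies, privileges, constraints, indexes)
        # before altering selectables, recreate them afterwards, and finally drop collations and
        # schemas that are no longer needed.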
if roles:
if privileges:
self.add(self.changes.memberships(drops_only=True))
self.add(self.changes.roles(drops_only=True))
self.add(self.changes.roles(creations_only=True))
self.add(self.changes.schemas(creations_only=True))
self.add(self.changes.extensions(creations_only=True))
self.add(self.changes.collations(creations_only=True))
self.add(self.changes.enums(creations_only=True, modifications=False))
self.add(self.changes.sequences(creations_only=True))
self.add(self.changes.triggers(drops_only=True))
self.add(self.changes.rlspolicies(drops_only=True))
if privileges:
self.add(self.changes.privileges(drops_only=True))
self.add(self.changes.schema_privileges(drops_only=True))
self.add(self.changes.sequence_privileges(drops_only=True))
self.add(self.changes.column_privileges(drops_only=True))
self.add(self.changes.non_pk_constraints(drops_only=True))
self.add(self.changes.pk_constraints(drops_only=True))
self.add(self.changes.indexes(drops_only=True))
self.add(self.changes.selectables())
self.add(self.changes.sequences(drops_only=True))
self.add(self.changes.enums(drops_only=True, modifications=False))
self.add(self.changes.extensions(drops_only=True))
self.add(self.changes.indexes(creations_only=True))
self.add(self.changes.pk_constraints(creations_only=True))
self.add(self.changes.non_pk_constraints(creations_only=True))
if privileges:
self.add(self.changes.privileges(creations_only=True))
self.add(self.changes.schema_privileges(creations_only=True))
self.add(self.changes.sequence_privileges(creations_only=True))
self.add(self.changes.column_privileges(creations_only=True))
if roles:
self.add(self.changes.memberships(creations_only=True))
self.add(self.changes.rlspolicies(creations_only=True))
self.add(self.changes.triggers(creations_only=True))
self.add(self.changes.collations(drops_only=True))
self.add(self.changes.schemas(drops_only=True))
@property
def sql(self):
return self.statements.sql
|
schemahq-migra
|
/schemahq-migra-1.0.11.tar.gz/schemahq-migra-1.0.11/migra/migra.py
|
migra.py
|
| 0.632616 | 0.148201 |
from __future__ import print_function, unicode_literals
import argparse
import sys
from contextlib import contextmanager
from sqlbag import S
from .migra import Migration
from .statements import UnsafeMigrationException
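# Command-line front end: parse_args() declares the CLI flags, run() builds a Migration
# between the two given database URLs and prints the generated SQL.
# Exit codes returned by run(): 0 = no changes needed, 2 = changes generated,
# 3 = destructive statements generated without --unsafe.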
@contextmanager
def arg_context(x):
if x == "EMPTY":
yield None
else:
with S(x) as s:
yield s
def parse_args(args):
parser = argparse.ArgumentParser(description="Generate a database migration.")
parser.add_argument(
"--unsafe",
dest="unsafe",
action="store_true",
help="Prevent migra from erroring upon generation of drop statements.",
)
parser.add_argument(
"--schema",
dest="schema",
default=None,
help="Restrict output to statements for a particular schema",
)
parser.add_argument(
"--create-extensions-only",
dest="create_extensions_only",
action="store_true",
default=False,
help='Only output "create extension..." statements, nothing else.',
)
parser.add_argument(
"--with-privileges",
dest="with_privileges",
action="store_true",
default=False,
help="Also output privilege differences (ie. grant/revoke statements)",
)
parser.add_argument(
"--with-roles",
dest="with_roles",
action="store_true",
default=False,
help='Also output "create/drop role" statements.',
)
parser.add_argument(
"--force-utf8",
dest="force_utf8",
action="store_true",
default=False,
help="Force UTF-8 encoding for output",
)
parser.add_argument("dburl_from", help="The database you want to migrate.")
parser.add_argument(
"dburl_target", help="The database you want to use as the target."
)
return parser.parse_args(args)
def run(args, out=None, err=None):
schema = args.schema
if not out:
out = sys.stdout # pragma: no cover
if not err:
err = sys.stderr # pragma: no cover
with arg_context(args.dburl_from) as ac0, arg_context(args.dburl_target) as ac1:
m = Migration(ac0, ac1, schema=schema)
if args.unsafe:
m.set_safety(False)
if args.create_extensions_only:
m.add_extension_changes(drops=False)
else:
m.add_all_changes(privileges=args.with_privileges, roles=args.with_roles)
try:
if m.statements:
if args.force_utf8:
print(m.sql.encode("utf8"), file=out)
else:
print(m.sql, file=out)
except UnsafeMigrationException:
print(
"-- ERROR: destructive statements generated. Use the --unsafe flag to suppress this error.",
file=err,
)
return 3
if not m.statements:
return 0
else:
return 2
def do_command(): # pragma: no cover
args = parse_args(sys.argv[1:])
status = run(args)
sys.exit(status)
|
schemahq-migra
|
/schemahq-migra-1.0.11.tar.gz/schemahq-migra-1.0.11/migra/command.py
|
command.py
|
| 0.359027 | 0.113949 |
from __future__ import unicode_literals
from collections import OrderedDict as od
from functools import partial
from .statements import Statements
from .util import differences
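# Statement generation for schema differences. THINGS lists the object types handled
# generically by statements_for_changes(); tables, other selectables and enums get the
# special-case handling implemented further down in this module.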
THINGS = [
"schemas",
"enums",
"sequences",
"constraints",
"functions",
"views",
"indexes",
"extensions",
"privileges",
"schema_privileges",
"sequence_privileges",
"column_privileges",
"collations",
"rlspolicies",
"triggers",
"roles",
"memberships",
]
PK = "PRIMARY KEY"
def statements_for_changes(
things_from,
things_target,
creations_only=False,
drops_only=False,
modifications=True,
dependency_ordering=False,
add_dependents_for_modified=False,
):
added, removed, modified, unmodified = differences(things_from, things_target)
return statements_from_differences(
added=added,
removed=removed,
modified=modified,
replaceable=None,
creations_only=creations_only,
drops_only=drops_only,
modifications=modifications,
dependency_ordering=dependency_ordering,
old=things_from,
)
def statements_from_differences(
added,
removed,
modified,
replaceable=None,
creations_only=False,
drops_only=False,
modifications=True,
dependency_ordering=False,
old=None,
):
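    # Repeatedly emit drop statements for objects with no remaining dependents and create
    # statements for objects whose dependencies already exist, until nothing is pending.
    # If a full pass makes no progress, the dependency graph cannot be resolved.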
replaceable = replaceable or set()
statements = Statements()
if not creations_only:
pending_drops = set(removed)
if modifications:
pending_drops |= set(modified) - replaceable
else:
pending_drops = set()
if not drops_only:
pending_creations = set(added)
if modifications:
pending_creations |= set(modified)
else:
pending_creations = set()
def has_remaining_dependents(v, pending_drops):
if not dependency_ordering:
return False
return bool(set(v.dependents) & pending_drops)
def has_uncreated_dependencies(v, pending_creations):
if not dependency_ordering:
return False
return bool(set(v.dependent_on) & pending_creations)
while True:
before = pending_drops | pending_creations
if not creations_only:
for k, v in removed.items():
if not has_remaining_dependents(v, pending_drops):
if k in pending_drops:
statements.append(old[k].drop_statement)
pending_drops.remove(k)
if not drops_only:
for k, v in added.items():
if not has_uncreated_dependencies(v, pending_creations):
if k in pending_creations:
statements.append(v.create_statement)
pending_creations.remove(k)
if modifications:
for k, v in modified.items():
if hasattr(v, "update_statement"):
statements.append(v.update_statement)
pending_drops.remove(k)
pending_creations.remove(k)
else:
if not creations_only:
if not has_remaining_dependents(v, pending_drops):
if k in pending_drops:
statements.append(old[k].drop_statement)
pending_drops.remove(k)
if not drops_only:
if not has_uncreated_dependencies(v, pending_creations):
if k in pending_creations:
statements.append(v.create_statement)
pending_creations.remove(k)
after = pending_drops | pending_creations
if not after:
break
elif (
after == before
): # this should never happen because there shouldn't be circular dependencies
raise ValueError("cannot resolve dependencies") # pragma: no cover
return statements
def get_enum_modifications(tables_from, tables_target, enums_from, enums_target):
_, _, e_modified, _ = differences(enums_from, enums_target)
_, _, t_modified, _ = differences(tables_from, tables_target)
pre = Statements()
recreate = Statements()
post = Statements()
enums_to_change = e_modified
for t, v in t_modified.items():
t_before = tables_from[t]
_, _, c_modified, _ = differences(t_before.columns, v.columns)
for k, c in c_modified.items():
before = t_before.columns[k]
if (
c.is_enum == before.is_enum
and c.dbtypestr == before.dbtypestr
and c.enum != before.enum
):
has_default = c.default and not c.is_generated
if has_default:
pre.append(before.drop_default_statement(t))
pre.append(before.change_enum_to_string_statement(t))
post.append(before.change_string_to_enum_statement(t))
if has_default:
post.append(before.add_default_statement(t))
for e in enums_to_change.values():
recreate.append(e.drop_statement)
recreate.append(e.create_statement)
return pre + recreate + post
def get_table_changes(tables_from, tables_target, enums_from, enums_target):
added, removed, modified, _ = differences(tables_from, tables_target)
statements = Statements()
for t, v in removed.items():
statements.append(v.drop_statement)
for t, v in added.items():
statements.append(v.create_statement)
if v.rowsecurity:
rls_alter = v.alter_rls_statement
statements += [rls_alter]
statements += get_enum_modifications(
tables_from, tables_target, enums_from, enums_target
)
for t, v in modified.items():
before = tables_from[t]
# drop/recreate tables which have changed from partitioned to non-partitioned
if v.is_partitioned != before.is_partitioned:
statements.append(v.drop_statement)
statements.append(v.create_statement)
continue
if v.is_unlogged != before.is_unlogged:
statements += [v.alter_unlogged_statement]
# attach/detach tables with changed parent tables
if v.parent_table != before.parent_table:
statements += v.attach_detach_statements(before)
for t, v in modified.items():
before = tables_from[t]
if not v.is_alterable:
continue
c_added, c_removed, c_modified, _ = differences(before.columns, v.columns)
for k in list(c_modified):
c = v.columns[k]
c_before = before.columns[k]
# there's no way to alter a table into/out of generated state
# so you gotta drop/recreate
if c.is_generated != c_before.is_generated:
del c_modified[k]
c_added[k] = c
c_removed[k] = c_before
for k, c in c_removed.items():
alter = v.alter_table_statement(c.drop_column_clause)
statements.append(alter)
for k, c in c_added.items():
alter = v.alter_table_statement(c.add_column_clause)
statements.append(alter)
for k, c in c_modified.items():
statements += c.alter_table_statements(before.columns[k], t)
if v.rowsecurity != before.rowsecurity:
rls_alter = v.alter_rls_statement
statements += [rls_alter]
return statements
def get_selectable_changes(
selectables_from,
selectables_target,
enums_from,
enums_target,
add_dependents_for_modified=True,
):
tables_from = od((k, v) for k, v in selectables_from.items() if v.is_table)
tables_target = od((k, v) for k, v in selectables_target.items() if v.is_table)
other_from = od((k, v) for k, v in selectables_from.items() if not v.is_table)
other_target = od((k, v) for k, v in selectables_target.items() if not v.is_table)
added_tables, removed_tables, modified_tables, unmodified_tables = differences(
tables_from, tables_target
)
added_other, removed_other, modified_other, unmodified_other = differences(
other_from, other_target
)
changed_all = {}
changed_all.update(modified_tables)
changed_all.update(modified_other)
modified_all = dict(changed_all)
changed_all.update(removed_tables)
changed_all.update(removed_other)
replaceable = set()
not_replaceable = set()
if add_dependents_for_modified:
for k, m in changed_all.items():
old = selectables_from[k]
if k in modified_all and m.can_replace(old):
if not m.is_table:
replaceable.add(k)
continue
for d in m.dependents_all:
if d in unmodified_other:
dd = unmodified_other.pop(d)
modified_other[d] = dd
not_replaceable.add(d)
modified_other = od(sorted(modified_other.items()))
replaceable -= not_replaceable
statements = Statements()
def functions(d):
return {k: v for k, v in d.items() if v.relationtype == "f"}
statements += statements_from_differences(
added_other,
removed_other,
modified_other,
replaceable=replaceable,
drops_only=True,
dependency_ordering=True,
old=selectables_from,
)
statements += get_table_changes(
tables_from, tables_target, enums_from, enums_target
)
if any([functions(added_other), functions(modified_other)]):
statements += ["set check_function_bodies = off;"]
statements += statements_from_differences(
added_other,
removed_other,
modified_other,
replaceable=replaceable,
creations_only=True,
dependency_ordering=True,
old=selectables_from,
)
return statements
class Changes(object):
def __init__(self, i_from, i_target):
self.i_from = i_from
self.i_target = i_target
def __getattr__(self, name):
if name == "non_pk_constraints":
a = self.i_from.constraints.items()
b = self.i_target.constraints.items()
a_od = od((k, v) for k, v in a if v.constraint_type != PK)
b_od = od((k, v) for k, v in b if v.constraint_type != PK)
return partial(statements_for_changes, a_od, b_od)
elif name == "pk_constraints":
a = self.i_from.constraints.items()
b = self.i_target.constraints.items()
a_od = od((k, v) for k, v in a if v.constraint_type == PK)
b_od = od((k, v) for k, v in b if v.constraint_type == PK)
return partial(statements_for_changes, a_od, b_od)
elif name == "selectables":
return partial(
get_selectable_changes,
od(sorted(self.i_from.selectables.items())),
od(sorted(self.i_target.selectables.items())),
self.i_from.enums,
self.i_target.enums,
)
elif name in THINGS:
return partial(
statements_for_changes,
getattr(self.i_from, name),
getattr(self.i_target, name),
)
else:
raise AttributeError(name)
|
schemahq-migra
|
/schemahq-migra-1.0.11.tar.gz/schemahq-migra-1.0.11/migra/changes.py
|
changes.py
|
| 0.47244 | 0.131759 |
# `schemainspect`: SQL Schema Inspection
Schema inspection for PostgreSQL (and potentially others in the future).
Inspects tables, views, materialized views, constraints, indexes, sequences, enums, functions, and extensions. Handles table partitioning and inheritance.
**Limitations:** Function inspection only confirmed to work with SQL/PLPGSQL languages so far.
Basic Usage
-----------
Get an inspection object from an already opened SQLAlchemy session or connection as follows:
from schemainspect import get_inspector
from sqlbag import S
with S('postgresql:///example') as s:
i = get_inspector(s)
The inspection object has attributes for tables, views, and all the other things it tracks. At each of these attributes you'll find a dictionary (OrderedDict) mapping from fully-qualified-and-quoted-name-of-thing-in-database to information object.
For instance, the information about a table *books* would be accessed as follows:
>>> books_table = i.tables['"public"."books"']
>>> books_table.name
'books'
>>> books_table.schema
'public'
>>> [each.name for each in books_table.columns]
['id', 'title', 'isbn']
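As a small illustrative extension (reusing the `i` inspector from the snippet above, and only the attributes already
shown there), you can walk every inspected table the same way; the keys are the quoted, fully qualified names:
    for qualified_name, table in i.tables.items():
        print(qualified_name, "->", table.schema, table.name)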
## Documentation
Documentation is a bit patchy at the moment. Watch this space!
## Author Credits
Initial development, maintenance:
- [djrobstep](https://github.com/djrobstep)
Contributions:
- [BenSjoberg](https://github.com/BenSjoberg)
- [johto](https://github.com/johto)
## Install
Install with [pip](https://pip.pypa.io):
$ pip install schemainspect
To install psycopg2 (the PostgreSQL driver) at the same time:
$ pip install schemainspect[pg]
|
schemahq-schemainspect
|
/schemahq-schemainspect-1.0.12.tar.gz/schemahq-schemainspect-1.0.12/README.md
|
README.md
|
| 0.838911 | 0.587736 |
from collections import OrderedDict as od
from .misc import AutoRepr, quoted_identifier
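# Value objects produced by the inspector: Inspected supplies quoted-name helpers,
# ColumnInfo models a single column (including the ALTER clauses needed to change it),
# and InspectedSelectable is the shared representation for tables, views and other selectables.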
class Inspected(AutoRepr):
@property
def quoted_full_name(self):
return "{}.{}".format(
quoted_identifier(self.schema), quoted_identifier(self.name)
)
@property
def signature(self):
return self.quoted_full_name
@property
def unquoted_full_name(self):
return "{}.{}".format(self.schema, self.name)
@property
def quoted_name(self):
return quoted_identifier(self.name)
@property
def quoted_schema(self):
return quoted_identifier(self.schema)
def __ne__(self, other):
return not self == other
class TableRelated(object):
@property
def quoted_full_table_name(self):
return "{}.{}".format(
quoted_identifier(self.schema), quoted_identifier(self.table_name)
)
class ColumnInfo(AutoRepr):
def __init__(
self,
name,
dbtype,
pytype,
default=None,
not_null=False,
is_enum=False,
enum=None,
dbtypestr=None,
collation=None,
is_identity=False,
is_identity_always=False,
is_generated=False,
):
self.name = name or ""
self.dbtype = dbtype
self.dbtypestr = dbtypestr or dbtype
self.pytype = pytype
self.default = default or None
self.not_null = not_null
self.is_enum = is_enum
self.enum = enum
self.collation = collation
self.is_identity = is_identity
self.is_identity_always = is_identity_always
self.is_generated = is_generated
def __eq__(self, other):
return (
self.name == other.name
and self.dbtype == other.dbtype
and self.dbtypestr == other.dbtypestr
and self.pytype == other.pytype
and self.default == other.default
and self.not_null == other.not_null
and self.enum == other.enum
and self.collation == other.collation
and self.is_identity == other.is_identity
and self.is_identity_always == other.is_identity_always
and self.is_generated == other.is_generated
)
def alter_clauses(self, other):
# ordering:
# identity must be dropped before notnull
# notnull must be added before identity
clauses = []
not_null_change = self.not_null != other.not_null
if not_null_change and self.not_null:
clauses.append(self.alter_not_null_clause)
if self.default != other.default and not self.default:
clauses.append(self.alter_default_clause)
if (
self.is_identity != other.is_identity
or self.is_identity_always != other.is_identity_always
):
clauses.append(self.alter_identity_clause(other))
elif self.default != other.default and self.default:
clauses.append(self.alter_default_clause)
if not_null_change and not self.not_null:
clauses.append(self.alter_not_null_clause)
if self.dbtypestr != other.dbtypestr or self.collation != other.collation:
clauses.append(self.alter_data_type_clause)
return clauses
def change_enum_to_string_statement(self, table_name):
if self.is_enum:
return "alter table {} alter column {} set data type varchar using {}::varchar;".format(
table_name, self.quoted_name, self.quoted_name
)
else:
raise ValueError
def change_string_to_enum_statement(self, table_name):
if self.is_enum:
return "alter table {} alter column {} set data type {} using {}::{};".format(
table_name,
self.quoted_name,
self.dbtypestr,
self.quoted_name,
self.dbtypestr,
)
else:
raise ValueError
def drop_default_statement(self, table_name):
return "alter table {} alter column {} drop default;".format(
table_name, self.quoted_name
)
def add_default_statement(self, table_name):
return "alter table {} alter column {} set default {};".format(
table_name, self.quoted_name, self.default
)
def alter_table_statements(self, other, table_name):
prefix = "alter table {}".format(table_name)
return ["{} {};".format(prefix, c) for c in self.alter_clauses(other)]
@property
def quoted_name(self):
return quoted_identifier(self.name)
@property
def creation_clause(self):
x = "{} {}".format(self.quoted_name, self.dbtypestr)
if self.is_identity:
identity_type = "always" if self.is_identity_always else "by default"
x += " generated {} as identity".format(identity_type)
if self.not_null:
x += " not null"
if self.is_generated:
x += " generated always as ({}) stored".format(self.default)
elif self.default:
x += " default {}".format(self.default)
return x
@property
def add_column_clause(self):
return "add column {}{}".format(self.creation_clause, self.collation_subclause)
@property
def drop_column_clause(self):
return "drop column {k}".format(k=self.quoted_name)
@property
def alter_not_null_clause(self):
keyword = "set" if self.not_null else "drop"
return "alter column {} {} not null".format(self.quoted_name, keyword)
@property
def alter_default_clause(self):
if self.default:
alter = "alter column {} set default {}".format(
self.quoted_name, self.default
)
else:
alter = "alter column {} drop default".format(self.quoted_name)
return alter
def alter_identity_clause(self, other):
if self.is_identity:
identity_type = "always" if self.is_identity_always else "by default"
if other.is_identity:
alter = "alter column {} set generated {}".format(
self.quoted_name, identity_type
)
else:
alter = "alter column {} add generated {} as identity".format(
self.quoted_name, identity_type
)
else:
alter = "alter column {} drop identity".format(self.quoted_name)
return alter
@property
def collation_subclause(self):
if self.collation:
collate = " collate {}".format(quoted_identifier(self.collation))
else:
collate = ""
return collate
@property
def alter_data_type_clause(self):
return "alter column {} set data type {}{} using {}::{}".format(
self.quoted_name,
self.dbtypestr,
self.collation_subclause,
self.quoted_name,
self.dbtypestr,
)
class InspectedSelectable(Inspected):
def __init__(
self,
name,
schema,
columns,
inputs=None,
definition=None,
dependent_on=None,
dependents=None,
comment=None,
relationtype="unknown",
parent_table=None,
partition_def=None,
rowsecurity=False,
forcerowsecurity=False,
persistence=None,
):
self.name = name
self.schema = schema
self.inputs = inputs or []
self.columns = columns
self.definition = definition
self.relationtype = relationtype
self.dependent_on = dependent_on or []
self.dependents = dependents or []
self.dependent_on_all = []
self.dependents_all = []
self.constraints = od()
self.indexes = od()
self.comment = comment
self.parent_table = parent_table
self.partition_def = partition_def
self.rowsecurity = rowsecurity
self.forcerowsecurity = forcerowsecurity
self.persistence = persistence
def __eq__(self, other):
equalities = (
type(self) == type(other),
self.relationtype == other.relationtype,
self.name == other.name,
self.schema == other.schema,
dict(self.columns) == dict(other.columns),
self.inputs == other.inputs,
self.definition == other.definition,
self.parent_table == other.parent_table,
self.partition_def == other.partition_def,
self.rowsecurity == other.rowsecurity,
self.persistence == other.persistence,
)
return all(equalities)
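A minimal usage sketch (not part of the package itself): `ColumnInfo.alter_table_statements` turns the difference between two column definitions into ALTER statements. The import path is assumed from the file layout noted below, and the exact identifier quoting depends on `quoted_identifier` from `.misc`.

from schemainspect.inspected import ColumnInfo

# Desired state: numeric(10,2), not null. Current state in the database: nullable integer.
target = ColumnInfo(name="price", dbtype="numeric", pytype=float,
                    dbtypestr="numeric(10,2)", not_null=True)
current = ColumnInfo(name="price", dbtype="integer", pytype=int)

# alter_clauses() orders the clauses (not null before type changes, etc.);
# alter_table_statements() wraps each clause in "alter table ... ;".
for statement in target.alter_table_statements(current, '"public"."books"'):
    print(statement)
# Roughly:
# alter table "public"."books" alter column "price" set not null;
# alter table "public"."books" alter column "price" set data type numeric(10,2) using "price"::numeric(10,2);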
|
schemahq-schemainspect
|
/schemahq-schemainspect-1.0.12.tar.gz/schemahq-schemainspect-1.0.12/schemainspect/inspected.py
|
inspected.py
|
| 0.715623 | 0.169543 |
Overview
============
SchemaIndex is designed for data scientists to find data more efficiently. It can index the tables
and files known to the user.
With schemaindex, you can:
1. Create a data source (e.g. Mysql, Oracle, etc) by registering its connection information.
2. Reflect the data source and index the metadata.
3. Search for all tables/entities in those data sources by their names.
Supported Data Sources
-------------
I plan to support at least the top 10 most popular databases in the db-ranking list. This table tracks the progress:
* HDFS
* Mysql
* Sqlite
| Data Source | pip install | Cloud | Notes |
| :--- | :--- | :---: | :--- |
| Oracle | requires: 1. pip install cx_oracle, 2. install Oracle Instant Client | OOTB | |
| Mysql | requires: pip install pymysql | OOTB | |
| MS SQL Server | requires: conda install pymssql | OOTB | |
| Sqlite | OOTB | OOTB | |
| HDFS | OOTB | OOTB | |
| HDFS_inotify | OOTB | OOTB | |
Data Sources to Support on Roadmap
-------------
* HDP (Hive)
Installation
============
On Linux
-------------
Standard pip should be able to install schemaindex:
.. code-block:: bash
$ pip install schemaindex
How to use
============
Basic Usage
-------------
To start the schemaindex server, please run this command:
.. code-block:: bash
$ schemaindex runserver
The following is a sample output:
.. code-block:: bash
(py3env1) duan:py3env1$ schemaindex runserver
Server started, please visit : http://localhost:8088/
The *runserver* command should boot up a web server and also open a browser for you.
In the browser, click "datasources" and then click "create ..." to register your own data source.
For example, to register a new HDFS data source, you can input information as in the following screenshot:
.. image:: doc/pic/create_data_source.png
The next step is to reflect the data source and extract all metadata.
You can do so by clicking the "Reflect Now!" button to extract the metadata of the data source,
or by checking the box "Reflect Data Source Immediately" during data source creation.
If the previous two steps were successful, you should be able to search for files in the "search" box
that appears on the "overview" and "search" pages, as in the following screenshot:
.. image:: doc/pic/global_search.png
Work with HDFS Index
-------------
While creating a data source, you can select the 'hdfsindex' plugin. This plugin is based on the hdfscli library (pip install hdfs).
You need to input these parameters:
1. HDFS Web URL: sometimes also known as the Namenode UI. Note: Kerberos authentication is not supported. If you need it, please raise a ticket on GitHub.
2. HDFS Native URL: usually you can find this link after you have opened the Namenode UI / web URL. This should start with hdfs://localhost:9000 (or 8020).
If you check "Real time synchronization:" and you have reflected the hdfs data source,
it will start a background java process to capture all hdfs changes and update the index in real time.
In background, you should be able to see a process similar to "java ... HdfsINotify2Restful".
If you do not see this process, try to restart schemaindex server, or look at the logs at $SCHEMAINDEX/log
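As a rough illustration only (this is not schemaindex's own API), the hdfscli library the plugin is described as using connects through the HDFS Web URL (WebHDFS). The host and port below are placeholders.

.. code-block:: python

    from hdfs import InsecureClient
    client = InsecureClient('http://namenode-host:9870')  # placeholder WebHDFS address
    print(client.list('/'))  # listing paths is the kind of metadata that gets indexed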
Work with Databases
-------------
By default, schemaindex comes with a predefined plugin, sqlalchemyindex, to extract metadata from mainstream databases.
This reflect engine is based on the Python library SQLAlchemy, which works for many databases, including Mysql, Sqlite, etc.
For Mysql to work, you need to install pymysql (Python 3) or mysql-python (Python 2) in advance.
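For reference, here is a sketch (again, not schemaindex's own API) of what a SQLAlchemy-based reflect engine does under the hood: the connection information you register is a standard SQLAlchemy URL. The URL below is a placeholder.

.. code-block:: python

    from sqlalchemy import create_engine, inspect
    engine = create_engine('mysql+pymysql://user:password@localhost:3306/mydb')  # placeholder URL
    print(inspect(engine).get_table_names())  # the table names are what ends up in the index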
Work with Plugins
-------------
All the plugins are located in $SCHEMAINDEX/plugin. Currently only HDFS and SQLALCHEMY are implemented.
If you want to add more plugins, you can put the plugin into this folder and run this command:
.. code-block:: bash
$ schemaindex reload plugin
The following is a sample output:
.. code-block:: bash
(py3env1) duan:py3env1$ schemaindex reload plugin
Plugins are reloaded.
Reflect Plugin Name: Path:
hdfsindex /home/duan/virenv/py3env1/local/lib/python2.7/site-packages/schemaindex/plugin/hdfsindex
sqlalchemy /home/duan/virenv/py3env1/local/lib/python2.7/site-packages/schemaindex/plugin/sqlalchemyindex
Reference
============
These questions explain why I created this software:
1. `What is the best way to index and search files in HDFS? <https://www.quora.com/What-is-the-best-way-to-index-and-search-files-in-HDFS>`_
2. `Search/Find a file and file content in Hadoop <https://stackoverflow.com/questions/6297533/search-find-a-file-and-file-content-in-hadoop>`_
3. `find file in hadoop filesystem <https://stackoverflow.com/questions/42903113/find-file-in-hadoop-filesystem>`_
|
schemaindex
|
/schemaindex-0.2451.tar.gz/schemaindex-0.2451/README.rst
|
README.rst
|
| 0.622689 | 0.57335 |
# `schemainspect`: SQL Schema Inspection
Schema inspection for PostgreSQL (and potentially others in the future).
Inspects tables, views, materialized views, constraints, indexes, sequences, enums, functions, and extensions. Handles table partitioning and inheritance.
**Limitations:** Function inspection only confirmed to work with SQL/PLPGSQL languages so far.
## Basic Usage
Get an inspection object from an already opened SQLAlchemy session or connection as follows:
from schemainspect import get_inspector
from sqlbag import S
with S('postgresql:///example') as s:
i = get_inspector(s)
The inspection object has attributes for tables, views, and all the other things it tracks. At each of these attributes you'll find a dictionary (OrderedDict) mapping from fully-qualified-and-quoted-name-of-thing-in-database to information object.
For instance, the information about a table *books* would be accessed as follows:
>>> books_table = i.tables['"public"."books"']
>>> books_table.name
'books'
>>> books_table.schema
'public'
>>> [each.name for each in books_table.columns]
['id', 'title', 'isbn']
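For a quick overview of everything that was inspected, these dictionaries can be looped over directly. A minimal sketch, assuming the same `i` object as above:

    for quoted_name, table in i.tables.items():
        print(quoted_name, '->', table.schema, table.name)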
## Documentation
Documentation is a bit patchy at the moment. Watch this space!
## Author Credits
Initial development, maintenance:
- [djrobstep](https://github.com/djrobstep)
Contributions:
- [BenSjoberg](https://github.com/BenSjoberg)
- [johto](https://github.com/johto)
## Install
Install with [pip](https://pip.pypa.io):
$ pip install schemainspect
To install psycopg2 (the PostgreSQL driver) at the same time as well:
$ pip install schemainspect[pg]
|
schemainspect-idempotent
|
/schemainspect_idempotent-0.6.tar.gz/schemainspect_idempotent-0.6/README.md
|
README.md
|
| 0.847242 | 0.602442 |
from collections import OrderedDict as od
from .misc import AutoRepr, quoted_identifier
class Inspected(AutoRepr):
@property
def quoted_full_name(self):
return "{}.{}".format(
quoted_identifier(self.schema), quoted_identifier(self.name)
)
@property
def signature(self):
return self.quoted_full_name
@property
def unquoted_full_name(self):
return "{}.{}".format(self.schema, self.name)
@property
def quoted_name(self):
return quoted_identifier(self.name)
@property
def quoted_schema(self):
return quoted_identifier(self.schema)
def __ne__(self, other):
return not self == other
class TableRelated(object):
@property
def quoted_full_table_name(self):
return "{}.{}".format(
quoted_identifier(self.schema), quoted_identifier(self.table_name)
)
class ColumnInfo(AutoRepr):
def __init__(
self,
name,
dbtype,
pytype,
default=None,
not_null=False,
is_enum=False,
enum=None,
dbtypestr=None,
collation=None,
is_identity=False,
is_identity_always=False,
is_generated=False,
is_inherited=False,
):
self.name = name or ""
self.dbtype = dbtype
self.dbtypestr = dbtypestr or dbtype
self.pytype = pytype
self.default = default or None
self.not_null = not_null
self.is_enum = is_enum
self.enum = enum
self.collation = collation
self.is_identity = is_identity
self.is_identity_always = is_identity_always
self.is_generated = is_generated
self.is_inherited = is_inherited
def __eq__(self, other):
return (
self.name == other.name
and self.dbtype == other.dbtype
and self.dbtypestr == other.dbtypestr
and self.pytype == other.pytype
and self.default == other.default
and self.not_null == other.not_null
and self.enum == other.enum
and self.collation == other.collation
and self.is_identity == other.is_identity
and self.is_identity_always == other.is_identity_always
and self.is_generated == other.is_generated
and self.is_inherited == other.is_inherited
)
def alter_clauses(self, other):
# ordering:
# identity must be dropped before notnull
# notnull must be added before identity
clauses = []
notnull_changed = self.not_null != other.not_null
notnull_added = notnull_changed and self.not_null
notnull_dropped = notnull_changed and not self.not_null
default_changed = self.default != other.default
# default_added = default_changed and self.default
# default_dropped = default_changed and not self.default
identity_changed = (
self.is_identity != other.is_identity
or self.is_identity_always != other.is_identity_always
)
type_or_collation_changed = (
self.dbtypestr != other.dbtypestr or self.collation != other.collation
)
if default_changed:
clauses.append(self.alter_default_clause)
if notnull_added:
clauses.append(self.alter_not_null_clause)
if identity_changed:
clauses.append(self.alter_identity_clause(other))
if notnull_dropped:
clauses.append(self.alter_not_null_clause)
if type_or_collation_changed:
if self.is_enum and other.is_enum:
clauses.append(self.alter_enum_type_clause)
else:
clauses.append(self.alter_data_type_clause)
return clauses
def change_enum_to_string_statement(self, table_name):
if self.is_enum:
return "alter table {} alter column {} set data type varchar using {}::varchar;".format(
table_name, self.quoted_name, self.quoted_name
)
else:
raise ValueError
def change_string_to_enum_statement(self, table_name):
if self.is_enum:
return "alter table {} alter column {} set data type {} using {}::{};".format(
table_name,
self.quoted_name,
self.dbtypestr,
self.quoted_name,
self.dbtypestr,
)
else:
raise ValueError
def change_enum_statement(self, table_name):
if self.is_enum:
return "alter table {} alter column {} type {} using {}::text::{};".format(
table_name,
self.name,
self.enum.quoted_full_name,
self.name,
self.enum.quoted_full_name,
)
else:
raise ValueError
def drop_default_statement(self, table_name):
return "alter table {} alter column {} drop default;".format(
table_name, self.quoted_name
)
def add_default_statement(self, table_name):
return "alter table {} alter column {} set default {};".format(
table_name, self.quoted_name, self.default
)
def alter_table_statements(self, other, table_name):
prefix = "alter table {}".format(table_name)
return ["{} {};".format(prefix, c) for c in self.alter_clauses(other)]
@property
def quoted_name(self):
return quoted_identifier(self.name)
@property
def creation_clause(self):
x = "{} {}".format(self.quoted_name, self.dbtypestr)
if self.is_identity:
identity_type = "always" if self.is_identity_always else "by default"
x += " generated {} as identity".format(identity_type)
if self.not_null:
x += " not null"
if self.is_generated:
x += " generated always as ({}) stored".format(self.default)
elif self.default:
x += " default {}".format(self.default)
return x
@property
def add_column_clause(self):
return "add column if not exists {}{}".format(self.creation_clause, self.collation_subclause)
@property
def drop_column_clause(self):
return "drop column if exists {k}".format(k=self.quoted_name)
@property
def alter_not_null_clause(self):
keyword = "set" if self.not_null else "drop"
return "alter column {} {} not null".format(self.quoted_name, keyword)
@property
def alter_default_clause(self):
if self.default:
alter = "alter column {} set default {}".format(
self.quoted_name, self.default
)
else:
alter = "alter column {} drop default".format(self.quoted_name)
return alter
def alter_identity_clause(self, other):
if self.is_identity:
identity_type = "always" if self.is_identity_always else "by default"
if other.is_identity:
alter = "alter column {} set generated {}".format(
self.quoted_name, identity_type
)
else:
alter = "alter column {} add generated {} as identity".format(
self.quoted_name, identity_type
)
else:
alter = "alter column {} drop identity".format(self.quoted_name)
return alter
@property
def collation_subclause(self):
if self.collation:
collate = " collate {}".format(quoted_identifier(self.collation))
else:
collate = ""
return collate
@property
def alter_data_type_clause(self):
return "alter column {} set data type {}{} using {}::{}".format(
self.quoted_name,
self.dbtypestr,
self.collation_subclause,
self.quoted_name,
self.dbtypestr,
)
@property
def alter_enum_type_clause(self):
return "alter column {} set data type {}{} using {}::text::{}".format(
self.quoted_name,
self.dbtypestr,
self.collation_subclause,
self.quoted_name,
self.dbtypestr,
)
class InspectedSelectable(Inspected):
def __init__(
self,
name,
schema,
columns,
inputs=None,
definition=None,
dependent_on=None,
dependents=None,
comment=None,
relationtype="unknown",
parent_table=None,
partition_def=None,
rowsecurity=False,
forcerowsecurity=False,
persistence=None,
):
self.name = name
self.schema = schema
self.inputs = inputs or []
self.columns = columns
self.definition = definition
self.relationtype = relationtype
self.dependent_on = dependent_on or []
self.dependents = dependents or []
self.dependent_on_all = []
self.dependents_all = []
self.constraints = od()
self.indexes = od()
self.comment = comment
self.parent_table = parent_table
self.partition_def = partition_def
self.rowsecurity = rowsecurity
self.forcerowsecurity = forcerowsecurity
self.persistence = persistence
def __eq__(self, other):
equalities = (
type(self) == type(other),
self.relationtype == other.relationtype,
self.name == other.name,
self.schema == other.schema,
dict(self.columns) == dict(other.columns),
self.inputs == other.inputs,
self.definition == other.definition,
self.parent_table == other.parent_table,
self.partition_def == other.partition_def,
self.rowsecurity == other.rowsecurity,
self.persistence == other.persistence,
)
return all(equalities)
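A minimal sketch (not from the package docs) of what distinguishes this fork from upstream schemainspect: the column clauses it emits are idempotent, using `if not exists` / `if exists`. The import path is assumed from the file layout noted below, and the exact identifier quoting depends on `quoted_identifier` from `.misc`.

from schemainspect_idempotent.inspected import ColumnInfo

col = ColumnInfo(name="isbn", dbtype="text", pytype=str, not_null=True)
print(col.add_column_clause)   # roughly: add column if not exists "isbn" text not null
print(col.drop_column_clause)  # roughly: drop column if exists "isbn"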
|
schemainspect-idempotent
|
/schemainspect_idempotent-0.6.tar.gz/schemainspect_idempotent-0.6/schemainspect_idempotent/inspected.py
|
inspected.py
|
| 0.701917 | 0.141786 |
# `schemainspect`: SQL Schema Inspection
Schema inspection for PostgreSQL (and potentially others in the future).
Inspects tables, views, materialized views, constraints, indexes, sequences, enums, functions, and extensions. Handles table partitioning and inheritance.
**Limitations:** Function inspection only confirmed to work with SQL/PLPGSQL languages so far.
## Basic Usage
Get an inspection object from an already opened SQLAlchemy session or connection as follows:
from schemainspect import get_inspector
from sqlbag import S
with S('postgresql:///example') as s:
i = get_inspector(s)
The inspection object has attributes for tables, views, and all the other things it tracks. At each of these attributes you'll find a dictionary (OrderedDict) mapping from fully-qualified-and-quoted-name-of-thing-in-database to information object.
For instance, the information about a table *books* would be accessed as follows:
>>> books_table = i.tables['"public"."books"']
>>> books_table.name
'books'
>>> books_table.schema
'public'
>>> [each.name for each in books_table.columns]
['id', 'title', 'isbn']
## Documentation
Documentation is a bit patchy at the moment. Watch this space!
## Author Credits
Initial development, maintenance:
- [djrobstep](https://github.com/djrobstep)
Contributions:
- [BenSjoberg](https://github.com/BenSjoberg)
- [johto](https://github.com/johto)
## Install
Install with [pip](https://pip.pypa.io):
$ pip install schemainspect
To install psycopg2 (the PostgreSQL driver) at the same time as well:
$ pip install schemainspect[pg]
|
schemainspect
|
/schemainspect-3.1.1652015119.tar.gz/schemainspect-3.1.1652015119/README.md
|
README.md
|
| 0.847242 | 0.602442 |
About Schemaish
===============
Schemaish is a schema library that was initially written to support the formish form library. However, it has been designed to work as a standalone schema package, with validation provided by validatish.
How does Schemaish work?
------------------------
There are two ways of creating schemas, the procedural way and the declarative way. Here is the procedural way.
>>> import schemaish
>>> schema = schemaish.Integer(title='My Integer', description='This is really my integer')
The schema can now be used to validate a value:
>>> schema.validate(10)
.. note:: Validation does not validate types; it only runs the validators that have been applied to the schema. If you need type validation, add a specific validator.
A Schemaish Structure
---------------------
Just create a structure and add schema attributes to it!
>>> schema = schemaish.Structure()
>>> schema.add( 'myfield', schemaish.Integer() )
>>> schema.add( 'myotherfield', schemaish.String() )
and we can now validate a dictionary
>>> schema.validate( {'myfield': 12, 'myotherfield': 'foo'} )
.. note:: The title and description are used by Formish as the label and description of each field.
Declarative Schema Generation
-----------------------------
This will be familiar to many developers:
>>> class Name(Structure):
... title = String()
... first = String(title="First Name")
... last = String(title="Last Name")
Validation
==========
See the validatish module documentation to learn more about the validators available.
>>> import validatish
>>> schema = schemaish.Integer(validator=validatish.Required())
>>> schema.validate(None)
...schemaish.attr.Invalid: is required
>>> schema.validate(10)
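Putting the two styles together, a short sketch, assuming schemaish and validatish are installed and that a declaratively defined structure validates just like a procedural one:
>>> import schemaish, validatish
>>> class Person(schemaish.Structure):
...     name = schemaish.String(validator=validatish.Required())
...     age = schemaish.Integer()
>>> Person().validate({'name': 'Ada', 'age': 36})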
|
schemaish
|
/schemaish-0.5.6.tar.gz/schemaish-0.5.6/docs-build/introduction.rst
|
introduction.rst
|
| 0.800887 | 0.522202 |
function count_previous_fields(o) {
var f = o.prevAll('.field').length;
var g = o.prevAll('.group').length;
if (f > g) {
return f;
} else {
return g;
};
};
function create_addlinks(o) {
o.find('.adder').each(function() {
$(this).before('<a class="adderlink">Add</a>');
});
};
function get_sequence_numbers(segments, l) {
var result = Array();
for (var i = 0; i < segments.length; i++) {
var segment = segments[i];
if (isNaN(parseInt(segment)) == false) {
result.push(segment);
}
}
result.push(l);
return result
}
function replace_stars(original, nums, divider) {
var result = Array();
var segments = original.split(divider);
var n = 0;
for (var i = 0; i < segments.length; i++) {
var segment = segments[i];
if ((segment == '*' || isNaN(parseInt(segment)) == false) && n < nums.length) {
// If the segment is a * or a number then we replace it with the right number (the target number is probably right anyway)
result.push(nums[n]);
n=n+1;
} else {
// If not then we just push the segment
result.push(segment);
}
}
return result.join(divider);
}
function construct(start_segments, n, remainder, divider, strip) {
var remainder_bits = remainder.split(divider);
var remainder = remainder_bits.slice(1,remainder_bits.length-strip).join(divider);
var result = Array();
for (var i = 0; i < start_segments.length; i++) {
var segment = start_segments[i];
if (segment != '') {
result.push(segment);
}
}
result.push(n);
if (remainder != '') {
var out = result.join(divider)+divider+remainder;
} else {
var out = result.join(divider);
}
return out
};
function convert_id_to_name(s) {
var segments=s.split('-');
var out = segments.slice(1,segments.length).join('.');
return out
}
function renumber_sequences(o) {
var n = 0;
var previous_seqid_prefix = '';
o.find('.sequence > div').each( function () {
var seqid = $(this).parent().attr('id');
var seqid_prefix = seqid.substr(0,seqid.length-5);
if (seqid_prefix != previous_seqid_prefix) {
n = 0;
} else {
n=n+1;
}
// replace id occurrences
var thisid = $(this).attr('id');
var newid = seqid_prefix + n + '-field';
$(this).attr('id',newid);
// Replace 'for' occurrences
$(this).find("[for^='"+seqid_prefix+"']").each( function () {
var name = $(this).attr('for');
$(this).text(n);
var name_remainder = name.substring(seqid_prefix.length, name.length);
$(this).attr('for', construct(seqid_prefix.split('-'),n,name_remainder,'-', 1));
});
// Replace 'id' occurrences
$(this).find("[id^='"+seqid_prefix+"']").each( function () {
var name = $(this).attr('id');
var name_remainder = name.substring(seqid_prefix.length, name.length);
$(this).attr('id', construct(seqid_prefix.split('-'),n,name_remainder,'-', 1));
});
// replace 'name' occurrences
$(this).find("[name^='"+convert_id_to_name(seqid_prefix)+"']").each( function () {
var name = $(this).attr('name');
var name_remainder = name.substring(convert_id_to_name(seqid_prefix).length, name.length);
$(this).attr('name', construct(convert_id_to_name(seqid_prefix).split('.'),n,name_remainder,'.', 1));
});
previous_seqid_prefix = seqid_prefix;
});
o.find('.sequence > fieldset').each( function () {
var seqid = $(this).parent().attr('id');
var seqid_prefix = seqid.substr(0,seqid.length-5);
if (seqid_prefix != previous_seqid_prefix) {
n = 0;
} else {
n=n+1;
}
// replace id occurrences
var thisid = $(this).attr('id');
$(this).find('> legend').text(n);
var newid = seqid_prefix + n + '-field';
$(this).attr('id',newid);
// Replace 'for' occurrences
$(this).find("[for^='"+seqid_prefix+"']").each( function () {
var name = $(this).attr('for');
var name_remainder = name.substring(seqid_prefix.length, name.length);
$(this).attr('for', construct(seqid_prefix.split('-'),n,name_remainder,'-', 0));
});
// Replace 'id' occurrences
$(this).find("[id^='"+seqid_prefix+"']").each( function () {
var name = $(this).attr('id');
var name_remainder = name.substring(seqid_prefix.length, name.length);
$(this).attr('id', construct(seqid_prefix.split('-'),n,name_remainder,'-', 0));
});
// replace 'name' occurrences
$(this).find("[name^='"+convert_id_to_name(seqid_prefix)+"']").each( function () {
var name = $(this).attr('name');
var name_remainder = name.substring(convert_id_to_name(seqid_prefix).length, name.length);
$(this).attr('name', construct(convert_id_to_name(seqid_prefix).split('.'),n,name_remainder,'.',0));
});
previous_seqid_prefix = seqid_prefix;
});
}
function add_mousedown_to_addlinks(o) {
o.find('.adderlink').mousedown( function() {
// Get the base64 encoded template
var code = $(this).next('.adder').val();
// Find out how many fields we already have
var l = count_previous_fields($(this).next('.adder'));
// Get some variable to help with replacing (originalname, originalid, name, id)
var originalname = $(this).next('.adder').attr('name');
var segments = originalname.split('.');
// Get the numbers used in the originalname
var seqnums = get_sequence_numbers(segments, l);
var originalid = $(o).attr('id')+'-'+segments.join('-');
segments[ segments.length -1 ] = l;
var name = segments.join('.');
var id = $(o).attr('id')+'-'+segments.join('-');
// Decode the base64
var html = $.base64Decode(code);
// Add the links and mousedowns to this generated code
var h = $(html);
create_addlinks(h);
add_mousedown_to_addlinks(h);
h.find("[name]").each( function () {
var newname = replace_stars($(this).attr('name'), seqnums, '.');
$(this).attr('name', newname );
});
var newid = replace_stars(h.attr('id'),seqnums,'-')
h.attr('id',newid);
h.find("[id]").each( function () {
var newid = replace_stars($(this).attr('id'),seqnums, '-');
$(this).attr('id', newid );
});
h.find("[for]").each( function () {
var newid = replace_stars($(this).attr('for'),seqnums, '-');
$(this).attr('for', newid );
if ($(this).text() == '*') {
$(this).text(l);
}
});
h.find("label[for='"+id+"']").text(l);
h.find("legend:contains('*')").text(l);
$(this).before(h);
add_sortables($('form'));
add_remove_buttons($(this).parent().parent());
});
};
function add_remove_buttons(o) {
o.find('.sequence.sequencecontrols > div > label').each( function() {
if ($(this).next().text() != 'x') {
var x = $('<span class="remove">x</span>');
$(this).after(x);
x.mousedown(function () {
$(this).parent().remove();
renumber_sequences($('form'));
add_sortables($('form'));
});
};
});
o.find('.sequence.sequencecontrols > fieldset > legend').each( function() {
if ($(this).next().text() != 'x') {
var x = $('<span class="remove">x</span>');
$(this).after(x);
x.mousedown(function () {
$(this).parent().remove();
renumber_sequences($('form'));
add_sortables($('form'));
});
};
});
}
function order_changed(e,ui) {
renumber_sequences($('form'));
}
function add_sortables(o) {
//o.find('.sequence > div > label').after('<span class="handle">handle</span>');
//o.find('.sequence > fieldset > legend').after('<span class="handle">handle</span>');
o.find('.sequence').sortable({'items':'> div','stop':order_changed});
}
function formish() {
create_addlinks($('form'));
add_mousedown_to_addlinks($('form'));
add_remove_buttons($('form'));
add_sortables($('form'));
}
|
schemaish
|
/schemaish-0.5.6.tar.gz/schemaish-0.5.6/docs-build/.static/js/formish.js
|
formish.js
|
| 0.213213 | 0.157655 |
var sIFR=new function(){var O=this;var E={ACTIVE:"sIFR-active",REPLACED:"sIFR-replaced",IGNORE:"sIFR-ignore",ALTERNATE:"sIFR-alternate",CLASS:"sIFR-class",LAYOUT:"sIFR-layout",FLASH:"sIFR-flash",FIX_FOCUS:"sIFR-fixfocus",DUMMY:"sIFR-dummy"};E.IGNORE_CLASSES=[E.REPLACED,E.IGNORE,E.ALTERNATE];this.MIN_FONT_SIZE=6;this.MAX_FONT_SIZE=126;this.FLASH_PADDING_BOTTOM=5;this.VERSION="436";this.isActive=false;this.isEnabled=true;this.fixHover=true;this.autoInitialize=true;this.setPrefetchCookie=true;this.cookiePath="/";this.domains=[];this.forceWidth=true;this.fitExactly=false;this.forceTextTransform=true;this.useDomLoaded=true;this.useStyleCheck=false;this.hasFlashClassSet=false;this.repaintOnResize=true;this.replacements=[];var L=0;var R=false;function Y(){}function D(c){function d(e){return e.toLocaleUpperCase()}this.normalize=function(e){return e.replace(/\n|\r|\xA0/g,D.SINGLE_WHITESPACE).replace(/\s+/g,D.SINGLE_WHITESPACE)};this.textTransform=function(e,f){switch(e){case"uppercase":return f.toLocaleUpperCase();case"lowercase":return f.toLocaleLowerCase();case"capitalize":return f.replace(/^\w|\s\w/g,d)}return f};this.toHexString=function(e){if(e.charAt(0)!="#"||e.length!=4&&e.length!=7){return e}e=e.substring(1);return"0x"+(e.length==3?e.replace(/(.)(.)(.)/,"$1$1$2$2$3$3"):e)};this.toJson=function(g,f){var e="";switch(typeof(g)){case"string":e='"'+f(g)+'"';break;case"number":case"boolean":e=g.toString();break;case"object":e=[];for(var h in g){if(g[h]==Object.prototype[h]){continue}e.push('"'+h+'":'+this.toJson(g[h]))}e="{"+e.join(",")+"}";break}return e};this.convertCssArg=function(e){if(!e){return{}}if(typeof(e)=="object"){if(e.constructor==Array){e=e.join("")}else{return e}}var l={};var m=e.split("}");for(var h=0;h<m.length;h++){var k=m[h].match(/([^\s{]+)\s*\{(.+)\s*;?\s*/);if(!k||k.length!=3){continue}if(!l[k[1]]){l[k[1]]={}}var g=k[2].split(";");for(var f=0;f<g.length;f++){var n=g[f].match(/\s*([^:\s]+)\s*\:\s*([^;]+)/);if(!n||n.length!=3){continue}l[k[1]][n[1]]=n[2].replace(/\s+$/,"")}}return l};this.extractFromCss=function(g,f,i,e){var h=null;if(g&&g[f]&&g[f][i]){h=g[f][i];if(e){delete g[f][i]}}return h};this.cssToString=function(f){var g=[];for(var e in f){var j=f[e];if(j==Object.prototype[e]){continue}g.push(e,"{");for(var i in j){if(j[i]==Object.prototype[i]){continue}var h=j[i];if(D.UNIT_REMOVAL_PROPERTIES[i]){h=parseInt(h,10)}g.push(i,":",h,";")}g.push("}")}return g.join("")};this.escape=function(e){return escape(e).replace(/\+/g,"%2B")};this.encodeVars=function(e){return e.join("&").replace(/%/g,"%25")};this.copyProperties=function(g,f){for(var e in g){if(f[e]===undefined){f[e]=g[e]}}return f};this.domain=function(){var f="";try{f=document.domain}catch(g){}return f};this.domainMatches=function(h,g){if(g=="*"||g==h){return true}var f=g.lastIndexOf("*");if(f>-1){g=g.substr(f+1);var e=h.lastIndexOf(g);if(e>-1&&(e+g.length)==h.length){return true}}return false};this.uriEncode=function(e){return encodeURI(decodeURIComponent(e))};this.delay=function(f,h,g){var e=Array.prototype.slice.call(arguments,3);setTimeout(function(){h.apply(g,e)},f)}}D.UNIT_REMOVAL_PROPERTIES={leading:true,"margin-left":true,"margin-right":true,"text-indent":true};D.SINGLE_WHITESPACE=" ";function U(e){var d=this;function c(g,j,h){var k=d.getStyleAsInt(g,j,e.ua.ie);if(k==0){k=g[h];for(var f=3;f<arguments.length;f++){k-=d.getStyleAsInt(g,arguments[f],true)}}return k}this.getBody=function(){return document.getElementsByTagName("body")[0]||null};this.querySelectorAll=function(f){return 
window.parseSelector(f)};this.addClass=function(f,g){if(g){g.className=((g.className||"")==""?"":g.className+" ")+f}};this.removeClass=function(f,g){if(g){g.className=g.className.replace(new RegExp("(^|\\s)"+f+"(\\s|$)"),"").replace(/^\s+|(\s)\s+/g,"$1")}};this.hasClass=function(f,g){return new RegExp("(^|\\s)"+f+"(\\s|$)").test(g.className)};this.hasOneOfClassses=function(h,g){for(var f=0;f<h.length;f++){if(this.hasClass(h[f],g)){return true}}return false};this.ancestorHasClass=function(g,f){g=g.parentNode;while(g&&g.nodeType==1){if(this.hasClass(f,g)){return true}g=g.parentNode}return false};this.create=function(f,g){var h=document.createElementNS?document.createElementNS(U.XHTML_NS,f):document.createElement(f);if(g){h.className=g}return h};this.getComputedStyle=function(h,i){var f;if(document.defaultView&&document.defaultView.getComputedStyle){var g=document.defaultView.getComputedStyle(h,null);f=g?g[i]:null}else{if(h.currentStyle){f=h.currentStyle[i]}}return f||""};this.getStyleAsInt=function(g,i,f){var h=this.getComputedStyle(g,i);if(f&&!/px$/.test(h)){return 0}return parseInt(h)||0};this.getWidthFromStyle=function(f){return c(f,"width","offsetWidth","paddingRight","paddingLeft","borderRightWidth","borderLeftWidth")};this.getHeightFromStyle=function(f){return c(f,"height","offsetHeight","paddingTop","paddingBottom","borderTopWidth","borderBottomWidth")};this.getDimensions=function(j){var h=j.offsetWidth;var f=j.offsetHeight;if(h==0||f==0){for(var g=0;g<j.childNodes.length;g++){var k=j.childNodes[g];if(k.nodeType!=1){continue}h=Math.max(h,k.offsetWidth);f=Math.max(f,k.offsetHeight)}}return{width:h,height:f}};this.getViewport=function(){return{width:window.innerWidth||document.documentElement.clientWidth||this.getBody().clientWidth,height:window.innerHeight||document.documentElement.clientHeight||this.getBody().clientHeight}};this.blurElement=function(g){try{g.blur();return}catch(h){}var f=this.create("input");f.style.width="0px";f.style.height="0px";g.parentNode.appendChild(f);f.focus();f.blur();f.parentNode.removeChild(f)}}U.XHTML_NS="http://www.w3.org/1999/xhtml";function H(r){var g=navigator.userAgent.toLowerCase();var q=(navigator.product||"").toLowerCase();var h=navigator.platform.toLowerCase();this.parseVersion=H.parseVersion;this.macintosh=/^mac/.test(h);this.windows=/^win/.test(h);this.linux=/^linux/.test(h);this.quicktime=false;this.opera=/opera/.test(g);this.konqueror=/konqueror/.test(g);this.ie=false/*@cc_on||true@*/;this.ieSupported=this.ie&&!/ppc|smartphone|iemobile|msie\s5\.5/.test(g)/*@cc_on&&@_jscript_version>=5.5@*/;this.ieWin=this.ie&&this.windows/*@cc_on&&@_jscript_version>=5.1@*/;this.windows=this.windows&&(!this.ie||this.ieWin);this.ieMac=this.ie&&this.macintosh/*@cc_on&&@_jscript_version<5.1@*/;this.macintosh=this.macintosh&&(!this.ie||this.ieMac);this.safari=/safari/.test(g);this.webkit=!this.konqueror&&/applewebkit/.test(g);this.khtml=this.webkit||this.konqueror;this.gecko=!this.khtml&&q=="gecko";this.ieVersion=this.ie&&/.*msie\s(\d\.\d)/.exec(g)?this.parseVersion(RegExp.$1):"0";this.operaVersion=this.opera&&/.*opera(\s|\/)(\d+\.\d+)/.exec(g)?this.parseVersion(RegExp.$2):"0";this.webkitVersion=this.webkit&&/.*applewebkit\/(\d+).*/.exec(g)?this.parseVersion(RegExp.$1):"0";this.geckoVersion=this.gecko&&/.*rv:\s*([^\)]+)\)\s+gecko/.exec(g)?this.parseVersion(RegExp.$1):"0";this.konquerorVersion=this.konqueror&&/.*konqueror\/([\d\.]+).*/.exec(g)?this.parseVersion(RegExp.$1):"0";this.flashVersion=0;if(this.ieWin){var l;var o=false;try{l=new 
ActiveXObject("ShockwaveFlash.ShockwaveFlash.7")}catch(m){try{l=new ActiveXObject("ShockwaveFlash.ShockwaveFlash.6");this.flashVersion=this.parseVersion("6");l.AllowScriptAccess="always"}catch(m){o=this.flashVersion==this.parseVersion("6")}if(!o){try{l=new ActiveXObject("ShockwaveFlash.ShockwaveFlash")}catch(m){}}}if(!o&&l){this.flashVersion=this.parseVersion((l.GetVariable("$version")||"").replace(/^\D+(\d+)\D+(\d+)\D+(\d+).*/g,"$1.$2.$3"))}}else{if(navigator.plugins&&navigator.plugins["Shockwave Flash"]){var n=navigator.plugins["Shockwave Flash"].description.replace(/^.*\s+(\S+\s+\S+$)/,"$1");var p=n.replace(/^\D*(\d+\.\d+).*$/,"$1");if(/r/.test(n)){p+=n.replace(/^.*r(\d*).*$/,".$1")}else{if(/d/.test(n)){p+=".0"}}this.flashVersion=this.parseVersion(p);var j=false;for(var k=0,c=this.flashVersion>=H.MIN_FLASH_VERSION;c&&k<navigator.mimeTypes.length;k++){var f=navigator.mimeTypes[k];if(f.type!="application/x-shockwave-flash"){continue}if(f.enabledPlugin){j=true;if(f.enabledPlugin.description.toLowerCase().indexOf("quicktime")>-1){c=false;this.quicktime=true}}}if(this.quicktime||!j){this.flashVersion=this.parseVersion("0")}}}this.flash=this.flashVersion>=H.MIN_FLASH_VERSION;this.transparencySupport=this.macintosh||this.windows||this.linux&&(this.flashVersion>=this.parseVersion("10")&&(this.gecko&&this.geckoVersion>=this.parseVersion("1.9")||this.opera));this.computedStyleSupport=this.ie||!!document.defaultView.getComputedStyle;this.fixFocus=this.gecko&&this.windows;this.nativeDomLoaded=this.gecko||this.webkit&&this.webkitVersion>=this.parseVersion("525")||this.konqueror&&this.konquerorMajor>this.parseVersion("03")||this.opera;this.mustCheckStyle=this.khtml||this.opera;this.forcePageLoad=this.webkit&&this.webkitVersion<this.parseVersion("523");this.properDocument=typeof(document.location)=="object";this.supported=this.flash&&this.properDocument&&(!this.ie||this.ieSupported)&&this.computedStyleSupport&&(!this.opera||this.operaVersion>=this.parseVersion("9.61"))&&(!this.webkit||this.webkitVersion>=this.parseVersion("412"))&&(!this.gecko||this.geckoVersion>=this.parseVersion("1.8.0.12"))&&(!this.konqueror)}H.parseVersion=function(c){return c.replace(/(^|\D)(\d+)(?=\D|$)/g,function(f,e,g){f=e;for(var d=4-g.length;d>=0;d--){f+="0"}return f+g})};H.MIN_FLASH_VERSION=H.parseVersion("8");function F(c){this.fix=c.ua.ieWin&&window.location.hash!="";var d;this.cache=function(){d=document.title};function e(){document.title=d}this.restore=function(){if(this.fix){setTimeout(e,0)}}}function S(l){var e=null;function c(){try{if(l.ua.ie||document.readyState!="loaded"&&document.readyState!="complete"){document.documentElement.doScroll("left")}}catch(n){return setTimeout(c,10)}i()}function i(){if(l.useStyleCheck){h()}else{if(!l.ua.mustCheckStyle){d(null,true)}}}function h(){e=l.dom.create("div",E.DUMMY);l.dom.getBody().appendChild(e);m()}function m(){if(l.dom.getComputedStyle(e,"marginLeft")=="42px"){g()}else{setTimeout(m,10)}}function g(){if(e&&e.parentNode){e.parentNode.removeChild(e)}e=null;d(null,true)}function d(n,o){l.initialize(o);if(n&&n.type=="load"){if(document.removeEventListener){document.removeEventListener("DOMContentLoaded",d,false)}if(window.removeEventListener){window.removeEventListener("load",d,false)}}}function j(){l.prepareClearReferences();if(document.readyState=="interactive"){document.attachEvent("onstop",f);setTimeout(function(){document.detachEvent("onstop",f)},0)}}function f(){document.detachEvent("onstop",f);k()}function 
k(){l.clearReferences()}this.attach=function(){if(window.addEventListener){window.addEventListener("load",d,false)}else{window.attachEvent("onload",d)}if(!l.useDomLoaded||l.ua.forcePageLoad||l.ua.ie&&window.top!=window){return}if(l.ua.nativeDomLoaded){document.addEventListener("DOMContentLoaded",i,false)}else{if(l.ua.ie||l.ua.khtml){c()}}};this.attachUnload=function(){if(!l.ua.ie){return}window.attachEvent("onbeforeunload",j);window.attachEvent("onunload",k)}}var Q="sifrFetch";function N(c){var e=false;this.fetchMovies=function(f){if(c.setPrefetchCookie&&new RegExp(";?"+Q+"=true;?").test(document.cookie)){return}try{e=true;d(f)}catch(g){}if(c.setPrefetchCookie){document.cookie=Q+"=true;path="+c.cookiePath}};this.clear=function(){if(!e){return}try{var f=document.getElementsByTagName("script");for(var g=f.length-1;g>=0;g--){var h=f[g];if(h.type=="sifr/prefetch"){h.parentNode.removeChild(h)}}}catch(j){}};function d(f){for(var g=0;g<f.length;g++){document.write('<script defer type="sifr/prefetch" src="'+f[g].src+'"><\/script>')}}}function b(e){var g=e.ua.ie;var f=g&&e.ua.flashVersion<e.ua.parseVersion("9.0.115");var d={};var c={};this.fixFlash=f;this.register=function(h){if(!g){return}var i=h.getAttribute("id");this.cleanup(i,false);c[i]=h;delete d[i];if(f){window[i]=h}};this.reset=function(){if(!g){return false}for(var j=0;j<e.replacements.length;j++){var h=e.replacements[j];var k=c[h.id];if(!d[h.id]&&(!k.parentNode||k.parentNode.nodeType==11)){h.resetMovie();d[h.id]=true}}return true};this.cleanup=function(l,h){var i=c[l];if(!i){return}for(var k in i){if(typeof(i[k])=="function"){i[k]=null}}c[l]=null;if(f){window[l]=null}if(i.parentNode){if(h&&i.parentNode.nodeType==1){var j=document.createElement("div");j.style.width=i.offsetWidth+"px";j.style.height=i.offsetHeight+"px";i.parentNode.replaceChild(j,i)}else{i.parentNode.removeChild(i)}}};this.prepareClearReferences=function(){if(!f){return}__flash_unloadHandler=function(){};__flash_savedUnloadHandler=function(){}};this.clearReferences=function(){if(f){var j=document.getElementsByTagName("object");for(var h=j.length-1;h>=0;h--){c[j[h].getAttribute("id")]=j[h]}}for(var k in c){if(Object.prototype[k]!=c[k]){this.cleanup(k,true)}}}}function K(d,g,f,c,e){this.sIFR=d;this.id=g;this.vars=f;this.movie=null;this.__forceWidth=c;this.__events=e;this.__resizing=0}K.prototype={getFlashElement:function(){return document.getElementById(this.id)},getAlternate:function(){return document.getElementById(this.id+"_alternate")},getAncestor:function(){var c=this.getFlashElement().parentNode;return !this.sIFR.dom.hasClass(E.FIX_FOCUS,c)?c:c.parentNode},available:function(){var c=this.getFlashElement();return c&&c.parentNode},call:function(c){var d=this.getFlashElement();if(!d[c]){return false}return Function.prototype.apply.call(d[c],d,Array.prototype.slice.call(arguments,1))},attempt:function(){if(!this.available()){return false}try{this.call.apply(this,arguments)}catch(c){if(this.sIFR.debug){throw c}return false}return true},updateVars:function(c,e){for(var d=0;d<this.vars.length;d++){if(this.vars[d].split("=")[0]==c){this.vars[d]=c+"="+e;break}}var f=this.sIFR.util.encodeVars(this.vars);this.movie.injectVars(this.getFlashElement(),f);this.movie.injectVars(this.movie.html,f)},storeSize:function(c,d){this.movie.setSize(c,d);this.updateVars(c,d)},fireEvent:function(c){if(this.available()&&this.__events[c]){this.sIFR.util.delay(0,this.__events[c],this,this)}},resizeFlashElement:function(c,d,e){if(!this.available()){return}this.__resizing++;var 
f=this.getFlashElement();f.setAttribute("height",c);this.getAncestor().style.minHeight="";this.updateVars("renderheight",c);this.storeSize("height",c);if(d!==null){f.setAttribute("width",d);this.movie.setSize("width",d)}if(this.__events.onReplacement){this.sIFR.util.delay(0,this.__events.onReplacement,this,this);delete this.__events.onReplacement}if(e){this.sIFR.util.delay(0,function(){this.attempt("scaleMovie");this.__resizing--},this)}else{this.__resizing--}},blurFlashElement:function(){if(this.available()){this.sIFR.dom.blurElement(this.getFlashElement())}},resetMovie:function(){this.sIFR.util.delay(0,this.movie.reset,this.movie,this.getFlashElement(),this.getAlternate())},resizeAfterScale:function(){if(this.available()&&this.__resizing==0){this.sIFR.util.delay(0,this.resize,this)}},resize:function(){if(!this.available()){return}this.__resizing++;var g=this.getFlashElement();var f=g.offsetWidth;if(f==0){return}var e=g.getAttribute("width");var l=g.getAttribute("height");var m=this.getAncestor();var o=this.sIFR.dom.getHeightFromStyle(m);g.style.width="1px";g.style.height="1px";m.style.minHeight=o+"px";var c=this.getAlternate().childNodes;var n=[];for(var k=0;k<c.length;k++){var h=c[k].cloneNode(true);n.push(h);m.appendChild(h)}var d=this.sIFR.dom.getWidthFromStyle(m);for(var k=0;k<n.length;k++){m.removeChild(n[k])}g.style.width=g.style.height=m.style.minHeight="";g.setAttribute("width",this.__forceWidth?d:e);g.setAttribute("height",l);if(sIFR.ua.ie){g.style.display="none";var j=g.offsetHeight;g.style.display=""}if(d!=f){if(this.__forceWidth){this.storeSize("width",d)}this.attempt("resize",d)}this.__resizing--},replaceText:function(g,j){var d=this.sIFR.util.escape(g);if(!this.attempt("replaceText",d)){return false}this.updateVars("content",d);var f=this.getAlternate();if(j){while(f.firstChild){f.removeChild(f.firstChild)}for(var c=0;c<j.length;c++){f.appendChild(j[c])}}else{try{f.innerHTML=g}catch(h){}}return true},changeCSS:function(c){c=this.sIFR.util.escape(this.sIFR.util.cssToString(this.sIFR.util.convertCssArg(c)));this.updateVars("css",c);return this.attempt("changeCSS",c)},remove:function(){if(this.movie&&this.available()){this.movie.remove(this.getFlashElement(),this.id)}}};var X=new function(){this.create=function(p,n,j,i,f,e,g,o,l,h,m){var k=p.ua.ie?d:c;return new k(p,n,j,i,f,e,g,o,["flashvars",l,"wmode",h,"bgcolor",m,"allowScriptAccess","always","quality","best"])};function c(s,q,l,h,f,e,g,r,n){var m=s.dom.create("object",E.FLASH);var p=["type","application/x-shockwave-flash","id",f,"name",f,"data",e,"width",g,"height",r];for(var o=0;o<p.length;o+=2){m.setAttribute(p[o],p[o+1])}var j=m;if(h){j=W.create("div",E.FIX_FOCUS);j.appendChild(m)}for(var o=0;o<n.length;o+=2){if(n[o]=="name"){continue}var k=W.create("param");k.setAttribute("name",n[o]);k.setAttribute("value",n[o+1]);m.appendChild(k)}l.style.minHeight=r+"px";while(l.firstChild){l.removeChild(l.firstChild)}l.appendChild(j);this.html=j.cloneNode(true)}c.prototype={reset:function(e,f){e.parentNode.replaceChild(this.html.cloneNode(true),e)},remove:function(e,f){e.parentNode.removeChild(e)},setSize:function(e,f){this.html.setAttribute(e,f)},injectVars:function(e,g){var h=e.getElementsByTagName("param");for(var f=0;f<h.length;f++){if(h[f].getAttribute("name")=="flashvars"){h[f].setAttribute("value",g);break}}}};function d(p,n,j,h,f,e,g,o,k){this.dom=p.dom;this.broken=n;this.html='<object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" id="'+f+'" width="'+g+'" height="'+o+'" class="'+E.FLASH+'"><param name="movie" 
value="'+e+'"></param></object>';var m="";for(var l=0;l<k.length;l+=2){m+='<param name="'+k[l]+'" value="'+k[l+1]+'"></param>'}this.html=this.html.replace(/(<\/object>)/,m+"$1");j.style.minHeight=o+"px";j.innerHTML=this.html;this.broken.register(j.firstChild)}d.prototype={reset:function(f,g){g=g.cloneNode(true);var e=f.parentNode;e.innerHTML=this.html;this.broken.register(e.firstChild);e.appendChild(g)},remove:function(e,f){this.broken.cleanup(f)},setSize:function(e,f){this.html=this.html.replace(e=="height"?/(height)="\d+"/:/(width)="\d+"/,'$1="'+f+'"')},injectVars:function(e,f){if(e!=this.html){return}this.html=this.html.replace(/(flashvars(=|\"\svalue=)\")[^\"]+/,"$1"+f)}}};this.errors=new Y(O);var A=this.util=new D(O);var W=this.dom=new U(O);var T=this.ua=new H(O);var G={fragmentIdentifier:new F(O),pageLoad:new S(O),prefetch:new N(O),brokenFlashIE:new b(O)};this.__resetBrokenMovies=G.brokenFlashIE.reset;var J={kwargs:[],replaceAll:function(d){for(var c=0;c<this.kwargs.length;c++){O.replace(this.kwargs[c])}if(!d){this.kwargs=[]}}};this.activate=function(){if(!T.supported||!this.isEnabled||this.isActive||!C()||a()){return}G.prefetch.fetchMovies(arguments);this.isActive=true;this.setFlashClass();G.fragmentIdentifier.cache();G.pageLoad.attachUnload();if(!this.autoInitialize){return}G.pageLoad.attach()};this.setFlashClass=function(){if(this.hasFlashClassSet){return}W.addClass(E.ACTIVE,W.getBody()||document.documentElement);this.hasFlashClassSet=true};this.removeFlashClass=function(){if(!this.hasFlashClassSet){return}W.removeClass(E.ACTIVE,W.getBody());W.removeClass(E.ACTIVE,document.documentElement);this.hasFlashClassSet=false};this.initialize=function(c){if(!this.isActive||!this.isEnabled){return}if(R){if(!c){J.replaceAll(false)}return}R=true;J.replaceAll(c);if(O.repaintOnResize){if(window.addEventListener){window.addEventListener("resize",Z,false)}else{window.attachEvent("onresize",Z)}}G.prefetch.clear()};this.replace=function(x,u){if(!T.supported){return}if(u){x=A.copyProperties(x,u)}if(!R){return J.kwargs.push(x)}if(this.onReplacementStart){this.onReplacementStart(x)}var AM=x.elements||W.querySelectorAll(x.selector);if(AM.length==0){return}var w=M(x.src);var AR=A.convertCssArg(x.css);var v=B(x.filters);var AN=x.forceSingleLine===true;var AS=x.preventWrap===true&&!AN;var q=AN||(x.fitExactly==null?this.fitExactly:x.fitExactly)===true;var AD=q||(x.forceWidth==null?this.forceWidth:x.forceWidth)===true;var s=x.ratios||[];var AE=x.pixelFont===true;var r=parseInt(x.tuneHeight)||0;var z=!!x.onRelease||!!x.onRollOver||!!x.onRollOut;if(q){A.extractFromCss(AR,".sIFR-root","text-align",true)}var t=A.extractFromCss(AR,".sIFR-root","font-size",true)||"0";var e=A.extractFromCss(AR,".sIFR-root","background-color",true)||"#FFFFFF";var o=A.extractFromCss(AR,".sIFR-root","kerning",true)||"";var AW=A.extractFromCss(AR,".sIFR-root","opacity",true)||"100";var k=A.extractFromCss(AR,".sIFR-root","cursor",true)||"default";var AP=parseInt(A.extractFromCss(AR,".sIFR-root","leading"))||0;var AJ=x.gridFitType||(A.extractFromCss(AR,".sIFR-root","text-align")=="right")?"subpixel":"pixel";var h=this.forceTextTransform===false?"none":A.extractFromCss(AR,".sIFR-root","text-transform",true)||"none";t=/^\d+(px)?$/.test(t)?parseInt(t):0;AW=parseFloat(AW)<1?100*parseFloat(AW):AW;var AC=x.modifyCss?"":A.cssToString(AR);var 
AG=x.wmode||"";if(!AG){if(x.transparent){AG="transparent"}else{if(x.opaque){AG="opaque"}}}if(AG=="transparent"){if(!T.transparencySupport){AG="opaque"}else{e="transparent"}}else{if(e=="transparent"){e="#FFFFFF"}}for(var AV=0;AV<AM.length;AV++){var AF=AM[AV];if(W.hasOneOfClassses(E.IGNORE_CLASSES,AF)||W.ancestorHasClass(AF,E.ALTERNATE)){continue}var AO=W.getDimensions(AF);var f=AO.height;var c=AO.width;var AA=W.getComputedStyle(AF,"display");if(!f||!c||!AA||AA=="none"){continue}c=W.getWidthFromStyle(AF);var n,AH;if(!t){var AL=I(AF);n=Math.min(this.MAX_FONT_SIZE,Math.max(this.MIN_FONT_SIZE,AL.fontSize));if(AE){n=Math.max(8,8*Math.round(n/8))}AH=AL.lines}else{n=t;AH=1}var d=W.create("span",E.ALTERNATE);var AX=AF.cloneNode(true);AF.parentNode.appendChild(AX);for(var AU=0,AT=AX.childNodes.length;AU<AT;AU++){var m=AX.childNodes[AU];if(!/^(style|script)$/i.test(m.nodeName)){d.appendChild(m.cloneNode(true))}}if(x.modifyContent){x.modifyContent(AX,x.selector)}if(x.modifyCss){AC=x.modifyCss(AR,AX,x.selector)}var p=P(AX,h,x.uriEncode);AX.parentNode.removeChild(AX);if(x.modifyContentString){p.text=x.modifyContentString(p.text,x.selector)}if(p.text==""){continue}var AK=Math.round(AH*V(n,s)*n)+this.FLASH_PADDING_BOTTOM+r;if(AH>1&&AP){AK+=Math.round((AH-1)*AP)}var AB=AD?c:"100%";var AI="sIFR_replacement_"+L++;var AQ=["id="+AI,"content="+A.escape(p.text),"width="+c,"renderheight="+AK,"link="+A.escape(p.primaryLink.href||""),"target="+A.escape(p.primaryLink.target||""),"size="+n,"css="+A.escape(AC),"cursor="+k,"tunewidth="+(x.tuneWidth||0),"tuneheight="+r,"offsetleft="+(x.offsetLeft||""),"offsettop="+(x.offsetTop||""),"fitexactly="+q,"preventwrap="+AS,"forcesingleline="+AN,"antialiastype="+(x.antiAliasType||""),"thickness="+(x.thickness||""),"sharpness="+(x.sharpness||""),"kerning="+o,"gridfittype="+AJ,"flashfilters="+v,"opacity="+AW,"blendmode="+(x.blendMode||""),"selectable="+(x.selectable==null||AG!=""&&!sIFR.ua.macintosh&&sIFR.ua.gecko&&sIFR.ua.geckoVersion>=sIFR.ua.parseVersion("1.9")?"true":x.selectable===true),"fixhover="+(this.fixHover===true),"events="+z,"delayrun="+G.brokenFlashIE.fixFlash,"version="+this.VERSION];var y=A.encodeVars(AQ);var g=new K(O,AI,AQ,AD,{onReplacement:x.onReplacement,onRollOver:x.onRollOver,onRollOut:x.onRollOut,onRelease:x.onRelease});g.movie=X.create(sIFR,G.brokenFlashIE,AF,T.fixFocus&&x.fixFocus,AI,w,AB,AK,y,AG,e);this.replacements.push(g);this.replacements[AI]=g;if(x.selector){if(!this.replacements[x.selector]){this.replacements[x.selector]=[g]}else{this.replacements[x.selector].push(g)}}d.setAttribute("id",AI+"_alternate");AF.appendChild(d);W.addClass(E.REPLACED,AF)}G.fragmentIdentifier.restore()};this.getReplacementByFlashElement=function(d){for(var c=0;c<O.replacements.length;c++){if(O.replacements[c].id==d.getAttribute("id")){return O.replacements[c]}}};this.redraw=function(){for(var c=0;c<O.replacements.length;c++){O.replacements[c].resetMovie()}};this.prepareClearReferences=function(){G.brokenFlashIE.prepareClearReferences()};this.clearReferences=function(){G.brokenFlashIE.clearReferences();G=null;J=null;delete O.replacements};function C(){if(O.domains.length==0){return true}var d=A.domain();for(var c=0;c<O.domains.length;c++){if(A.domainMatches(d,O.domains[c])){return true}}return false}function a(){if(document.location.protocol=="file:"){if(O.debug){O.errors.fire("isFile")}return true}return false}function M(c){if(T.ie&&c.charAt(0)=="/"){c=window.location.toString().replace(/([^:]+)(:\/?\/?)([^\/]+).*/,"$1$2$3")+c}return c}function V(d,e){for(var 
c=0;c<e.length;c+=2){if(d<=e[c]){return e[c+1]}}return e[e.length-1]||1}function B(g){var e=[];for(var d in g){if(g[d]==Object.prototype[d]){continue}var c=g[d];d=[d.replace(/filter/i,"")+"Filter"];for(var f in c){if(c[f]==Object.prototype[f]){continue}d.push(f+":"+A.escape(A.toJson(c[f],A.toHexString)))}e.push(d.join(","))}return A.escape(e.join(";"))}function Z(d){var e=Z.viewport;var c=W.getViewport();if(e&&c.width==e.width&&c.height==e.height){return}Z.viewport=c;if(O.replacements.length==0){return}if(Z.timer){clearTimeout(Z.timer)}Z.timer=setTimeout(function(){delete Z.timer;for(var f=0;f<O.replacements.length;f++){O.replacements[f].resize()}},200)}function I(f){var g=W.getComputedStyle(f,"fontSize");var d=g.indexOf("px")==-1;var e=f.innerHTML;if(d){f.innerHTML="X"}f.style.paddingTop=f.style.paddingBottom=f.style.borderTopWidth=f.style.borderBottomWidth="0px";f.style.lineHeight="2em";f.style.display="block";g=d?f.offsetHeight/2:parseInt(g,10);if(d){f.innerHTML=e}var c=Math.round(f.offsetHeight/(2*g));f.style.paddingTop=f.style.paddingBottom=f.style.borderTopWidth=f.style.borderBottomWidth=f.style.lineHeight=f.style.display="";if(isNaN(c)||!isFinite(c)||c==0){c=1}return{fontSize:g,lines:c}}function P(c,g,s){s=s||A.uriEncode;var q=[],m=[];var k=null;var e=c.childNodes;var o=false,p=false;var j=0;while(j<e.length){var f=e[j];if(f.nodeType==3){var t=A.textTransform(g,A.normalize(f.nodeValue)).replace(/</g,"<");if(o&&p){t=t.replace(/^\s+/,"")}m.push(t);o=/\s$/.test(t);p=false}if(f.nodeType==1&&!/^(style|script)$/i.test(f.nodeName)){var h=[];var r=f.nodeName.toLowerCase();var n=f.className||"";if(/\s+/.test(n)){if(n.indexOf(E.CLASS)>-1){n=n.match("(\\s|^)"+E.CLASS+"-([^\\s$]*)(\\s|$)")[2]}else{n=n.match(/^([^\s]+)/)[1]}}if(n!=""){h.push('class="'+n+'"')}if(r=="a"){var d=s(f.getAttribute("href")||"");var l=f.getAttribute("target")||"";h.push('href="'+d+'"','target="'+l+'"');if(!k){k={href:d,target:l}}}m.push("<"+r+(h.length>0?" ":"")+h.join(" ")+">");p=true;if(f.hasChildNodes()){q.push(j);j=0;e=f.childNodes;continue}else{if(!/^(br|img)$/i.test(f.nodeName)){m.push("</",f.nodeName.toLowerCase(),">")}}}if(q.length>0&&!f.nextSibling){do{j=q.pop();e=f.parentNode.parentNode.childNodes;f=e[j];if(f){m.push("</",f.nodeName.toLowerCase(),">")}}while(j==e.length-1&&q.length>0)}j++}return{text:m.join("").replace(/^\s+|\s+$|\s*(<br>)\s*/g,"$1"),primaryLink:k||{}}}};
var parseSelector=(function(){var B=/\s*,\s*/;var A=/\s*([\s>+~(),]|^|$)\s*/g;var L=/([\s>+~,]|[^(]\+|^)([#.:@])/g;var F=/(^|\))[^\s>+~]/g;var M=/(\)|^)/;var K=/[\s#.:>+~()@]|[^\s#.:>+~()@]+/g;function H(R,P){P=P||document.documentElement;var S=R.split(B),X=[];for(var U=0;U<S.length;U++){var N=[P],W=G(S[U]);for(var T=0;T<W.length;){var Q=W[T++],O=W[T++],V="";if(W[T]=="("){while(W[T++]!=")"&&T<W.length){V+=W[T]}V=V.slice(0,-1)}N=I(N,Q,O,V)}X=X.concat(N)}return X}function G(N){var O=N.replace(A,"$1").replace(L,"$1*$2").replace(F,D);return O.match(K)||[]}function D(N){return N.replace(M,"$1 ")}function I(N,P,Q,O){return(H.selectors[P])?H.selectors[P](N,Q,O):[]}var E={toArray:function(O){var N=[];for(var P=0;P<O.length;P++){N.push(O[P])}return N}};var C={isTag:function(O,N){return(N=="*")||(N.toLowerCase()==O.nodeName.toLowerCase())},previousSiblingElement:function(N){do{N=N.previousSibling}while(N&&N.nodeType!=1);return N},nextSiblingElement:function(N){do{N=N.nextSibling}while(N&&N.nodeType!=1);return N},hasClass:function(N,O){return(O.className||"").match("(^|\\s)"+N+"(\\s|$)")},getByTag:function(N,O){return O.getElementsByTagName(N)}};var J={"#":function(N,P){for(var O=0;O<N.length;O++){if(N[O].getAttribute("id")==P){return[N[O]]}}return[]}," ":function(O,Q){var N=[];for(var P=0;P<O.length;P++){N=N.concat(E.toArray(C.getByTag(Q,O[P])))}return N},">":function(O,R){var N=[];for(var Q=0,S;Q<O.length;Q++){S=O[Q];for(var P=0,T;P<S.childNodes.length;P++){T=S.childNodes[P];if(T.nodeType==1&&C.isTag(T,R)){N.push(T)}}}return N},".":function(O,Q){var N=[];for(var P=0,R;P<O.length;P++){R=O[P];if(C.hasClass([Q],R)){N.push(R)}}return N},":":function(N,P,O){return(H.pseudoClasses[P])?H.pseudoClasses[P](N,O):[]}};H.selectors=J;H.pseudoClasses={};H.util=E;H.dom=C;return H})();
|
schemaish
|
/schemaish-0.5.6.tar.gz/schemaish-0.5.6/docs-build/.static/js/sifr.js
|
sifr.js
|
| 0.021792 | 0.114196 |