/nonebot_plugin_gocqhttp-0.6.12-py3-none-any.whl/nonebot_plugin_gocqhttp/__init__.py
|
import asyncio
from fastapi import FastAPI
from nonebot import get_driver
from nonebot.adapters.onebot.v11 import Adapter
from nonebot.drivers import ReverseDriver
from nonebot.log import default_filter, default_format
import nonebot_plugin_gocqhttp.plugin # noqa: F401
from nonebot_plugin_gocqhttp import web
from nonebot_plugin_gocqhttp.log import LOG_STORAGE, logger
from nonebot_plugin_gocqhttp.plugin_config import config
from nonebot_plugin_gocqhttp.process import (
ACCOUNTS_LEGACY_SAVE_PATH,
ACCOUNTS_SAVE_PATH,
BINARY_PATH,
ProcessesManager,
download_gocq,
)
driver = get_driver()
if (adapter_name := Adapter.get_name()) not in driver._adapters:
raise ValueError(f"Adapter {adapter_name!r} is not registered yet.")
if not isinstance(driver, ReverseDriver) or not isinstance(driver.server_app, FastAPI):
raise NotImplementedError("Only FastAPI reverse driver is supported.")
driver.server_app.mount("/go-cqhttp", web.app, name="go-cqhttp plugin")
@driver.on_startup
async def startup():
loop = asyncio.get_running_loop()
def log_sink(message: str):
loop.create_task(LOG_STORAGE.add(message.rstrip("\n")))
logger.add(log_sink, colorize=True, filter=default_filter, format=default_format)
if config.FORCE_DOWNLOAD or not BINARY_PATH.is_file():
await download_gocq()
ProcessesManager.load_config()
if ACCOUNTS_SAVE_PATH.is_file():
await ProcessesManager.load_saved(
ACCOUNTS_SAVE_PATH, is_dumps=False, ignore_loaded=True
)
elif ACCOUNTS_LEGACY_SAVE_PATH.is_file():
logger.warning("Legacy accounts data detected, converting...")
await ProcessesManager.load_saved(
ACCOUNTS_LEGACY_SAVE_PATH, is_dumps=True, ignore_loaded=True
)
await ProcessesManager.save() # update to new format
await asyncio.gather(
*map(lambda process: process.start(), ProcessesManager.all()),
return_exceptions=True,
)
if tunnel_port := config.TUNNEL_PORT:
try:
from .external_proxy import ProxyServiceManager
await ProxyServiceManager.start(tunnel_port)
except ImportError as e:
logger.opt(colors=True).error(
"Tunnel configured but required dependencies missing: "
f"<r><b>{e}</b></r>"
)
logger.info(
"Startup complete, Web UI has served to "
f"<u><e>http://{driver.config.host}:{driver.config.port}/go-cqhttp/</e></u>"
)
@driver.on_shutdown
async def shutdown():
await asyncio.gather(
*map(lambda process: process.stop(), ProcessesManager.all()),
return_exceptions=True,
)
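# Minimal, standalone sketch of the log-forwarding pattern used in startup():
# a synchronous loguru sink hands each record to the running event loop, which
# appends it to an async store. The _DemoLogStorage class and the demo names
# below are illustrative only; they are not the plugin's LOG_STORAGE code.
class _DemoLogStorage:
    def __init__(self) -> None:
        self.lines: list = []

    async def add(self, line: str) -> None:
        self.lines.append(line)


async def _demo_log_forwarding() -> None:
    storage = _DemoLogStorage()
    demo_loop = asyncio.get_running_loop()

    def demo_sink(message) -> None:
        # loguru calls sinks synchronously, so schedule the coroutine instead.
        demo_loop.create_task(storage.add(str(message).rstrip("\n")))

    sink_id = logger.add(demo_sink)
    logger.info("hello from the demo sink")
    await asyncio.sleep(0)  # give the scheduled task a chance to run
    logger.remove(sink_id)
    assert storage.lines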
|
PypiClean
|
/test_version_powerhouse_helper_7.4-1.1.1-py3-none-any.whl/test_version_powerhouse_helper/Sort_Files.py
|
import os
import shutil
from prettytable import PrettyTable
#Dict for normalize()
transliterate_dict = {'а':'a','б':'b','в':'v','г':'g','д':'d','е':'e','ё':'e',
'ж':'zh','з':'z','и':'i','й':'i','к':'k','л':'l','м':'m','н':'n',
'о':'o','п':'p','р':'r','с':'s','т':'t','у':'u','ф':'f','х':'h',
'ц':'c','ч':'cz','ш':'sh','щ':'scz','ъ':'','ы':'y','ь':'','э':'e',
'ю':'u','я':'ja', 'А':'A','Б':'B','В':'V','Г':'G','Д':'D','Е':'E','Ё':'E',
'Ж':'ZH','З':'Z','И':'I','Й':'I','К':'K','Л':'L','М':'M','Н':'N',
'О':'O','П':'P','Р':'R','С':'S','Т':'T','У':'U','Ф':'F','Х':'H',
'Ц':'C','Ч':'CZ','Ш':'SH','Щ':'Shch','Ъ':'','Ы':'y','Ь':'','Э':'E',
'Ю':'U','Я':'Ya','ґ':'','ї':'', 'є':'','Ґ':'g','Ї':'i',
'Є':'e', '.':'.', 'x':'x', 'X':'X', 'j':'j', 'J':'J', 'w':'w', 'W':'W'}
# List of numbers for the normalize() function
numbers = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
# Folders to skip
ignore_folders = ["images", "video", "documents", "audio", "archives", "other"]
EXTENDS = {
    (".png", ".jpeg", ".jpg", ".svg"): "\\images\\",  # leading dots so the splitext() check below matches
    (".avi", ".mp4", ".mov", ".mkv"): "\\video\\",
    (".doc", ".docx", ".txt", ".pdf", ".xlsx", ".pptx"): "\\documents\\",
    (".mp3", ".ogg", ".wav", ".amr"): "\\audio\\",
    (".zip", ".tar", ".gz"): "\\archives\\",
}
# Per-category counter of sorted files
counter = {category: 0 for category in ignore_folders}
# Function to normalize the file name
def normalize(file_name: str) -> str:
for key in transliterate_dict:
file_name = file_name.replace(key, transliterate_dict.get(key))
for i in file_name:
if (
i not in transliterate_dict.values()
and i not in transliterate_dict.keys()
and i not in numbers
):
file_name = file_name.replace(i, "_")
return file_name
def create_folders(path):
for ignore_folder in ignore_folders:
if ignore_folder not in os.listdir(path):
os.mkdir(path + "\\" + ignore_folder)
return "Folder to sort has been created"
# Recursive folder sort function
def sorting_function(path):
for elem in os.listdir(path):
# The basic part
if len(elem.split(".")) > 1:
for extend, category in EXTENDS.items():
if os.path.splitext(elem)[-1] in extend:
current_file = path + "\\" + elem
new_path = path + category + normalize(elem)
counter[category[1:-1]] += 1
#os.rename(current_file, new_path)
shutil.move(current_file, new_path)
if os.path.splitext(elem)[-1] not in [extend for extends_tup in EXTENDS.keys() for extend in extends_tup]:
current_file = path + "\\" + elem
new_path = path + "\\other\\" + normalize(elem)
counter["other"] += 1
shutil.move(current_file, new_path)
# The recursive part
if (
os.path.isdir(path + "\\" + elem)
and len(os.listdir(path + "\\" + elem)) == 0
and elem not in ignore_folders
):
os.rmdir(path + "\\" + elem)
elif os.path.isdir(path + "\\" + elem) and elem not in ignore_folders:
sorting_function(path + "\\" + elem)
return "The folder has been sorted!"
def start_sorting():
path = input("\033[1mEnter the path to the folder to sort: ")
try:
create_folders(path)
print(sorting_function(path))
except FileNotFoundError:
return "\033[31mThere is no such file\033[0m"
table = PrettyTable()
table.field_names = ["Filetype", "Count"]
for category, count in counter.items():
table.add_row([category, count])
return table
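# Hypothetical usage sketch (not part of the package): create a throwaway
# folder with a few files, sort it, and print the per-category counters.
# The module joins paths with "\\", so this sketch assumes it runs on Windows.
if __name__ == "__main__":
    import tempfile

    demo_dir = tempfile.mkdtemp()
    for demo_name in ("фото.jpg", "song.mp3", "notes.txt", "unknown.xyz"):
        open(os.path.join(demo_dir, demo_name), "w").close()
    create_folders(demo_dir)
    print(sorting_function(demo_dir))
    for category, count in counter.items():
        print(f"{category}: {count}")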
|
PypiClean
|
/django-comments-ink-0.3.0.tar.gz/django-comments-ink-0.3.0/django_comments_ink/static/django_comments_ink/js/comment_form.js
|
export default class CommentForm {
constructor(formWrapper) {
this.formWrapper = formWrapper;
this.init();
}
click_on_post(_) { return this.post("post"); }
click_on_preview(_) { return this.post("preview"); }
init() {
this.formWrapperEl = document.querySelector(this.formWrapper);
this.formEl = this.formWrapperEl.querySelector("form");
const post_btn = this.formEl.elements.post;
const preview_btn = this.formEl.elements.preview;
post_btn.addEventListener("click", (_) => this.post("post"));
preview_btn.addEventListener("click", (_) => this.post("preview"));
// Change the type of the buttons, otherwise the form is submitted.
post_btn.type = "button";
preview_btn.type = "button";
}
disable_btns(value) {
this.formEl.elements.post.disabled = value;
this.formEl.elements.preview.disabled = value;
}
is_valid() {
for (const el of this.formEl.querySelectorAll("[required]")) {
if (!el.reportValidity()) {
el.focus();
return false;
}
}
return true;
}
post(submit_button_name) {
if (!this.is_valid()) {
return;
}
this.disable_btns(true);
// If the <section data-dci="preview">...</section> does exist,
// delete it. If the user clicks again in the "preview" button
// it will be displayed again.
const preview = this.formWrapperEl.querySelector("[data-dci=preview]");
if (preview) {
preview.remove();
}
const formData = new FormData(this.formEl);
if (submit_button_name !== undefined) {
formData.append(submit_button_name, 1);
}
fetch(this.formEl.action, {
method: 'POST',
headers: {
"X-Requested-With": "XMLHttpRequest",
},
body: formData
        }).then(response => {
            if (submit_button_name === "preview") {
                return this.handle_preview_comment_response(response);
            } else if (submit_button_name === "post") {
                return this.handle_post_comment_response(response);
            }
        }).finally(() => this.disable_btns(false)); // Re-enable the buttons only once the response has been handled.
return false; // To prevent calling the action attribute.
}
async handle_preview_comment_response(response) {
const data = await response.json();
if (response.status === 200) {
this.formWrapperEl.innerHTML = data.html;
this.init();
if (data.field_focus) {
this.formEl.querySelector(`[name=${data.field_focus}]`).focus();
}
} else if (response.status === 400) {
this.formEl.innerHTML = data.html;
}
}
async handle_post_comment_response(response) {
const data = await response.json();
if (response.status === 200) {
this.formWrapperEl.innerHTML = data.html;
this.init();
if (data.field_focus) {
this.formEl.querySelector(`[name=${data.field_focus}]`).focus();
}
}
else if (
response.status === 201 ||
response.status === 202 ||
response.status === 400
) {
this.formEl.innerHTML = data.html;
}
else if (response.status > 400) {
alert(
"Something went wrong and your comment could not be " +
"processed. Please, reload the page and try again."
);
}
}
}
|
PypiClean
|
/django_books-0.0.3-py3-none-any.whl/django_books/objects/objects.py
|
from datetime import datetime
from lxml import etree
def add_customer(name='bill'):
if name is None:
raise ValueError('Name is a required field')
name = name + datetime.now().strftime('%H%s')
reqXML = """
<?qbxml version="15.0"?>
<QBXML>
<QBXMLMsgsRq onError="stopOnError">
<CustomerAddRq>
<CustomerAdd><Name>{}</Name></CustomerAdd>
</CustomerAddRq>
</QBXMLMsgsRq>
</QBXML>
""".format(name)
return reqXML
def add_credit_card_payment(credit_card='CalOil Card',
vendor='ODI',
date='2022-01-01',
ref_number='3123',
memo='MEMO',
expense_account='',
amount=102.12,
expense_description=''):
reqXML = """
<?qbxml version="15.0"?>
<QBXML>
<QBXMLMsgsRq onError="stopOnError">
<CreditCardChargeAddRq>
<CreditCardChargeAdd> <!-- required -->
<AccountRef> <!-- required -->
<FullName>{credit_card}</FullName> <!-- optional -->
</AccountRef>
<PayeeEntityRef> <!-- optional -->
<FullName >{vendor}</FullName> <!-- optional -->
</PayeeEntityRef>
<TxnDate >{date}</TxnDate> <!-- optional -->
<RefNumber >{ref_number}</RefNumber> <!-- optional -->
<Memo >{memo}</Memo> <!-- optional -->
<ExpenseLineAdd> <!-- optional, may repeat -->
<AccountRef> <!-- optional -->
<FullName >{expense_account}</FullName> <!-- optional -->
</AccountRef>
<Amount >{amount}</Amount> <!-- optional -->
<Memo >{expense_description}</Memo> <!-- optional -->
</ExpenseLineAdd>
</CreditCardChargeAdd>
</CreditCardChargeAddRq>
</QBXMLMsgsRq>
</QBXML>
""".format(credit_card=credit_card,
vendor=vendor,
date=date,
ref_number=ref_number,
memo=memo,
expense_account=expense_account,
amount=amount,
expense_description=expense_description
)
return reqXML
def process_response(response):
qbxml_root = etree.fromstring(response)
assert qbxml_root.tag == 'QBXML'
qbxml_msg_rs = qbxml_root[0]
assert qbxml_msg_rs.tag == 'QBXMLMsgsRs'
response_body_root = qbxml_msg_rs[0]
assert 'statusCode' in response_body_root.attrib
return response_body_root.attrib
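# Illustrative round trip (standalone sketch, not an official example from the
# package): build a CustomerAdd request with add_customer(), then parse a
# hand-written QBXML response with process_response(). The response XML below
# is made up for the sketch.
if __name__ == "__main__":
    print(add_customer("acme"))

    sample_response = b"""
    <QBXML>
      <QBXMLMsgsRs>
        <CustomerAddRs statusCode="0" statusSeverity="Info" statusMessage="Status OK"/>
      </QBXMLMsgsRs>
    </QBXML>
    """
    print(dict(process_response(sample_response)))  # {'statusCode': '0', ...}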
|
PypiClean
|
/v1/model/delete_security_group_rule_request.py
|
import pprint
import re
import six
class DeleteSecurityGroupRuleRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'security_group_rule_id': 'str'
}
attribute_map = {
'security_group_rule_id': 'security_group_rule_id'
}
def __init__(self, security_group_rule_id=None):
"""DeleteSecurityGroupRuleRequest - a model defined in huaweicloud sdk"""
self._security_group_rule_id = None
self.discriminator = None
self.security_group_rule_id = security_group_rule_id
@property
def security_group_rule_id(self):
"""Gets the security_group_rule_id of this DeleteSecurityGroupRuleRequest.
:return: The security_group_rule_id of this DeleteSecurityGroupRuleRequest.
:rtype: str
"""
return self._security_group_rule_id
@security_group_rule_id.setter
def security_group_rule_id(self, security_group_rule_id):
"""Sets the security_group_rule_id of this DeleteSecurityGroupRuleRequest.
:param security_group_rule_id: The security_group_rule_id of this DeleteSecurityGroupRuleRequest.
:type: str
"""
self._security_group_rule_id = security_group_rule_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteSecurityGroupRuleRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
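# Illustrative usage of this generated request model (standalone sketch; the
# rule id below is made up, not a real resource).
if __name__ == "__main__":
    request = DeleteSecurityGroupRuleRequest(
        security_group_rule_id="example-rule-id")
    print(request.to_dict())  # {'security_group_rule_id': 'example-rule-id'}
    print(request)            # pretty-printed via to_str()/__repr__()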
|
PypiClean
|
/mime_parser-1.2.0-py3-none-any.whl/mime_parser/arguments.py
|
from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
from functools import lru_cache
from typing import Final, List, Optional
from mime_parser.iana.registered_mime_types import REGISTERED_TYPES
from mime_parser.logging.logging import SEVERITIES, SEVERITY_NAME_INFO
PROG: Final[str] = "mime_parser"
DESCRIPTION: Final[str] = "Encodes and decodes MIME data"
EPILOG: Final[str] = ""
DEFAULT_SEVERITY: Final[str] = SEVERITY_NAME_INFO
CMD_IANA = "iana"
CMD_LIST = "list"
CMD_ENCODE = "encode"
CMD_DECODE = "decode"
CMDS = (CMD_IANA, CMD_LIST, CMD_ENCODE, CMD_DECODE)
CMD_IANA_HELP: Final[str] = "Prints the MIME types registered with IANA"
CMD_IANA_EPILOG: Final[str] = ""
CMD_LIST_HELP: Final[str] = "Prints a list of installed codecs"
CMD_LIST_EPILOG: Final[str] = ""
CMD_ENCODE_HELP: Final[str] = "Encodes MIME data"
CMD_ENCODE_EPILOG: Final[str] = ""
CMD_DECODE_HELP: Final[str] = "Decodes MIME data"
CMD_DECODE_EPILOG: Final[str] = ""
@lru_cache
def version() -> str:
# [IMPORTANT] Avoid 'circular import' issues
from mime_parser import __version__
return __version__
def add_cmd_iana_parser(subparsers) -> None:
# noinspection SpellCheckingInspection
parser = subparsers.add_parser(
name=CMD_IANA,
help=CMD_IANA_HELP,
formatter_class=RawDescriptionHelpFormatter,
epilog=CMD_IANA_EPILOG,
)
assert isinstance(parser, ArgumentParser)
parser.add_argument(
"family",
default=None,
choices=REGISTERED_TYPES,
nargs="?",
help="Only the entered MIME family type is printed",
)
parser.add_argument(
"--without-template",
action="store_true",
default=False,
help="Suppress the printing of the 'template' attribute",
)
parser.add_argument(
"--without-reference",
action="store_true",
default=False,
help="Suppress the printing of the 'reference' attribute",
)
def add_cmd_list_parser(subparsers) -> None:
# noinspection SpellCheckingInspection
parser = subparsers.add_parser(
name=CMD_LIST,
help=CMD_LIST_HELP,
formatter_class=RawDescriptionHelpFormatter,
epilog=CMD_LIST_EPILOG,
)
assert isinstance(parser, ArgumentParser)
parser.add_argument(
"--without-header",
action="store_true",
default=False,
help="Suppress the printing of the header",
)
def add_cmd_encode_parser(subparsers) -> None:
# noinspection SpellCheckingInspection
parser = subparsers.add_parser(
name=CMD_ENCODE,
help=CMD_ENCODE_HELP,
formatter_class=RawDescriptionHelpFormatter,
epilog=CMD_ENCODE_EPILOG,
)
assert isinstance(parser, ArgumentParser)
parser.add_argument("mime", help="Mime Type")
def add_cmd_decode_parser(subparsers) -> None:
# noinspection SpellCheckingInspection
parser = subparsers.add_parser(
name=CMD_DECODE,
help=CMD_DECODE_HELP,
formatter_class=RawDescriptionHelpFormatter,
epilog=CMD_DECODE_EPILOG,
)
assert isinstance(parser, ArgumentParser)
parser.add_argument("mime", help="Mime Type")
def default_argument_parser() -> ArgumentParser:
parser = ArgumentParser(
prog=PROG,
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument(
"--simple-logging",
"-s",
action="store_true",
default=False,
help="Use simple logging",
)
parser.add_argument(
"--severity",
choices=SEVERITIES,
default=DEFAULT_SEVERITY,
help=f"Logging severity (default: '{DEFAULT_SEVERITY}')",
)
parser.add_argument(
"--verbose",
"-v",
action="count",
default=0,
help="Be more verbose/talkative during the operation",
)
parser.add_argument(
"--version",
"-V",
action="version",
version=version(),
)
subparsers = parser.add_subparsers(dest="cmd")
add_cmd_iana_parser(subparsers)
add_cmd_list_parser(subparsers)
add_cmd_encode_parser(subparsers)
add_cmd_decode_parser(subparsers)
return parser
def get_default_arguments(
cmdline: Optional[List[str]] = None,
namespace: Optional[Namespace] = None,
) -> Namespace:
parser = default_argument_parser()
return parser.parse_known_args(cmdline, namespace)[0]
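# Illustrative invocation of the parser built above (standalone sketch; the
# command line below is made up). parse_known_args() keeps unknown tokens out
# of the returned namespace, so stray arguments do not raise.
if __name__ == "__main__":
    demo_args = get_default_arguments(
        ["-v", "--severity", SEVERITY_NAME_INFO, CMD_LIST, "--without-header"]
    )
    print(demo_args.cmd, demo_args.verbose, demo_args.severity, demo_args.without_header)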
|
PypiClean
|
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/sessions/string.py
|
import base64
import ipaddress
import struct
from .abstract import Session
from .memory import MemorySession
from ..crypto import AuthKey
_STRUCT_PREFORMAT = ">B{}sH256s"
CURRENT_VERSION = "1"
class StringSession(MemorySession):
"""
This session file can be easily saved and loaded as a string. According
to the initial design, it contains only the data that is necessary for
successful connection and authentication, so takeout ID is not stored.
    It is intended for use where you don't want to create any on-disk
files but would still like to be able to save and load existing sessions
by other means.
You can use custom `encode` and `decode` functions, if present:
* `encode` definition must be ``def encode(value: bytes) -> str:``.
* `decode` definition must be ``def decode(value: str) -> bytes:``.
"""
def __init__(self, string: str = None):
super().__init__()
if string:
if string[0] != CURRENT_VERSION:
raise ValueError("Not a valid string")
string = string[1:]
ip_len = 4 if len(string) == 352 else 16
self._dc_id, ip, self._port, key = struct.unpack(
_STRUCT_PREFORMAT.format(ip_len), StringSession.decode(string)
)
self._server_address = ipaddress.ip_address(ip).compressed
if any(key):
self._auth_key = AuthKey(key)
@staticmethod
def encode(x: bytes) -> str:
return base64.urlsafe_b64encode(x).decode("ascii")
@staticmethod
def decode(x: str) -> bytes:
return base64.urlsafe_b64decode(x)
def save(self: Session):
if not self.auth_key:
return ""
ip = ipaddress.ip_address(self.server_address).packed
return CURRENT_VERSION + StringSession.encode(
struct.pack(
_STRUCT_PREFORMAT.format(len(ip)),
self.dc_id,
ip,
self.port,
self.auth_key.key,
)
)
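# Illustrative only: the packed layout behind a session string is
# ">B{ip_len}sH256s" (dc_id, packed IP, port, 256-byte auth key), prefixed with
# the version character. This standalone sketch round-trips dummy values, not
# real credentials, without touching the Session machinery.
if __name__ == "__main__":
    demo_ip = ipaddress.ip_address("149.154.167.51").packed
    demo_key = b"\x01" * 256
    packed = struct.pack(_STRUCT_PREFORMAT.format(len(demo_ip)), 2, demo_ip, 443, demo_key)
    session_string = CURRENT_VERSION + StringSession.encode(packed)
    ip_len = 4 if len(session_string[1:]) == 352 else 16
    assert struct.unpack(
        _STRUCT_PREFORMAT.format(ip_len), StringSession.decode(session_string[1:])
    ) == (2, demo_ip, 443, demo_key)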
|
PypiClean
|
/tensorflow_cpu_aws-2.14.0rc1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/tensorflow/python/eager/wrap_function.py
|
"""Prototype decorator for defining legacy-graph-mode functions."""
import weakref
from tensorflow.core.function.polymorphism import function_type as function_type_lib
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.eager.polymorphic_function import atomic_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor as tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.trackable import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(
kwargs.get("name", None), "Variable", skip_on_eager=False) as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
"""Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
# We may get operations here in some cases. TensorInfo is a bit of a
# misnomer if so.
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = nested_structure_coder.decode_proto(spec_proto)
components = [graph.get_tensor_by_name(component.name) for component in
tensor_info.composite_tensor.components]
return spec._from_components(components) # pylint: disable=protected-access
else:
raise ValueError(f"Invalid TensorInfo.encoding: {encoding}. Valid "
"encodings are 'name', 'coo_sparse', and "
"'composite_tensor'.")
def _lift_single_variable(old_variable, graph, variable_holder):
"""Lifts `old_variable` out of the `FuncGraph` `graph`."""
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access
graph.add_capture(new_variable.handle, old_variable.handle)
# Now that we've added the new variable to graph.captures,
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
Args:
graph: The FuncGraph to lift variables from.
variable_holder: A VariableHolder to record the lifted variables in.
"""
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = {id(c) for c in graph.internal_captures}
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and id(v.handle) not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
# it out.
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# Update the FuncGraph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(id(current), current)
if not resource_variable_ops.is_resource_variable(
mutable_collection[index]):
logging.log_first_n(
logging.WARN,
"Unable to create a python object for variable {} because it is "
"a reference variable. It may not be visible to training APIs. "
"If this is a problem, consider rebuilding the SavedModel after "
"running tf.compat.v1.enable_resource_variables().".format(
mutable_collection[index]),
5)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
# properly reflects the new captured inputs.
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
self._signature = signature
function_type = function_type_lib.from_structured_signature(
fn_graph.structured_input_signature,
fn_graph.structured_outputs,
fn_graph.function_captures.capture_types,
)
atomic_fn = atomic_function.from_func_graph(
function._inference_name(fn_graph.name), fn_graph, attrs, function_type
)
super().__init__(atomic_fn)
def _call_impl(self, args, kwargs):
if self._arg_keywords is None:
if kwargs:
raise NotImplementedError(
"Keyword arguments are not supported when calling a "
f"wrap_function-decorated function. Got {kwargs}.")
if self._signature is not None:
args = list(args)
for i, arg in enumerate(args):
if isinstance(self._signature[i], tensor_lib.DenseSpec):
args[i] = ops.convert_to_tensor(arg, self._signature[i].dtype)
return self._call_flat(args, self.captured_inputs)
else:
return super()._call_impl(args, kwargs)
def prune(self, feeds, fetches, name=None, input_signature=None):
"""Extract a subgraph of this function's underlying graph.
Wraps the subgraph in a new `WrappedFunction` object.
Args:
feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
fetches: Possibly-nested Python data structure containing information
about outputs of the target subgraph. Each entry can either be a
`Tensor` object (for data outputs), an `Operation` object (for control
outputs), or a `TensorInfo` proto. Any additional shape/dtype
information provided in a `TensorInfo` and not present in the original
graph will be added to the returned subgraph.
name: (optional) Name to give to the underlying `FuncGraph` of the
returned object. If no name is provided, the graph's name will be
`"pruned"`.
input_signature: (optional) possibly-nested Python data structure
containing `TensorSpec` objects, with which to populate the returned
functions's `FuncGraph`'s `structured_input_signature` field.
Returns:
A new `WrappedFunction` object containing a copy of the portion of this
object's graph that goes from `feeds` to `fetches`.
"""
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
flat_feeds = nest.flatten(feeds, expand_composites=True)
flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
for f in flat_feeds:
if not isinstance(f, tensor_lib.Tensor):
raise ValueError("All memebers of argument `feeds` must be tensors. "
f"Got {f} with type {type(f)}.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = {id(c) for c in self.graph.internal_captures}
flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocessing_callback(fetch):
"""Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
"""
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if (tensor_util.is_tf_type(decoded) or
isinstance(decoded, composite_tensor.CompositeTensor)):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(
fetch, (tensor_lib.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocessing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocessing_callback, fetches)
# Expand composite tensors into their component dense Tensors.
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in flat_feeds + tensor_fetches + operation_fetches:
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
f"from graph {self._func_graph}. Input "
f"{f} is from a different graph {f.graph}.")
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + self.graph.internal_captures,
base_graph=self._func_graph)
# Note that we add the component tensors of any composite tensors to the
# returned function's outputs list; the list must contain these component
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
for external_capture, internal_capture in self.graph.captures:
pruned_graph.add_capture(external_capture, lift_map[internal_capture])
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tf_type(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
"""callback for `nest.map_structure()`"""
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
if input_signature:
# canonicalize the signature before setting
args, kwargs = input_signature
args = () if args is None else args
input_signature = (args, kwargs)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
"""Class for wrapping multiple TF 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard TF 1.X API (
`tf.compat.v1.get_variable` or
`tf.compat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v + x
def increment_var_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
increment_var = g.wrap_function(increment_var_v1,
[tf.TensorSpec([], tf.int32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(tf.constant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a TF 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`tf.compat.v1.get_default_graph` to get the graph object
within a function, or `WrappedGraph.graph` to get the graph outside a
function). Variables created within the function will be added to the
`variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = tf.Variable(0)
op = tf.compat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var)
read_value, _ = fn(tf.constant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X tensorflow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. Pretty sure this is a bug,
    # because the structured outputs don't match up with the outputs...
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub= tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the wrapped
function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs, captures=None):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
captures: (Optional) A dictionary mapping node names in `graph_def` that
should be captured as inputs to tensors containing the value of the
captured inputs.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
graph = ops.get_default_graph()
if captures is not None:
for c in captures:
graph.add_capture(captures[c], graph.get_tensor_by_name(str(c) + ":0"))
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
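# Illustrative only: the usual "import a TF1 GraphDef into TF2" recipe using
# function_from_graph_def defined above. The graph, tensor names and values are
# made up for the sketch; this is not an official TensorFlow example.
if __name__ == "__main__":
  import tensorflow as tf

  legacy_graph = tf.Graph()
  with legacy_graph.as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=[], name="x")
    tf.identity(x * 2.0, name="y")

  doubled = function_from_graph_def(
      legacy_graph.as_graph_def(), inputs="x:0", outputs="y:0")
  print(doubled(tf.constant(3.0)).numpy())  # expected: 6.0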
|
PypiClean
|
/stigg_api_client-0.554.0.tar.gz/stigg_api_client-0.554.0/stigg/generated/operations.py
|
import sgqlc.types
import sgqlc.operation
from . import schema
_schema = schema
_schema_root = _schema.schema
__all__ = ('Operations',)
def fragment_coupon_fragment():
_frag = sgqlc.operation.Fragment(_schema.Coupon, 'CouponFragment')
_frag.id()
_frag.discount_value()
_frag.type()
_frag.additional_meta_data()
_frag.ref_id()
_frag.name()
_frag.description()
_frag.created_at()
_frag.updated_at()
_frag.billing_id()
_frag.billing_link_url()
_frag.status()
_frag_sync_states = _frag.sync_states()
_frag_sync_states.vendor_identifier()
_frag_sync_states.status()
_frag_customers = _frag.customers()
_frag_customers.id()
return _frag
def fragment_price_tier_fragment():
_frag = sgqlc.operation.Fragment(_schema.PriceTier, 'PriceTierFragment')
_frag.up_to()
_frag_unit_price = _frag.unit_price()
_frag_unit_price.amount()
_frag_unit_price.currency()
return _frag
def fragment_price_fragment():
_frag = sgqlc.operation.Fragment(_schema.Price, 'PriceFragment')
_frag.billing_model()
_frag.billing_period()
_frag.billing_id()
_frag.min_unit_quantity()
_frag.max_unit_quantity()
_frag.billing_country_code()
_frag_price = _frag.price()
_frag_price.amount()
_frag_price.currency()
_frag.tiers_mode()
_frag_tiers = _frag.tiers()
_frag_tiers.__fragment__(fragment_price_tier_fragment())
_frag_feature = _frag.feature()
_frag_feature.ref_id()
_frag_feature.feature_units()
_frag_feature.feature_units_plural()
_frag_feature.display_name()
_frag_feature.description()
return _frag
def fragment_total_price_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerSubscriptionTotalPrice, 'TotalPriceFragment')
_frag_sub_total = _frag.sub_total()
_frag_sub_total.amount()
_frag_sub_total.currency()
_frag_total = _frag.total()
_frag_total.amount()
_frag_total.currency()
return _frag
def fragment_package_entitlement_fragment():
_frag = sgqlc.operation.Fragment(_schema.PackageEntitlement, 'PackageEntitlementFragment')
_frag.usage_limit()
_frag.has_unlimited_usage()
_frag.feature_id()
_frag.reset_period()
_frag.hidden_from_widgets()
_frag.is_custom()
_frag.display_name_override()
_frag_feature = _frag.feature()
_frag_feature.feature_type()
_frag_feature.meter_type()
_frag_feature.feature_units()
_frag_feature.feature_units_plural()
_frag_feature.display_name()
_frag_feature.description()
_frag_feature.ref_id()
_frag_feature.additional_meta_data()
return _frag
def fragment_addon_fragment():
_frag = sgqlc.operation.Fragment(_schema.Addon, 'AddonFragment')
_frag.id()
_frag.ref_id()
_frag.billing_id()
_frag.display_name()
_frag.description()
_frag.additional_meta_data()
_frag_entitlements = _frag.entitlements()
_frag_entitlements.__fragment__(fragment_package_entitlement_fragment())
_frag_prices = _frag.prices()
_frag_prices.__fragment__(fragment_price_fragment())
_frag.pricing_type()
return _frag
def fragment_plan_fragment():
_frag = sgqlc.operation.Fragment(_schema.Plan, 'PlanFragment')
_frag.id()
_frag.ref_id()
_frag.display_name()
_frag.description()
_frag.billing_id()
_frag.version_number()
_frag.additional_meta_data()
_frag_product = _frag.product()
_frag_product.__fragment__(fragment_product_fragment())
_frag_base_plan = _frag.base_plan()
_frag_base_plan.ref_id()
_frag_base_plan.display_name()
_frag_entitlements = _frag.entitlements()
_frag_entitlements.__fragment__(fragment_package_entitlement_fragment())
_frag_inherited_entitlements = _frag.inherited_entitlements()
_frag_inherited_entitlements.__fragment__(fragment_package_entitlement_fragment())
_frag_compatible_addons = _frag.compatible_addons()
_frag_compatible_addons.__fragment__(fragment_addon_fragment())
_frag_prices = _frag.prices()
_frag_prices.__fragment__(fragment_price_fragment())
_frag.pricing_type()
_frag_default_trial_config = _frag.default_trial_config()
_frag_default_trial_config.duration()
_frag_default_trial_config.units()
return _frag
def fragment_customer_resource_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerResource, 'CustomerResourceFragment')
_frag.resource_id()
return _frag
def fragment_slim_subscription_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerSubscription, 'SlimSubscriptionFragment')
_frag.id()
_frag.ref_id()
_frag.status()
_frag.additional_meta_data()
_frag.billing_id()
_frag.billing_link_url()
_frag.effective_end_date()
_frag.current_billing_period_end()
_frag.pricing_type()
_frag_latest_invoice = _frag.latest_invoice()
_frag_latest_invoice.__fragment__(fragment_subscription_invoice_fragment())
_frag.payment_collection()
_frag_resource = _frag.resource()
_frag_resource.__fragment__(fragment_customer_resource_fragment())
_frag_experiment_info = _frag.experiment_info()
_frag_experiment_info.name()
_frag_experiment_info.id()
_frag_experiment_info.group_name()
_frag_experiment_info.group_type()
_frag_prices = _frag.prices()
_frag_prices.usage_limit()
_frag_prices_price = _frag_prices.price()
_frag_prices_price.__fragment__(fragment_price_fragment())
_frag_total_price = _frag.total_price()
_frag_total_price.__fragment__(fragment_total_price_fragment())
_frag_plan = _frag.plan()
_frag_plan.id()
_frag_plan.ref_id()
_frag_addons = _frag.addons()
_frag_addons.quantity()
_frag_addons_addon = _frag_addons.addon()
_frag_addons_addon.id()
_frag_addons_addon.ref_id()
_frag_customer = _frag.customer()
_frag_customer.id()
_frag_customer.ref_id()
return _frag
def fragment_subscription_scheduled_update_data():
_frag = sgqlc.operation.Fragment(_schema.SubscriptionScheduledUpdate, 'SubscriptionScheduledUpdateData')
_frag.subscription_schedule_type()
_frag.schedule_status()
_frag.scheduled_execution_time()
_frag_target_package = _frag.target_package()
_frag_target_package.id()
_frag_target_package.ref_id()
_frag_target_package.display_name()
_frag_schedule_variables = _frag.schedule_variables()
_frag_schedule_variables__as__DowngradeChangeVariables = _frag_schedule_variables.__as__(_schema.DowngradeChangeVariables)
_frag_schedule_variables__as__DowngradeChangeVariables.addon_ref_ids()
_frag_schedule_variables__as__DowngradeChangeVariables.billing_period()
_frag_schedule_variables__as__DowngradeChangeVariables.downgrade_plan_ref_id()
_frag_schedule_variables__as__BillingPeriodChangeVariables = _frag_schedule_variables.__as__(_schema.BillingPeriodChangeVariables)
_frag_schedule_variables__as__BillingPeriodChangeVariables.billing_period()
_frag_schedule_variables__as__UnitAmountChangeVariables = _frag_schedule_variables.__as__(_schema.UnitAmountChangeVariables)
_frag_schedule_variables__as__UnitAmountChangeVariables.new_unit_amount()
_frag_schedule_variables__as__UnitAmountChangeVariables.feature_id()
_frag_schedule_variables__as__AddonChangeVariables = _frag_schedule_variables.__as__(_schema.AddonChangeVariables)
_frag_schedule_variables__as__AddonChangeVariables.addon_ref_id()
_frag_schedule_variables__as__AddonChangeVariables.new_quantity()
return _frag
def fragment_subscription_future_update_data():
_frag = sgqlc.operation.Fragment(_schema.SubscriptionFutureUpdate, 'SubscriptionFutureUpdateData')
_frag.subscription_schedule_type()
_frag.schedule_status()
_frag.scheduled_execution_time()
_frag_target_package = _frag.target_package()
_frag_target_package.id()
_frag_target_package.ref_id()
_frag_target_package.display_name()
_frag_schedule_variables = _frag.schedule_variables()
_frag_schedule_variables__as__DowngradeChangeVariables = _frag_schedule_variables.__as__(_schema.DowngradeChangeVariables)
_frag_schedule_variables__as__DowngradeChangeVariables.addon_ref_ids()
_frag_schedule_variables__as__DowngradeChangeVariables.billing_period()
_frag_schedule_variables__as__DowngradeChangeVariables.downgrade_plan_ref_id()
_frag_schedule_variables__as__BillingPeriodChangeVariables = _frag_schedule_variables.__as__(_schema.BillingPeriodChangeVariables)
_frag_schedule_variables__as__BillingPeriodChangeVariables.billing_period()
_frag_schedule_variables__as__UnitAmountChangeVariables = _frag_schedule_variables.__as__(_schema.UnitAmountChangeVariables)
_frag_schedule_variables__as__UnitAmountChangeVariables.new_unit_amount()
_frag_schedule_variables__as__UnitAmountChangeVariables.feature_id()
_frag_schedule_variables__as__AddonChangeVariables = _frag_schedule_variables.__as__(_schema.AddonChangeVariables)
_frag_schedule_variables__as__AddonChangeVariables.addon_ref_id()
_frag_schedule_variables__as__AddonChangeVariables.new_quantity()
return _frag
def fragment_subscription_invoice_fragment():
_frag = sgqlc.operation.Fragment(_schema.SubscriptionInvoice, 'SubscriptionInvoiceFragment')
_frag.billing_id()
_frag.status()
_frag.created_at()
_frag.updated_at()
_frag.requires_action()
_frag.payment_url()
_frag.payment_secret()
_frag.error_message()
return _frag
def fragment_subscription_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerSubscription, 'SubscriptionFragment')
_frag.id()
_frag.start_date()
_frag.end_date()
_frag.trial_end_date()
_frag.cancellation_date()
_frag.effective_end_date()
_frag.status()
_frag.ref_id()
_frag.current_billing_period_end()
_frag.additional_meta_data()
_frag.billing_id()
_frag.billing_link_url()
_frag_latest_invoice = _frag.latest_invoice()
_frag_latest_invoice.__fragment__(fragment_subscription_invoice_fragment())
_frag.payment_collection()
_frag_resource = _frag.resource()
_frag_resource.__fragment__(fragment_customer_resource_fragment())
_frag_experiment_info = _frag.experiment_info()
_frag_experiment_info.name()
_frag_experiment_info.group_type()
_frag_experiment_info.group_name()
_frag_experiment_info.id()
_frag_prices = _frag.prices()
_frag_prices.usage_limit()
_frag_prices_price = _frag_prices.price()
_frag_prices_price.__fragment__(fragment_price_fragment())
_frag_total_price = _frag.total_price()
_frag_total_price.__fragment__(fragment_total_price_fragment())
_frag.pricing_type()
_frag_plan = _frag.plan()
_frag_plan.__fragment__(fragment_plan_fragment())
_frag_addons = _frag.addons()
_frag_addons.id()
_frag_addons.quantity()
_frag_addons_addon = _frag_addons.addon()
_frag_addons_addon.__fragment__(fragment_addon_fragment())
_frag_scheduled_updates = _frag.scheduled_updates()
_frag_scheduled_updates.__fragment__(fragment_subscription_scheduled_update_data())
_frag_future_updates = _frag.future_updates()
_frag_future_updates.__fragment__(fragment_subscription_future_update_data())
return _frag
def fragment_promotional_entitlement_fragment():
_frag = sgqlc.operation.Fragment(_schema.PromotionalEntitlement, 'PromotionalEntitlementFragment')
_frag.status()
_frag.usage_limit()
_frag.feature_id()
_frag.has_unlimited_usage()
_frag.reset_period()
_frag.end_date()
_frag.is_visible()
_frag_feature = _frag.feature()
_frag_feature.feature_type()
_frag_feature.meter_type()
_frag_feature.feature_units()
_frag_feature.feature_units_plural()
_frag_feature.display_name()
_frag_feature.description()
_frag_feature.ref_id()
_frag_feature.additional_meta_data()
return _frag
def fragment_slim_customer_fragment():
_frag = sgqlc.operation.Fragment(_schema.Customer, 'SlimCustomerFragment')
_frag.id()
_frag.name()
_frag.email()
_frag.created_at()
_frag.updated_at()
_frag.ref_id()
_frag.billing_id()
_frag.additional_meta_data()
return _frag
def fragment_customer_fragment():
_frag = sgqlc.operation.Fragment(_schema.Customer, 'CustomerFragment')
_frag.__fragment__(fragment_slim_customer_fragment())
_frag.has_payment_method()
_frag.has_active_subscription()
_frag.default_payment_expiration_month()
_frag.default_payment_expiration_year()
_frag.default_payment_method_last4_digits()
_frag_trialed_plans = _frag.trialed_plans()
_frag_trialed_plans.product_id()
_frag_trialed_plans.product_ref_id()
_frag_trialed_plans.plan_ref_id()
_frag_trialed_plans.plan_id()
_frag_experiment_info = _frag.experiment_info()
_frag_experiment_info.group_type()
_frag_experiment_info.group_name()
_frag_experiment_info.id()
_frag_experiment_info.name()
_frag_coupon = _frag.coupon()
_frag_coupon.__fragment__(fragment_coupon_fragment())
_frag_eligible_for_trial = _frag.eligible_for_trial()
_frag_eligible_for_trial.product_id()
_frag_eligible_for_trial.product_ref_id()
_frag_eligible_for_trial.eligible()
_frag_promotional_entitlements = _frag.promotional_entitlements()
_frag_promotional_entitlements.__fragment__(fragment_promotional_entitlement_fragment())
return _frag
def fragment_customer_with_subscriptions_fragment():
_frag = sgqlc.operation.Fragment(_schema.Customer, 'CustomerWithSubscriptionsFragment')
_frag.__fragment__(fragment_customer_fragment())
_frag_subscriptions = _frag.subscriptions()
_frag_subscriptions.__fragment__(fragment_subscription_fragment())
return _frag
def fragment_subscription_preview_fragment():
_frag = sgqlc.operation.Fragment(_schema.SubscriptionPreview, 'SubscriptionPreviewFragment')
_frag_sub_total = _frag.sub_total()
_frag_sub_total.amount()
_frag_sub_total.currency()
_frag_total_excluding_tax = _frag.total_excluding_tax()
_frag_total_excluding_tax.amount()
_frag_total_excluding_tax.currency()
_frag_total = _frag.total()
_frag_total.amount()
_frag_total.currency()
_frag_tax_details = _frag.tax_details()
_frag_tax_details.display_name()
_frag_tax_details.percentage()
_frag_tax_details.inclusive()
_frag_tax = _frag.tax()
_frag_tax.amount()
_frag_tax.currency()
_frag_billing_period_range = _frag.billing_period_range()
_frag_billing_period_range.start()
_frag_billing_period_range.end()
_frag_discount = _frag.discount()
_frag_discount.type()
_frag_discount.value()
_frag_discount.duration_type()
_frag_discount.duration_in_months()
_frag_subscription = _frag.subscription()
_frag_subscription_sub_total = _frag_subscription.sub_total()
_frag_subscription_sub_total.amount()
_frag_subscription_sub_total.currency()
_frag_subscription_total_excluding_tax = _frag_subscription.total_excluding_tax()
_frag_subscription_total_excluding_tax.amount()
_frag_subscription_total_excluding_tax.currency()
_frag_subscription_total = _frag_subscription.total()
_frag_subscription_total.amount()
_frag_subscription_total.currency()
_frag_subscription_tax = _frag_subscription.tax()
_frag_subscription_tax.amount()
_frag_subscription_tax.currency()
_frag_proration = _frag.proration()
_frag_proration.proration_date()
_frag_proration_credit = _frag_proration.credit()
_frag_proration_credit.amount()
_frag_proration_credit.currency()
_frag_proration_debit = _frag_proration.debit()
_frag_proration_debit.amount()
_frag_proration_debit.currency()
_frag_proration_net_amount = _frag_proration.net_amount()
_frag_proration_net_amount.amount()
_frag_proration_net_amount.currency()
_frag.is_plan_downgrade()
_frag.has_scheduled_updates()
_frag_credits = _frag.credits()
_frag_credits_initial = _frag_credits.initial()
_frag_credits_initial.amount()
_frag_credits_initial.currency()
_frag_credits_used = _frag_credits.used()
_frag_credits_used.amount()
_frag_credits_used.currency()
_frag_credits_remaining = _frag_credits.remaining()
_frag_credits_remaining.amount()
_frag_credits_remaining.currency()
return _frag
def fragment_feature_fragment():
_frag = sgqlc.operation.Fragment(_schema.EntitlementFeature, 'FeatureFragment')
_frag.feature_type()
_frag.meter_type()
_frag.feature_units()
_frag.feature_units_plural()
_frag.description()
_frag.display_name()
_frag.ref_id()
return _frag
def fragment_reset_period_configuration_fragment():
_frag = sgqlc.operation.Fragment(_schema.ResetPeriodConfiguration, 'ResetPeriodConfigurationFragment')
_frag.__typename__()
_frag__as__MonthlyResetPeriodConfig = _frag.__as__(_schema.MonthlyResetPeriodConfig)
_frag__as__MonthlyResetPeriodConfig.monthly_according_to()
_frag__as__WeeklyResetPeriodConfig = _frag.__as__(_schema.WeeklyResetPeriodConfig)
_frag__as__WeeklyResetPeriodConfig.weekly_according_to()
return _frag
def fragment_usage_updated_fragment():
_frag = sgqlc.operation.Fragment(_schema.UsageMeasurementUpdated, 'UsageUpdatedFragment')
_frag.customer_id()
_frag.resource_id()
_frag.feature_id()
_frag.current_usage()
_frag.next_reset_date()
return _frag
def fragment_entitlement_fragment():
_frag = sgqlc.operation.Fragment(_schema.Entitlement, 'EntitlementFragment')
_frag.is_granted()
_frag.access_denied_reason()
_frag.customer_id()
_frag.resource_id()
_frag.usage_limit()
_frag.has_unlimited_usage()
_frag.current_usage()
_frag.requested_usage()
_frag.entitlement_updated_at()
_frag.usage_updated_at()
_frag.next_reset_date()
_frag.reset_period()
_frag_reset_period_configuration = _frag.reset_period_configuration()
_frag_reset_period_configuration.__fragment__(fragment_reset_period_configuration_fragment())
_frag_feature = _frag.feature()
_frag_feature.__fragment__(fragment_feature_fragment())
return _frag
def fragment_typography_configuration_fragment():
_frag = sgqlc.operation.Fragment(_schema.TypographyConfiguration, 'TypographyConfigurationFragment')
_frag.font_family()
_frag_h1 = _frag.h1()
_frag_h1.__fragment__(fragment_font_variant_fragment())
_frag_h2 = _frag.h2()
_frag_h2.__fragment__(fragment_font_variant_fragment())
_frag_h3 = _frag.h3()
_frag_h3.__fragment__(fragment_font_variant_fragment())
_frag_body = _frag.body()
_frag_body.__fragment__(fragment_font_variant_fragment())
return _frag
def fragment_font_variant_fragment():
_frag = sgqlc.operation.Fragment(_schema.FontVariant, 'FontVariantFragment')
_frag.font_size()
_frag.font_weight()
return _frag
def fragment_layout_configuration_fragment():
_frag = sgqlc.operation.Fragment(_schema.PaywallLayoutConfiguration, 'LayoutConfigurationFragment')
_frag.alignment()
_frag.plan_width()
_frag.plan_margin()
_frag.plan_padding()
return _frag
def fragment_paywall_configuration_fragment():
_frag = sgqlc.operation.Fragment(_schema.PaywallConfiguration, 'PaywallConfigurationFragment')
_frag_palette = _frag.palette()
_frag_palette.primary()
_frag_palette.text_color()
_frag_palette.background_color()
_frag_palette.border_color()
_frag_palette.current_plan_background()
_frag_typography = _frag.typography()
_frag_typography.__fragment__(fragment_typography_configuration_fragment())
_frag_layout = _frag.layout()
_frag_layout.__fragment__(fragment_layout_configuration_fragment())
_frag.custom_css()
return _frag
def fragment_paywall_currency_fragment():
_frag = sgqlc.operation.Fragment(_schema.PaywallCurrency, 'PaywallCurrencyFragment')
_frag.code()
_frag.symbol()
return _frag
def fragment_product_fragment():
_frag = sgqlc.operation.Fragment(_schema.Product, 'ProductFragment')
_frag.ref_id()
_frag.display_name()
_frag.description()
_frag.additional_meta_data()
_frag_product_settings = _frag.product_settings()
_frag_product_settings_downgrade_plan = _frag_product_settings.downgrade_plan()
_frag_product_settings_downgrade_plan.ref_id()
_frag_product_settings_downgrade_plan.display_name()
return _frag
def fragment_entitlements_updated_payload():
_frag = sgqlc.operation.Fragment(_schema.EntitlementsUpdated, 'EntitlementsUpdatedPayload')
_frag.customer_id()
_frag.resource_id()
_frag_entitlements = _frag.entitlements()
_frag_entitlements.__fragment__(fragment_entitlement_fragment())
return _frag
def fragment_entitlement_usage_updated():
_frag = sgqlc.operation.Fragment(_schema.UsageUpdated, 'EntitlementUsageUpdated')
_frag_usage = _frag.usage()
_frag_usage.__fragment__(fragment_usage_updated_fragment())
_frag_entitlement = _frag.entitlement()
_frag_entitlement.__fragment__(fragment_entitlement_fragment())
return _frag
def fragment_customer_portal_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerPortal, 'CustomerPortalFragment')
_frag_subscriptions = _frag.subscriptions()
_frag_subscriptions.__fragment__(fragment_customer_portal_subscription_fragment())
_frag_entitlements = _frag.entitlements()
_frag_entitlements.__fragment__(fragment_customer_portal_entitlement_fragment())
_frag_promotional_entitlements = _frag.promotional_entitlements()
_frag_promotional_entitlements.__fragment__(fragment_customer_portal_promotional_entitlement_fragment())
_frag_billing_information = _frag.billing_information()
_frag_billing_information.__fragment__(fragment_customer_portal_billing_information_fragment())
_frag.show_watermark()
_frag.billing_portal_url()
_frag.can_upgrade_subscription()
_frag_configuration = _frag.configuration()
_frag_configuration.__fragment__(fragment_customer_portal_configuration_fragment())
_frag_resource = _frag.resource()
_frag_resource.__fragment__(fragment_customer_resource_fragment())
return _frag
def fragment_checkout_state_fragment():
_frag = sgqlc.operation.Fragment(_schema.CheckoutState, 'CheckoutStateFragment')
_frag_configuration = _frag.configuration()
_frag_configuration.__fragment__(fragment_checkout_configuration_fragment())
_frag.setup_secret()
_frag_customer = _frag.customer()
_frag_customer.__fragment__(fragment_customer_fragment())
_frag_active_subscription = _frag.active_subscription()
_frag_active_subscription.__fragment__(fragment_subscription_fragment())
_frag_resource = _frag.resource()
_frag_resource.__fragment__(fragment_customer_resource_fragment())
_frag_plan = _frag.plan()
_frag_plan.__fragment__(fragment_plan_fragment())
_frag_billing_integration = _frag.billing_integration()
_frag_billing_integration.billing_identifier()
_frag_billing_integration_credentials = _frag_billing_integration.credentials()
_frag_billing_integration_credentials.account_id()
_frag_billing_integration_credentials.public_key()
return _frag
def fragment_checkout_configuration_fragment():
_frag = sgqlc.operation.Fragment(_schema.CheckoutConfiguration, 'CheckoutConfigurationFragment')
_frag_palette = _frag.palette()
_frag_palette.primary()
_frag_palette.text_color()
_frag_palette.background_color()
_frag_palette.border_color()
_frag_palette.selection_color()
_frag_palette.summary_background_color()
_frag_palette.__typename__()
_frag_typography = _frag.typography()
_frag_typography.__fragment__(fragment_typography_configuration_fragment())
_frag_typography.__typename__()
_frag.custom_css()
_frag_content = _frag.content()
_frag_content.collect_phone_number()
_frag.__typename__()
return _frag
def fragment_customer_portal_configuration_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerPortalConfiguration, 'CustomerPortalConfigurationFragment')
_frag_palette = _frag.palette()
_frag_palette.primary()
_frag_palette.text_color()
_frag_palette.background_color()
_frag_palette.border_color()
_frag_palette.current_plan_background()
_frag_palette.icons_color()
_frag_palette.paywall_background_color()
_frag_typography = _frag.typography()
_frag_typography.__fragment__(fragment_typography_configuration_fragment())
_frag.custom_css()
return _frag
def fragment_customer_portal_subscription_price_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerPortalSubscriptionPrice, 'CustomerPortalSubscriptionPriceFragment')
_frag.billing_period()
_frag.billing_model()
_frag_price = _frag.price()
_frag_price.amount()
_frag_price.currency()
_frag_feature = _frag.feature()
_frag_feature.id()
_frag_feature.ref_id()
_frag_feature.display_name()
_frag_feature.feature_units()
_frag_feature.feature_units_plural()
return _frag
def fragment_customer_portal_subscription_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerPortalSubscription, 'CustomerPortalSubscriptionFragment')
_frag.subscription_id()
_frag.plan_name()
_frag.pricing_type()
_frag_prices = _frag.prices()
_frag_prices.__fragment__(fragment_customer_portal_subscription_price_fragment())
_frag_pricing = _frag.pricing()
_frag_pricing.unit_quantity()
_frag_pricing.billing_period()
_frag_pricing.billing_model()
_frag_pricing.pricing_type()
_frag_pricing.usage_based_estimated_bill()
_frag_pricing_price = _frag_pricing.price()
_frag_pricing_price.amount()
_frag_pricing_price.currency()
_frag_pricing_feature = _frag_pricing.feature()
_frag_pricing_feature.feature_units()
_frag_pricing_feature.feature_units_plural()
_frag_pricing_feature.display_name()
_frag.status()
_frag.trial_remaining_days()
_frag_billing_period_range = _frag.billing_period_range()
_frag_billing_period_range.start()
_frag_billing_period_range.end()
_frag_total_price = _frag.total_price()
_frag_total_price_sub_total = _frag_total_price.sub_total()
_frag_total_price_sub_total.amount()
_frag_total_price_sub_total.currency()
_frag_total_price_total = _frag_total_price.total()
_frag_total_price_total.amount()
_frag_total_price_total.currency()
_frag_total_price_addons_total = _frag_total_price.addons_total()
_frag_total_price_addons_total.amount()
_frag_total_price_addons_total.currency()
_frag_addons = _frag.addons()
_frag_addons.__fragment__(fragment_customer_portal_subscription_addon_fragment())
_frag_scheduled_updates = _frag.scheduled_updates()
_frag_scheduled_updates.__fragment__(fragment_customer_portal_subscription_scheduled_update_data_fragment())
return _frag
def fragment_customer_portal_subscription_addon_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerPortalAddon, 'CustomerPortalSubscriptionAddonFragment')
_frag.addon_id()
_frag.description()
_frag.display_name()
_frag.quantity()
return _frag
def fragment_customer_portal_subscription_scheduled_update_data_fragment():
_frag = sgqlc.operation.Fragment(_schema.SubscriptionScheduledUpdate, 'CustomerPortalSubscriptionScheduledUpdateDataFragment')
_frag.subscription_schedule_type()
_frag.schedule_status()
_frag.scheduled_execution_time()
_frag_target_package = _frag.target_package()
_frag_target_package.id()
_frag_target_package.ref_id()
_frag_target_package.display_name()
_frag_target_package.pricing_type()
_frag_schedule_variables = _frag.schedule_variables()
_frag_schedule_variables__as__DowngradeChangeVariables = _frag_schedule_variables.__as__(_schema.DowngradeChangeVariables)
_frag_schedule_variables__as__DowngradeChangeVariables.addon_ref_ids()
_frag_schedule_variables__as__DowngradeChangeVariables.billing_period()
_frag_schedule_variables__as__DowngradeChangeVariables.downgrade_plan_ref_id()
_frag_schedule_variables__as__BillingPeriodChangeVariables = _frag_schedule_variables.__as__(_schema.BillingPeriodChangeVariables)
_frag_schedule_variables__as__BillingPeriodChangeVariables.billing_period()
_frag_schedule_variables__as__UnitAmountChangeVariables = _frag_schedule_variables.__as__(_schema.UnitAmountChangeVariables)
_frag_schedule_variables__as__UnitAmountChangeVariables.new_unit_amount()
_frag_schedule_variables__as__UnitAmountChangeVariables.feature_id()
_frag_schedule_variables__as__AddonChangeVariables = _frag_schedule_variables.__as__(_schema.AddonChangeVariables)
_frag_schedule_variables__as__AddonChangeVariables.addon_ref_id()
_frag_schedule_variables__as__AddonChangeVariables.new_quantity()
return _frag
def fragment_customer_portal_entitlement_fragment():
_frag = sgqlc.operation.Fragment(_schema.Entitlement, 'CustomerPortalEntitlementFragment')
_frag.is_granted()
_frag.usage_limit()
_frag.current_usage()
_frag.has_unlimited_usage()
_frag.next_reset_date()
_frag.reset_period()
_frag_reset_period_configuration = _frag.reset_period_configuration()
_frag_reset_period_configuration.__fragment__(fragment_reset_period_configuration_fragment())
_frag_feature = _frag.feature()
_frag_feature.__fragment__(fragment_feature_fragment())
return _frag
def fragment_customer_portal_promotional_entitlement_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerPortalPromotionalEntitlement, 'CustomerPortalPromotionalEntitlementFragment')
_frag.display_name()
_frag.has_unlimited_usage()
_frag.usage_limit()
_frag.period()
_frag.start_date()
_frag.end_date()
return _frag
def fragment_customer_portal_billing_information_fragment():
_frag = sgqlc.operation.Fragment(_schema.CustomerPortalBillingInformation, 'CustomerPortalBillingInformationFragment')
_frag.email()
_frag.name()
_frag.default_payment_method_last4_digits()
_frag.default_payment_method_id()
_frag.default_payment_expiration_month()
_frag.default_payment_expiration_year()
return _frag
def fragment_mock_paywall_plan_fragment():
_frag = sgqlc.operation.Fragment(_schema.PaywallPlan, 'MockPaywallPlanFragment')
_frag.ref_id()
_frag.description()
_frag.display_name()
_frag.billing_id()
_frag.additional_meta_data()
_frag_product = _frag.product()
_frag_product.ref_id()
_frag_product.display_name()
_frag_product.description()
_frag_product.additional_meta_data()
_frag_base_plan = _frag.base_plan()
_frag_base_plan.ref_id()
_frag_base_plan.display_name()
_frag_entitlements = _frag.entitlements()
_frag_entitlements.__fragment__(fragment_mock_paywall_package_entitlement_fragment())
_frag_inherited_entitlements = _frag.inherited_entitlements()
_frag_inherited_entitlements.__fragment__(fragment_mock_paywall_package_entitlement_fragment())
_frag_prices = _frag.prices()
_frag_prices.__fragment__(fragment_mock_paywall_price_fragment())
_frag.pricing_type()
_frag_default_trial_config = _frag.default_trial_config()
_frag_default_trial_config.duration()
_frag_default_trial_config.units()
_frag_compatible_addons = _frag.compatible_addons()
_frag_compatible_addons.__fragment__(fragment_mock_paywall_addon_fragment())
return _frag
def fragment_mock_paywall_package_entitlement_fragment():
_frag = sgqlc.operation.Fragment(_schema.Entitlement, 'MockPaywallPackageEntitlementFragment')
_frag.usage_limit()
_frag.has_unlimited_usage()
_frag.reset_period()
_frag.hidden_from_widgets()
_frag.display_name_override()
_frag_feature = _frag.feature()
_frag_feature.feature_type()
_frag_feature.meter_type()
_frag_feature.feature_units()
_frag_feature.feature_units_plural()
_frag_feature.display_name()
_frag_feature.description()
_frag_feature.ref_id()
_frag_feature.additional_meta_data()
return _frag
def fragment_mock_paywall_price_fragment():
_frag = sgqlc.operation.Fragment(_schema.PaywallPrice, 'MockPaywallPriceFragment')
_frag.billing_model()
_frag.billing_period()
_frag.billing_id()
_frag.min_unit_quantity()
_frag.max_unit_quantity()
_frag.billing_country_code()
_frag_price = _frag.price()
_frag_price.amount()
_frag_price.currency()
_frag.tiers_mode()
_frag_tiers = _frag.tiers()
_frag_tiers.__fragment__(fragment_price_tier_fragment())
_frag_feature = _frag.feature()
_frag_feature.ref_id()
_frag_feature.feature_units()
_frag_feature.feature_units_plural()
_frag_feature.display_name()
return _frag
def fragment_paywall_calculated_price_points_fragment():
_frag = sgqlc.operation.Fragment(_schema.PaywallPricePoint, 'PaywallCalculatedPricePointsFragment')
_frag.plan_id()
_frag.additional_charges_may_apply()
_frag.billing_period()
_frag.amount()
_frag.currency()
_frag.billing_country_code()
_frag_feature = _frag.feature()
_frag_feature.ref_id()
_frag_feature.feature_units()
_frag_feature.feature_units_plural()
_frag_feature.display_name()
_frag_feature.description()
return _frag
def fragment_mock_paywall_addon_fragment():
_frag = sgqlc.operation.Fragment(_schema.PaywallAddon, 'MockPaywallAddonFragment')
_frag.ref_id()
_frag.display_name()
_frag.description()
_frag.additional_meta_data()
_frag.billing_id()
_frag_entitlements = _frag.entitlements()
_frag_entitlements.__fragment__(fragment_mock_paywall_package_entitlement_fragment())
_frag_prices = _frag.prices()
_frag_prices.__fragment__(fragment_mock_paywall_price_fragment())
_frag.pricing_type()
return _frag
def fragment_paywall_fragment():
_frag = sgqlc.operation.Fragment(_schema.Paywall, 'PaywallFragment')
_frag_plans = _frag.plans()
_frag_plans.__fragment__(fragment_plan_fragment())
_frag_currency = _frag.currency()
_frag_currency.__fragment__(fragment_paywall_currency_fragment())
_frag_configuration = _frag.configuration()
_frag_configuration.__fragment__(fragment_paywall_configuration_fragment())
_frag_customer = _frag.customer()
_frag_customer.__fragment__(fragment_customer_fragment())
_frag_active_subscriptions = _frag.active_subscriptions()
_frag_active_subscriptions.__fragment__(fragment_subscription_fragment())
_frag_resource = _frag.resource()
_frag_resource.__fragment__(fragment_customer_resource_fragment())
_frag_paywall_calculated_price_points = _frag.paywall_calculated_price_points()
_frag_paywall_calculated_price_points.__fragment__(fragment_paywall_calculated_price_points_fragment())
return _frag
def fragment_usage_history_fragment():
_frag = sgqlc.operation.Fragment(_schema.UsageHistory, 'UsageHistoryFragment')
_frag.start_date()
_frag.end_date()
_frag_usage_measurements = _frag.usage_measurements()
_frag_usage_measurements.date()
_frag_usage_measurements.value()
_frag_usage_measurements.is_reset_point()
return _frag
class Fragment:
addon_fragment = fragment_addon_fragment()
checkout_configuration_fragment = fragment_checkout_configuration_fragment()
checkout_state_fragment = fragment_checkout_state_fragment()
coupon_fragment = fragment_coupon_fragment()
customer_fragment = fragment_customer_fragment()
customer_portal_billing_information_fragment = fragment_customer_portal_billing_information_fragment()
customer_portal_configuration_fragment = fragment_customer_portal_configuration_fragment()
customer_portal_entitlement_fragment = fragment_customer_portal_entitlement_fragment()
customer_portal_fragment = fragment_customer_portal_fragment()
customer_portal_promotional_entitlement_fragment = fragment_customer_portal_promotional_entitlement_fragment()
customer_portal_subscription_addon_fragment = fragment_customer_portal_subscription_addon_fragment()
customer_portal_subscription_fragment = fragment_customer_portal_subscription_fragment()
customer_portal_subscription_price_fragment = fragment_customer_portal_subscription_price_fragment()
customer_portal_subscription_scheduled_update_data_fragment = fragment_customer_portal_subscription_scheduled_update_data_fragment()
customer_resource_fragment = fragment_customer_resource_fragment()
customer_with_subscriptions_fragment = fragment_customer_with_subscriptions_fragment()
entitlement_fragment = fragment_entitlement_fragment()
entitlement_usage_updated = fragment_entitlement_usage_updated()
entitlements_updated_payload = fragment_entitlements_updated_payload()
feature_fragment = fragment_feature_fragment()
font_variant_fragment = fragment_font_variant_fragment()
layout_configuration_fragment = fragment_layout_configuration_fragment()
mock_paywall_addon_fragment = fragment_mock_paywall_addon_fragment()
mock_paywall_package_entitlement_fragment = fragment_mock_paywall_package_entitlement_fragment()
mock_paywall_plan_fragment = fragment_mock_paywall_plan_fragment()
mock_paywall_price_fragment = fragment_mock_paywall_price_fragment()
package_entitlement_fragment = fragment_package_entitlement_fragment()
paywall_calculated_price_points_fragment = fragment_paywall_calculated_price_points_fragment()
paywall_configuration_fragment = fragment_paywall_configuration_fragment()
paywall_currency_fragment = fragment_paywall_currency_fragment()
paywall_fragment = fragment_paywall_fragment()
plan_fragment = fragment_plan_fragment()
price_fragment = fragment_price_fragment()
price_tier_fragment = fragment_price_tier_fragment()
product_fragment = fragment_product_fragment()
promotional_entitlement_fragment = fragment_promotional_entitlement_fragment()
reset_period_configuration_fragment = fragment_reset_period_configuration_fragment()
slim_customer_fragment = fragment_slim_customer_fragment()
slim_subscription_fragment = fragment_slim_subscription_fragment()
subscription_fragment = fragment_subscription_fragment()
subscription_future_update_data = fragment_subscription_future_update_data()
subscription_invoice_fragment = fragment_subscription_invoice_fragment()
subscription_preview_fragment = fragment_subscription_preview_fragment()
subscription_scheduled_update_data = fragment_subscription_scheduled_update_data()
total_price_fragment = fragment_total_price_fragment()
typography_configuration_fragment = fragment_typography_configuration_fragment()
usage_history_fragment = fragment_usage_history_fragment()
usage_updated_fragment = fragment_usage_updated_fragment()
def mutation_provision_customer():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ProvisionCustomer', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.ProvisionCustomerInput))))
_op_provision_customer = _op.provision_customer(input=sgqlc.types.Variable('input'))
_op_provision_customer_customer = _op_provision_customer.customer()
_op_provision_customer_customer.__fragment__(fragment_slim_customer_fragment())
_op_provision_customer.subscription_decision_strategy()
_op_provision_customer_subscription = _op_provision_customer.subscription()
_op_provision_customer_subscription.__fragment__(fragment_slim_subscription_fragment())
return _op
def mutation_import_customer_bulk():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ImportCustomerBulk', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.ImportCustomerBulk))))
_op.import_customers_bulk(input=sgqlc.types.Variable('input'))
return _op
def mutation_import_customer():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ImportCustomer', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.ImportCustomerInput))))
_op_import_customer = _op.import_one_customer(input=sgqlc.types.Variable('input'), __alias__='import_customer')
_op_import_customer.__fragment__(fragment_slim_customer_fragment())
return _op
def mutation_update_customer():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='UpdateCustomer', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.UpdateCustomerInput))))
_op_update_customer = _op.update_one_customer(input=sgqlc.types.Variable('input'), __alias__='update_customer')
_op_update_customer.__fragment__(fragment_slim_customer_fragment())
return _op
def mutation_grant_promotional_entitlements():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='GrantPromotionalEntitlements', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.GrantPromotionalEntitlementsInput))))
_op_grant_promotional_entitlements = _op.grant_promotional_entitlements(input=sgqlc.types.Variable('input'))
_op_grant_promotional_entitlements.__fragment__(fragment_promotional_entitlement_fragment())
return _op
def mutation_revoke_promotional_entitlement():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='RevokePromotionalEntitlement', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.RevokePromotionalEntitlementInput))))
_op_revoke_promotional_entitlement = _op.revoke_promotional_entitlement(input=sgqlc.types.Variable('input'))
_op_revoke_promotional_entitlement.id()
return _op
def mutation_provision_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ProvisionSubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.ProvisionSubscriptionInput))))
_op_provision_subscription = _op.provision_subscription_v2(input=sgqlc.types.Variable('input'), __alias__='provision_subscription')
_op_provision_subscription.checkout_url()
_op_provision_subscription.status()
_op_provision_subscription_subscription = _op_provision_subscription.subscription()
_op_provision_subscription_subscription.__fragment__(fragment_slim_subscription_fragment())
return _op
def mutation_apply_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ApplySubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.ApplySubscriptionInput))))
_op_apply_subscription = _op.apply_subscription(input=sgqlc.types.Variable('input'))
_op_apply_subscription_subscription = _op_apply_subscription.subscription()
_op_apply_subscription_subscription.__fragment__(fragment_subscription_fragment())
return _op
def mutation_import_subscriptions_bulk():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ImportSubscriptionsBulk', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.ImportSubscriptionsBulk))))
_op.import_subscriptions_bulk(input=sgqlc.types.Variable('input'))
return _op
def mutation_update_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='UpdateSubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.UpdateSubscriptionInput))))
_op_update_subscription = _op.update_one_subscription(input=sgqlc.types.Variable('input'), __alias__='update_subscription')
_op_update_subscription.__fragment__(fragment_slim_subscription_fragment())
return _op
def mutation_cancel_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='CancelSubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.SubscriptionCancellationInput))))
_op_cancel_subscription = _op.cancel_subscription(input=sgqlc.types.Variable('input'))
_op_cancel_subscription.__fragment__(fragment_slim_subscription_fragment())
return _op
def mutation_estimate_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='EstimateSubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.EstimateSubscriptionInput))))
_op_estimate_subscription = _op.estimate_subscription(input=sgqlc.types.Variable('input'))
_op_estimate_subscription.__fragment__(fragment_subscription_preview_fragment())
return _op
def mutation_estimate_subscription_update():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='EstimateSubscriptionUpdate', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.EstimateSubscriptionUpdateInput))))
_op_estimate_subscription_update = _op.estimate_subscription_update(input=sgqlc.types.Variable('input'))
_op_estimate_subscription_update.__fragment__(fragment_subscription_preview_fragment())
return _op
def mutation_preview_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='PreviewSubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.PreviewSubscriptionInput))))
_op_preview_subscription = _op.preview_subscription(input=sgqlc.types.Variable('input'))
_op_preview_subscription.__fragment__(fragment_subscription_preview_fragment())
return _op
def mutation_cancel_subscription_updates():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='CancelSubscriptionUpdates', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.SubscriptionUpdateScheduleCancellationInput))))
_op.cancel_schedule(input=sgqlc.types.Variable('input'))
return _op
def mutation_report_usage():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ReportUsage', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.UsageMeasurementCreateInput))))
_op_create_usage_measurement = _op.create_usage_measurement(usage_measurement=sgqlc.types.Variable('input'))
_op_create_usage_measurement.id()
_op_create_usage_measurement.current_usage()
_op_create_usage_measurement.next_reset_date()
_op_create_usage_measurement.timestamp()
return _op
def mutation_report_event():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ReportEvent', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.UsageEventsReportInput))))
_op.report_event(events=sgqlc.types.Variable('input'))
return _op
def mutation_report_entitlement_check_requested():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ReportEntitlementCheckRequested', variables=dict(entitlementCheckRequested=sgqlc.types.Arg(sgqlc.types.non_null(_schema.EntitlementCheckRequested))))
_op.report_entitlement_check_requested(entitlement_check_requested=sgqlc.types.Variable('entitlementCheckRequested'))
return _op
def mutation_create_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='CreateSubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.SubscriptionInput))))
_op_create_subscription = _op.create_subscription(subscription=sgqlc.types.Variable('input'))
_op_create_subscription.__fragment__(fragment_slim_subscription_fragment())
return _op
def mutation_migrate_subscription_to_latest():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='MigrateSubscriptionToLatest', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.SubscriptionMigrationInput))))
_op_migrate_subscription_to_latest = _op.migrate_subscription_to_latest(input=sgqlc.types.Variable('input'))
_op_migrate_subscription_to_latest.subscription_id()
return _op
def mutation_archive_customer():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='ArchiveCustomer', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.ArchiveCustomerInput))))
_op_archive_customer = _op.archive_customer(input=sgqlc.types.Variable('input'))
_op_archive_customer.customer_id()
return _op
def mutation_transfer_subscription():
_op = sgqlc.operation.Operation(_schema_root.mutation_type, name='TransferSubscription', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.TransferSubscriptionInput))))
_op_transfer_subscription = _op.transfer_subscription(input=sgqlc.types.Variable('input'))
_op_transfer_subscription.__fragment__(fragment_slim_subscription_fragment())
return _op
class Mutation:
apply_subscription = mutation_apply_subscription()
archive_customer = mutation_archive_customer()
cancel_subscription = mutation_cancel_subscription()
cancel_subscription_updates = mutation_cancel_subscription_updates()
create_subscription = mutation_create_subscription()
estimate_subscription = mutation_estimate_subscription()
estimate_subscription_update = mutation_estimate_subscription_update()
grant_promotional_entitlements = mutation_grant_promotional_entitlements()
import_customer = mutation_import_customer()
import_customer_bulk = mutation_import_customer_bulk()
import_subscriptions_bulk = mutation_import_subscriptions_bulk()
migrate_subscription_to_latest = mutation_migrate_subscription_to_latest()
preview_subscription = mutation_preview_subscription()
provision_customer = mutation_provision_customer()
provision_subscription = mutation_provision_subscription()
report_entitlement_check_requested = mutation_report_entitlement_check_requested()
report_event = mutation_report_event()
report_usage = mutation_report_usage()
revoke_promotional_entitlement = mutation_revoke_promotional_entitlement()
transfer_subscription = mutation_transfer_subscription()
update_customer = mutation_update_customer()
update_subscription = mutation_update_subscription()
def query_get_customer_by_id():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetCustomerById', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.GetCustomerByRefIdInput))))
_op_get_customer_by_ref_id = _op.get_customer_by_ref_id(input=sgqlc.types.Variable('input'))
_op_get_customer_by_ref_id.__fragment__(fragment_customer_with_subscriptions_fragment())
return _op
def query_get_active_subscriptions():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetActiveSubscriptions', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.GetActiveSubscriptionsInput))))
_op_get_active_subscriptions = _op.get_active_subscriptions(input=sgqlc.types.Variable('input'))
_op_get_active_subscriptions.__fragment__(fragment_subscription_fragment())
return _op
def query_get_coupons():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetCoupons')
_op_coupons = _op.coupons(filter={'status': {'eq': 'ACTIVE'}}, paging={'first': 50})
_op_coupons_edges = _op_coupons.edges()
_op_coupons_edges_node = _op_coupons_edges.node()
_op_coupons_edges_node.__fragment__(fragment_coupon_fragment())
return _op
def query_get_paywall():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetPaywall', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.GetPaywallInput))))
_op_paywall = _op.paywall(input=sgqlc.types.Variable('input'))
_op_paywall.__fragment__(fragment_paywall_fragment())
return _op
def query_get_entitlements():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetEntitlements', variables=dict(query=sgqlc.types.Arg(sgqlc.types.non_null(_schema.FetchEntitlementsQuery))))
_op_entitlements = _op.cached_entitlements(query=sgqlc.types.Variable('query'), __alias__='entitlements')
_op_entitlements.__fragment__(fragment_entitlement_fragment())
return _op
def query_get_entitlement():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetEntitlement', variables=dict(query=sgqlc.types.Arg(sgqlc.types.non_null(_schema.FetchEntitlementQuery))))
_op_entitlement = _op.entitlement(query=sgqlc.types.Variable('query'))
_op_entitlement.__fragment__(fragment_entitlement_fragment())
return _op
def query_get_products():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetProducts')
_op_products = _op.products(paging={'first': 50})
_op_products_edges = _op_products.edges()
_op_products_edges_node = _op_products_edges.node()
_op_products_edges_node.__fragment__(fragment_product_fragment())
return _op
def query_get_sdk_configuration():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetSdkConfiguration')
_op_sdk_configuration = _op.sdk_configuration()
_op_sdk_configuration.sentry_dsn()
_op_sdk_configuration.is_widget_watermark_enabled()
return _op
def query_get_customer_portal_by_ref_id():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetCustomerPortalByRefId', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.CustomerPortalInput))))
_op_customer_portal = _op.customer_portal(input=sgqlc.types.Variable('input'))
_op_customer_portal.__fragment__(fragment_customer_portal_fragment())
return _op
def query_get_checkout_state():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetCheckoutState', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.CheckoutStateInput))))
_op_checkout_state = _op.checkout_state(input=sgqlc.types.Variable('input'))
_op_checkout_state.__fragment__(fragment_checkout_state_fragment())
return _op
def query_get_mock_paywall():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetMockPaywall', variables=dict(input=sgqlc.types.Arg(sgqlc.types.non_null(_schema.GetPaywallInput))))
_op_mock_paywall = _op.mock_paywall(input=sgqlc.types.Variable('input'))
_op_mock_paywall_plans = _op_mock_paywall.plans()
_op_mock_paywall_plans.__fragment__(fragment_mock_paywall_plan_fragment())
_op_mock_paywall_configuration = _op_mock_paywall.configuration()
_op_mock_paywall_configuration.__fragment__(fragment_paywall_configuration_fragment())
return _op
def query_get_usage_history():
_op = sgqlc.operation.Operation(_schema_root.query_type, name='GetUsageHistory', variables=dict(usageHistoryInput=sgqlc.types.Arg(sgqlc.types.non_null(_schema.UsageHistoryInput))))
_op_usage_history = _op.usage_history(usage_history_input=sgqlc.types.Variable('usageHistoryInput'))
_op_usage_history.__fragment__(fragment_usage_history_fragment())
return _op
class Query:
get_active_subscriptions = query_get_active_subscriptions()
get_checkout_state = query_get_checkout_state()
get_coupons = query_get_coupons()
get_customer_by_id = query_get_customer_by_id()
get_customer_portal_by_ref_id = query_get_customer_portal_by_ref_id()
get_entitlement = query_get_entitlement()
get_entitlements = query_get_entitlements()
get_mock_paywall = query_get_mock_paywall()
get_paywall = query_get_paywall()
get_products = query_get_products()
get_sdk_configuration = query_get_sdk_configuration()
get_usage_history = query_get_usage_history()
def subscription_on_entitlements_updated():
_op = sgqlc.operation.Operation(_schema_root.subscription_type, name='OnEntitlementsUpdated')
_op_entitlements_updated = _op.entitlements_updated()
_op_entitlements_updated.__fragment__(fragment_entitlements_updated_payload())
return _op
def subscription_on_usage_updated():
_op = sgqlc.operation.Operation(_schema_root.subscription_type, name='OnUsageUpdated')
_op_usage_updated = _op.usage_updated()
_op_usage_updated.__fragment__(fragment_entitlement_usage_updated())
return _op
class Subscription:
on_entitlements_updated = subscription_on_entitlements_updated()
on_usage_updated = subscription_on_usage_updated()
class Operations:
fragment = Fragment
mutation = Mutation
query = Query
subscription = Subscription
|
PypiClean
|
/nm_transformers-1.5.1.42301-py3-none-any.whl/transformers/models/mpnet/tokenization_mpnet_fast.py
|
"""Fast Tokenization classes for MPNet."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mpnet import MPNetTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/vocab.txt",
},
"tokenizer_file": {
"microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/mpnet-base": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/mpnet-base": {"do_lower_case": True},
}
class MPNetTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" MPNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = MPNetTokenizer
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=True,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="[UNK]",
pad_token="<pad>",
mask_token="<mask>",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs,
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
):
pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
pre_tok_state["lowercase"] = do_lower_case
pre_tok_state["strip_accents"] = strip_accents
self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
self.do_lower_case = do_lower_case
@property
def mask_token(self) -> str:
"""
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
MPNet tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
comprise the space before the *<mask>*.
"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def mask_token(self, value):
"""
Overriding the default behavior of the mask token to have it eat the space before it.
This is needed to preserve backward compatibility with all the previously used models based on MPNet.
"""
# Mask token behave like a normal word, i.e. include the space before it
# So we set lstrip to True
value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
self._mask_token = value
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of ids.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
|
PypiClean
|
/online_judge_api_client-10.10.1-py3-none-any.whl/onlinejudge/_implementation/utils.py
|
import datetime
import http.client
import http.cookiejar
import posixpath
import urllib.parse
from logging import getLogger
from typing import *
import bs4
import requests
from onlinejudge.type import *
from onlinejudge.utils import * # re-export
logger = getLogger(__name__)
HTML_PARSER = 'lxml'
def previous_sibling_tag(tag: bs4.Tag) -> bs4.Tag:
tag = tag.previous_sibling
while tag and not isinstance(tag, bs4.Tag):
tag = tag.previous_sibling
return tag
def next_sibling_tag(tag: bs4.Tag) -> bs4.Tag:
tag = tag.next_sibling
while tag and not isinstance(tag, bs4.Tag):
tag = tag.next_sibling
return tag
def get_direct_children_text(tag: bs4.Tag) -> str:
"""get_direct_children_text collects the text which are direct children of the given tag.
For example, this returns "A - Hello world " for a tag `<h2>A - Hello world <a href="...">Editorial</a></h2>`.
"""
assert isinstance(tag, bs4.Tag)
text = ''
for child in tag.children:
if isinstance(child, bs4.NavigableString):
text += child.string
elif isinstance(child, bs4.Tag) and child.name == 'br':
text += '\n'
else:
pass
return text
# TODO: Why this returns bs4.NavigableString?
def parse_content(parent: Union[bs4.NavigableString, bs4.Tag, bs4.Comment]) -> bs4.NavigableString:
"""parse_content convert a tag to a string with interpretting `<br>` and ignoring other tags.
.. seealso::
https://github.com/kmyk/online-judge-tools/issues/553
"""
res = ''
if isinstance(parent, bs4.Comment):
pass
elif isinstance(parent, bs4.NavigableString):
return parent
else:
children = parent.contents
if len(children) == 0:
html_tag = str(parent)
return bs4.NavigableString('\n') if 'br' in html_tag else bs4.NavigableString('')
else:
for child in children:
res += parse_content(child)
if parent.name == 'div':
res += '\n'
return bs4.NavigableString(res)
class FormSender:
def __init__(self, form: bs4.Tag, url: str):
assert isinstance(form, bs4.Tag)
assert form.name == 'form'
self.form = form
self.url = url
self.payload = {} # type: Dict[str, str]
self.files = {} # type: Dict[str, IO[Any]]
for input in self.form.find_all('input'):
logger.debug('input: %s', str(input))
if input.attrs.get('type') in ['checkbox', 'radio']:
continue
if 'name' in input.attrs and 'value' in input.attrs:
self.payload[input['name']] = input['value']
def set(self, key: str, value: str) -> None:
self.payload[key] = value
def get(self) -> Dict[str, str]:
return self.payload
def set_file(self, key: str, filename: str, content: bytes) -> None:
self.files[key] = (filename, content) # type: ignore
def unset(self, key: str) -> None:
del self.payload[key]
def request(self, session: requests.Session, method: Optional[str] = None, action: Optional[str] = None, raise_for_status: bool = True, headers: Optional[Dict[str, str]] = None, **kwargs) -> requests.Response:
if method is None:
method = self.form['method'].upper()
url = self.url
if action is None and 'action' in self.form.attrs:
action = self.form.attrs['action']
if action is not None:
url = urllib.parse.urljoin(self.url, action)
if headers is None:
headers = {}
if 'Referer' not in headers:
headers['Referer'] = self.url
return request(method, url, session=session, raise_for_status=raise_for_status, data=self.payload, files=self.files, headers=headers, **kwargs)
def dos2unix(s: str) -> str:
"""
.. deprecated:: 10.1.0
Use :func:`format_sample_case` instead.
"""
return s.replace('\r\n', '\n')
def textfile(s: str) -> str:
"""textfile convert a string s to the "text file" defined in POSIX
.. deprecated:: 10.1.0
Use :func:`format_sample_case` instead.
"""
if s.endswith('\n'):
return s
elif '\r\n' in s:
return s + '\r\n'
else:
return s + '\n'
def format_sample_case(s: str) -> str:
"""format_sample_case convert a string s to a good form as a sample case.
A good form means that, it use LR instead of CRLF, it has the trailing newline, and it has no superfluous whitespaces.
"""
if not s.strip():
return ''
lines = s.strip().splitlines()
lines = [line.strip() + '\n' for line in lines]
return ''.join(lines)
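# Illustrative behaviour of format_sample_case (added comments; not part of the original module):
#     format_sample_case('1 2 \r\n3  \r\n')  ->  '1 2\n3\n'
#     format_sample_case('   \n')            ->  ''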
# We should use this instead of posixpath.normpath
# posixpath.normpath doesn't collapse a leading duplicated slashes. see: https://stackoverflow.com/questions/7816818/why-doesnt-os-normpath-collapse-a-leading-double-slash
def normpath(path: str) -> str:
path = posixpath.normpath(path)
if path.startswith('//'):
path = '/' + path.lstrip('/')
return path
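# Illustrative behaviour (added comments; not part of the original module):
#     normpath('//foo//bar/../baz')  ->  '/foo/baz'   (leading duplicated slashes collapsed)
#     posixpath.normpath('//foo')    ->  '//foo'      (kept as-is, which is why this wrapper exists)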
def request(method: str, url: str, session: requests.Session, raise_for_status: bool = True, **kwargs) -> requests.Response:
"""`request()` is a wrapper of the `requests` package with logging.
There is a way to bring logs from `requests` via `urllib3`, but we don't use it, because it is not an intended feature and is not very customizable. See https://2.python-requests.org/en/master/api/#api-changes
"""
assert method in ['GET', 'POST']
kwargs.setdefault('allow_redirects', True)
logger.info('network: %s: %s', method, url)
if 'data' in kwargs:
logger.debug('network: data: %s', repr(kwargs['data'])) # TODO: prepare a nice filter. This may contain credentials.
resp = session.request(method, url, **kwargs)
if resp.url != url:
logger.info('network: redirected to: %s', resp.url)
logger.info('network: %s %s', resp.status_code, http.client.responses[resp.status_code]) # e.g. "200 OK" or "503 Service Unavailable"
if raise_for_status:
resp.raise_for_status()
return resp
def remove_prefix(s: str, prefix: str) -> str:
assert s.startswith(prefix)
return s[len(prefix):]
def remove_suffix(s: str, suffix: str) -> str:
assert s.endswith(suffix)
return s[:-len(suffix)]
tzinfo_jst = datetime.timezone(datetime.timedelta(hours=+9), 'JST')
class DummySubmission(Submission):
def __init__(self, url: str, problem: Problem):
self.url = url
self.problem = problem
def download_code(self, session: Optional[requests.Session] = None) -> bytes:
raise NotImplementedError
def get_url(self) -> str:
return self.url
def download_problem(self, *, session: Optional[requests.Session] = None) -> Problem:
raise NotImplementedError
def get_service(self) -> Service:
raise NotImplementedError
def __repr__(self) -> str:
return '{}({}, problem={})'.format(self.__class__, self.url, self.problem)
@classmethod
def from_url(cls, s: str) -> Optional[Submission]:
return None
|
PypiClean
|
/Nasdaq%20Data%20Link-1.0.4.tar.gz/Nasdaq Data Link-1.0.4/README.md
|
# Nasdaq Data Link Python Client
This is the official documentation for Nasdaq Data Link's Python Package. The package can be used to interact with the latest version of the [Nasdaq Data Link's RESTful API](https://docs.data.nasdaq.com/docs). This package is compatible with python v3.7+.
## Installation
The installation process varies depending on your python version and system used. However in most cases the following should work:
```shell
pip install nasdaq-data-link
```
Alternatively on some systems python3 may use a different pip executable and may need to be installed via an alternate pip command. For example:
```shell
pip3 install nasdaq-data-link
```
## Configuration
| Option | Explanation | Example |
|---|---|---|
| api_key | Your access key, used to identify who you are and provide full access | `tEsTkEy123456789` |
| use_retries | Whether API calls which return statuses in `retry_status_codes` should be automatically retried | `True` |
| number_of_retries | Maximum number of retries that should be attempted. Only used if `use_retries` is True | `5` |
| max_wait_between_retries | Maximum amount of time in seconds that should be waited before attempting a retry. Only used if `use_retries` is True | `8` |
| retry_backoff_factor | Determines the amount of time in seconds that should be waited before attempting another retry. Note that this factor is exponential, so a `retry_backoff_factor` of 0.5 will cause waits of [0.5, 1, 2, 4, etc.]. Only used if `use_retries` is True | `0.5` |
| retry_status_codes | A list of HTTP status codes which will trigger a retry to occur. Only used if `use_retries` is True | `[429, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511]` |
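These options are set as attributes on `nasdaqdatalink.ApiConfig`, as in the SSL example below. The following sketch assumes each option in the table maps one-to-one to an `ApiConfig` attribute of the same name:
```python
import nasdaqdatalink

# Assumption: attribute names mirror the option names in the table above.
nasdaqdatalink.ApiConfig.api_key = "tEsTkEy123456789"
nasdaqdatalink.ApiConfig.use_retries = True
nasdaqdatalink.ApiConfig.number_of_retries = 5
nasdaqdatalink.ApiConfig.max_wait_between_retries = 8
nasdaqdatalink.ApiConfig.retry_backoff_factor = 0.5
nasdaqdatalink.ApiConfig.retry_status_codes = [429, 500, 502, 503, 504]
```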
By default, SSL verification is enabled. To bypass SSL verification (not recommended), simply:
```python
nasdaqdatalink.ApiConfig.verify_ssl = False
```
### Environment Variables
You may use environment variables to configure the Data Link SDK to avoid any
inline boilerplate.
| Env | Description |
|---|---|
| NASDAQ_DATA_LINK_API_KEY | The SDK will configure itself to use the given API Key |
| NASDAQ_DATA_LINK_BASE_DOMAIN | The SDK will configure itself to use the provided domain |
### Local API Key Environment Variable
If you wish to store your API key as an environment variable, you can do so by setting `NASDAQ_DATA_LINK_API_KEY`. If set, `NASDAQ_DATA_LINK_API_KEY` will take precedence over the API key file mentioned below.
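As a minimal sketch (the key below is a placeholder), the variable only needs to be visible to the process before the SDK is used; setting it from Python is equivalent to exporting it in your shell:
```python
import os

os.environ["NASDAQ_DATA_LINK_API_KEY"] = "tEsTkEy123456789"  # placeholder key

import nasdaqdatalink  # the SDK picks the key up from the environment
```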
### Local API Key file
The default configuration file location is `~/.nasdaq/data_link_apikey`. The
client will attempt to load this file if it exists. Note: if the file exists
and is empty, a ValueError will be thrown.
#### Alternative API Key file location
Since 1.0.1, the `nasdaq-data-link` module will attempt to autoload your API Key. If you prefer to store it in another location, you must
explicitly call `read_key()` with a custom path. See below:
```python
import nasdaqdatalink
nasdaqdatalink.read_key(filename="/data/.corporatenasdaqdatalinkapikey")
```
## Retrieving Data
There are two methods for retrieving data in Python: the Quick method and the Detailed method. The latter is more suitable for application programming. Both methods work with Nasdaq Data Link's two types of data structures: time-series data (datasets) and non-time-series data (datatables).
The following quick call can be used to retrieve a dataset:
```python
import nasdaqdatalink
data = nasdaqdatalink.get('NSE/OIL')
```
This example finds all data points for the dataset `NSE/OIL` and stores them in a pandas dataframe. You can then view the dataframe with `data.head()`.
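The quick call also accepts optional filtering arguments. The sketch below uses the classic Quandl-style parameters (`start_date`, `end_date`, `collapse`); support for a given filter may vary by dataset:
```python
import nasdaqdatalink

data = nasdaqdatalink.get(
    "NSE/OIL",
    start_date="2020-01-01",  # inclusive lower bound
    end_date="2020-12-31",    # inclusive upper bound
    collapse="monthly",       # downsample on the server side
)
print(data.head())
```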
A similar quick call can be used to retrieve a datatable:
```python
import nasdaqdatalink
data = nasdaqdatalink.get_table('ZACKS/FC', ticker='AAPL')
```
This example retrieves all rows for `ZACKS/FC` where `ticker='AAPL'` and stores them in a pandas dataframe. Similarly, you can then view the dataframe with `data.head()`.
Note that in both examples, if an `api_key` has not been set, you may receive limited or sample data. You can find more details on these quick calls and others in our [Quick Method Guide](./FOR_ANALYSTS.md).
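Building on the datatable example above, `get_table` can also filter on multiple values and select columns. This sketch assumes Quandl-style `qopts` support; the column names are illustrative:
```python
import nasdaqdatalink

data = nasdaqdatalink.get_table(
    "ZACKS/FC",
    ticker=["AAPL", "MSFT"],                        # filter on several tickers
    qopts={"columns": ["ticker", "per_end_date"]},  # return only these columns
)
print(data.head())
```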
### Logging
Currently, Nasdaq Data Link debug logging is limited in scope. However, to enable debug
logs you can use the following snippet.
```python
import nasdaqdatalink
import logging
logging.basicConfig()
# logging.getLogger().setLevel(logging.DEBUG)  # optionally set level for everything; useful to see dependency debug info as well
data_link_log = logging.getLogger("nasdaqdatalink")
data_link_log.setLevel(logging.DEBUG)
```
### Detailed Usage
Our API can provide more than just data. It can also be used to search and provide metadata or to programmatically retrieve data. For these more advanced techniques please follow our [Detailed Method Guide](./FOR_DEVELOPERS.md).
## Local Development
### Setup
If you wish to work on local development please clone/fork the git repo and use `pip install -r requirements.txt` to setup the project.
### Testing
We recommend the following tools for testing any changes:
* [nose](https://nose.readthedocs.org/en/latest/) for running tests.
* [tox](https://pypi.python.org/pypi/tox) for testing against multiple versions of python.
* [flake8](https://flake8.readthedocs.org/en/latest/) for syntax checking.
* [virtualenv](https://virtualenv.pypa.io/en/latest/) for use with tox virtualization.
The following are instructions for running our tests:
1. Make sure a version of 3.x is installed locally in your system. To avoid permission issues on OSX we recommend installing the packages from: https://www.python.org/downloads/
2. Install `virtualenv` and `tox` using:
`pip install tox virtualenv`
3. Run following command (you may notice slow performance the first time):
`python setup.py install`
4. Run the following command to test the plugin in all versions of python we support:
`tox`
Once you have all required packages installed, you can run tests locally with:
Running all tests locally
```python
python -W always setup.py -q test
```
Running an individual test
```python
python -m unittest test.[test file name].[class name].[individual test name]
```
Example:
```python
python -m unittest -v test.test_datatable.ExportDataTableTest.test_download_get_file_info
```
## Recommended Usage
We suggest downloading the data in raw format at the highest frequency available and performing any data manipulation
in pandas itself.
See [this link](http://pandas.pydata.org/pandas-docs/dev/timeseries.html) for more information about timeseries in pandas.
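As a small illustration (a sketch only; it assumes the returned dataframe is indexed by date, as time-series calls typically are), you could aggregate daily data to monthly averages locally rather than requesting a lower frequency from the API:
```python
import nasdaqdatalink

data = nasdaqdatalink.get('NSE/OIL')  # raw daily data, as in the quick call above
monthly = data.resample('M').mean()   # downsample to monthly averages in pandas
print(monthly.head())
```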
## Release the Package
To release the package, you can follow the instructions on this [page](https://packaging.python.org/tutorials/packaging-projects/#packaging-python-projects)
## Additional Links
* [Nasdaq Data Link](https://data.nasdaq.com)
* [Nasdaq Data Link Tools](https://data.nasdaq.com/tools/full-list)
* [API Docs](https://docs.data.nasdaq.com/docs)
## License
[MIT License](http://opensource.org/licenses/MIT)
|
PypiClean
|
/cloudmesh-common-4.3.141.tar.gz/cloudmesh-common-4.3.141/cloudmesh/common/strdb.py
|
import os
import os.path
from builtins import bytes
import oyaml as yaml
#
# TODO: this does not follow our conventions, should this not be done at __init__?
#
###############################################################
# make yaml understand unicode
def yaml_construct_unicode(self, node):
return self.construct_scalar(node)
yaml.Loader.add_constructor(u'tag:yaml.org,2002:python/unicode', yaml_construct_unicode)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:python/unicode', yaml_construct_unicode)
###############################################################
# the db api
class YamlDB(object):
"""A YAML-backed Key-Value database to store strings
"""
def __init__(self, path):
self._db = dict()
self.path = path
prefix = os.path.dirname(self.path)
if not os.path.exists(prefix):
os.makedirs(prefix)
if os.path.exists(self.path):
with open(self.path, 'rb') as dbfile:
self._db = yaml.safe_load(dbfile) or dict()
self.flush()
def flush(self):
string = yaml.dump(self._db, default_flow_style=False)
bits = bytes(string, encoding='utf-8')
with open(self.path, 'wb') as dbfile:
dbfile.write(bits)
def __setitem__(self, k, v):
self._db[str(k)] = str(v)
self.flush()
def __getitem__(self, k):
return self._db[k]
def __delitem__(self, k):
del self._db[k]
self.flush()
def __contains__(self, k):
return k in self._db
def __iter__(self):
return iter(self._db)
def __len__(self):
return len(self._db)
def close(self):
"""This is a NoOP for backwards compatibility"""
pass
def clear(self):
"""Truncate the database"""
self._db.clear()
self.flush()
def set(self, **kwargs):
for name in kwargs:
self._db[name] = kwargs[name]
def dict(self):
return self._db
def get(self, key, value=None):
        return self._db.get(key, value)
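# A minimal usage sketch (the path below is only an example location); run the
# module directly to exercise the class:
if __name__ == '__main__':
    db = YamlDB(path='/tmp/strdb-demo/db.yml')
    db['greeting'] = 'hello'
    print('greeting' in db, db['greeting'], len(db))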
|
PypiClean
|
/nniv04-0.4.1-py3-none-any.whl/nniv04-0.4.1.data/data/nni/node_modules/tail-stream/test/wait-create.js
|
var assert = require('assert');
var child = require('child_process');
var fs = require('fs');
var path = require('path');
var ts = require('../index.js');
var tmpDir = path.resolve('test','tmp');
var newLine = 'The rain in spain falls mainly on the plain\n';
var filePath = path.resolve(tmpDir, 'wait-create');
var childOpts = { env: {
TEST_FILE_PATH: filePath,
TEST_LOG_LINE: newLine,
} };
describe('tail-stream', function () {
context('wait for file creation', function () {
before(function (done) {
// create test/tmp, if not already
fs.mkdir(tmpDir, function (err) {
fs.unlink(filePath, function(err2) {
done();
});
});
});
it('fails without waitForCreate option', function (done) {
assert.throws(
function() {
ts.createReadStream(filePath);
}
);
done();
});
it('detect append after file creation', function (done) {
var tstream = ts.createReadStream(filePath, {
waitForCreate: true
});
tstream.on('data', function (data) {
assert.equal(data.toString(), newLine);
done();
});
child.fork('./test/helpers/fileAppend.js', childOpts);
});
/*it('', function (done) {
dataCount = 0;
var tstream = ts.createReadStream(filePath, {
beginAt: 'end',
});
tstream.on('data', function(data) {
dataCount++;
assert.equal(data.toString(), newLine);
if (dataCount === 2) done();
});
tstream.on('error', function(err) {
assert.ifError(err);
});
// append in a separate process, so this one gets the watch event
var cp = child.fork('./test/helpers/fileAppend.js', childOpts);
cp.on('message', function (msg) {
// console.log(msg);
var cp2 = child.fork('./test/helpers/fileAppend.js', childOpts);
cp2.on('message', function (msg) {
// console.log(msg);
});
});
});*/
});
});
|
PypiClean
|
/python-parakeet-0.4.5.tar.gz/python-parakeet-0.4.5/pybind11/docs/benchmark.rst
|
Benchmark
=========
The following is the result of a synthetic benchmark comparing both compilation
time and module size of pybind11 against Boost.Python. A detailed report about a
Boost.Python to pybind11 conversion of a real project is available here: [#f1]_.
.. [#f1] http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf
Setup
-----
A python script (see the ``docs/benchmark.py`` file) was used to generate a set
of files with dummy classes whose count increases for each successive benchmark
(between 1 and 2048 classes in powers of two). Each class has four methods with
a randomly generated signature with a return value and four arguments. (There
was no particular reason for this setup other than the desire to generate many
unique function signatures whose count could be controlled in a simple way.)
Here is an example of the binding code for one class:
.. code-block:: cpp
...
class cl034 {
public:
cl279 *fn_000(cl084 *, cl057 *, cl065 *, cl042 *);
cl025 *fn_001(cl098 *, cl262 *, cl414 *, cl121 *);
cl085 *fn_002(cl445 *, cl297 *, cl145 *, cl421 *);
cl470 *fn_003(cl200 *, cl323 *, cl332 *, cl492 *);
};
...
PYBIND11_MODULE(example, m) {
...
py::class_<cl034>(m, "cl034")
.def("fn_000", &cl034::fn_000)
.def("fn_001", &cl034::fn_001)
.def("fn_002", &cl034::fn_002)
.def("fn_003", &cl034::fn_003)
...
}
The Boost.Python version looks almost identical except that a return value
policy had to be specified as an argument to ``def()``. For both libraries,
compilation was done with
.. code-block:: bash
Apple LLVM version 7.0.2 (clang-700.1.81)
and the following compilation flags
.. code-block:: bash
g++ -Os -shared -rdynamic -undefined dynamic_lookup -fvisibility=hidden -std=c++14
Compilation time
----------------
The following log-log plot shows how the compilation time grows for an
increasing number of class and function declarations. pybind11 includes many
fewer headers, which initially leads to shorter compilation times, but the
performance is ultimately fairly similar (pybind11 is 19.8 seconds faster for
the largest file with 2048 classes and a total of 8192 methods -- a
modest **1.2x** speedup relative to Boost.Python, which required 116.35
seconds).
.. only:: not latex
.. image:: pybind11_vs_boost_python1.svg
.. only:: latex
.. image:: pybind11_vs_boost_python1.png
Module size
-----------
Differences between the two libraries become much more pronounced when
considering the file size of the generated Python plugin: for the largest file,
the binary generated by Boost.Python required 16.8 MiB, which was **2.17
times** / **9.1 megabytes** larger than the output generated by pybind11. For
very small inputs, Boost.Python has an edge in the plot below -- however, note
that it stores many definitions in an external library, whose size was not
included here, hence the comparison is slightly shifted in Boost.Python's
favor.
.. only:: not latex
.. image:: pybind11_vs_boost_python2.svg
.. only:: latex
.. image:: pybind11_vs_boost_python2.png
|
PypiClean
|
/skailarestframework-3.14.0-py3-none-any.whl/rest_framework/negotiation.py
|
from skailar.http import Http404
from rest_framework import exceptions
from rest_framework.settings import api_settings
from rest_framework.utils.mediatypes import (
_MediaType, media_type_matches, order_by_precedence
)
class BaseContentNegotiation:
def select_parser(self, request, parsers):
raise NotImplementedError('.select_parser() must be implemented')
def select_renderer(self, request, renderers, format_suffix=None):
raise NotImplementedError('.select_renderer() must be implemented')
class DefaultContentNegotiation(BaseContentNegotiation):
settings = api_settings
def select_parser(self, request, parsers):
"""
Given a list of parsers and a media type, return the appropriate
parser to handle the incoming request.
"""
for parser in parsers:
if media_type_matches(parser.media_type, request.content_type):
return parser
return None
def select_renderer(self, request, renderers, format_suffix=None):
"""
Given a request and a list of renderers, return a two-tuple of:
(renderer, media type).
"""
        # Allow URL style format override, e.g. "?format=json"
format_query_param = self.settings.URL_FORMAT_OVERRIDE
format = format_suffix or request.query_params.get(format_query_param)
if format:
renderers = self.filter_renderers(renderers, format)
accepts = self.get_accept_list(request)
# Check the acceptable media types against each renderer,
# attempting more specific media types first
# NB. The inner loop here isn't as bad as it first looks :)
# Worst case is we're looping over len(accept_list) * len(self.renderers)
for media_type_set in order_by_precedence(accepts):
for renderer in renderers:
for media_type in media_type_set:
if media_type_matches(renderer.media_type, media_type):
# Return the most specific media type as accepted.
media_type_wrapper = _MediaType(media_type)
if (
_MediaType(renderer.media_type).precedence >
media_type_wrapper.precedence
):
# Eg client requests '*/*'
# Accepted media type is 'application/json'
full_media_type = ';'.join(
(renderer.media_type,) +
tuple(
'{}={}'.format(key, value)
for key, value in media_type_wrapper.params.items()
)
)
return renderer, full_media_type
else:
# Eg client requests 'application/json; indent=8'
# Accepted media type is 'application/json; indent=8'
return renderer, media_type
raise exceptions.NotAcceptable(available_renderers=renderers)
def filter_renderers(self, renderers, format):
"""
If there is a '.json' style format suffix, filter the renderers
        so that we only negotiate against those that accept that format.
"""
renderers = [renderer for renderer in renderers
if renderer.format == format]
if not renderers:
raise Http404
return renderers
def get_accept_list(self, request):
"""
Given the incoming request, return a tokenized list of media
type strings.
"""
header = request.META.get('HTTP_ACCEPT', '*/*')
return [token.strip() for token in header.split(',')]
|
PypiClean
|
/Fabhacks-0.1.6.tar.gz/Fabhacks-0.1.6/fabhacks/__init__.py
|
from uuid import uuid4
from time import sleep
from fabric.api import run, sudo, put as fab_put
from fabric.context_managers import cd
# Restart something
# with a check to ensure running
def restart_confirm(check, command, backoff=0, use_sudo=False):
func = sudo if use_sudo else run
func(command, pty=False)
sleep(backoff)
status = func('ps aux | grep -v grep | grep {0}'.format(check), quiet=True, warn_only=True)
if not status.succeeded:
backoff += 1
        print('Restart command failed: {0}, retrying with {1}s backoff...'.format(command, backoff))
restart_confirm(check, command, backoff=backoff, use_sudo=use_sudo)
# Setup app user
# creates user, home directory and uploads ssh/deploy key
def create_user(username, directory, key=None, use_sudo=False):
func = sudo if use_sudo else run
if not func('find {0}'.format(directory), quiet=True, warn_only=True).succeeded:
        print('CREATE USER')
func('echo -e "\n\n\n\n\n\n" | adduser {0}'.format(username))
# Setup SSH deploy key/etc
func('mkdir -p {0}/.ssh'.format(directory))
func('chown -R {0}:{0} {1}/.ssh/'.format(username, directory))
# Install deploy key for GitHub => user
if key is not None:
put(local_path=key, remote_path='{0}/.ssh/id_rsa'.format(directory), use_sudo=use_sudo)
# Deploy git app
# deploys and/or updates a git based application
def deploy_git(destination, user, repository, branch='master', use_sudo=False):
func = sudo if use_sudo else run
if not func('find {0}/.git/index'.format(destination), quiet=True, warn_only=True).succeeded:
func('mkdir -p {0}'.format(destination))
func('chown -R {0}:{0} {1}'.format(user, destination))
func('git clone -b {0} {1} {2}'.format(branch, repository, destination), user=user)
else:
with cd(destination):
func('git checkout {0}'.format(branch), user=user)
            func('git pull', user=user)
# Install Pip
# installs latest + installs 1.4 over the top because post-v1.4 is ruined
def install_pip(use_sudo=False):
func = sudo if use_sudo else run
# Got pip already?
if not func('which pip', quiet=True, warn_only=True).succeeded:
func('wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py -O /tmp/get-pip.py')
func('python /tmp/get-pip.py')
# Downgrade pip to something not built by idiots
# no sane person wants to deal with --allow-unverified shit
func('pip install pip==1.4')
# Put a file
# unlike fabric, it works with sudo!
def put(local_path='', remote_path='', use_sudo=False):
if not use_sudo:
return fab_put(local_path, remote_path, use_sudo=use_sudo)
# Put the file normally
home_dir = run('echo $HOME')
tmp_path = '{0}/{1}'.format(home_dir, uuid4())
fab_put(local_path, tmp_path)
# NOW sudo move it
return sudo('mv "{0}" "{1}"'.format(tmp_path, remote_path))
|
PypiClean
|
/neural_compressor_full-2.1.1.tar.gz/neural_compressor_full-2.1.1/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert.py
|
"""Graph rewriter BF16 Converter Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import copy
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework.kernels import get_registered_kernels_for_op
from ..graph_base import GraphRewriterBase
from neural_compressor.adaptor.tf_utils.graph_util import GraphAnalyzer
from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper as Helper
from ..generic.graph_cse_optimizer import GraphCseOptimizer
from ..generic.dequantize_cast_optimizer import DequantizeCastOptimizer
DT_FLOAT32 = attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)
DT_BFLOAT16 = attr_value_pb2.AttrValue(type=dtypes.bfloat16.as_datatype_enum)
class BF16Convert(GraphRewriterBase):
"""BF16 node convert transformation."""
def __init__(self,
model,
fp32_ops=[],
bf16_ops=[]):
"""Initilization.
Args: model: the model to be converted to BF16.
fp32_ops: keep with fp32 op list
bf16_ops: convert to bf16 op list
"""
super().__init__(model)
self.cur_graph = GraphAnalyzer()
self.cur_graph.graph = self.model
self.fp32_ops = fp32_ops
self.bf16_ops = bf16_ops
self.converted_ops = []
self.device = ["CPU", "DEFAULT"] #TODO support differnt device types, such as GPU
def _dtype(self, node):
"""Get the dtype of the node."""
op_def = op_def_registry.get(node.op)
inputs_dt = []
outputs_dt = []
for i in op_def.input_arg:
inputs_num = node.attr[i.number_attr].i if i.number_attr else 1
for j in range(inputs_num):
if i.type:
inputs_dt.append('')
else:
inputs_dt.append(i.type_attr)
for i in op_def.output_arg:
outputs_num = node.attr[i.number_attr].i if i.number_attr else 1
for j in range(outputs_num):
if i.type:
outputs_dt.append('')
else:
outputs_dt.append(i.type_attr)
return inputs_dt, outputs_dt
def _dtype_val(self, node):
"""Get the dtype value of the node."""
op_def = op_def_registry.get(node.op)
inputs_dt_val = []
outputs_dt_val = []
for i in op_def.input_arg:
inputs_num = node.attr[i.number_attr].i if i.number_attr else 1
for j in range(inputs_num):
if i.type:
inputs_dt_val.append(copy.deepcopy(attr_value_pb2.AttrValue(type=i.type)))
else:
inputs_dt_val.append(copy.deepcopy(node.attr[i.type_attr]))
for i in op_def.output_arg:
outputs_num = node.attr[i.number_attr].i if i.number_attr else 1
for j in range(outputs_num):
if i.type:
outputs_dt_val.append(copy.deepcopy(attr_value_pb2.AttrValue(type=i.type)))
else:
outputs_dt_val.append(copy.deepcopy(node.attr[i.type_attr]))
return inputs_dt_val, outputs_dt_val
def _allowed_dtype_val(self, node):
"""Get the allowed dtype value of the node."""
op_def = op_def_registry.get(node.op)
allowed_dt_val = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
if attr_def.HasField("allowed_values"):
allowed_dt_val[attr_def.name] = attr_def.allowed_values.list.type
        # The supported data types in op_def may differ from the registered kernels.
        # Use the registered ones if they exist.
registered_dt_val = {}
registered_kernels = get_registered_kernels_for_op(node.op)
for kernel in registered_kernels.kernel:
if kernel.device_type in self.device:
for constraint in kernel.constraint:
if constraint.HasField("allowed_values"):
if constraint.name not in registered_dt_val:
registered_dt_val[constraint.name] = constraint.allowed_values.list.type
else:
registered_dt_val[constraint.name].extend(constraint.allowed_values.list.type)
for dt_val in registered_dt_val:
if registered_dt_val[dt_val] != []:
allowed_dt_val[dt_val] = registered_dt_val[dt_val]
return allowed_dt_val
def _bf16_convert(self, bf16_node_name):
"""BF16 convertion for the model.
Args: bf16_node_name: nodes converted to BF16 op list
"""
bf16_node_detail = self.cur_graph.node_name_details[bf16_node_name]
bf16_node = bf16_node_detail.node
bf16_node_outputs = copy.deepcopy(bf16_node_detail.outputs)
if bf16_node.name in self.converted_ops:
return
elif 'Dequantize' in bf16_node.op:
return
else:
self.converted_ops.append(bf16_node.name)
inputs_dt, outputs_dt = self._dtype(bf16_node)
inputs_dt_val, outputs_dt_val = self._dtype_val(bf16_node)
allowed_dt_val = self._allowed_dtype_val(bf16_node)
for index, input_name in enumerate(bf16_node.input):
if input_name.startswith('^'):
continue
input_detail = self.cur_graph.node_name_details[Helper.node_name_from_input(
input_name)]
input_node = input_detail.node
input_node_outputs = input_detail.outputs
if inputs_dt[index] in allowed_dt_val and \
dtypes.bfloat16.as_datatype_enum not in allowed_dt_val[inputs_dt[index]]:
continue
if inputs_dt_val[index] != DT_FLOAT32:
continue
if input_node.op == 'Cast' and \
input_node.attr["SrcT"] == DT_BFLOAT16 and \
input_node.attr["DstT"] == DT_FLOAT32 and len(input_node_outputs) == 1:
parent_input_name = Helper.node_name_from_input(input_node.input[0])
bf16_node.input[index] = input_node.input[0]
outputs = self.cur_graph.node_name_details[parent_input_name].outputs
outputs = list(map(lambda x: x.replace(input_name, bf16_node.name), outputs))
self.cur_graph.remove_node(input_name)
elif input_node.op == 'Cast' and \
input_node.attr["DstT"] == DT_FLOAT32 and len(input_node_outputs) == 1:
input_node.attr["DstT"].CopyFrom(DT_BFLOAT16)
elif input_node.op == "Const" and len(input_node_outputs) == 1:
fp32_value = tensor_util.MakeNdarray(input_node.attr.get('value').tensor)
Helper.set_attr_dtype(input_node, "dtype", dtypes.bfloat16)
input_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(
fp32_value, dtypes.bfloat16, fp32_value.shape)))
elif 'Dequantize' == input_node.op and len(input_node_outputs) == 1 \
and input_node.attr['mode'].s != b'MIN_FIRST':
                # Dequantize with mode MIN_FIRST does not support bf16 in either eigen or mkl
_, outputs_dt_input_node = self._dtype(input_node)
allowed_input_node_dt_val = self._allowed_dtype_val(input_node)
if outputs_dt_input_node[0] in allowed_input_node_dt_val and \
dtypes.bfloat16.as_datatype_enum in allowed_input_node_dt_val[outputs_dt_input_node[0]]:
input_node.attr[outputs_dt_input_node[0]].CopyFrom(DT_BFLOAT16)
# ResizeBilinear input can be of different types but output is always float
elif input_node.name in self.bf16_ops and "Dequantize" not in input_node.op and \
input_node.op != 'ResizeBilinear':
self._bf16_convert(input_node.name)
else:
cast_node_name = input_name.replace(':', '_') + "/" + bf16_node_name + "_FP32toBF16"
if cast_node_name not in list(self.cur_graph.node_name_details.keys()):
input_cast_node = Helper.create_node(
"Cast", cast_node_name, [input_name])
Helper.set_attr_dtype(input_cast_node, "DstT", dtypes.bfloat16)
Helper.set_attr_dtype(input_cast_node, "SrcT", dtypes.float32)
Helper.set_attr_bool(input_cast_node, "Truncate", False)
bf16_node.input[index] = cast_node_name
outputs = self.cur_graph.node_name_details[ \
Helper.node_name_from_input(input_name)].outputs
outputs = list(map(lambda x: x.replace(bf16_node.name, cast_node_name), outputs))
self.cur_graph.add_node(input_cast_node, input_name, [bf16_node_name])
bf16_node.attr[inputs_dt[index]].CopyFrom(
attr_value_pb2.AttrValue(type=dtypes.bfloat16.as_datatype_enum))
for output_name in bf16_node_outputs:
if bf16_node.op == 'ResizeBilinear':
continue
output_detail = self.cur_graph.node_name_details[output_name]
output_node = output_detail.node
inputs_dt_input_node, _ = self._dtype(output_node)
allowed_output_node_dt_val = self._allowed_dtype_val(output_node)
for i, input_name in enumerate(output_node.input):
if input_name.startswith('^'):
continue
if bf16_node.name != input_name.split(':')[0]:
continue
index = int(input_name.split(':')[-1]) if ':' in input_name else 0
if outputs_dt[index] in allowed_dt_val and \
dtypes.bfloat16.as_datatype_enum not in allowed_dt_val[outputs_dt[index]]:
continue
if outputs_dt_val[index] != DT_FLOAT32:
continue
if output_node.op == 'Cast':
output_node.attr["SrcT"].CopyFrom(DT_BFLOAT16)
elif output_node.op == 'QuantizeV2' and 'dtype' in output_node.attr:
if 'dtype' in allowed_output_node_dt_val and \
dtypes.bfloat16.as_datatype_enum in allowed_output_node_dt_val['dtype']:
output_node.attr["dtype"].CopyFrom(DT_BFLOAT16)
elif output_node.name not in self.bf16_ops or \
inputs_dt_input_node[i] in allowed_output_node_dt_val and \
dtypes.bfloat16.as_datatype_enum not in allowed_output_node_dt_val[inputs_dt_input_node[i]]:
cast_node_name = bf16_node_name + "/" + output_node.name + "_BF16toFP32"
if cast_node_name in self.cur_graph.node_name_details.keys():
continue
output_cast_node = Helper.create_node(
"Cast", cast_node_name, [input_name])
Helper.set_attr_dtype(output_cast_node, "DstT", dtypes.float32)
Helper.set_attr_dtype(output_cast_node, "SrcT", dtypes.bfloat16)
Helper.set_attr_bool(output_cast_node, "Truncate", False)
index = [i for i in output_node.input].index(input_name)
output_node.input[index] = output_cast_node.name
self.cur_graph.add_node(output_cast_node, bf16_node_name, [output_name])
def _model_bf16_convert(self):
"""Convert model to BF16."""
logging.debug("start convert bf16 graph")
self.cur_graph.parse_graph()
for bf16_node_name in set(self.bf16_ops):
if bf16_node_name not in self.cur_graph.node_name_details:
self.bf16_ops.remove(bf16_node_name)
for bf16_node_name in sorted(list(set(self.bf16_ops))):
self._bf16_convert(bf16_node_name)
return self.cur_graph.dump_graph()
def do_transformation(self):
"""Execute BF16 convert.
Returns: Transformed graph
"""
converted_graph_def = self._model_bf16_convert()
# remove those ops which could be shared by Graph Cse optimizer
converted_graph_def = GraphCseOptimizer(converted_graph_def).do_transformation()
# remove cast and set dequantize dtype bf16 when all outputs of dequantize are bf16
converted_graph_def = DequantizeCastOptimizer(converted_graph_def).do_transformation()
converted_graph_def.library.CopyFrom(self.model.library)
return converted_graph_def
|
PypiClean
|
/latexipy-1.0.1.tar.gz/latexipy-1.0.1/docs/usage.rst
|
=====
Usage
=====
To use LaTeXiPy in a project::
import latexipy as lp
Set up for LaTeX with::
lp.latexify()
And make your figures with::
with lp.figure('filename'):
draw_the_plot()
Include the figure in LaTeX with:
.. code-block:: latex
\usepackage{pgf}
\input{filename.pgf}
Example files
=============
From the ``latexipy`` GitHub repository.
* Python_
* Latex_
* PDF_
.. _Python: https://github.com/masasin/latexipy/blob/master/examples/examples.py
.. _Latex: https://github.com/masasin/latexipy/blob/master/examples/example.tex
.. _PDF: https://github.com/masasin/latexipy/raw/master/examples/example.pdf
Minimum Working Example
=======================
.. code-block:: python
:caption: sincos_plotter.py
:emphasize-lines: 4, 6, 8
import numpy as np
import matplotlib.pyplot as plt
import latexipy as lp
lp.latexify()
with lp.figure('sincos'):
x = np.linspace(-np.pi, np.pi)
y_sin = np.sin(x)
y_cos = np.cos(x)
plt.plot(x, y_sin, label='sine')
plt.plot(x, y_cos, label='cosine')
plt.title('Sine and cosine')
plt.xlabel(r'$\theta$')
plt.ylabel('Value')
plt.legend()
.. code-block:: latex
:caption: sincos_report.tex
:emphasize-lines: 3, 8
\documentclass{article}
\usepackage{pgf}
\begin{document}
\begin{figure}[h]
\centering
\input{img/filename.pgf}
\caption[LOF caption]{Regular caption.}
\label{fig:pgf_example}
\end{figure}
\end{document}
Plotting
========
Without LaTeX
-------------
If you are not making your plots for LaTeX, Matplotlib's defaults are used.
The typeface is sans-serif, and the font, a bit large.
The default arguments save a PGF and PNG file in the ``img/`` directory.
.. code-block:: python
:emphasize-lines: 1
with lp.figure('sincos'):
plot_sin_and_cos()
.. image:: ../examples/img/sincos_no_latex.png
With LaTeX
----------
If you are building for LaTeX, just ``lp.latexify()``!
.. code-block:: python
:emphasize-lines: 1, 3
lp.latexify()
with lp.figure('sincos'):
plot_sin_and_cos()
.. image:: ../examples/img/sincos_defaults.png
Using custom parameters
-----------------------
By default, ``lp.latexify()`` uses ``lp.PARAMS``, which has the following values:
.. literalinclude:: ../latexipy/_latexipy.py
:caption: _latexipy.py
:linenos:
:lineno-start: 22
:lines: 22-41
Passing a different dictionary to ``lp.latexify()`` causes these changes to be permanent in the rest of the code.
For example, to increase the font size throughout:
.. literalinclude:: ../examples/examples.py
:caption: examples.py
:emphasize-lines: 2, 3, 6
:linenos:
:lineno-start: 93
:lines: 93-101
You can call ``lp.latexify()`` multiple times throughout your code, but if you want to change the setting only for a few figures, the recommended approach is to use ``lp.temp_params()``. This automatically reverts to the previous settings after saving (or attempting to save) the plot.
.. literalinclude:: ../examples/examples.py
:caption: examples.py
:emphasize-lines: 2
:linenos:
:lineno-start: 87
:lines: 87-90
Either way, the font size would have increased uniformly from 8 to 10 pt.
.. image:: ../examples/img/sincos_big_font_temp.png
Note that ``lp.temp_params()`` can also take a custom dictionary which can do more fine-grained tuning of fonts.
.. literalinclude:: ../examples/examples.py
:caption: examples.py
:emphasize-lines: 1
:linenos:
:lineno-start: 111
:lines: 111-116
.. image:: ../examples/img/sincos_big_label_title.png
Reverting
---------
To revert all changes made with ``lp.latexify()`` and other commands, just run ``lp.revert()``.
Avoiding repetition
===================
If you keep passing the same arguments to ``lp.figure()`` (for example, an output directory, a set of filetypes, or a certain size), you can save it for reuse by using ``functools.partial()``.
After that, you can use it just like ``lp.figure()``.
Note that you would not be able to redefine an argument that you had previously applied.
.. code-block:: python
:emphasize-lines: 1, 3
figure = partial(lp.figure, directory=DIRECTORY)
with figure('sincos_partial'):
plot_sin_and_cos()
where ``DIRECTORY`` is the default output directory.
This pattern was used extensively in the Python_ file from the examples_ directory.
Using in LaTeX
==============
To include a PGF file in your LaTeX document, make sure that the ``pgf`` package is loaded in the preamble.
.. code-block:: latex
\usepackage{pgf}
After that, you can include it in the correct location with:
.. code-block:: latex
\input{<filename>.pgf}
A minimum working example of an image within a figure is shown below.
.. code-block:: latex
:emphasize-lines: 3, 8
\documentclass{article}
\usepackage{pgf}
\begin{document}
\begin{figure}[h]
\centering
\input{img/filename.pgf}
\caption[LOF caption]{Regular caption.}
\label{fig:pgf_example}
\end{figure}
\end{document}
Note that figures using additional raster images can only be included by ``\input{}`` if they are in the same directory as the main LaTeX file.
To load figures from other directories, you can use the ``import`` package instead.
.. code-block:: latex
\usepackage{import}
\import{<path to file>}{<filename>.pgf}
A minimum working example of that scenario is shown below.
.. code-block:: latex
:emphasize-lines: 3, 4, 9
\documentclass{article}
\usepackage{import}
\usepackage{pgf}
\begin{document}
\begin{figure}[h]
\centering
\import{/path/to/file/}{filename.pgf} % Note trailing slash.
\caption[LOF caption]{Regular caption.}
\label{fig:pgf_example}
\end{figure}
\end{document}
.. _examples: https://github.com/masasin/latexipy/tree/master/examples
|
PypiClean
|
/simple-data-flow-0.0.1.tar.gz/simple-data-flow-0.0.1/src/simflow/base.py
|
import logging
import re
import traceback
import uuid
from confobj import Configurable, has_dict_handler, register_dict_handler, get_dict_handler, get_class
class Stoppable(object):
"""
Classes that can be stopped.
"""
def is_stopped(self):
"""
Returns whether the object has been stopped.
:return: whether stopped
:rtype: bool
"""
raise Exception("Not implemented!")
def stop_execution(self):
"""
Triggers the stopping of the object.
"""
raise Exception("Not implemented!")
class Actor(Configurable, Stoppable):
"""
The ancestor for all actors.
"""
def __init__(self, name=None, config=None):
"""
Initializes the actor.
:param name: the name of the actor
:type name: str
:param config: the dictionary with the options (str -> object).
:type config: dict
"""
super(Actor, self).__init__(config=config)
self._name = self.__class__.__name__
self._parent = None
self._full_name = None
self._stopped = False
if name is not None:
self.name = name
if not has_dict_handler("Actor"):
register_dict_handler("Actor", Actor.from_dict)
def __str__(self):
"""
Returns a short representation of the actor's setup.
:return: the setup
:rtype: str
"""
return self.full_name + ": " + str(self._config)
def __repr__(self):
"""
Returns Python code for instantiating the object.
:return: the representation
:rtype: str
"""
return \
self.__class__.__module__ + "." + self.__class__.__name__ \
+ "(name=" + self.name + ", config=" + str(self.config) + ")"
def new_logger(self):
"""
Returns a new logger instance.
:return: the logger instance
:rtype: logger
"""
return logging.getLogger(self.full_name)
@property
def name(self):
"""
Obtains the currently set name of the actor.
:return: the name
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of the actor.
:param name: the name
:type name: str
"""
self._name = name
def unique_name(self, name):
"""
Generates a unique name.
:param name: the name to check
:type name: str
:return: the unique name
:rtype: str
"""
result = name
if self.parent is not None:
index = self.index
bname = re.sub(r'-[0-9]+$', '', name)
names = []
for idx, actor in enumerate(self.parent.actors):
if idx != index:
names.append(actor.name)
result = bname
count = 0
while result in names:
count += 1
result = bname + "-" + str(count)
return result
@property
def parent(self):
"""
Obtains the currently set parent of the actor.
:return: the name
:rtype: str
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of the actor.
:param parent: the parent
:type parent: Actor
"""
self._name = self.unique_name(self._name)
self._full_name = None
self._logger = None
self._parent = parent
@property
def index(self):
"""
Returns the index of this actor in its parent's list of actors.
:return: the index, -1 if not available
:rtype: int
"""
if self.parent is None:
return -1
else:
return self.parent.index_of(self.name)
@property
def full_name(self):
"""
Obtains the full name of the actor.
:return: the full name
:rtype: str
"""
if self._full_name is None:
fn = self.name.replace(".", "\\.")
parent = self._parent
if parent is not None:
fn = parent.full_name + "." + fn
self._full_name = fn
return self._full_name
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
opt = "annotation"
if opt not in options:
options[opt] = None
if opt not in self.help:
self.help[opt] = "The (optional) annotation for this actor (string)."
opt = "skip"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to skip (disable) this actor (bool)."
return super(Actor, self).fix_config(options)
def to_dict(self):
"""
Returns a dictionary that represents this object, to be used for JSONification.
:return: the object dictionary
:rtype: dict
"""
result = super(Actor, self).to_dict()
result["type"] = "Actor"
result["name"] = self.name
return result
@classmethod
def from_dict(cls, d):
"""
Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object
"""
conf = {}
for k in d["config"]:
v = d["config"][k]
if isinstance(v, dict):
if u"type" in v:
typestr = v[u"type"]
else:
typestr = v["type"]
conf[str(k)] = get_dict_handler(typestr)(v)
else:
conf[str(k)] = v
return get_class(d["class"])(name=d["name"], config=conf)
def resolve_option(self, name, default=None):
"""
        Resolves the option, i.e., interprets "@{...}" values and retrieves them instead from internal
storage.
:param name: the name of the option
:type name: str
:param default: the optional default value
:type default: object
:return: the resolved value
:rtype: object
"""
value = self.config[name]
if value is None:
return default
elif isinstance(value, str) \
and value.startswith("@{") \
and value.endswith("}") \
and (value.find("@{", 1) == -1):
stname = value[2:len(value)-1]
if (self.storagehandler is not None) and (stname in self.storagehandler.storage):
return self.storagehandler.storage[stname]
else:
return default
else:
return value
@property
def skip(self):
"""
Obtains whether the actor is disabled (skipped).
:return: True if skipped
:rtype: bool
"""
return self.resolve_option("skip")
@skip.setter
def skip(self, skip):
"""
Sets whether the actor is skipped.
:param skip: True if skipped
:type skip: bool
"""
self.config["skip"] = skip
@property
def quickinfo(self):
"""
Returns a short string describing some of the options of the actor.
:return: the info, None if not available
:rtype: str
"""
return None
@property
def storagehandler(self):
"""
        Returns the storage handler available to this actor.
:return: the storage handler, None if not available
"""
if isinstance(self, StorageHandler):
return self
elif self.parent is not None:
return self.parent.storagehandler
else:
return None
@property
def root(self):
"""
Returns the top-level actor.
:return: the top-level actor
:rtype: Actor
"""
if self.parent is None:
return self
else:
return self.parent.root
@property
def depth(self):
"""
Returns the depth of this actor inside the overall flow.
:return: the depth
:rtype: int
"""
if self.parent is None:
return 0
else:
return self.parent.depth + 1
def is_stopped(self):
"""
Returns whether the object has been stopped.
:return: whether stopped
:rtype: bool
"""
return self._stopped
def stop_execution(self):
"""
Triggers the stopping of the actor.
"""
self._stopped = True
def setup(self):
"""
Configures the actor before execution.
:return: None if successful, otherwise error message
:rtype: str
"""
return None
def pre_execute(self):
"""
Gets executed before the actual execution.
:return: None if successful, otherwise error message
:rtype: str
"""
return None
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
raise Exception("Not implemented!")
def post_execute(self):
"""
Gets executed after the actual execution.
:return: None if successful, otherwise error message
:rtype: str
"""
return None
def execute(self):
"""
Executes the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
if self.skip:
return None
result = self.pre_execute()
if result is None:
try:
result = self.do_execute()
except Exception as e:
result = traceback.format_exc()
print(self.full_name + "\n" + result)
if result is None:
result = self.post_execute()
return result
def wrapup(self):
"""
Finishes up after execution finishes, does not remove any graphical output.
"""
pass
def cleanup(self):
"""
Destructive finishing up after execution stopped.
"""
pass
class Token(object):
"""
Container for transporting data through the flow.
"""
def __init__(self, payload):
"""
Initializes the token with the given payload.
:param payload: the payload for the token.
:type payload: object
"""
self._id = str(uuid.uuid4())
self._payload = payload
@property
def id(self):
"""
Obtains the ID of the token.
:return: the ID
:rtype: str
"""
return self._id
@property
def payload(self):
"""
Obtains the currently set payload.
:return: the payload
:rtype: object
"""
return self._payload
def __str__(self):
"""
Returns a short representation of the token and its payload.
"""
return self._id + ": " + str(self._payload)
class InputConsumer(Actor):
"""
Actors that consume tokens inherit this class.
"""
def __init__(self, name=None, config=None):
"""
Initializes the actor.
:param name: the name of the actor
:type name: str
:param config: the dictionary with the options (str -> object).
:type config: dict
"""
super(InputConsumer, self).__init__(name=name, config=config)
self._input = None
def check_input(self, token):
"""
Performs checks on the input token. Raises an exception if unsupported.
:param token: the token to check
:type token: Token
"""
pass
@property
def input(self):
"""
Returns the current input token, None if not available.
:return: the input token
:rtype: Token
"""
return self._input
@input.setter
def input(self, token):
"""
Accepts the data for processing.
:param token: the token to process
:type token: Token
"""
self.check_input(token)
self._input = token
class OutputProducer(Actor):
"""
Actors that generate output tokens inherit this class.
"""
def __init__(self, name=None, config=None):
"""
Initializes the actor.
:param name: the name of the actor
:type name: str
:param config: the dictionary with the options (str -> object).
:type config: dict
"""
super(OutputProducer, self).__init__(name=name, config=config)
self._output = None
def pre_execute(self):
"""
Gets executed before the actual execution.
:return: None if successful, otherwise error message
:rtype: str
"""
self._output = []
return None
def has_output(self):
"""
Checks whether any output tokens are present.
:return: true if at least one output token present
:rtype: bool
"""
return (self._output is not None) and (len(self._output) > 0)
def output(self):
"""
Returns the next available output token.
:return: the next token, None if none available
:rtype: Token
"""
if (self._output is None) or (len(self._output) == 0):
result = None
else:
result = self._output.pop(0)
return result
class StorageHandler(object):
"""
For classes that support internal storage (= dictionary).
"""
@property
def storage(self):
"""
Returns the internal storage.
:return: the internal storage
:rtype: dict
"""
raise Exception("Not implemented!")
def expand(self, s):
"""
Expands all occurrences of "@{...}" within the string with the actual values currently stored
in internal storage.
:param s: the string to expand
:type s: str
:return: the expanded string
:rtype: str
"""
result = s
while result.find("@{") > -1:
start = result.index("@{")
end = result.index("}", start)
name = result[start + 2:end]
value = self.storage[name]
if value is None:
raise("Storage value '" + name + "' not present, failed to expand string: " + s)
else:
result = result[0:start] + str(value) + result[end + 1:]
return result
@classmethod
def pad(cls, name):
"""
Pads the name with "@{...}".
:param name: the name to pad
:type name: str
:return: the padded name
:rtype: str
"""
if name.startswith("@{"):
return name
else:
return "@{" + name + "}"
@classmethod
def extract(cls, padded):
"""
Removes the surrounding "@{...}" from the name.
:param padded: the padded string
:type padded: str
:return: the extracted name
:rtype: str
"""
if padded.startswith("@{") and padded.endswith("}"):
return padded[2:len(padded)-1]
else:
return padded
def is_source(actor):
"""
Checks whether the actor is a source.
:param actor: the actor to check
:type actor: Actor
:return: True if the actor is a source
:rtype: bool
"""
return not isinstance(actor, InputConsumer) and isinstance(actor, OutputProducer)
def is_transformer(actor):
"""
Checks whether the actor is a transformer.
:param actor: the actor to check
:type actor: Actor
:return: True if the actor is a transformer
:rtype: bool
"""
return isinstance(actor, InputConsumer) and isinstance(actor, OutputProducer)
def is_sink(actor):
"""
Checks whether the actor is a sink.
:param actor: the actor to check
:type actor: Actor
:return: True if the actor is a sink
:rtype: bool
"""
return isinstance(actor, InputConsumer) and not isinstance(actor, OutputProducer)
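# A minimal usage sketch of the classes and helpers above (illustrative only;
# real flows would subclass Actor and wire up InputConsumer/OutputProducer
# implementations):
if __name__ == '__main__':
    token = Token("hello")
    print(token)                                # "<uuid>: hello"
    print(StorageHandler.pad("result"))         # "@{result}"
    print(StorageHandler.extract("@{result}"))  # "result"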
|
PypiClean
|
/invenio_app_ils-1.0.0a60.tar.gz/invenio_app_ils-1.0.0a60/invenio_app_ils/items/serializers/item.py
|
from invenio_records_rest.serializers.csv import CSVSerializer
from invenio_records_rest.serializers.json import JSONSerializer
from invenio_app_ils.permissions import patron_permission
FILTER_KEYS = [
"loan_pid",
"patron_pid",
"start_date",
"end_date",
"extension_count",
]
def filter_circulation(data):
"""Filter circulation status depending on user permissions."""
if "circulation" in data["metadata"]:
circulation = data["metadata"]["circulation"]
patron_pid = circulation.get("patron_pid", None)
if not patron_pid:
return circulation
allowed = patron_permission(patron_pid).can()
if not allowed:
for key in FILTER_KEYS:
if key in circulation:
del circulation[key]
data["metadata"]["circulation"] = circulation
class ItemCSVSerializer(CSVSerializer):
"""Serialize and filter item circulation status."""
def transform_record(self, pid, record, links_factory=None, **kwargs):
"""Transform record into an intermediate representation."""
item = super().transform_record(
pid, record, links_factory=links_factory, **kwargs
)
filter_circulation(item)
return item
def transform_search_hit(
self, pid, record_hit, links_factory=None, **kwargs
):
"""Transform search result hit into an intermediate representation."""
hit = super().transform_search_hit(
pid, record_hit, links_factory=links_factory, **kwargs
)
filter_circulation(hit)
return hit
class ItemJSONSerializer(JSONSerializer):
"""Serialize and filter item circulation status."""
def transform_record(self, pid, record, links_factory=None, **kwargs):
"""Transform record into an intermediate representation."""
item = super().transform_record(
pid, record, links_factory=links_factory, **kwargs
)
filter_circulation(item)
return item
def transform_search_hit(
self, pid, record_hit, links_factory=None, **kwargs
):
"""Transform search result hit into an intermediate representation."""
hit = super().transform_search_hit(
pid, record_hit, links_factory=links_factory, **kwargs
)
filter_circulation(hit)
return hit
|
PypiClean
|
/bigdl_orca-2.3.0b20230218-py3-none-manylinux1_x86_64.whl/bigdl/orca/learn/pytorch/callbacks/tensorboard.py
|
import os
import shutil
import tempfile
from torch.utils.tensorboard import SummaryWriter
from bigdl.orca.data.file import put_local_dir_tree_to_remote
from bigdl.orca.learn.pytorch.callbacks import Callback
from bigdl.dllib.utils.log4Error import invalidInputError
class TensorBoardCallback(Callback):
def __init__(
self,
log_dir=None,
freq="epoch",
**kwargs,
):
"""
:param log_dir: Log directory of TensorBoard.
:param freq: Frequency of logging metrics and loss.
Accept values: 'batch' or 'epoch' or integer. When using 'batch',
writes the losses and metrics to TensorBoard after each batch.
The same applies for 'epoch'. If using an integer, let's say 1000,
the callback will write the metrics and losses to TensorBoard every 1000 batches.
Note that writing too frequently to TensorBoard can slow down your training.
        :param **kwargs: The keyword arguments will be passed to ``SummaryWriter``.
"""
self.log_dir = log_dir
self.tmp_dir = os.path.join(tempfile.mkdtemp(), os.path.basename(log_dir))
self.freq = freq
self.kwargs = kwargs
self.unlog_items = ["epoch", "batch_count", "num_samples"]
super().__init__()
def after_train_iter(self, runner):
"""
Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
        :param runner: the runner holding the current batch index (``runner.batch_idx``) and
        the aggregated metric results up until this batch (``runner.metrics_stats``).
"""
if self.freq != "epoch" and self._is_rank_zero(runner):
if self.freq == "batch" or runner.batch_idx % int(self.freq) == 0:
writer = SummaryWriter(log_dir=self.tmp_dir, **self.kwargs)
for name, value in runner.metrics_stats.items():
if name not in self.unlog_items:
writer.add_scalar(name, value, runner.batch_idx)
writer.close()
def after_train_epoch(self, runner):
"""
Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
        :param runner: the runner holding the current epoch index (``runner.epochs``) and the
        metric results for this training epoch (``runner.epoch_stats``). If validation is
        performed, validation result keys are prefixed with ``val_``.
        Example : {'loss': 0.2, 'accuracy': 0.7}
"""
if self.freq == "epoch" and self._is_rank_zero(runner):
writer = SummaryWriter(log_dir=self.tmp_dir, **self.kwargs)
for name, value in runner.epoch_stats.items():
if name not in self.unlog_items:
writer.add_scalar(name, value, runner.epochs)
writer.close()
def after_run(self, runner):
"""
Called at the end of training.
Subclasses should override for any actions to run.
"""
if self._is_rank_zero(runner):
put_local_dir_tree_to_remote(self.tmp_dir, self.log_dir)
if os.path.exists(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
def _is_rank_zero(self, runner):
invalidInputError(runner, "Sanity check failed. Runner must not be None!")
rank = runner.rank
return rank == 0
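# A minimal construction sketch (the log directory is an example value; in a
# real job the callback instance would be passed to the Orca estimator's
# callbacks list):
if __name__ == "__main__":
    tb_cb = TensorBoardCallback(log_dir="runs/demo", freq="epoch")
    print(tb_cb.log_dir, tb_cb.tmp_dir, tb_cb.freq)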
|
PypiClean
|
/alx_test-1.tar.gz/alx_test-1/LICENSE.rst
|
MIT License
Copyright (c) 2020 Intercontinental Exchange
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/alpha_cord-2.0.0a1-py3-none-any.whl/discord/interactions.py
|
from __future__ import annotations
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Tuple, Union
import asyncio
from . import utils
from .enums import try_enum, InteractionType, InteractionResponseType
from .errors import InteractionResponded, HTTPException, ClientException, InvalidArgument
from .channel import PartialMessageable, ChannelType
from .file import File
from .user import User
from .member import Member
from .message import Message, Attachment
from .mentions import AllowedMentions
from .object import Object
from .permissions import Permissions
from .webhook.async_ import async_context, Webhook, handle_message_parameters
__all__ = (
'Interaction',
'InteractionMessage',
'InteractionResponse',
)
if TYPE_CHECKING:
from .types.interactions import (
Interaction as InteractionPayload,
InteractionData,
)
from .guild import Guild
from .state import ConnectionState
from .mentions import AllowedMentions
from aiohttp import ClientSession
from .embeds import Embed
from .ui.view import View
from .channel import VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel, PartialMessageable
from .threads import Thread
from .commands import OptionChoice
InteractionChannel = Union[
VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel, Thread, PartialMessageable
]
MISSING: Any = utils.MISSING
class Interaction:
"""Represents a Discord interaction.
    An interaction happens when a user performs an action that the application
    needs to be notified about. Current examples are slash commands and components.
.. versionadded:: 2.0
Attributes
-----------
id: :class:`int`
The interaction's ID.
type: :class:`InteractionType`
The interaction type.
guild_id: Optional[:class:`int`]
The guild ID the interaction was sent from.
channel_id: Optional[:class:`int`]
The channel ID the interaction was sent from.
application_id: :class:`int`
The application ID that the interaction was for.
user: Optional[Union[:class:`User`, :class:`Member`]]
The user or member that sent the interaction.
message: Optional[:class:`Message`]
The message that sent this interaction.
token: :class:`str`
The token to continue the interaction. These are valid
for 15 minutes.
data: :class:`dict`
The raw interaction data.
"""
__slots__: Tuple[str, ...] = (
'id',
'type',
'guild_id',
'channel_id',
'data',
'application_id',
'message',
'user',
'token',
'version',
'_permissions',
'_state',
'_session',
'_original_message',
'_cs_response',
'_cs_followup',
'_cs_channel',
)
def __init__(self, *, data: InteractionPayload, state: ConnectionState):
self._state: ConnectionState = state
self._session: ClientSession = state.http._HTTPClient__session
self._original_message: Optional[InteractionMessage] = None
self._from_data(data)
def _from_data(self, data: InteractionPayload):
self.id: int = int(data['id'])
self.type: InteractionType = try_enum(InteractionType, data['type'])
self.data: Optional[InteractionData] = data.get('data')
self.token: str = data['token']
self.version: int = data['version']
self.channel_id: Optional[int] = utils._get_as_snowflake(data, 'channel_id')
self.guild_id: Optional[int] = utils._get_as_snowflake(data, 'guild_id')
self.application_id: int = int(data['application_id'])
self.message: Optional[Message]
try:
self.message = Message(state=self._state, channel=self.channel, data=data['message']) # type: ignore
except KeyError:
self.message = None
self.user: Optional[Union[User, Member]] = None
self._permissions: int = 0
# TODO: there's a potential data loss here
if self.guild_id:
guild = self.guild or Object(id=self.guild_id)
try:
member = data['member'] # type: ignore
except KeyError:
pass
else:
self.user = Member(state=self._state, guild=guild, data=member) # type: ignore
self._permissions = int(member.get('permissions', 0))
else:
try:
self.user = User(state=self._state, data=data['user'])
except KeyError:
pass
@property
def guild(self) -> Optional[Guild]:
"""Optional[:class:`Guild`]: The guild the interaction was sent from."""
return self._state and self._state._get_guild(self.guild_id)
def is_command(self) -> bool:
""":class:`bool`: Indicates whether the interaction is an application command."""
return self.type == InteractionType.application_command
def is_component(self) -> bool:
""":class:`bool`: Indicates whether the interaction is a message component."""
return self.type == InteractionType.component
@utils.cached_slot_property('_cs_channel')
def channel(self) -> Optional[InteractionChannel]:
"""Optional[Union[:class:`abc.GuildChannel`, :class:`PartialMessageable`, :class:`Thread`]]: The channel the interaction was sent from.
Note that due to a Discord limitation, DM channels are not resolved since there is
no data to complete them. These are :class:`PartialMessageable` instead.
"""
guild = self.guild
channel = guild and guild._resolve_channel(self.channel_id)
if channel is None:
if self.channel_id is not None:
type = ChannelType.text if self.guild_id is not None else ChannelType.private
return PartialMessageable(state=self._state, id=self.channel_id, type=type)
return None
return channel
@property
def permissions(self) -> Permissions:
""":class:`Permissions`: The resolved permissions of the member in the channel, including overwrites.
In a non-guild context where this doesn't apply, an empty permissions object is returned.
"""
return Permissions(self._permissions)
@utils.cached_slot_property('_cs_response')
def response(self) -> InteractionResponse:
""":class:`InteractionResponse`: Returns an object responsible for handling responding to the interaction.
A response can only be done once. If secondary messages need to be sent, consider using :attr:`followup`
instead.
"""
return InteractionResponse(self)
@utils.cached_slot_property('_cs_followup')
def followup(self) -> Webhook:
""":class:`Webhook`: Returns the follow up webhook for follow up interactions."""
payload = {
'id': self.application_id,
'type': 3,
'token': self.token,
}
return Webhook.from_state(data=payload, state=self._state)
async def original_message(self) -> InteractionMessage:
"""|coro|
Fetches the original interaction response message associated with the interaction.
If the interaction response was :meth:`InteractionResponse.send_message` then this would
return the message that was sent using that response. Otherwise, this would return
the message that triggered the interaction.
Repeated calls to this will return a cached value.
Raises
-------
HTTPException
Fetching the original response message failed.
ClientException
The channel for the message could not be resolved.
Returns
--------
InteractionMessage
The original interaction response message.
"""
if self._original_message is not None:
return self._original_message
# TODO: fix later to not raise?
channel = self.channel
if channel is None:
raise ClientException('Channel for message could not be resolved')
adapter = async_context.get()
data = await adapter.get_original_interaction_response(
application_id=self.application_id,
token=self.token,
session=self._session,
)
state = _InteractionMessageState(self, self._state)
message = InteractionMessage(state=state, channel=channel, data=data) # type: ignore
self._original_message = message
return message
async def edit_original_message(
self,
*,
content: Optional[str] = MISSING,
embeds: List[Embed] = MISSING,
embed: Optional[Embed] = MISSING,
file: File = MISSING,
files: List[File] = MISSING,
view: Optional[View] = MISSING,
allowed_mentions: Optional[AllowedMentions] = None,
) -> InteractionMessage:
"""|coro|
Edits the original interaction response message.
This is a lower level interface to :meth:`InteractionMessage.edit` in case
you do not want to fetch the message and save an HTTP request.
This method is also the only way to edit the original message if
the message sent was ephemeral.
Parameters
------------
content: Optional[:class:`str`]
The content to edit the message with or ``None`` to clear it.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
file: :class:`File`
The file to upload. This cannot be mixed with ``files`` parameter.
files: List[:class:`File`]
A list of files to send with the content. This cannot be mixed with the
``file`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Edited a message that is not yours.
TypeError
You specified both ``embed`` and ``embeds`` or ``file`` and ``files``
ValueError
The length of ``embeds`` was invalid.
Returns
--------
:class:`InteractionMessage`
The newly edited message.
"""
previous_mentions: Optional[AllowedMentions] = self._state.allowed_mentions
params = handle_message_parameters(
content=content,
file=file,
files=files,
embed=embed,
embeds=embeds,
view=view,
allowed_mentions=allowed_mentions,
previous_allowed_mentions=previous_mentions,
)
adapter = async_context.get()
data = await adapter.edit_original_interaction_response(
self.application_id,
self.token,
session=self._session,
payload=params.payload,
multipart=params.multipart,
files=params.files,
)
# The message channel types should always match
message = InteractionMessage(state=self._state, channel=self.channel, data=data) # type: ignore
if view and not view.is_finished():
self._state.store_view(view, message.id)
return message
async def delete_original_message(self) -> None:
"""|coro|
Deletes the original interaction response message.
This is a lower level interface to :meth:`InteractionMessage.delete` in case
you do not want to fetch the message and save an HTTP request.
Raises
-------
HTTPException
Deleting the message failed.
Forbidden
Deleted a message that is not yours.
"""
adapter = async_context.get()
await adapter.delete_original_interaction_response(
self.application_id,
self.token,
session=self._session,
)
class InteractionResponse:
"""Represents a Discord interaction response.
This type can be accessed through :attr:`Interaction.response`.
.. versionadded:: 2.0
"""
__slots__: Tuple[str, ...] = (
'_responded',
'_parent',
)
def __init__(self, parent: Interaction):
self._parent: Interaction = parent
self._responded: bool = False
def is_done(self) -> bool:
""":class:`bool`: Indicates whether an interaction response has been done before.
An interaction can only be responded to once.
"""
return self._responded
async def defer(self, *, ephemeral: bool = False) -> None:
"""|coro|
Defers the interaction response.
This is typically used when the interaction is acknowledged
and a secondary action will be done later.
Parameters
-----------
ephemeral: :class:`bool`
Indicates whether the deferred message will eventually be ephemeral.
            For interactions of type :attr:`InteractionType.component`, passing ``True`` defers with an ephemeral message instead of a deferred message update.
Raises
-------
HTTPException
Deferring the interaction failed.
InteractionResponded
This interaction has already been responded to before.
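        Examples
        ---------
        A minimal, illustrative sketch; ``interaction`` is assumed to be an
        :class:`Interaction` received in a command callback and ``do_slow_work`` is a
        placeholder coroutine, not something provided by this library.

        .. code-block:: python3

            await interaction.response.defer(ephemeral=True)
            result = await do_slow_work()  # anything that may take longer than 3 seconds
            await interaction.followup.send(f'Finished: {result}')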
"""
if self._responded:
raise InteractionResponded(self._parent)
defer_type: int = 0
data: Optional[Dict[str, Any]] = None
parent = self._parent
if parent.type is InteractionType.component:
if ephemeral:
data = {'flags': 64}
defer_type = InteractionResponseType.deferred_channel_message.value
else:
defer_type = InteractionResponseType.deferred_message_update.value
elif parent.type is InteractionType.application_command:
defer_type = InteractionResponseType.deferred_channel_message.value
if ephemeral:
data = {'flags': 64}
if defer_type:
adapter = async_context.get()
await adapter.create_interaction_response(
parent.id, parent.token, session=parent._session, type=defer_type, data=data
)
self._responded = True
async def pong(self) -> None:
"""|coro|
Pongs the ping interaction.
This should rarely be used.
Raises
-------
HTTPException
Ponging the interaction failed.
InteractionResponded
This interaction has already been responded to before.
"""
if self._responded:
raise InteractionResponded(self._parent)
parent = self._parent
if parent.type is InteractionType.ping:
adapter = async_context.get()
await adapter.create_interaction_response(
parent.id, parent.token, session=parent._session, type=InteractionResponseType.pong.value
)
self._responded = True
async def send_message(
self,
content: Optional[Any] = None,
*,
embed: Embed = MISSING,
embeds: List[Embed] = MISSING,
view: View = MISSING,
tts: bool = False,
ephemeral: bool = False,
        allowed_mentions: Optional[AllowedMentions] = None,
        file: Optional[File] = None,
        files: Optional[List[File]] = None,
        delete_after: Optional[float] = None
) -> Interaction:
"""|coro|
Responds to this interaction by sending a message.
Parameters
-----------
content: Optional[:class:`str`]
The content of the message to send.
embeds: List[:class:`Embed`]
A list of embeds to send with the content. Maximum of 10. This cannot
be mixed with the ``embed`` parameter.
embed: :class:`Embed`
The rich embed for the content to send. This cannot be mixed with
``embeds`` parameter.
tts: :class:`bool`
Indicates if the message should be sent using text-to-speech.
view: :class:`discord.ui.View`
The view to send with the message.
ephemeral: :class:`bool`
Indicates if the message should only be visible to the user who started the interaction.
If a view is sent with an ephemeral message and it has no timeout set then the timeout
is set to 15 minutes.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
delete_after: :class:`float`
If provided, the number of seconds to wait in the background
before deleting the message we just sent.
file: :class:`File`
The file to upload.
        files: List[:class:`File`]
A list of files to upload. Must be a maximum of 10.
Raises
-------
HTTPException
Sending the message failed.
        TypeError
            You specified both ``embed`` and ``embeds``.
        ValueError
            The length of ``embeds`` was invalid.
        InvalidArgument
            You specified both ``file`` and ``files``, or one of the elements of ``files`` was not a :class:`File`.
        InteractionResponded
            This interaction has already been responded to before.
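        Examples
        ---------
        A minimal, illustrative sketch; ``interaction`` is assumed to be an
        :class:`Interaction` received in an application command callback.

        .. code-block:: python3

            embed = discord.Embed(title='Pong!', description='The bot is alive.')
            await interaction.response.send_message(embed=embed, ephemeral=True, delete_after=30)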
"""
if self._responded:
raise InteractionResponded(self._parent)
payload: Dict[str, Any] = {
'tts': tts,
}
if embed is not MISSING and embeds is not MISSING:
raise TypeError('cannot mix embed and embeds keyword arguments')
if embed is not MISSING:
embeds = [embed]
if embeds:
if len(embeds) > 10:
raise ValueError('embeds cannot exceed maximum of 10 elements')
payload['embeds'] = [e.to_dict() for e in embeds]
if content is not None:
payload['content'] = str(content)
if ephemeral:
payload['flags'] = 64
if view is not MISSING:
payload['components'] = view.to_components()
state = self._parent._state
if allowed_mentions is not None:
if state.allowed_mentions is not None:
payload['allowed_mentions'] = state.allowed_mentions.merge(allowed_mentions).to_dict()
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
else:
payload['allowed_mentions'] = state.allowed_mentions and state.allowed_mentions.to_dict()
if file is not None and files is not None:
raise InvalidArgument('cannot pass both file and files parameter to send()')
if file is not None:
if not isinstance(file, File):
raise InvalidArgument('file parameter must be File')
else:
files = [file]
if files is not None:
if len(files) > 10:
raise InvalidArgument('files parameter must be a list of up to 10 elements')
elif not all(isinstance(file, File) for file in files):
raise InvalidArgument('files parameter must be a list of File')
parent = self._parent
adapter = async_context.get()
try:
await adapter.create_interaction_response(
parent.id,
parent.token,
session=parent._session,
type=InteractionResponseType.channel_message.value,
data=payload,
files=files
)
finally:
if files:
for file in files:
file.close()
if view is not MISSING:
if ephemeral and view.timeout is None:
view.timeout = 15 * 60.0
self._parent._state.store_view(view)
self._responded = True
if delete_after is not None:
async def delete():
await asyncio.sleep(delete_after)
await self._parent.delete_original_message()
asyncio.ensure_future(delete(), loop=self._parent._state.loop)
return self._parent
async def edit_message(
self,
*,
content: Optional[Any] = MISSING,
embed: Optional[Embed] = MISSING,
embeds: List[Embed] = MISSING,
attachments: List[Attachment] = MISSING,
view: Optional[View] = MISSING,
) -> None:
"""|coro|
Responds to this interaction by editing the original message of
a component interaction.
Parameters
-----------
content: Optional[:class:`str`]
The new content to replace the message with. ``None`` removes the content.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
attachments: List[:class:`Attachment`]
A list of attachments to keep in the message. If ``[]`` is passed
then all attachments are removed.
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed.
Raises
-------
HTTPException
Editing the message failed.
TypeError
You specified both ``embed`` and ``embeds``.
InteractionResponded
This interaction has already been responded to before.
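        Examples
        ---------
        A minimal, illustrative sketch; ``interaction`` is assumed to be a component
        interaction received inside a button callback.

        .. code-block:: python3

            # acknowledge the click by editing the message the button is attached to
            await interaction.response.edit_message(content='Confirmed!', view=None)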
"""
if self._responded:
raise InteractionResponded(self._parent)
parent = self._parent
msg = parent.message
state = parent._state
message_id = msg.id if msg else None
if parent.type is not InteractionType.component:
return
payload = {}
if content is not MISSING:
if content is None:
payload['content'] = None
else:
payload['content'] = str(content)
if embed is not MISSING and embeds is not MISSING:
raise TypeError('cannot mix both embed and embeds keyword arguments')
if embed is not MISSING:
if embed is None:
embeds = []
else:
embeds = [embed]
if embeds is not MISSING:
payload['embeds'] = [e.to_dict() for e in embeds]
if attachments is not MISSING:
payload['attachments'] = [a.to_dict() for a in attachments]
if view is not MISSING:
state.prevent_view_updates_for(message_id)
if view is None:
payload['components'] = []
else:
payload['components'] = view.to_components()
adapter = async_context.get()
await adapter.create_interaction_response(
parent.id,
parent.token,
session=parent._session,
type=InteractionResponseType.message_update.value,
data=payload,
)
if view and not view.is_finished():
state.store_view(view, message_id)
self._responded = True
async def send_autocomplete_result(
self,
*,
choices: List[OptionChoice],
) -> None:
"""|coro|
Responds to this interaction by sending the autocomplete choices.
Parameters
-----------
choices: List[:class:`OptionChoice`]
A list of choices.
Raises
-------
HTTPException
Sending the result failed.
InteractionResponded
This interaction has already been responded to before.
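        Examples
        ---------
        A minimal, illustrative sketch; ``interaction`` is assumed to be an autocomplete
        interaction, and the ``OptionChoice(name=..., value=...)`` constructor arguments
        shown here are an assumption about that class.

        .. code-block:: python3

            fruits = ['apple', 'banana', 'cherry']
            await interaction.response.send_autocomplete_result(
                choices=[OptionChoice(name=fruit, value=fruit) for fruit in fruits]
            )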
"""
if self._responded:
raise InteractionResponded(self._parent)
parent = self._parent
if parent.type is not InteractionType.auto_complete:
return
payload = {
"choices": [c.to_dict() for c in choices]
}
adapter = async_context.get()
await adapter.create_interaction_response(
parent.id,
parent.token,
session=parent._session,
type=InteractionResponseType.auto_complete_result.value,
data=payload,
)
self._responded = True
class _InteractionMessageState:
__slots__ = ('_parent', '_interaction')
def __init__(self, interaction: Interaction, parent: ConnectionState):
self._interaction: Interaction = interaction
self._parent: ConnectionState = parent
def _get_guild(self, guild_id):
return self._parent._get_guild(guild_id)
def store_user(self, data):
return self._parent.store_user(data)
def create_user(self, data):
return self._parent.create_user(data)
@property
def http(self):
return self._parent.http
def __getattr__(self, attr):
return getattr(self._parent, attr)
class InteractionMessage(Message):
"""Represents the original interaction response message.
This allows you to edit or delete the message associated with
the interaction response. To retrieve this object see :meth:`Interaction.original_message`.
    This inherits from :class:`discord.Message` with changes to
    make :meth:`edit` and :meth:`delete` work.
.. versionadded:: 2.0
"""
__slots__ = ()
_state: _InteractionMessageState
async def edit(
self,
content: Optional[str] = MISSING,
embeds: List[Embed] = MISSING,
embed: Optional[Embed] = MISSING,
file: File = MISSING,
files: List[File] = MISSING,
view: Optional[View] = MISSING,
allowed_mentions: Optional[AllowedMentions] = None,
) -> InteractionMessage:
"""|coro|
Edits the message.
Parameters
------------
content: Optional[:class:`str`]
The content to edit the message with or ``None`` to clear it.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
file: :class:`File`
The file to upload. This cannot be mixed with ``files`` parameter.
files: List[:class:`File`]
A list of files to send with the content. This cannot be mixed with the
``file`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Edited a message that is not yours.
TypeError
            You specified both ``embed`` and ``embeds`` or ``file`` and ``files``.
ValueError
The length of ``embeds`` was invalid.
Returns
---------
:class:`InteractionMessage`
The newly edited message.
"""
return await self._state._interaction.edit_original_message(
content=content,
embeds=embeds,
embed=embed,
file=file,
files=files,
view=view,
allowed_mentions=allowed_mentions,
)
async def delete(self, *, delay: Optional[float] = None) -> None:
"""|coro|
Deletes the message.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait before deleting the message.
The waiting is done in the background and deletion failures are ignored.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
NotFound
The message was deleted already.
HTTPException
Deleting the message failed.
"""
if delay is not None:
async def inner_call(delay: float = delay):
await asyncio.sleep(delay)
try:
await self._state._interaction.delete_original_message()
except HTTPException:
pass
asyncio.create_task(inner_call())
else:
await self._state._interaction.delete_original_message()
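# A minimal, illustrative usage sketch (not part of the library's public API): it assumes
# an Interaction received from an application command callback and shows the
# defer -> follow up -> fetch/edit round trip implemented by the classes above.
async def _example_interaction_flow(interaction: Interaction) -> None:
    # acknowledge within the 3 second window, then do the slow work
    await interaction.response.defer(ephemeral=True)
    # send a follow-up message through the interaction webhook
    await interaction.followup.send('Working on it...')
    # fetch the original (deferred) response and edit it once the work is done
    message = await interaction.original_message()
    await message.edit(content='All done!')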
/fern_merge-0.2.9-py3-none-any.whl/merge/resources/accounting/resources/expenses/client.py
import datetime as dt
import typing
import urllib.parse
from json.decoder import JSONDecodeError
import pydantic
from .....core.api_error import ApiError
from .....core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from .....core.datetime_utils import serialize_datetime
from .....core.jsonable_encoder import jsonable_encoder
from .....core.remove_none_from_dict import remove_none_from_dict
from .....environment import MergeEnvironment
from ...types.expense import Expense
from ...types.expense_request import ExpenseRequest
from ...types.expense_response import ExpenseResponse
from ...types.expenses_list_request_expand import ExpensesListRequestExpand
from ...types.expenses_retrieve_request_expand import ExpensesRetrieveRequestExpand
from ...types.meta_response import MetaResponse
from ...types.paginated_expense_list import PaginatedExpenseList
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
class ExpensesClient:
def __init__(
self, *, environment: MergeEnvironment = MergeEnvironment.PRODUCTION, client_wrapper: SyncClientWrapper
):
self._environment = environment
self._client_wrapper = client_wrapper
def list(
self,
*,
company_id: typing.Optional[str] = None,
created_after: typing.Optional[dt.datetime] = None,
created_before: typing.Optional[dt.datetime] = None,
cursor: typing.Optional[str] = None,
expand: typing.Optional[ExpensesListRequestExpand] = None,
include_deleted_data: typing.Optional[bool] = None,
include_remote_data: typing.Optional[bool] = None,
modified_after: typing.Optional[dt.datetime] = None,
modified_before: typing.Optional[dt.datetime] = None,
page_size: typing.Optional[int] = None,
remote_id: typing.Optional[str] = None,
transaction_date_after: typing.Optional[dt.datetime] = None,
transaction_date_before: typing.Optional[dt.datetime] = None,
) -> PaginatedExpenseList:
"""
Returns a list of `Expense` objects.
Parameters:
- company_id: typing.Optional[str]. If provided, will only return expenses for this company.
- created_after: typing.Optional[dt.datetime]. If provided, will only return objects created after this datetime.
- created_before: typing.Optional[dt.datetime]. If provided, will only return objects created before this datetime.
- cursor: typing.Optional[str]. The pagination cursor value.
- expand: typing.Optional[ExpensesListRequestExpand]. Which relations should be returned in expanded form. Multiple relation names should be comma separated without spaces.
- include_deleted_data: typing.Optional[bool]. Whether to include data that was marked as deleted by third party webhooks.
- include_remote_data: typing.Optional[bool]. Whether to include the original data Merge fetched from the third-party to produce these models.
- modified_after: typing.Optional[dt.datetime]. If provided, only objects synced by Merge after this date time will be returned.
- modified_before: typing.Optional[dt.datetime]. If provided, only objects synced by Merge before this date time will be returned.
- page_size: typing.Optional[int]. Number of results to return per page.
- remote_id: typing.Optional[str]. The API provider's ID for the given object.
            - transaction_date_after: typing.Optional[dt.datetime]. If provided, will only return objects with a transaction date after this datetime.
            - transaction_date_before: typing.Optional[dt.datetime]. If provided, will only return objects with a transaction date before this datetime.
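        Example (an illustrative sketch; how the underlying client wrapper is constructed
        and authenticated is assumed here and is normally handled by the top-level Merge
        SDK client):
            expenses_client = ...  # an ExpensesClient wired with your credentials
            page = expenses_client.list(page_size=10, include_remote_data=True)
            for expense in page.results or []:
                print(expense.id, expense.remote_id)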
"""
_response = self._client_wrapper.httpx_client.request(
"GET",
urllib.parse.urljoin(f"{self._environment.value}/", "api/accounting/v1/expenses"),
params=remove_none_from_dict(
{
"company_id": company_id,
"created_after": serialize_datetime(created_after) if created_after is not None else None,
"created_before": serialize_datetime(created_before) if created_before is not None else None,
"cursor": cursor,
"expand": expand,
"include_deleted_data": include_deleted_data,
"include_remote_data": include_remote_data,
"modified_after": serialize_datetime(modified_after) if modified_after is not None else None,
"modified_before": serialize_datetime(modified_before) if modified_before is not None else None,
"page_size": page_size,
"remote_id": remote_id,
"transaction_date_after": serialize_datetime(transaction_date_after)
if transaction_date_after is not None
else None,
"transaction_date_before": serialize_datetime(transaction_date_before)
if transaction_date_before is not None
else None,
}
),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(PaginatedExpenseList, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def create(
self,
*,
is_debug_mode: typing.Optional[bool] = None,
run_async: typing.Optional[bool] = None,
model: ExpenseRequest,
) -> ExpenseResponse:
"""
Creates an `Expense` object with the given values.
Parameters:
- is_debug_mode: typing.Optional[bool]. Whether to include debug fields (such as log file links) in the response.
- run_async: typing.Optional[bool]. Whether or not third-party updates should be run asynchronously.
- model: ExpenseRequest.
"""
_response = self._client_wrapper.httpx_client.request(
"POST",
urllib.parse.urljoin(f"{self._environment.value}/", "api/accounting/v1/expenses"),
params=remove_none_from_dict({"is_debug_mode": is_debug_mode, "run_async": run_async}),
json=jsonable_encoder({"model": model}),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(ExpenseResponse, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def retrieve(
self,
id: str,
*,
expand: typing.Optional[ExpensesRetrieveRequestExpand] = None,
include_remote_data: typing.Optional[bool] = None,
) -> Expense:
"""
Returns an `Expense` object with the given `id`.
Parameters:
- id: str.
- expand: typing.Optional[ExpensesRetrieveRequestExpand]. Which relations should be returned in expanded form. Multiple relation names should be comma separated without spaces.
- include_remote_data: typing.Optional[bool]. Whether to include the original data Merge fetched from the third-party to produce these models.
"""
_response = self._client_wrapper.httpx_client.request(
"GET",
urllib.parse.urljoin(f"{self._environment.value}/", f"api/accounting/v1/expenses/{id}"),
params=remove_none_from_dict({"expand": expand, "include_remote_data": include_remote_data}),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(Expense, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
def meta_post_retrieve(self) -> MetaResponse:
"""
Returns metadata for `Expense` POSTs.
"""
_response = self._client_wrapper.httpx_client.request(
"GET",
urllib.parse.urljoin(f"{self._environment.value}/", "api/accounting/v1/expenses/meta/post"),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(MetaResponse, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
class AsyncExpensesClient:
def __init__(
self, *, environment: MergeEnvironment = MergeEnvironment.PRODUCTION, client_wrapper: AsyncClientWrapper
):
self._environment = environment
self._client_wrapper = client_wrapper
async def list(
self,
*,
company_id: typing.Optional[str] = None,
created_after: typing.Optional[dt.datetime] = None,
created_before: typing.Optional[dt.datetime] = None,
cursor: typing.Optional[str] = None,
expand: typing.Optional[ExpensesListRequestExpand] = None,
include_deleted_data: typing.Optional[bool] = None,
include_remote_data: typing.Optional[bool] = None,
modified_after: typing.Optional[dt.datetime] = None,
modified_before: typing.Optional[dt.datetime] = None,
page_size: typing.Optional[int] = None,
remote_id: typing.Optional[str] = None,
transaction_date_after: typing.Optional[dt.datetime] = None,
transaction_date_before: typing.Optional[dt.datetime] = None,
) -> PaginatedExpenseList:
"""
Returns a list of `Expense` objects.
Parameters:
- company_id: typing.Optional[str]. If provided, will only return expenses for this company.
- created_after: typing.Optional[dt.datetime]. If provided, will only return objects created after this datetime.
- created_before: typing.Optional[dt.datetime]. If provided, will only return objects created before this datetime.
- cursor: typing.Optional[str]. The pagination cursor value.
- expand: typing.Optional[ExpensesListRequestExpand]. Which relations should be returned in expanded form. Multiple relation names should be comma separated without spaces.
- include_deleted_data: typing.Optional[bool]. Whether to include data that was marked as deleted by third party webhooks.
- include_remote_data: typing.Optional[bool]. Whether to include the original data Merge fetched from the third-party to produce these models.
- modified_after: typing.Optional[dt.datetime]. If provided, only objects synced by Merge after this date time will be returned.
- modified_before: typing.Optional[dt.datetime]. If provided, only objects synced by Merge before this date time will be returned.
- page_size: typing.Optional[int]. Number of results to return per page.
- remote_id: typing.Optional[str]. The API provider's ID for the given object.
            - transaction_date_after: typing.Optional[dt.datetime]. If provided, will only return objects with a transaction date after this datetime.
            - transaction_date_before: typing.Optional[dt.datetime]. If provided, will only return objects with a transaction date before this datetime.
"""
_response = await self._client_wrapper.httpx_client.request(
"GET",
urllib.parse.urljoin(f"{self._environment.value}/", "api/accounting/v1/expenses"),
params=remove_none_from_dict(
{
"company_id": company_id,
"created_after": serialize_datetime(created_after) if created_after is not None else None,
"created_before": serialize_datetime(created_before) if created_before is not None else None,
"cursor": cursor,
"expand": expand,
"include_deleted_data": include_deleted_data,
"include_remote_data": include_remote_data,
"modified_after": serialize_datetime(modified_after) if modified_after is not None else None,
"modified_before": serialize_datetime(modified_before) if modified_before is not None else None,
"page_size": page_size,
"remote_id": remote_id,
"transaction_date_after": serialize_datetime(transaction_date_after)
if transaction_date_after is not None
else None,
"transaction_date_before": serialize_datetime(transaction_date_before)
if transaction_date_before is not None
else None,
}
),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(PaginatedExpenseList, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
async def create(
self,
*,
is_debug_mode: typing.Optional[bool] = None,
run_async: typing.Optional[bool] = None,
model: ExpenseRequest,
) -> ExpenseResponse:
"""
Creates an `Expense` object with the given values.
Parameters:
- is_debug_mode: typing.Optional[bool]. Whether to include debug fields (such as log file links) in the response.
- run_async: typing.Optional[bool]. Whether or not third-party updates should be run asynchronously.
- model: ExpenseRequest.
"""
_response = await self._client_wrapper.httpx_client.request(
"POST",
urllib.parse.urljoin(f"{self._environment.value}/", "api/accounting/v1/expenses"),
params=remove_none_from_dict({"is_debug_mode": is_debug_mode, "run_async": run_async}),
json=jsonable_encoder({"model": model}),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(ExpenseResponse, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
async def retrieve(
self,
id: str,
*,
expand: typing.Optional[ExpensesRetrieveRequestExpand] = None,
include_remote_data: typing.Optional[bool] = None,
) -> Expense:
"""
Returns an `Expense` object with the given `id`.
Parameters:
- id: str.
- expand: typing.Optional[ExpensesRetrieveRequestExpand]. Which relations should be returned in expanded form. Multiple relation names should be comma separated without spaces.
- include_remote_data: typing.Optional[bool]. Whether to include the original data Merge fetched from the third-party to produce these models.
"""
_response = await self._client_wrapper.httpx_client.request(
"GET",
urllib.parse.urljoin(f"{self._environment.value}/", f"api/accounting/v1/expenses/{id}"),
params=remove_none_from_dict({"expand": expand, "include_remote_data": include_remote_data}),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(Expense, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
async def meta_post_retrieve(self) -> MetaResponse:
"""
Returns metadata for `Expense` POSTs.
"""
_response = await self._client_wrapper.httpx_client.request(
"GET",
urllib.parse.urljoin(f"{self._environment.value}/", "api/accounting/v1/expenses/meta/post"),
headers=self._client_wrapper.get_headers(),
timeout=60,
)
if 200 <= _response.status_code < 300:
return pydantic.parse_obj_as(MetaResponse, _response.json()) # type: ignore
try:
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
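# A minimal, illustrative sketch (not part of the generated SDK itself): it assumes an
# AsyncExpensesClient that has already been wired with credentials, which is normally
# done by the top-level asynchronous Merge client.
async def _print_recent_expenses(expenses: AsyncExpensesClient) -> None:
    # fetch the first page of expenses and print a couple of identifying fields
    page = await expenses.list(page_size=5)
    for expense in page.results or []:
        print(expense.id, expense.remote_id)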
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/prettify.min.js
var IN_GLOBAL_SCOPE=true;window["PR_SHOULD_USE_CONTINUATION"]=true;var prettyPrintOne;var prettyPrint;(function(){var P=window;var i=["break,continue,do,else,for,if,return,while"];var u=[i,"auto,case,char,const,default,"+"double,enum,extern,float,goto,inline,int,long,register,short,signed,"+"sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"];var p=[u,"catch,class,delete,false,import,"+"new,operator,private,protected,public,this,throw,true,try,typeof"];var l=[p,"alignof,align_union,asm,axiom,bool,"+"concept,concept_map,const_cast,constexpr,decltype,delegate,"+"dynamic_cast,explicit,export,friend,generic,late_check,"+"mutable,namespace,nullptr,property,reinterpret_cast,static_assert,"+"static_cast,template,typeid,typename,using,virtual,where"];var y=[p,"abstract,assert,boolean,byte,extends,final,finally,implements,import,"+"instanceof,interface,null,native,package,strictfp,super,synchronized,"+"throws,transient"];var U=[y,"as,base,by,checked,decimal,delegate,descending,dynamic,event,"+"fixed,foreach,from,group,implicit,in,internal,into,is,let,"+"lock,object,out,override,orderby,params,partial,readonly,ref,sbyte,"+"sealed,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,"+"var,virtual,where"];var r="all,and,by,catch,class,else,extends,false,finally,"+"for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,"+"throw,true,try,unless,until,when,while,yes";var x=[p,"debugger,eval,export,function,get,null,set,undefined,var,with,"+"Infinity,NaN"];var s="caller,delete,die,do,dump,elsif,eval,exit,foreach,for,"+"goto,if,import,last,local,my,next,no,our,print,package,redo,require,"+"sub,undef,unless,until,use,wantarray,while,BEGIN,END";var K=[i,"and,as,assert,class,def,del,"+"elif,except,exec,finally,from,global,import,in,is,lambda,"+"nonlocal,not,or,pass,print,raise,try,with,yield,"+"False,True,None"];var g=[i,"alias,and,begin,case,class,"+"def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,"+"rescue,retry,self,super,then,true,undef,unless,until,when,yield,"+"BEGIN,END"];var z=[i,"as,assert,const,copy,drop,"+"enum,extern,fail,false,fn,impl,let,log,loop,match,mod,move,mut,priv,"+"pub,pure,ref,self,static,struct,true,trait,type,unsafe,use"];var J=[i,"case,done,elif,esac,eval,fi,"+"function,in,local,set,then,until"];var C=[l,U,x,s,K,g,J];var e=/^(DIR|FILE|vector|(de|priority_)?queue|list|stack|(const_)?iterator|(multi)?(set|map)|bitset|u?(int|float)\d*)\b/;var E="str";var B="kwd";var j="com";var R="typ";var I="lit";var N="pun";var H="pln";var m="tag";var G="dec";var L="src";var S="atn";var n="atv";var Q="nocode";var O="(?:^^\\.?|[+-]|[!=]=?=?|\\#|%=?|&&?=?|\\(|\\*=?|[+\\-]=|->|\\/=?|::?|<<?=?|>>?>?=?|,|;|\\?|@|\\[|~|{|\\^\\^?=?|\\|\\|?=?|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\\s*";function k(ac){var ag=0;var V=false;var af=false;for(var Y=0,X=ac.length;Y<X;++Y){var ah=ac[Y];if(ah.ignoreCase){af=true}else{if(/[a-z]/i.test(ah.source.replace(/\\u[0-9a-f]{4}|\\x[0-9a-f]{2}|\\[^ux]/gi,""))){V=true;af=false;break}}}var ab={"b":8,"t":9,"n":10,"v":11,"f":12,"r":13};function ae(ak){var aj=ak.charCodeAt(0);if(aj!==92){return aj}var ai=ak.charAt(1);aj=ab[ai];if(aj){return aj}else{if("0"<=ai&&ai<="7"){return parseInt(ak.substring(1),8)}else{if(ai==="u"||ai==="x"){return parseInt(ak.substring(2),16)}else{return ak.charCodeAt(1)}}}}function W(ai){if(ai<32){return(ai<16?"\\x0":"\\x")+ai.toString(16)}var aj=String.fromCharCode(ai);return(aj==="\\"||aj==="-"||aj==="]"||aj==="^")?"\\"+aj:aj}function aa(ao){var 
at=ao.substring(1,ao.length-1).match(new RegExp("\\\\u[0-9A-Fa-f]{4}"+"|\\\\x[0-9A-Fa-f]{2}"+"|\\\\[0-3][0-7]{0,2}"+"|\\\\[0-7]{1,2}"+"|\\\\[\\s\\S]"+"|-"+"|[^-\\\\]","g"));var ai=[];var aq=at[0]==="^";var ap=["["];if(aq){ap.push("^")}for(var au=aq?1:0,am=at.length;au<am;++au){var ak=at[au];if(/\\[bdsw]/i.test(ak)){ap.push(ak)}else{var aj=ae(ak);var an;if(au+2<am&&"-"===at[au+1]){an=ae(at[au+2]);au+=2}else{an=aj}ai.push([aj,an]);if(!(an<65||aj>122)){if(!(an<65||aj>90)){ai.push([Math.max(65,aj)|32,Math.min(an,90)|32])}if(!(an<97||aj>122)){ai.push([Math.max(97,aj)&~32,Math.min(an,122)&~32])}}}}ai.sort(function(ax,aw){return(ax[0]-aw[0])||(aw[1]-ax[1])});var al=[];var ar=[];for(var au=0;au<ai.length;++au){var av=ai[au];if(av[0]<=ar[1]+1){ar[1]=Math.max(ar[1],av[1])}else{al.push(ar=av)}}for(var au=0;au<al.length;++au){var av=al[au];ap.push(W(av[0]));if(av[1]>av[0]){if(av[1]+1>av[0]){ap.push("-")}ap.push(W(av[1]))}}ap.push("]");return ap.join("")}function Z(ao){var am=ao.source.match(new RegExp("(?:"+"\\[(?:[^\\x5C\\x5D]|\\\\[\\s\\S])*\\]"+"|\\\\u[A-Fa-f0-9]{4}"+"|\\\\x[A-Fa-f0-9]{2}"+"|\\\\[0-9]+"+"|\\\\[^ux0-9]"+"|\\(\\?[:!=]"+"|[\\(\\)\\^]"+"|[^\\x5B\\x5C\\(\\)\\^]+"+")","g"));var ak=am.length;var aq=[];for(var an=0,ap=0;an<ak;++an){var aj=am[an];if(aj==="("){++ap}else{if("\\"===aj.charAt(0)){var ai=+aj.substring(1);if(ai){if(ai<=ap){aq[ai]=-1}else{am[an]=W(ai)}}}}}for(var an=1;an<aq.length;++an){if(-1===aq[an]){aq[an]=++ag}}for(var an=0,ap=0;an<ak;++an){var aj=am[an];if(aj==="("){++ap;if(!aq[ap]){am[an]="(?:"}}else{if("\\"===aj.charAt(0)){var ai=+aj.substring(1);if(ai&&ai<=ap){am[an]="\\"+aq[ai]}}}}for(var an=0;an<ak;++an){if("^"===am[an]&&"^"!==am[an+1]){am[an]=""}}if(ao.ignoreCase&&V){for(var an=0;an<ak;++an){var aj=am[an];var al=aj.charAt(0);if(aj.length>=2&&al==="["){am[an]=aa(aj)}else{if(al!=="\\"){am[an]=aj.replace(/[a-zA-Z]/g,function(ar){var at=ar.charCodeAt(0);return"["+String.fromCharCode(at&~32,at|32)+"]"})}}}}return am.join("")}var ad=[];for(var Y=0,X=ac.length;Y<X;++Y){var ah=ac[Y];if(ah.global||ah.multiline){throw new Error(""+ah)}ad.push("(?:"+Z(ah)+")")}return new RegExp(ad.join("|"),af?"gi":"g")}function b(ab,Z){var X=/(?:^|\s)nocode(?:\s|$)/;var ac=[];var aa=0;var Y=[];var W=0;function V(ae){var ad=ae.nodeType;if(ad==1){if(X.test(ae.className)){return}for(var ah=ae.firstChild;ah;ah=ah.nextSibling){V(ah)}var ag=ae.nodeName.toLowerCase();if("br"===ag||"li"===ag){ac[W]="\n";Y[W<<1]=aa++;Y[(W++<<1)|1]=ae}}else{if(ad==3||ad==4){var af=ae.nodeValue;if(af.length){if(!Z){af=af.replace(/[ \t\r\n]+/g," ")}else{af=af.replace(/\r\n?/g,"\n")}ac[W]=af;Y[W<<1]=aa;aa+=af.length;Y[(W++<<1)|1]=ae}}}}V(ab);return{sourceCode:ac.join("").replace(/\n$/,""),spans:Y}}function D(V,X,Z,W){if(!X){return}var Y={sourceCode:X,basePos:V};Z(Y);W.push.apply(W,Y.decorations)}var v=/\S/;function o(V){var Y=undefined;for(var X=V.firstChild;X;X=X.nextSibling){var W=X.nodeType;Y=(W===1)?(Y?V:X):(W===3)?(v.test(X.nodeValue)?V:Y):Y}return Y===V?undefined:Y}function f(X,W){var V={};var Y;(function(){var ag=X.concat(W);var ak=[];var aj={};for(var ae=0,ac=ag.length;ae<ac;++ae){var ab=ag[ae];var af=ab[3];if(af){for(var ah=af.length;--ah>=0;){V[af.charAt(ah)]=ab}}var ai=ab[1];var ad=""+ai;if(!aj.hasOwnProperty(ad)){ak.push(ai);aj[ad]=null}}ak.push(/[\0-\uffff]/);Y=k(ak)})();var aa=W.length;var Z=function(ak){var ac=ak.sourceCode,ab=ak.basePos;var ag=[ab,H];var ai=0;var aq=ac.match(Y)||[];var am={};for(var ah=0,au=aq.length;ah<au;++ah){var aj=aq[ah];var at=am[aj];var al=void 0;var ap;if(typeof 
at==="string"){ap=false}else{var ad=V[aj.charAt(0)];if(ad){al=aj.match(ad[1]);at=ad[0]}else{for(var ar=0;ar<aa;++ar){ad=W[ar];al=aj.match(ad[1]);if(al){at=ad[0];break}}if(!al){at=H}}ap=at.length>=5&&"lang-"===at.substring(0,5);if(ap&&!(al&&typeof al[1]==="string")){ap=false;at=L}if(!ap){am[aj]=at}}var ae=ai;ai+=aj.length;if(!ap){ag.push(ab+ae,at)}else{var ao=al[1];var an=aj.indexOf(ao);var af=an+ao.length;if(al[2]){af=aj.length-al[2].length;an=af-ao.length}var av=at.substring(5);D(ab+ae,aj.substring(0,an),Z,ag);D(ab+ae+an,ao,q(av,ao),ag);D(ab+ae+af,aj.substring(af),Z,ag)}}ak.decorations=ag};return Z}function h(af){var X=[],ab=[];if(af["tripleQuotedStrings"]){X.push([E,/^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,null,"'\""])}else{if(af["multiLineStrings"]){X.push([E,/^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,null,"'\"`"])}else{X.push([E,/^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,null,"\"'"])}}if(af["verbatimStrings"]){ab.push([E,/^@\"(?:[^\"]|\"\")*(?:\"|$)/,null])}var ad=af["hashComments"];if(ad){if(af["cStyleComments"]){if(ad>1){X.push([j,/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,null,"#"])}else{X.push([j,/^#(?:(?:define|e(?:l|nd)if|else|error|ifn?def|include|line|pragma|undef|warning)\b|[^\r\n]*)/,null,"#"])}ab.push([E,/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h(?:h|pp|\+\+)?|[a-z]\w*)>/,null])}else{X.push([j,/^#[^\r\n]*/,null,"#"])}}if(af["cStyleComments"]){ab.push([j,/^\/\/[^\r\n]*/,null]);ab.push([j,/^\/\*[\s\S]*?(?:\*\/|$)/,null])}var W=af["regexLiterals"];if(W){var Y=W>1?"":"\n\r";var aa=Y?".":"[\\S\\s]";var Z=("/(?=[^/*"+Y+"])"+"(?:[^/\\x5B\\x5C"+Y+"]"+"|\\x5C"+aa+"|\\x5B(?:[^\\x5C\\x5D"+Y+"]"+"|\\x5C"+aa+")*(?:\\x5D|$))+"+"/");ab.push(["lang-regex",RegExp("^"+O+"("+Z+")")])}var ae=af["types"];if(ae){ab.push([R,ae])}var ac=(""+af["keywords"]).replace(/^ | $/g,"");if(ac.length){ab.push([B,new RegExp("^(?:"+ac.replace(/[\s,]+/g,"|")+")\\b"),null])}X.push([H,/^\s+/,null," \r\n\t\xA0"]);var V="^.[^\\s\\w.$@'\"`/\\\\]*";if(af["regexLiterals"]){V+="(?!s*/)"}ab.push([I,/^@[a-z_$][a-z_$@0-9]*/i,null],[R,/^(?:[@_]?[A-Z]+[a-z][A-Za-z_$@0-9]*|\w+_t\b)/,null],[H,/^[a-z_$][a-z_$@0-9]*/i,null],[I,new RegExp("^(?:"+"0x[a-f0-9]+"+"|(?:\\d(?:_\\d+)*\\d*(?:\\.\\d*)?|\\.\\d\\+)"+"(?:e[+\\-]?\\d+)?"+")"+"[a-z]*","i"),null,"0123456789"],[H,/^\\[\s\S]?/,null],[N,new RegExp(V),null]);return f(X,ab)}var M=h({"keywords":C,"hashComments":true,"cStyleComments":true,"multiLineStrings":true,"regexLiterals":true});function T(X,ai,ab){var W=/(?:^|\s)nocode(?:\s|$)/;var ad=/\r\n?|\n/;var ae=X.ownerDocument;var ah=ae.createElement("li");while(X.firstChild){ah.appendChild(X.firstChild)}var Y=[ah];function ag(ao){var an=ao.nodeType;if(an==1&&!W.test(ao.className)){if("br"===ao.nodeName){af(ao);if(ao.parentNode){ao.parentNode.removeChild(ao)}}else{for(var aq=ao.firstChild;aq;aq=aq.nextSibling){ag(aq)}}}else{if((an==3||an==4)&&ab){var ap=ao.nodeValue;var al=ap.match(ad);if(al){var ak=ap.substring(0,al.index);ao.nodeValue=ak;var aj=ap.substring(al.index+al[0].length);if(aj){var am=ao.parentNode;am.insertBefore(ae.createTextNode(aj),ao.nextSibling)}af(ao);if(!ak){ao.parentNode.removeChild(ao)}}}}}function af(am){while(!am.nextSibling){am=am.parentNode;if(!am){return}}function ak(an,au){var at=au?an.cloneNode(false):an;var aq=an.parentNode;if(aq){var ar=ak(aq,1);var 
ap=an.nextSibling;ar.appendChild(at);for(var ao=ap;ao;ao=ap){ap=ao.nextSibling;ar.appendChild(ao)}}return at}var aj=ak(am.nextSibling,0);for(var al;(al=aj.parentNode)&&al.nodeType===1;){aj=al}Y.push(aj)}for(var aa=0;aa<Y.length;++aa){ag(Y[aa])}if(ai===(ai|0)){Y[0].setAttribute("value",ai)}var ac=ae.createElement("ol");ac.className="linenums";var Z=Math.max(0,((ai-1))|0)||0;for(var aa=0,V=Y.length;aa<V;++aa){ah=Y[aa];ah.className="L"+((aa+Z)%10);if(!ah.firstChild){ah.appendChild(ae.createTextNode("\xA0"))}ac.appendChild(ah)}X.appendChild(ac)}function F(ag){var Y=/\bMSIE\s(\d+)/.exec(navigator.userAgent);Y=Y&&+Y[1]<=8;var ap=/\n/g;var ao=ag.sourceCode;var aq=ao.length;var Z=0;var ae=ag.spans;var W=ae.length;var ak=0;var ab=ag.decorations;var ac=ab.length;var ad=0;ab[ac]=aq;var aw,au;for(au=aw=0;au<ac;){if(ab[au]!==ab[au+2]){ab[aw++]=ab[au++];ab[aw++]=ab[au++]}else{au+=2}}ac=aw;for(au=aw=0;au<ac;){var ax=ab[au];var af=ab[au+1];var aa=au+2;while(aa+2<=ac&&ab[aa+1]===af){aa+=2}ab[aw++]=ax;ab[aw++]=af;au=aa}ac=ab.length=aw;var av=ag.sourceNode;var al;if(av){al=av.style.display;av.style.display="none"}try{var ai=null;while(ak<W){var aj=ae[ak];var V=ae[ak+2]||aq;var at=ab[ad+2]||aq;var aa=Math.min(V,at);var an=ae[ak+1];var X;if(an.nodeType!==1&&(X=ao.substring(Z,aa))){if(Y){X=X.replace(ap,"\r")}an.nodeValue=X;var am=an.ownerDocument;var ar=am.createElement("span");ar.className=ab[ad+1];var ah=an.parentNode;ah.replaceChild(ar,an);ar.appendChild(an);if(Z<V){ae[ak+1]=an=am.createTextNode(ao.substring(aa,V));ah.insertBefore(an,ar.nextSibling)}}Z=aa;if(Z>=V){ak+=2}if(Z>=at){ad+=2}}}finally{if(av){av.style.display=al}}}var t={};function c(X,Y){for(var V=Y.length;--V>=0;){var W=Y[V];if(!t.hasOwnProperty(W)){t[W]=X}else{if(P["console"]){console["warn"]("cannot override language handler %s",W)}}}}function q(W,V){if(!(W&&t.hasOwnProperty(W))){W=/^\s*</.test(V)?"default-markup":"default-code"}return t[W]}c(M,["default-code"]);c(f([],[[H,/^[^<?]+/],[G,/^<!\w[^>]*(?:>|$)/],[j,/^<\!--[\s\S]*?(?:-\->|$)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],[N,/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),["default-markup","htm","html","mxml","xhtml","xml","xsl"]);c(f([[H,/^[\s]+/,null," 
\t\r\n"],[n,/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,null,"\"'"]],[[m,/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],[S,/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],[N,/^[=<>\/]+/],["lang-js",/^on\w+\s*=\s*\"([^\"]+)\"/i],["lang-js",/^on\w+\s*=\s*\'([^\']+)\'/i],["lang-js",/^on\w+\s*=\s*([^\"\'>\s]+)/i],["lang-css",/^style\s*=\s*\"([^\"]+)\"/i],["lang-css",/^style\s*=\s*\'([^\']+)\'/i],["lang-css",/^style\s*=\s*([^\"\'>\s]+)/i]]),["in.tag"]);c(f([],[[n,/^[\s\S]+/]]),["uq.val"]);c(h({"keywords":l,"hashComments":true,"cStyleComments":true,"types":e}),["c","cc","cpp","cxx","cyc","m"]);c(h({"keywords":"null,true,false"}),["json"]);c(h({"keywords":U,"hashComments":true,"cStyleComments":true,"verbatimStrings":true,"types":e}),["cs"]);c(h({"keywords":y,"cStyleComments":true}),["java"]);c(h({"keywords":J,"hashComments":true,"multiLineStrings":true}),["bash","bsh","csh","sh"]);c(h({"keywords":K,"hashComments":true,"multiLineStrings":true,"tripleQuotedStrings":true}),["cv","py","python"]);c(h({"keywords":s,"hashComments":true,"multiLineStrings":true,"regexLiterals":2}),["perl","pl","pm"]);c(h({"keywords":g,"hashComments":true,"multiLineStrings":true,"regexLiterals":true}),["rb","ruby"]);c(h({"keywords":x,"cStyleComments":true,"regexLiterals":true}),["javascript","js"]);c(h({"keywords":r,"hashComments":3,"cStyleComments":true,"multilineStrings":true,"tripleQuotedStrings":true,"regexLiterals":true}),["coffee"]);c(h({"keywords":z,"cStyleComments":true,"multilineStrings":true}),["rc","rs","rust"]);c(f([],[[E,/^[\s\S]+/]]),["regex"]);function d(Y){var X=Y.langExtension;try{var V=b(Y.sourceNode,Y.pre);var W=V.sourceCode;Y.sourceCode=W;Y.spans=V.spans;Y.basePos=0;q(X,W)(Y);F(Y)}catch(Z){if(P["console"]){console["log"](Z&&Z["stack"]||Z)}}}function A(Z,Y,X){var V=document.createElement("div");V.innerHTML="<pre>"+Z+"</pre>";V=V.firstChild;if(X){T(V,X,true)}var W={langExtension:Y,numberLines:X,sourceNode:V,pre:1};d(W);return V.innerHTML}function w(al,ab){var ah=ab||document.body;var ao=ah.ownerDocument||document;function aa(aq){return ah.getElementsByTagName(aq)}var ad=[aa("pre"),aa("code"),aa("xmp")];var ae=[];for(var ak=0;ak<ad.length;++ak){for(var aj=0,ag=ad[ak].length;aj<ag;++aj){ae.push(ad[ak][aj])}}ad=null;var ap=Date;if(!ap["now"]){ap={"now":function(){return +(new Date)}}}var ai=0;var ac;var X=/\blang(?:uage)?-([\w.]+)(?!\S)/;var an=/\bprettyprint\b/;var W=/\bprettyprinted\b/;var Z=/pre|xmp/i;var V=/^code$/i;var Y=/^(?:pre|code|xmp)$/i;var am={};function af(){var ay=(P["PR_SHOULD_USE_CONTINUATION"]?ap["now"]()+250:Infinity);for(;ai<ae.length&&ap["now"]()<ay;ai++){var aA=ae[ai];var aH=am;for(var ax=aA;(ax=ax.previousSibling);){var aE=ax.nodeType;var aF=(aE===7||aE===8)&&ax.nodeValue;if(aF?!/^\??prettify\b/.test(aF):(aE!==3||/\S/.test(ax.nodeValue))){break}if(aF){aH={};aF.replace(/\b(\w+)=([\w:.%+-]+)/g,function(aJ,aI,aK){aH[aI]=aK});break}}var aB=aA.className;if((aH!==am||an.test(aB))&&!W.test(aB)){var aD=false;for(var au=aA.parentNode;au;au=au.parentNode){var aG=au.tagName;if(Y.test(aG)&&au.className&&an.test(au.className)){aD=true;break}}if(!aD){aA.className+=" prettyprinted";var aw=aH["lang"];if(!aw){aw=aB.match(X);var ar;if(!aw&&(ar=o(aA))&&V.test(ar.tagName)){aw=ar.className.match(X)}if(aw){aw=aw[1]}}var av;if(Z.test(aA.tagName)){av=1}else{var at=aA["currentStyle"];var az=ao.defaultView;var aq=(at?at["whiteSpace"]:(az&&az.getComputedStyle)?az.getComputedStyle(aA,null).getPropertyValue("white-space"):0);av=aq&&"pre"===aq.substring(0,3)}var 
aC=aH["linenums"];if(!(aC=aC==="true"||+aC)){aC=aB.match(/\blinenums\b(?::(\d+))?/);aC=aC?aC[1]&&aC[1].length?+aC[1]:true:false}if(aC){T(aA,aC,av)}ac={langExtension:aw,sourceNode:aA,numberLines:aC,pre:av};d(ac)}}}if(ai<ae.length){setTimeout(af,250)}else{if("function"===typeof al){al()}}}af()}var a=P["PR"]={"createSimpleLexer":f,"registerLangHandler":c,"sourceDecorator":h,"PR_ATTRIB_NAME":S,"PR_ATTRIB_VALUE":n,"PR_COMMENT":j,"PR_DECLARATION":G,"PR_KEYWORD":B,"PR_LITERAL":I,"PR_NOCODE":Q,"PR_PLAIN":H,"PR_PUNCTUATION":N,"PR_SOURCE":L,"PR_STRING":E,"PR_TAG":m,"PR_TYPE":R,"prettyPrintOne":IN_GLOBAL_SCOPE?(P["prettyPrintOne"]=A):(prettyPrintOne=A),"prettyPrint":prettyPrint=IN_GLOBAL_SCOPE?(P["prettyPrint"]=w):(prettyPrint=w)};if(typeof define==="function"&&define["amd"]){define("google-code-prettify",[],function(){return a})}})();
/backend.ai-webserver-23.3.11.tar.gz/backend.ai-webserver-23.3.11/ai/backend/web/static/static/js/546.91eb8c03.chunk.js
"use strict";(self.webpackChunkbackend_ai_webui_react=self.webpackChunkbackend_ai_webui_react||[]).push([[546],{9277:function(e,n,t){var o=t(1413),r=t(4925),i=t(4036),a=(t(4519),t(2556)),c=["direction","wrap","justify","align","gap","style","children"];n.Z=function(e){var n=e.direction,t=void 0===n?"row":n,l=e.wrap,s=void 0===l?"nowrap":l,d=e.justify,u=void 0===d?"flex-start":d,f=e.align,m=void 0===f?"center":f,g=e.gap,h=void 0===g?0:g,b=e.style,x=e.children,p=(0,r.Z)(e,c),v=i.Z.useToken().token,j=[u,m],Z=null===j||void 0===j?void 0:j.map((function(e){var n;switch(e){case"start":n="flex-start";break;case"end":n="flex-end";break;case"between":n="space-between";break;case"around":n="space-around";break;default:n=e}return n})),y=(0,o.Z)({display:"flex",flexDirection:t,flexWrap:s,justifyContent:Z[0],alignItems:Z[1]},b);return(0,a.jsx)("div",(0,o.Z)((0,o.Z)({style:(0,o.Z)({alignItems:"stretch",backgroundColor:"transparent",border:"0 solid black",boxSizing:"border-box",display:"flex",flexBasis:"auto",flexDirection:"column",flexShrink:0,listStyle:"none",margin:0,minHeight:0,minWidth:0,padding:0,position:"relative",textDecoration:"none",gap:"string"===typeof h?v["padding"+h.toUpperCase()]:h},y)},p),{},{children:x}))}},546:function(e,n,t){t.r(n),t.d(n,{default:function(){return K}});var o=t(3861),r=t(7462),i=t(4519),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M464 720a48 48 0 1096 0 48 48 0 10-96 0zm16-304v184c0 4.4 3.6 8 8 8h48c4.4 0 8-3.6 8-8V416c0-4.4-3.6-8-8-8h-48c-4.4 0-8 3.6-8 8zm475.7 440l-416-720c-6.2-10.7-16.9-16-27.7-16s-21.6 5.3-27.7 16l-416 720C56 877.4 71.4 904 96 904h832c24.6 0 40-26.6 27.7-48zm-783.5-27.9L512 239.9l339.8 588.2H172.2z"}}]},name:"warning",theme:"outlined"},c=t(9491),l=function(e,n){return i.createElement(c.Z,(0,r.Z)({},e,{ref:n,icon:a}))};var s=i.forwardRef(l),d=t(3656),u=t(4942),f=t(9439),m=t(1662),g=t(3270),h=t.n(g),b=t(1235),x=t(9495),p=t(564);var v=t(1480),j=t(150),Z=t(111),y=t(1157),C=function(e,n,t){var o,r="string"!==typeof(o=t)?o:o.charAt(0).toUpperCase()+o.slice(1);return(0,u.Z)({},"".concat(e.componentCls,"-").concat(n),(0,u.Z)({color:e["color".concat(t)],background:e["color".concat(r,"Bg")],borderColor:e["color".concat(r,"Border")]},"&".concat(e.componentCls,"-borderless"),{borderColor:"transparent"}))},k=function(e){return(0,j.Z)(e,(function(n,t){var o=t.textColor,r=t.lightBorderColor,i=t.lightColor,a=t.darkColor;return(0,u.Z)({},"".concat(e.componentCls,"-").concat(n),(0,u.Z)({color:o,background:i,borderColor:r,"&-inverse":{color:e.colorTextLightSolid,background:a,borderColor:a}},"&".concat(e.componentCls,"-borderless"),{borderColor:"transparent"}))}))},S=function(e){var n,t,o,r=e.paddingXXS,i=e.lineWidth,a=e.tagPaddingHorizontal,c=e.componentCls,l=a-i,s=r-i;return o={},(0,u.Z)(o,c,Object.assign(Object.assign({},(0,v.Wf)(e)),(t={display:"inline-block",height:"auto",marginInlineEnd:e.marginXS,paddingInline:l,fontSize:e.tagFontSize,lineHeight:e.tagLineHeight,whiteSpace:"nowrap",background:e.defaultBg,border:"".concat(e.lineWidth,"px ").concat(e.lineType," ").concat(e.colorBorder),borderRadius:e.borderRadiusSM,opacity:1,transition:"all ".concat(e.motionDurationMid),textAlign:"start"},(0,u.Z)(t,"&".concat(c,"-rtl"),{direction:"rtl"}),(0,u.Z)(t,"&, a, a:hover",{color:e.defaultColor}),(0,u.Z)(t,"".concat(c,"-close-icon"),{marginInlineStart:s,color:e.colorTextDescription,fontSize:e.tagIconSize,cursor:"pointer",transition:"all 
".concat(e.motionDurationMid),"&:hover":{color:e.colorTextHeading}}),(0,u.Z)(t,"&".concat(c,"-has-color"),(0,u.Z)({borderColor:"transparent"},"&, a, a:hover, ".concat(e.iconCls,"-close, ").concat(e.iconCls,"-close:hover"),{color:e.colorTextLightSolid})),(0,u.Z)(t,"&-checkable",(n={backgroundColor:"transparent",borderColor:"transparent",cursor:"pointer"},(0,u.Z)(n,"&:not(".concat(c,"-checkable-checked):hover"),{color:e.colorPrimary,backgroundColor:e.colorFillSecondary}),(0,u.Z)(n,"&:active, &-checked",{color:e.colorTextLightSolid}),(0,u.Z)(n,"&-checked",{backgroundColor:e.colorPrimary,"&:hover":{backgroundColor:e.colorPrimaryHover}}),(0,u.Z)(n,"&:active",{backgroundColor:e.colorPrimaryActive}),n)),(0,u.Z)(t,"&-hidden",{display:"none"}),(0,u.Z)(t,"> ".concat(e.iconCls," + span, > span + ").concat(e.iconCls),{marginInlineStart:l}),t))),(0,u.Z)(o,"".concat(c,"-borderless"),{borderColor:"transparent",background:e.tagBorderlessBg}),o},I=(0,Z.Z)("Tag",(function(e){var n=e.lineWidth,t=e.fontSizeIcon,o=e.fontSizeSM,r="".concat(e.lineHeightSM*o,"px"),i=(0,y.TS)(e,{tagFontSize:o,tagLineHeight:r,tagIconSize:t-2*n,tagPaddingHorizontal:8,tagBorderlessBg:e.colorFillTertiary});return[S(i),k(i),C(i,"success","Success"),C(i,"processing","Info"),C(i,"error","Error"),C(i,"warning","Warning")]}),(function(e){return{defaultBg:e.colorFillQuaternary,defaultColor:e.colorText}})),w=function(e,n){var t={};for(var o in e)Object.prototype.hasOwnProperty.call(e,o)&&n.indexOf(o)<0&&(t[o]=e[o]);if(null!=e&&"function"===typeof Object.getOwnPropertySymbols){var r=0;for(o=Object.getOwnPropertySymbols(e);r<o.length;r++)n.indexOf(o[r])<0&&Object.prototype.propertyIsEnumerable.call(e,o[r])&&(t[o[r]]=e[o[r]])}return t},D=function(e){var n,t=e.prefixCls,o=e.className,r=e.checked,a=e.onChange,c=e.onClick,l=w(e,["prefixCls","className","checked","onChange","onClick"]),s=(0,i.useContext(p.E_).getPrefixCls)("tag",t),d=I(s),m=(0,f.Z)(d,2),g=m[0],b=m[1],x=h()(s,(n={},(0,u.Z)(n,"".concat(s,"-checkable"),!0),(0,u.Z)(n,"".concat(s,"-checkable-checked"),r),n),o,b);return g(i.createElement("span",Object.assign({},l,{className:x,onClick:function(e){null===a||void 0===a||a(!r),null===c||void 0===c||c(e)}})))},L=function(e,n){var t={};for(var o in e)Object.prototype.hasOwnProperty.call(e,o)&&n.indexOf(o)<0&&(t[o]=e[o]);if(null!=e&&"function"===typeof Object.getOwnPropertySymbols){var r=0;for(o=Object.getOwnPropertySymbols(e);r<o.length;r++)n.indexOf(o[r])<0&&Object.prototype.propertyIsEnumerable.call(e,o[r])&&(t[o[r]]=e[o[r]])}return t},T=function(e,n){var t,o=e.prefixCls,r=e.className,a=e.rootClassName,c=e.style,l=e.children,s=e.icon,d=e.color,g=e.onClose,v=e.closeIcon,j=e.closable,Z=void 0!==j&&j,y=e.bordered,C=void 0===y||y,k=L(e,["prefixCls","className","rootClassName","style","children","icon","color","onClose","closeIcon","closable","bordered"]),S=i.useContext(p.E_),w=S.getPrefixCls,D=S.direction,T=i.useState(!0),O=(0,f.Z)(T,2),E=O[0],B=O[1];i.useEffect((function(){"visible"in k&&B(k.visible)}),[k.visible]);var P=(0,b.o2)(d)||(0,b.yT)(d),M=Object.assign({backgroundColor:d&&!P?d:void 0},c),z=w("tag",o),V=I(z),W=(0,f.Z)(V,2),_=W[0],A=W[1],N=h()(z,(t={},(0,u.Z)(t,"".concat(z,"-").concat(d),P),(0,u.Z)(t,"".concat(z,"-has-color"),d&&!P),(0,u.Z)(t,"".concat(z,"-hidden"),!E),(0,u.Z)(t,"".concat(z,"-rtl"),"rtl"===D),(0,u.Z)(t,"".concat(z,"-borderless"),!C),t),r,a,A),R=function(e){e.stopPropagation(),null===g||void 0===g||g(e),e.defaultPrevented||B(!1)},F=i.useMemo((function(){return 
Z?v?i.createElement("span",{className:"".concat(z,"-close-icon"),onClick:R},v):i.createElement(m.Z,{className:"".concat(z,"-close-icon"),onClick:R}):null}),[Z,v,z,R]),H="function"===typeof k.onClick||l&&"a"===l.type,Q=s||null,U=Q?i.createElement(i.Fragment,null,Q,i.createElement("span",null,l)):l,K=i.createElement("span",Object.assign({},k,{ref:n,className:N,style:M}),U,F);return _(H?i.createElement(x.Z,null,K):K)},O=i.forwardRef(T);O.CheckableTag=D;var E=O,B=t(4036),P=t(1718).Z,M=t(6038).Z,z=t(4479),V=t(867),W=t(7171),_=t(1748),A=t(9277),N=t(7112),R=t(3255),F=t(7760),H=t(2556),Q=function(e){var n=e.title,t=e.subtitle;return(0,H.jsxs)(A.Z,{direction:"column",align:"start",children:[(0,H.jsx)(d.Z.Text,{strong:!0,children:n}),t&&(0,H.jsx)(d.Z.Text,{type:"secondary",children:t})]})},U=function(e){var n=e.label,t=e.value;return(0,H.jsxs)(A.Z,{direction:"row",children:[(0,H.jsx)(E,{style:{margin:0,marginRight:-1,zIndex:1},children:n}),(0,H.jsx)(E,{color:"green",children:t})]})},K=function(){var e=(0,_.$G)().t,n=B.Z.useToken().token,t=(0,F.Dj)(),r=(0,N.useQuery)("licenseInfo",(function(){return t.enterprise.getLicense()}),{suspense:!1}),i=r.data,a=r.isLoading;i||(i={valid:!1,type:e("information.CannotRead"),licensee:e("information.CannotRead"),key:e("information.CannotRead"),expiration:e("information.CannotRead")});var c={xxl:4,xl:4,lg:2,md:1,sm:1,xs:1};return(0,H.jsxs)(A.Z,{direction:"column",align:"stretch",style:{margin:n.marginSM,gap:n.margin},children:[(0,H.jsxs)(P,{gutter:[n.margin,n.margin],children:[(0,H.jsx)(M,{xs:24,xxl:12,children:(0,H.jsx)(z.Z,{style:{height:"100%"},children:(0,H.jsxs)(V.Z,{title:e("information.Core"),bordered:!0,column:c,children:[(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.ManagerVersion")}),children:(0,H.jsxs)(A.Z,{direction:"column",style:{gap:n.marginXXS},align:"start",children:["Backend.AI ",t.managerVersion,(0,H.jsx)(U,{label:e("information.Installation"),value:t.managerVersion})]})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.APIVersion")}),children:t.apiVersion})]})})}),(0,H.jsx)(M,{xs:24,xxl:12,children:(0,H.jsx)(z.Z,{children:(0,H.jsxs)(V.Z,{title:e("information.Security"),bordered:!0,column:c,children:[(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.DefaultAdministratorAccountChanged"),subtitle:e("information.DescDefaultAdministratorAccountChanged")}),children:(0,H.jsx)(o.Z,{title:"Yes"})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.UsesSSL"),subtitle:e("information.DescUsesSSL")}),children:null!==t&&void 
0!==t&&t._config.endpoint.startsWith("https:")?(0,H.jsx)(o.Z,{title:"Yes"}):(0,H.jsx)(s,{style:{color:"red"},title:"No"})})]})})})]}),(0,H.jsx)(z.Z,{children:(0,H.jsxs)(V.Z,{title:e("information.Component"),bordered:!0,column:{xxl:4,xl:2,lg:2,md:1,sm:1,xs:1},children:[(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.DockerVersion"),subtitle:e("information.DescDockerVersion")}),children:(0,H.jsx)(E,{children:e("information.Compatible")})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.PostgreSQLVersion"),subtitle:e("information.DescPostgreSQLVersion")}),children:(0,H.jsx)(E,{children:e("information.Compatible")})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.ETCDVersion"),subtitle:e("information.DescETCDVersion")}),children:(0,H.jsx)(E,{children:e("information.Compatible")})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.RedisVersion"),subtitle:(0,R.We)(e("information.DescRedisVersion"))}),children:(0,H.jsx)(E,{children:e("information.Compatible")})})]})}),(0,H.jsx)(z.Z,{children:(0,H.jsx)(W.Z,{spinning:a,children:(0,H.jsxs)(V.Z,{title:e("information.License"),bordered:!0,column:{xxl:2,xl:2,lg:2,md:1,sm:1,xs:1},children:[(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.IsLicenseValid"),subtitle:e("information.DescIsLicenseValid")}),children:i.valid?(0,H.jsx)(o.Z,{}):(0,H.jsx)(s,{style:{color:"red"}})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.LicenseType"),subtitle:(0,R.We)(e("information.DescLicenseType"))}),children:(0,H.jsx)(E,{children:"fixed"===i.type?e("information.FixedLicense"):e("information.DynamicLicense")})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.Licensee"),subtitle:e("information.DescLicensee")}),children:(0,H.jsx)(E,{children:i.licensee})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.LicenseKey"),subtitle:e("information.DescLicenseKey")}),children:(0,H.jsx)(E,{children:i.key})}),(0,H.jsx)(V.Z.Item,{label:(0,H.jsx)(Q,{title:e("information.Expiration"),subtitle:e("information.DescExpiration")}),children:(0,H.jsx)(E,{children:i.expiration})})]})})})]})}},3255:function(e,n,t){t.d(n,{Hz:function(){return c},Lc:function(){return i},Uz:function(){return l},VQ:function(){return s},We:function(){return r},en:function(){return a}});var o=t(2556),r=function(e){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:/(<br\s*\/?>|\n)/;return e.split(n).map((function(e,t){return e.match(n)?(0,o.jsx)("br",{},t):e}))},i=function(e){var n=e.method,t=e.url,o=e.body,r=void 0===o?null:o,i=e.client,a=null===i||void 0===i?void 0:i.newSignedRequest(n,t,r,null);return null===i||void 0===i?void 0:i._wrapWithPromise(a)},a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:2;if(0===e)return"0 Bytes";var t=Math.pow(10,3);n=n<0?0:n;var o=Math.floor(Math.log(Math.round(e))/Math.log(t));return o=o<0?0:o,parseFloat((e/Math.pow(t,o)).toFixed(n))+" "+["Bytes","KB","MB","GB","TB","PB"][o]},c=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0,n=Math.pow(10,9);return Math.round(n*e)},l=function(e){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:2;return null===e||void 0===e?"Unlimited":e?(e/Math.pow(10,9)).toFixed(n):e},s=function(e,n){return""===n||void 0===n?"":n.startsWith("".concat(e,":"))?n:"".concat(e,":").concat(n)}},7760:function(e,n,t){t.d(n,{Dj:function(){return s},Kr:function(){return a},M:function(){return l},qh:function(){return c}});var 
o=t(9439),r=t(4519),i=t(7112),a=function(e){var n=(0,r.useState)(e||(new Date).toISOString()),t=(0,o.Z)(n,2),i=t[0],a=t[1];return[i,function(e){a(e||(new Date).toISOString())}]},c=function(){var e=s(),n=(0,r.useState)({name:e.current_group,id:e.groupIds[e.current_group]}),t=(0,o.Z)(n,2),i=t[0],a=t[1];return(0,r.useEffect)((function(){var n=function(n){var t=n.detail;a({name:t,id:e.groupIds[t]})};return document.addEventListener("backend-ai-group-changed",n),function(){document.removeEventListener("backend-ai-group-changed",n)}})),i},l=function(e){var n=e.api_endpoint;return(0,r.useMemo)((function(){var e=new globalThis.BackendAIClientConfig("","",n,"SESSION");return new globalThis.BackendAIClient(e,"Backend.AI Console.")}),[n])},s=function(){return(0,i.useQuery)({queryKey:"backendai-client-for-suspense",queryFn:function(){return new Promise((function(e){if("undefined"!==typeof globalThis.backendaiclient&&null!==globalThis.backendaiclient&&!1!==globalThis.backendaiclient.ready)return e(globalThis.backendaiclient);document.addEventListener("backend-ai-connected",(function n(){e(globalThis.backendaiclient),document.removeEventListener("backend-ai-connected",n)}))}))},retry:!1,suspense:!0}).data}}}]);
//# sourceMappingURL=546.91eb8c03.chunk.js.map
|
PypiClean
|
/AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/dataset/_main.py
|
import json
import inspect
import warnings
from typing import Union
from copy import copy, deepcopy
import ai4water.datasets as datasets
from ai4water.datasets import all_datasets
from ai4water.utils.utils import TrainTestSplit
from ai4water.utils.plotting_tools import Plots
from ai4water.preprocessing.imputation import Imputation
from ai4water.utils.utils import prepare_data, jsonize, to_datetime_index, print_something
from ai4water.backend import np, pd, plt, os, mpl, sklearn, h5py
from .utils import check_for_classification
from .utils import consider_intervals, decode
from .utils import load_data_from_hdf5
train_test_split = sklearn.model_selection.train_test_split
KFold = sklearn.model_selection.KFold
LeaveOneOut = sklearn.model_selection.LeaveOneOut
TimeSeriesSplit = sklearn.model_selection.TimeSeriesSplit
ShuffleSplit = sklearn.model_selection.ShuffleSplit
Patch = mpl.patches.Patch
cmap_cv = plt.cm.coolwarm
class _DataSet(Plots):
def __init__(self, config, path=os.getcwd()):
Plots.__init__(self, config=config, path=path)
def training_data(self):
raise NotImplementedError
def validation_data(self):
raise NotImplementedError
def test_data(self):
raise NotImplementedError
def KFold_splits(self, n_splits=5):
raise NotImplementedError
def LeaveOneOut_splits(self):
raise NotImplementedError
def TimeSeriesSplit_splits(self, n_splits=5):
raise NotImplementedError
@classmethod
def from_h5(cls, h5_file: str):
raise NotImplementedError
def to_disk(self, path: str):
raise NotImplementedError
def return_xy(self, x, y, initial):
if self.mode == "classification" and self.is_binary:
if len(y) == y.size:
y = y.reshape(-1, 1)
if self.verbosity > 0:
print(f"{'*' * 5} {initial} {'*' * 5}")
print_something(x, "input_x")
print_something(y, "target")
return x, y
def return_x_yy(self, x, prev_y, y, initial):
if self.verbosity > 0:
print(f"{'*' * 5} {initial} data {'*' * 5}")
print_something(x, "input_x")
print_something(prev_y, "prev_y")
print_something(y, "target")
return x, prev_y, y
class DataSet(_DataSet):
"""The purpose of DataSet is to convert unprepared/raw data into prepared data.
A prepared data consists of x,y pairs where x is inputs and y is outputs. There
are >1 examples in a DataSet. Both inputs and outputs consists of same number
of examples. An example consists of one input, output pair which can be given
to a supervised machine learning algorithm for training. For tabular data, the
number of examples does not necessarily match number of rows. The number of
examples depend upon multiple factors such as presence of intervals, how
nans are handled and the arguments related to time series data preparation
which are listed in detail in prepare_data function.
DataSet class can accept the raw, unprepared data in a variety of formats such
as .csv, .xlsx, .parquet, .mat, .n5 etc. For details see this. The DataSet
class can save the prepared data into an hdf5 file which can susequently be
used to load the data and save the time.
Methods
------------
- training_data: returns training data
- validation_data: returns validation data
- test_data: returns test data
- from_h5: creates a DataSet instance from a .h5 file
- to_disk: saves the prepared data into an .h5 file
- KFold_splits: creates splits using `KFold` of sklearn
- LeaveOneOut_splits: creates splits using `LeaveOneOut` of sklearn
- TimeSeriesSplit_splits: creates splits using `TimeSeriesSplit` of sklearn
- total_exs: returns the total number of examples
"""
def __init__(
self,
data,
input_features: Union[str, list] = None,
output_features: Union[str, list] = None,
dataset_args: dict = None,
ts_args: dict = None,
split_random: bool = False,
train_fraction: float = 0.7,
val_fraction: float = 0.2,
indices: dict = None,
intervals=None,
shuffle: bool = True,
allow_nan_labels: int = 0,
nan_filler: dict = None,
batch_size: int = 32,
drop_remainder: bool = False,
teacher_forcing: bool = False,
allow_input_nans: bool = False,
seed: int = 313,
verbosity: int = 1,
mode: str = None,
category: str = None,
save: bool = False
):
"""
Initializes the DataSet class
Parameters
----------
data :
source from which to make the data. It can be one of the following:
- pandas dataframe: each columns is a feature and each row is an example
- numpy array
- xarray dataset: it can be xarray dataset
- path like: if the path is the path of a file, then this file can
be a csv/xlsx/nc/npz/mat/parquet/feather file. The .nc file
will be read using xarray to load datasets. If the path refers
to a directory, it is supposed that each file in the directory refers to one example.
- ai4water dataset : name of any of dataset name from ai4water.datasets
- name of .h5 file
input_features : Union[list, dict, str, None]
features to use as input. If `data` is pandas dataframe
then this is list of column names from `data` to be used as input.
output_features : Union[list, dict, str, None]
features to use as output. When `data` is dataframe
then it is list of column names from `data` to be used as output.
If `data` is `dict`, then it must be consistent with `data`.
Default is None, which means the last column of data will be
used as output. In case of multi-class classification, the output
column is not supposed to be one-hot-encoded rather in the form
of [0,1,2,0,1,2,1,2,0] for 3 classes. One-hot-encoding is done
inside the model.
dataset_args : dict
additional arguments for AI4Water's [datasets][ai4water.datasets]
ts_args : dict, optional
This argument should only be used if the data is time series data.
It must be a dictionary which is then passed to :py:func:`ai4water.utils.prepare_data`
for data preparation. Possible keys in the dictionary are:
- lookback
- forecast_len
- forecast_step
- input_steps
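For example, to use the previous 5 time steps of the inputs to predict
the next step (illustrative values):
>>> ts_args = {'lookback': 5, 'forecast_len': 1}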
split_random : bool, optional
whether to split the data into training and test randomly or not.
train_fraction : float
Fraction of the complete data to be used for training
purpose. Must be greater than 0.0.
val_fraction : float
The fraction of the training data to be used for validation.
Set to 0.0 if no validation data is to be used.
indices : dict, optional
A dictionary with two possible keys, 'training', 'validation'.
It determines the indices to be used to select training, validation
and test data. If indices are given for training, then train_fraction
must not be given. If indices are given for validation, then indices
for training must also be given and val_fraction must not be given.
Therefore, the possible keys in the indices dictionary are the following
- ``training``
- ``training`` and ``validation``
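A hypothetical example, using the first 30 rows for training and the
next 10 for validation:
>>> indices = {'training': list(range(30)), 'validation': list(range(30, 40))}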
intervals :
tuple of tuples where each tuple consists of two integers, marking
the start and end of interval. An interval here means indices
from the data. Only rows within those indices will be used when preparing
data/batches for NN. This is handy when our input data
contains chunks of missing values or when we don't want to consider several
rows in input data during data_preparation.
For further usage see `examples/using_intervals`
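For example, to consider only rows 0 to 100 and 200 to 300 of the data
(illustrative values):
>>> intervals = ((0, 100), (200, 300))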
shuffle : bool
whether to shuffle the samples or not
allow_nan_labels : int
whether to allow examples with nan labels or not.
If it is > 0 and the target values contain NaNs, those examples
will not be ignored and will be used as they are.
In such a case a customized training and evaluation
step is performed where the loss is not calculated for predictions
corresponding to nan observations. Thus this option can be useful
when we are predicting more than 1 target and some of the examples
have some of their labels missing. In such a scenario, if we set this
option to > 0, we don't need to ignore those samples at all during data
preparation. This option should be set to > 0 only when using tensorflow
for deep learning models. If == 1, an example with label [nan, 1]
will not be removed while an example with label [nan, nan]
will be ignored/removed. If == 2, both examples (mentioned before) will be
kept/will not be removed. This means for multi-outputs, we can end
up having examples whose labels are all nans. If the number of outputs
is just one, then this must be set to 2 in order to use samples with nan labels.
nan_filler : dict
This argument determines the imputation technique used to fill the nans in
the data. The imputation is actually performed by :py:class:`ai4water.preprocessing.Imputation`
class. Therefore this argument determines the interaction with `Imputation` class.
The default value is None, which will raise an error if missing/nan values
are encountered in the input data. The user can however specify a
dictionary whose one key must be `method`. The value of 'method'
key can be `fillna` or `interpolate`. For example, to do forward
filling, the user can do as following
>>> {'method': 'fillna', 'imputer_args': {'method': 'ffill'}}
For details about fillna keyword options see fillna_
For `interpolate`, the user can specify the type of interpolation
for example
>>> {'method': 'interpolate', 'imputer_args': {'method': 'spline', 'order': 2}}
will perform spline interpolation with 2nd order.
For other possible options/keyword arguments for interpolation, see interpolate_
The filling or interpolation is done columnwise, however, the user
can specify how to do for each column by providing the above mentioned
arguments as dictionary or list. The sklearn based imputation methods
can also be used in a similar fashion. For KNN
>>> {'method': 'KNNImputer', 'imputer_args': {'n_neighbors': 3}}
or for iterative imputation
>>> {'method': 'IterativeImputer', 'imputer_args': {'n_nearest_features': 2}}
To pass additional arguments one can make use of `imputer_args`
keyword argument
>>> {'method': 'KNNImputer', 'features': ['b'], 'imputer_args': {'n_neighbors': 4}},
For more on sklearn based imputation methods see this blog_
batch_size : int
size of one batch. Only relevant if `drop_remainder` is True.
drop_remainder : bool
whether to drop the remainder if len(data) % batch_size != 0 or not?
teacher_forcing : bool
whether to return previous output/target/ground
truth or not. This is useful when the user wants to feed output
at t-1 as input at timestep t. For details about this technique
see this article_
allow_input_nans : bool, optional
If False, the examples containing nans in inputs will be removed.
Setting this to True will result in feeding nan containing data
to your algorithm unless nans are filled with `nan_filler`.
seed : int
random seed for reproducibility
verbosity : int
mode : str
either ``regression`` or ``classification``
category : str
save : bool
whether to save the data in an h5 file or not.
Example
-------
>>> import pandas as pd
>>> import numpy as np
>>> from ai4water.preprocessing import DataSet
>>> data_ = pd.DataFrame(np.random.randint(0, 1000, (50, 2)), columns=['input', 'output'])
>>> data_set = DataSet(data=data_, ts_args={'lookback':5})
>>> x,y = data_set.training_data()
.. _fillna:
https://pandas.pydata.org/pandas-docs/version/0.22.0/generated/pandas.DataFrame.fillna.html
.. _article:
https://machinelearningmastery.com/teacher-forcing-for-recurrent-neural-networks/
.. _interpolate:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.interpolate.html
.. _blog:
https://scikit-learn.org/stable/auto_examples/impute/plot_missing_values.html#sphx-glr-auto-examples-impute-plot-missing-values-py
Note
----
The word 'index' is not allowed as column name, input_features or output_features
"""
indices = indices or {}
if indices:
assert split_random is False, "indices cannot be used with split_random"
if 'training' in indices:
assert train_fraction == 0.7, f"""
You can not set training data using both indices and train_fraction.
Use either indices or train_fraction."""
if 'validation' in indices:
assert val_fraction == 0.2, f"""
You can not set validation data using both indices and val_fraction.
Use either indices or val_fraction."""
assert 'training' in indices, f"""
when defining validation data using indices, training data must also be
defined using indices."""
assert val_fraction < 1.0, f"""
val_fraction must be less than 1.0 but it is {val_fraction}.
"""
self.dataset_args = dataset_args
self.config = {
'input_features': input_features,
'output_features': output_features
}
self.nan_filler = nan_filler
self.data = self._process_data(
data,
input_features,
output_features)
self.ts_args = ts_args
self.split_random = split_random
self.indices = indices
self.train_fraction = train_fraction
self.val_fraction = val_fraction
self.shuffle = shuffle
self.batch_size = batch_size
self.intervals = intervals
self.allow_nan_labels = allow_nan_labels
self.teacher_forcing = teacher_forcing
self.drop_remainder = drop_remainder
self.allow_input_nans = allow_input_nans
self.verbosity = verbosity
self.seed = seed
self.mode = mode
self.category = category
self.save = save
self.scalers = {}
self.indexes = {}
self.index_types = {}
self._input_features = copy(input_features)
if save and h5py:
self.to_disk()
_DataSet.__init__(self, config=self.config, path=os.getcwd())
def init_paras(self) -> dict:
"""Returns the initializing parameters of this class"""
signature = inspect.signature(self.__init__)
init_paras = {}
for para in signature.parameters.values():
init_paras[para.name] = getattr(self, para.name)
return init_paras
@property
def ts_args(self):
return self._ts_args
@ts_args.setter
def ts_args(self, _ts_args: dict = None):
default_args = {'input_steps': 1,
'lookback': 1,
'forecast_len': 1,
'forecast_step': 0,
'known_future_inputs': False
}
if _ts_args:
default_args.update(_ts_args)
self._ts_args = default_args
@property
def lookback(self):
return self.ts_args['lookback']
@property
def classes(self):
_classes = []
if self.mode == 'classification':
if self.num_outs == 1: # for binary/multiclass
array = self.data[self._output_features].values
_classes = np.unique(array[~np.isnan(array)])
else: # for one-hot encoded
_classes = self._output_features
return _classes
@property
def num_classes(self):
return len(self.classes)
@property
def is_binary(self) -> bool:
"""Returns True if the porblem is binary classification"""
_default = False
if self.mode == 'classification':
if self.num_outs == 1:
array = self.data[self._output_features].values
unique_vals = np.unique(array[~np.isnan(array)])
if len(unique_vals) == 2:
_default = True
else:
pass # todo, check when output columns are one-hot encoded
return _default
@property
def is_multiclass(self) -> bool:
"""Returns True if the porblem is multiclass classification"""
_default = False
if self.mode == 'classification':
if self.num_outs == 1:
array = self.data[self._output_features].values
unique_vals = np.unique(array[~np.isnan(array)])
if len(unique_vals) > 2:
_default = True
else:
pass # todo, check when output columns are one-hot encoded
return _default
@property
def is_multilabel(self) -> bool:
"""Returns True if the porblem is multilabel classification"""
_default = False
if self.mode == 'classification':
if self.num_outs > 1:
_default = True
return _default
@property
def _to_categorical(self):
# whether we have to convert y into one-hot encoded form
_default = False
if self.is_binary or self.is_multiclass:
if self.num_outs == 1:
_default = True
# it seems sklearn can accept one-hot-encoded targets but xgb, lgbm and catboost can't
# but since sklearn can also accept non-one-hot-encoded targets for multiclass
# let's not one-hot-encode for all ML algos
if self.category == 'ML':
_default = False
return _default
@property
def teacher_forcing(self):
return self._teacher_forcing
@teacher_forcing.setter
def teacher_forcing(self, x):
self._teacher_forcing = x
@property
def input_features(self):
_inputs = self.config['input_features']
if _inputs is None and self.data is not None:
assert isinstance(self.data, pd.DataFrame)
_inputs = self.data.columns[0:-1].to_list()
return _inputs
@property
def output_features(self):
"""for external use"""
_outputs = self.config['output_features']
if _outputs is None and self.data is not None:
# assert isinstance(self.data, pd.DataFrame)
if self.data.ndim == 2:
_outputs = [col for col in self.data.columns if col not in self.input_features]
else:
_outputs = [] # todo
return _outputs
@property
def _output_features(self):
"""for internal use"""
_outputs = deepcopy(self.config['output_features'])
if isinstance(self.data, list):
assert isinstance(_outputs, list)
elif isinstance(self.data, dict):
assert isinstance(_outputs, dict), f"""
data is of type dict while output_features are
of type {_outputs.__class__.__name__}"""
for k in self.data.keys():
if k not in _outputs:
_outputs[k] = []
elif _outputs is None and self.data is not None:
assert isinstance(self.data, pd.DataFrame)
_outputs = [col for col in self.data.columns if col not in self.input_features]
return _outputs
@property
def num_ins(self):
return len(self.input_features)
@property
def num_outs(self):
return len(self.output_features)
@property
def batch_dim(self):
default = "3D"
if self.ts_args['lookback'] == 1:
default = "2D"
return default
def _process_data(self,
data,
input_features,
output_features
):
if isinstance(data, str):
_source = self._get_data_from_str(data, input_features, output_features)
if isinstance(_source, str) and _source.endswith('.h5'):
self._from_h5 = True
elif isinstance(data, pd.DataFrame):
_source = self._get_data_from_df(data, input_features, output_features)
elif isinstance(data, np.ndarray):
_source = self._get_data_from_ndarray(data, input_features, output_features)
elif data.__class__.__name__ == "Dataset":
_source = data
elif isinstance(data, list):
raise ValueError(f"""
data is given as a list. For such cases either use DataSetUnion
or DataSetPipeline instead of DataSet class""")
elif isinstance(data, dict):
raise ValueError(f"""
data is given as a dictionary. For such cases either use DataSetUnion
or DataSetPipeline instead of DataSet class""")
elif data is None:
return data
else:
assert data is not None
raise ValueError(f"""
unrecognizable source of data of type {data.__class__.__name__} given
""")
_source = self.impute(_source)
return _source
def _get_data_from_ndarray(self, data, input_features, output_features):
if data.ndim == 2:
# if output_features is not defined, consider 1 output and name it
# as 'output'
if output_features is None:
output_features = ['output']
self.config['output_features'] = output_features # we should put it in config as well
elif isinstance(output_features, str):
output_features = [output_features]
else:
assert isinstance(output_features, list)
if input_features is None: # define dummy names for input_features
input_features = [f'input_{i}' for i in range(data.shape[1] - len(output_features))]
self.config['input_features'] = input_features
return pd.DataFrame(data, columns=input_features + output_features)
else:
return data
def _get_data_from_df(self, data, input_features, output_features):
if input_features is None and output_features is not None:
if isinstance(output_features, str):
output_features = [output_features]
assert isinstance(output_features, list)
input_features = [col for col in data.columns if col not in output_features]
# since we have inferred the input_features, they should be put
# back into config
self.config['input_features'] = input_features
return data
def _get_data_from_str(self, data, input_features, output_features):
if isinstance(output_features, str):
output_features = [output_features]
# dir path/file path/ ai4water dataset name
if data.endswith('.h5'):
_source = data
elif data.endswith('.csv'):
_source = pd.read_csv(data)
if _source.columns[0] in ['index', 'time', 'date']:
_source.index = pd.to_datetime(_source.pop('index'))
elif data.endswith('.xlsx') or data.endswith('.xls'):
_source = pd.read_excel(data)
if _source.columns[0] in ['index', 'time', 'date']:
_source.index = pd.to_datetime(_source.pop('index'))
elif data.endswith('.parquet'):
_source = pd.read_parquet(data)
elif data.endswith('.feather'):
_source = pd.read_feather(data)
if _source.columns[0] in ['index', 'time', 'date']:
_source.index = pd.to_datetime(_source.pop('index'))
# netcdf file
elif data.endswith('.nc'):
import xarray as xr
_source = xr.open_dataset(data)
_source = _source.to_dataframe()
elif data.endswith('npz'):
data = np.load(data)
assert len(data) == 1
d = []
for k, v in data.items():
d.append(v)
data: np.ndarray = d[0]
_source = pd.DataFrame(data, columns=input_features + output_features)
# matlab's mat file
elif data.endswith('.mat'):
import scipy
mat = scipy.io.loadmat(data)
data: np.ndarray = mat['data']
_source = pd.DataFrame(data, columns=input_features + output_features)
elif os.path.isfile(data):
assert os.path.exists(data)
_source = data
elif os.path.isdir(data):
assert len(os.listdir(data)) > 1
# read from directory
raise NotImplementedError
elif data in all_datasets:
_source = self._get_data_from_ai4w_datasets(data)
else:
raise ValueError(f"unregnizable source of data given {data}")
return _source
def _get_data_from_ai4w_datasets(self, data):
Dataset = getattr(datasets, data)
dataset = Dataset()
dataset_args = self.dataset_args
if dataset_args is None:
dataset_args = {}
# if self.config['input_features'] is not None:
dynamic_features = self.input_features + self.output_features
data = dataset.fetch(dynamic_features=dynamic_features,
**dataset_args)
data = data.to_dataframe(['time', 'dynamic_features']).unstack()
data.columns = [a[1] for a in data.columns.to_flat_index()]
return data
def impute(self, data):
"""Imputes the missing values in the data using `Imputation` module"""
if self.nan_filler is not None:
if isinstance(data, pd.DataFrame):
_source = self._impute(data, self.nan_filler)
else:
raise NotImplementedError
else:
_source = data
return _source
def _impute(self, data, impute_config):
if isinstance(impute_config, str):
method, impute_args = impute_config, {}
data = Imputation(data, method=method, **impute_args)()
elif isinstance(impute_config, dict):
data = Imputation(data, **impute_config)()
elif isinstance(impute_config, list):
for imp_conf in impute_config:
data = Imputation(data, **imp_conf)()
else:
raise NotImplementedError(f'{impute_config.__class__.__name__}')
return data
def get_indices(self):
"""If the data is to be divded into train/test based upon indices,
here we create train_indices and test_indices. The train_indices
contain indices for both training and validation data.
"""
tot_obs = self.total_exs(**self.ts_args)
all_indices = np.arange(tot_obs)
if len(self.indices) == 0:
if self.train_fraction < 1.0:
if self.split_random:
train_indices, test_indices = train_test_split(
all_indices,
train_size=self.train_fraction,
random_state=self.seed
)
else:
train_indices, test_indices = self._get_indices_by_seq_split(
all_indices,
self.train_fraction)
else: # no test data
train_indices, test_indices = all_indices, []
else:
_train_indices = self.indices.get('training', None)
_val_indices = self.indices.get('validation', None)
_test_indices = self.indices.get('test', None)
if _train_indices is not None:
if _val_indices is None:
# even if val_fraction is > 0.0, we will separate validation
# data from training later
_val_indices = np.array([]) # no validation set
else:
assert isinstance(np.array(_val_indices), np.ndarray)
_val_indices = np.array(_val_indices)
overlap = np.intersect1d(_train_indices, _val_indices)
assert len(overlap) == 0, f"""
Training and validation indices must be mutually exclusive.
They contain {len(overlap)} overlapping values."""
train_indices = np.sort(np.hstack([_train_indices, _val_indices]))
if _test_indices is None:
# get test_indices by subtracting train_indices from all indices
test_indices = [ind for ind in all_indices if ind not in train_indices]
# _val_indices = np.array([])
else: # todo
train_indices = []
setattr(self, 'train_indices', train_indices)
setattr(self, 'test_indices', test_indices)
return np.array(train_indices).astype("int32"), np.array(test_indices).astype("int32")
def _get_indices_by_seq_split(
self,
all_indices: Union[list, np.ndarray],
train_fraction):
""" sequential train/test split"""
train_indices = all_indices[0:int(train_fraction * len(all_indices))]
test_indices = all_indices[int(train_fraction * len(all_indices)):]
return train_indices, test_indices
def _training_data(self, key="_training", **kwargs):
"""training data including validation data"""
train_indices, test_indices = self.get_indices()
if 'validation' in self.indices:
# when validation indices are given, we first prepare
# complete data which contains training, validation and test data
# TODO this is against the function definition
indices = np.sort(np.hstack([train_indices, test_indices]))
else:
indices = train_indices
data = self.data.copy()
# numpy arrays are not indexified and it is assumed that the whole array is
# used as input
if not isinstance(data, np.ndarray):
data = self.indexify(data, key)
# get x,_y, y
x, prev_y, y = self._make_data(
data,
intervals=self.intervals,
indices=indices,
**kwargs)
if not isinstance(self.data, np.ndarray):
x, self.indexes[key] = self.deindexify(x, key)
if self.mode == 'classification':
y = check_for_classification(y, self._to_categorical)
return x, prev_y, y
def training_data(self, key="train", **kwargs):
"""training data excluding validation data"""
if getattr(self, '_from_h5', False):
return load_data_from_hdf5('training_data', self.data)
x, prev_y, y = self._training_data(key=key, **kwargs)
if self.val_fraction > 0.0:
# when no output is generated, corresponding index will not be saved
idx = self.indexes.get(key, np.arange(len(x))) # index also needs to be split
x, prev_y, y, idx = self._train_val_split(x, prev_y, y, idx, 'training')
# if drop_remainder is True, trim the arrays to a multiple of batch_size
x, prev_y, y = self.check_for_batch_size(x, prev_y, y)
self.indexes[key] = idx[0:len(x)]
if self.teacher_forcing:
return self.return_x_yy(x, prev_y, y, "Training")
return self.return_xy(x, y, "Training")
def validation_data(self, key="val", **kwargs):
"""validation data"""
if getattr(self, '_from_h5', False):
return load_data_from_hdf5('validation_data', self.data)
x, prev_y, y = self._training_data(key=key, **kwargs)
if self.val_fraction > 0.0:
idx = self.indexes.get(key, np.arange(len(x)))
x, prev_y, y, idx = self._train_val_split(x, prev_y, y, idx, 'validation')
x, prev_y, y = self.check_for_batch_size(x, prev_y, y)
self.indexes[key] = idx[0:len(x)]
else:
x, prev_y, y = np.empty(0), np.empty(0), np.empty(0)
if self.teacher_forcing:
return self.return_x_yy(x, prev_y, y, "Validation")
return self.return_xy(x, y, "Validation")
def _train_val_split(self, x, prev_y, y, idx, return_type):
"""split x,y,idx,prev_y into training and validation data"""
if self.split_random:
# split x,y randomly
splitter = TrainTestSplit(test_fraction=self.val_fraction, seed=self.seed)
train_x, val_x, train_y, val_y = splitter.split_by_random(x, y)
splitter = TrainTestSplit(test_fraction=self.val_fraction, seed=self.seed)
train_idx, val_idx, train_prev_y, val_prev_y = splitter.split_by_random(
idx, prev_y)
elif 'validation' in self.indices:
# separate indices were provided for validation data
# it must be remembered that x,y now contains training+validation+test data
# but based upon indices, we will choose either training or validation data
val_indices = self.indices['validation']
_train_indices, _ = self.get_indices()
train_indices = [i for i in _train_indices if i not in val_indices]
splitter = TrainTestSplit(train_indices=train_indices, test_indices=val_indices)
train_x, val_x, train_y, val_y = splitter.split_by_indices(
x, y
)
splitter = TrainTestSplit(train_indices=train_indices, test_indices=val_indices)
train_idx, val_idx, train_prev_y, val_prev_y = splitter.split_by_indices(
idx, prev_y)
else:
# split x,y sequentially
splitter = TrainTestSplit(test_fraction=self.val_fraction)
train_x, val_x, train_y, val_y = splitter.split_by_slicing(x, y)
splitter = TrainTestSplit(test_fraction=self.val_fraction)
train_idx, val_idx, train_prev_y, val_prev_y = splitter.split_by_slicing(idx, prev_y)
if return_type == "training":
return train_x, train_prev_y, train_y, train_idx
return val_x, val_prev_y, val_y, val_idx
def test_data(self, key="test", **kwargs):
"""test data"""
if getattr(self, '_from_h5', False):
return load_data_from_hdf5('test_data', self.data)
if self.train_fraction < 1.0:
data = self.data.copy()
# numpy arrays are not indexified and it is assumed that the whole array
# is used as input
if not isinstance(data, np.ndarray):
data = self.indexify(data, key)
_, test_indices = self.get_indices()
if len(test_indices) > 0: # it is possible that training and validation
# indices cover whole data
# get x,_y, y
x, prev_y, y = self._make_data(
data,
intervals=self.intervals,
indices=test_indices,
**kwargs)
x, prev_y, y = self.check_for_batch_size(x, prev_y, y)
if not isinstance(self.data, np.ndarray):
x, self.indexes[key] = self.deindexify(x, key)
if self.mode == 'classification':
y = check_for_classification(y, self._to_categorical)
else:
x, prev_y, y = np.empty(0), np.empty(0), np.empty(0)
else:
x, prev_y, y = np.empty(0), np.empty(0), np.empty(0)
if self.teacher_forcing:
return self.return_x_yy(x, prev_y, y, "Test")
return self.return_xy(x, y, "Test")
def check_for_batch_size(self, x, prev_y=None, y=None):
if self.drop_remainder:
assert isinstance(x, np.ndarray)
remainder = len(x) % self.batch_size
if remainder:
x = x[0:-remainder]
if prev_y is not None:
prev_y = prev_y[0:-remainder]
if y is not None:
y = y[0:-remainder]
return x, prev_y, y
def check_nans(self, data, input_x, input_y, label_y):
"""Checks whether anns are present or not and checks shapes of arrays
being prepared."""
if isinstance(data, pd.DataFrame):
nans = data[self.output_features].isna()
nans = nans.sum().sum()
data = data.values
else:
nans = np.isnan(data[:, -self.num_outs:])
# df[self.out_cols].isna().sum()
nans = int(nans.sum())
if nans > 0:
if self.allow_nan_labels == 2:
if self.verbosity > 0: print("""
\n{} Allowing NANs in predictions {}\n""".format(10 * '*', 10 * '*'))
elif self.allow_nan_labels == 1:
if self.verbosity > 0: print("""
\n{} Ignoring examples whose all labels are NaNs {}\n
""".format(10 * '*', 10 * '*'))
idx = ~np.array([all([np.isnan(x) for x in label_y[i]]) for i in range(len(label_y))])
input_x = input_x[idx]
input_y = input_y[idx]
label_y = label_y[idx]
if int(np.isnan(data[:, -self.num_outs:][0:self.lookback]).sum() / self.num_outs) >= self.lookback:
self.nans_removed_4m_st = -9999
else:
if self.verbosity > 0:
print('\n{} Removing Examples with nan in labels {}\n'.format(10 * '*', 10 * '*'))
if self.num_outs == 1:
# find out how many nans were present from start of data until
# lookback, these nans will be removed
self.nans_removed_4m_st = np.isnan(data[:, -self.num_outs:][0:self.lookback]).sum()
# find out such labels where 'y' has at least one nan
nan_idx = np.array([np.any(i) for i in np.isnan(label_y)])
non_nan_idx = np.invert(nan_idx)
label_y = label_y[non_nan_idx]
input_x = input_x[non_nan_idx]
input_y = input_y[non_nan_idx]
assert np.isnan(label_y).sum() < 1, """
label still contains {} nans""".format(np.isnan(label_y).sum())
assert input_x.shape[0] == input_y.shape[0] == label_y.shape[0], """
shapes are not same"""
if not self.allow_input_nans:
assert np.isnan(input_x).sum() == 0, """input still contains {} nans
""".format(np.isnan(input_x).sum())
return input_x, input_y, label_y
def indexify(self, data: pd.DataFrame, key):
data = data.copy()
dummy_index = False
# for dataframes
if isinstance(data.index, pd.DatetimeIndex):
index = list(map(int, np.array(data.index.strftime('%Y%m%d%H%M'))))
# datetime index
self.index_types[key] = 'dt'
original_index = pd.Series(index, index=index)
else:
try:
index = list(map(int, np.array(data.index)))
self.index_types[key] = 'int'
original_index = pd.Series(index, index=index)
except ValueError: # index may not be convertible to integer, it may be
# string values
dummy_index = np.arange(len(data), dtype=np.int64)
original_index = pd.Series(data.index, index=dummy_index)
index = dummy_index
self.index_types[key] = 'str'
self.indexes[key] = {'dummy': dummy_index,
'original': original_index}
# insert the 'index' column as the first column.
# This column will only be used to keep
# track of indices of train and test data.
data.insert(0, 'index', index)
self._input_features = ['index'] + self.input_features
# setattr(self, 'input_features', ['index'] + self.input_features)
self.indexes[key] = {'index': index, 'dummy_index': dummy_index,
'original': original_index}
return data
def deindexify(self, data: np.ndarray, key):
_data, _index = self.deindexify_nparray(data, key)
if self.indexes[key].get('dummy_index', None) is not None:
_index = self.indexes[key]['original'].loc[_index].values
if self.index_types[key] == 'dt':
_index = to_datetime_index(_index)
return _data, _index
def get_batches(self, data):
if self.batch_dim == "2D":
return self.get_2d_batches(data)
else:
return self.check_nans(data, *prepare_data(data,
num_outputs=self.num_outs,
**self.ts_args))
def get_2d_batches(self, data):
# need to count num_ins based upon _input_features as it considers the index column
num_ins = len(self._input_features)
if not isinstance(data, np.ndarray):
if isinstance(data, pd.DataFrame):
data = data.values
else:
raise TypeError(f"unknown data type {data.__class__.__name__} for data ")
if self.num_outs > 0:
input_x = data[:, 0:num_ins]
input_y, label_y = data[:, -self.num_outs:], data[:, -self.num_outs:]
else:
dummy_input_y = np.random.random((len(data), self.num_outs))
dummy_y = np.random.random((len(data), self.num_outs))
input_x, input_y, label_y = data[:, 0:num_ins], dummy_input_y, dummy_y
assert self.lookback == 1, """
lookback should be one for MLP/Dense layer based model, but it is {}
""".format(self.lookback)
return self.check_nans(data, input_x, input_y, np.expand_dims(label_y, axis=2))
def _make_data(self, data, indices=None, intervals=None, shuffle=False):
# if indices is not None:
# indices = np.array(indices).astype("int32")
# assert isinstance(np.array(indices), np.ndarray), "indices must be array like"
if isinstance(data, pd.DataFrame):
data = data[self._input_features + self.output_features].copy()
df = data
else:
data = data.copy()
df = data
if intervals is None:
x, prev_y, y = self.get_batches(df)
if indices is not None:
# if indices are given then this should be done after `get_batches`
# method
x = x[indices]
prev_y = prev_y[indices]
y = y[indices]
else:
xs, prev_ys, ys = [], [], []
for _st, _en in intervals:
df1 = data[_st:_en]
if df1.shape[0] > 0:
x, prev_y, y = self.get_batches(df1.values)
xs.append(x)
prev_ys.append(prev_y)
ys.append(y)
if indices is None:
x = np.vstack(xs)
prev_y = np.vstack(prev_ys)
y = np.vstack(ys)
else:
x = np.vstack(xs)[indices]
prev_y = np.vstack(prev_ys)[indices]
y = np.vstack(ys)[indices]
if shuffle:
raise NotImplementedError
if isinstance(data, pd.DataFrame) and 'index' in data:
data.pop('index')
if self.ts_args['forecast_len'] == 1 and len(self.output_features) > 0:
y = y.reshape(-1, len(self.output_features))
return x, prev_y, y
def deindexify_nparray(self, data, key):
if data.ndim == 3:
_data, index = data[..., 1:].astype(np.float32), data[:, -1, 0]
elif data.ndim == 2:
_data, index = data[..., 1:].astype(np.float32), data[:, 0]
elif data.ndim == 4:
_data, index = data[..., 1:].astype(np.float32), data[:, -1, -1, 0]
elif data.ndim == 5:
_data, index = data[..., 1:].astype(np.float32), data[:, -1, -1, -1, 0]
else:
raise NotImplementedError
if self.index_types[key] != 'str':
index = np.array(index, dtype=np.int64)
return _data, index
def total_exs(self,
lookback,
forecast_step=0, forecast_len=1,
**ts_args
):
intervals = self.intervals
input_steps = self.ts_args['input_steps']
data = consider_intervals(self.data, intervals)
num_outs = len(self.output_features) if self.output_features is not None else None
max_tot_obs = 0
if not self.allow_nan_labels and intervals is None:
_data = data[self.input_features + self.output_features] if isinstance(data, pd.DataFrame) else data
x, _, _ = prepare_data(_data,
lookback, num_outputs=num_outs,
forecast_step=forecast_step,
forecast_len=forecast_len, mask=np.nan, **ts_args)
max_tot_obs = len(x)
# we need to ignore some values at the start
more = (lookback * input_steps) - 1
if isinstance(data, np.ndarray):
return len(data) - more
# todo, why not when allow_nan_labels>0?
if forecast_step > 0:
more += forecast_step
if forecast_len > 1:
more += forecast_len
if intervals is None: intervals = [()]
more *= len(intervals)
if self.allow_nan_labels == 2:
tot_obs = data.shape[0] - more
elif self.allow_nan_labels == 1:
label_y = data[self.output_features].values
idx = ~np.array([all([np.isnan(x) for x in label_y[i]]) for i in range(len(label_y))])
tot_obs = np.sum(idx) - more
else:
if num_outs == 1:
tot_obs = data.shape[0] - int(data[self.output_features].isna().sum()) - more
tot_obs = max(tot_obs, max_tot_obs)
else:
# count by dropping all the rows when nans occur in output features
tot_obs = len(data.dropna(subset=self.output_features))
tot_obs -= more
return tot_obs
def KFold_splits(self, n_splits=5):
"""returns an iterator for kfold cross validation.
The iterator yields two tuples of training and test x,y pairs.
On every iteration, the iterator returns the following:
`(train_x, train_y), (test_x, test_y)`
Note: only `training_data` and `validation_data` are used to make kfolds.
Example
---------
>>> import numpy as np
>>> import pandas as pd
>>> from ai4water.preprocessing import DataSet
>>> data = pd.DataFrame(np.random.randint(0, 10, (20, 3)), columns=['a', 'b', 'c'])
>>> data_set = DataSet(data=data)
>>> kfold_splits = data_set.KFold_splits()
>>> for (train_x, train_y), (test_x, test_y) in kfold_splits:
... print(train_x, train_y, test_x, test_y)
"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
kf = KFold(n_splits=n_splits,
random_state=self.seed if self.shuffle else None,
shuffle=self.shuffle)
spliter = kf.split(x)
for tr_idx, test_idx in spliter:
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def LeaveOneOut_splits(self):
"""Yields leave one out splits
On every iteration, the iterator returns the following:
`(train_x, train_y), (test_x, test_y)`"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
kf = LeaveOneOut()
for tr_idx, test_idx in kf.split(x):
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def ShuffleSplit_splits(self, **kwargs):
"""Yields ShuffleSplit splits
On every iteration, the iterator returns the following:
`(train_x, train_y), (test_x, test_y)`"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
sf = ShuffleSplit(**kwargs)
for tr_idx, test_idx in sf.split(x):
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def TimeSeriesSplit_splits(self, n_splits=5, **kwargs):
"""returns an iterator for TimeSeriesSplit.
On every iteration, the iterator returns the following:
`(train_x, train_y), (test_x, test_y)`
"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
tscv = TimeSeriesSplit(n_splits=n_splits, **kwargs)
for tr_idx, test_idx in tscv.split(x):
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def plot_KFold_splits(self, n_splits=5, show=True, **kwargs):
"""Plots the indices of kfold splits"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
kf = KFold(n_splits=n_splits,
random_state=self.seed if self.shuffle else None,
shuffle=self.shuffle)
spliter = kf.split(x)
self._plot_splits(spliter, x, title="KFoldCV", show=show, **kwargs)
return
def plot_LeaveOneOut_splits(self, show=True, **kwargs):
"""Plots the indices obtained from LeaveOneOut strategy"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
spliter = LeaveOneOut().split(x)
self._plot_splits(spliter=spliter,
x=x,
title="LeaveOneOutCV",
show=show,
**kwargs)
return
def plot_TimeSeriesSplit_splits(self, n_splits=5, show=True, **kwargs):
"""Plots the indices obtained from TimeSeriesSplit strategy"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
spliter = TimeSeriesSplit(n_splits=n_splits, **kwargs).split(x)
self._plot_splits(spliter=spliter,
x=x,
title="TimeSeriesCV",
show=show,
**kwargs)
return
def _plot_splits(self, spliter, x, show=True, **kwargs):
splits = list(spliter)
figsize = kwargs.get('figsize', (10, 8))
legend_fs = kwargs.get('legend_fs', 20)
legend_pos = kwargs.get('legend_pos', (1.02, 0.8))
title = kwargs.get("title", "CV")
plt.close('all')
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
for ii, split in enumerate(splits):
indices = np.array([np.nan] * len(x))
indices[split[0]] = 1
indices[split[1]] = 0
ax.scatter(range(len(indices)), [ii + .5] * len(indices),
c=indices, marker='_', lw=10, cmap="coolwarm",
vmin=-.2, vmax=1.2)
yticklabels = list(range(len(splits)))
ax.set(yticks=np.arange(len(splits)) + .5, yticklabels=yticklabels)
ax.set_xlabel("Sample Index", fontsize=18)
ax.set_ylabel("CV iteration", fontsize=18)
ax.set_title(title, fontsize=20)
ax.legend([Patch(color=cmap_cv(.8)), Patch(color=cmap_cv(.02))],
['Training', 'Test'],
loc=legend_pos, fontsize=legend_fs)
if show:
plt.tight_layout()
plt.show()
return
def to_disk(self, path: str = None):
import h5py
path = path or os.getcwd()
filepath = os.path.join(path, "data.h5")
f = h5py.File(filepath, mode='w')
for k, v in self.init_paras().items():
if isinstance(v, (dict, list, tuple, float, int, str)):
f.attrs[k] = json.dumps(
v, default=jsonize).encode('utf8')
elif v is not None and k != 'data':
f.attrs[k] = v
if self.teacher_forcing:
x, prev_y, y = self.training_data()
val_x, val_prev_y, val_y = self.validation_data()
test_x, test_prev_y, test_y = self.test_data()
else:
prev_y, val_prev_y, test_prev_y = np.empty(0), np.empty(0), np.empty(0)
x, y = self.training_data()
val_x, val_y = self.validation_data()
test_x, test_y = self.test_data()
# save in disk
self._save_data_to_hdf5('training_data', x, prev_y, y, f)
self._save_data_to_hdf5('validation_data', val_x, val_prev_y, val_y, f)
self._save_data_to_hdf5('test_data', test_x, test_prev_y, test_y, f)
f.close()
return
def _save_data_to_hdf5(self, data_type, x, prev_y, y, f):
"""Saves one data_type in h5py. data_type is string indicating whether
it is training, validation or test data."""
assert x is not None
group_name = f.create_group(data_type)
container = {}
container['x'] = x
if self.teacher_forcing:
container['prev_y'] = prev_y
container['y'] = y
for name, val in container.items():
param_dset = group_name.create_dataset(name, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
return
@classmethod
def from_h5(cls, path):
"""Creates an instance of DataSet from .h5 file."""
import h5py
f = h5py.File(path, mode='r')
config = {}
for k, v in f.attrs.items():
if isinstance(v, str) or isinstance(v, bytes):
v = decode(v)
config[k] = v
cls._from_h5 = True
f.close()
# the data is already being loaded from h5 file so no need to save it again
# upon initialization of class
config['save'] = False
return cls(path, **config)
|
PypiClean
|
/reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/Utils/Utilities.py
|
from builtins import str, bytes
import subprocess
import os
import re
import zlib
import base64
import sys
from types import ModuleType, FunctionType
from gc import get_referents
def lowerCmsHeaders(headers):
"""
Lower CMS headers in the provided headers dict. The WMCore Authentication
code checks only cms headers in lower case, e.g. cms-xxx-yyy.
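A hypothetical example:
>>> lowerCmsHeaders({'CMS-Auth-Status': 'OK', 'Accept': 'json'})
{'cms-auth-status': 'OK', 'Accept': 'json'}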
"""
lheaders = {}
for hkey, hval in list(headers.items()): # perform lower-case
# lower header keys since we check lower-case in headers
if hkey.startswith('Cms-') or hkey.startswith('CMS-'):
lheaders[hkey.lower()] = hval
else:
lheaders[hkey] = hval
return lheaders
def makeList(stringList):
"""
_makeList_
Make a python list out of a comma separated list of strings,
throws a ValueError if the input is not well formed.
If the stringList is already of type list, then return it untouched.
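For example:
>>> makeList("a, b, c")
['a', 'b', 'c']
>>> makeList(["x"])
['x']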
"""
if isinstance(stringList, list):
return stringList
if isinstance(stringList, str):
toks = stringList.lstrip(' [').rstrip(' ]').split(',')
if toks == ['']:
return []
return [str(tok.strip(' \'"')) for tok in toks]
raise ValueError("Can't convert to list %s" % stringList)
def makeNonEmptyList(stringList):
"""
_makeNonEmptyList_
Given a string or a list of strings, return a non empty list of strings.
Throws an exception in case the final list is empty or input data is not
a string or a python list
"""
finalList = makeList(stringList)
if not finalList:
raise ValueError("Input data cannot be an empty list %s" % stringList)
return finalList
def strToBool(string):
"""
Try to convert different variations of True or False (including a string
type object) to a boolean value.
In short:
* True gets mapped from: True, "True", "true", "TRUE".
* False gets mapped from: False, "False", "false", "FALSE"
* anything else will fail
:param string: expects a boolean or a string, but it could be anything else
:return: a boolean value, or raise an exception if value passed in is not supported
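For example:
>>> strToBool("TRUE")
True
>>> strToBool(False)
False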
"""
if string is False or string is True:
return string
elif string in ["True", "true", "TRUE"]:
return True
elif string in ["False", "false", "FALSE"]:
return False
raise ValueError("Can't convert to bool: %s" % string)
def safeStr(string):
"""
_safeStr_
Cast simple data (int, float, basestring) to string.
"""
if not isinstance(string, (tuple, list, set, dict)):
return str(string)
raise ValueError("We're not supposed to convert %s to string." % string)
def diskUse():
"""
This returns the % use of each disk partition
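The result is a list of dicts, one per partition, e.g. (illustrative values):
[{'mounted': '/', 'percent': '35%'}, {'mounted': '/boot', 'percent': '12%'}]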
"""
diskPercent = []
df = subprocess.Popen(["df", "-klP"], stdout=subprocess.PIPE)
output = df.communicate()[0]
output = decodeBytesToUnicode(output).split("\n")
for x in output:
split = x.split()
if split != [] and split[0] != 'Filesystem':
diskPercent.append({'mounted': split[5], 'percent': split[4]})
return diskPercent
def numberCouchProcess():
"""
This returns the number of couch process
"""
ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
process = ps.communicate()[0]
process = decodeBytesToUnicode(process).count('couchjs')
return process
def rootUrlJoin(base, extend):
"""
Adds a path element to the path within a ROOT url
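For example:
>>> rootUrlJoin("root://host.cern.ch/store", "data")
'root://host.cern.ch/store/data'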
"""
if base:
match = re.match("^root://([^/]+)/(.+)", base)
if match:
host = match.group(1)
path = match.group(2)
newpath = os.path.join(path, extend)
newurl = "root://%s/%s" % (host, newpath)
return newurl
return None
def zipEncodeStr(message, maxLen=5120, compressLevel=9, steps=100, truncateIndicator=" (...)"):
"""
_zipEncodeStr_
Utility to zip a string and encode it.
If zipped encoded length is greater than maxLen,
truncate message until zip/encoded version
is within the limits allowed.
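A hypothetical usage (the result is zlib-compressed, base64-encoded bytes):
>>> isinstance(zipEncodeStr("some long log message"), bytes)
True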
"""
message = encodeUnicodeToBytes(message)
encodedStr = zlib.compress(message, compressLevel)
encodedStr = base64.b64encode(encodedStr)
if len(encodedStr) < maxLen or maxLen == -1:
return encodedStr
compressRate = 1. * len(encodedStr) / len(base64.b64encode(message))
# Estimate new length for message zip/encoded version
# to be less than maxLen.
# Also, append truncate indicator to message.
truncateIndicator = encodeUnicodeToBytes(truncateIndicator)
strLen = int((maxLen - len(truncateIndicator)) / compressRate)
message = message[:strLen] + truncateIndicator
encodedStr = zipEncodeStr(message, maxLen=-1)
# If new length is not short enough, truncate
# recursively by steps
while len(encodedStr) > maxLen:
message = message[:-steps - len(truncateIndicator)] + truncateIndicator
encodedStr = zipEncodeStr(message, maxLen=-1)
return encodedStr
def getSize(obj):
"""
_getSize_
Function to traverse an object and calculate its total size in bytes
:param obj: a python object
:return: an integer representing the total size of the object
Code extracted from Stack Overflow:
https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
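An illustrative usage (exact byte counts vary by platform and Python version):
>>> getSize([1, 2, 3]) > 0
True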
"""
# Custom objects know their class.
# Function objects seem to know way too much, including modules.
# Exclude modules as well.
BLACKLIST = type, ModuleType, FunctionType
if isinstance(obj, BLACKLIST):
raise TypeError('getSize() does not take argument of type: '+ str(type(obj)))
seen_ids = set()
size = 0
objects = [obj]
while objects:
need_referents = []
for obj in objects:
if not isinstance(obj, BLACKLIST) and id(obj) not in seen_ids:
seen_ids.add(id(obj))
size += sys.getsizeof(obj)
need_referents.append(obj)
objects = get_referents(*need_referents)
return size
def decodeBytesToUnicode(value, errors="strict"):
"""
Accepts an input "value" of generic type.
If "value" is a string of type sequence of bytes (i.e. in py2 `str` or
`future.types.newbytes.newbytes`, in py3 `bytes`), then it is converted to
a sequence of unicode codepoints.
This function is useful for cleaning input data when using the
"unicode sandwich" approach, which involves converting bytes (i.e. strings
of type sequence of bytes) to unicode (i.e. strings of type sequence of
unicode codepoints, in py2 `unicode` or `future.types.newstr.newstr`,
in py3 `str`) as soon as possible when receiving input data, and
converting unicode back to bytes as late as possible.
attention:
- converting unicode back to bytes is not covered by this function
- converting unicode back to bytes is not always necessary. when in doubt,
do not do it.
Reference: https://nedbatchelder.com/text/unipain.html
py2:
- "errors" can be: "strict", "ignore", "replace",
- ref: https://docs.python.org/2/howto/unicode.html#the-unicode-type
py3:
- "errors" can be: "strict", "ignore", "replace", "backslashreplace"
- ref: https://docs.python.org/3/howto/unicode.html#the-string-type
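A couple of hypothetical examples:
>>> decodeBytesToUnicode(b"hello")
'hello'
>>> decodeBytesToUnicode("already unicode")
'already unicode'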
"""
if isinstance(value, bytes):
return value.decode("utf-8", errors)
return value
def decodeBytesToUnicodeConditional(value, errors="ignore", condition=True):
"""
if *condition*, then call decodeBytesToUnicode(*value*, *errors*),
else return *value*
This may be useful when we want to conditionally apply decodeBytesToUnicode,
maintaining brevity.
Parameters
----------
value : any
passed to decodeBytesToUnicode
errors: str
passed to decodeBytesToUnicode
condition: boolean or object with attribute __bool__()
if True, then we run decodeBytesToUnicode. Usually PY2/PY3
"""
if condition:
return decodeBytesToUnicode(value, errors)
return value
def encodeUnicodeToBytes(value, errors="strict"):
"""
Accepts an input "value" of generic type.
If "value" is a string of type sequence of unicode (i.e. in py2 `unicode` or
`future.types.newstr.newstr`, in py3 `str`), then it is converted to
a sequence of bytes.
This function is useful for encoding output data when using the
"unicode sandwich" approach, which involves converting unicode (i.e. strings
of type sequence of unicode codepoints) to bytes (i.e. strings of type
sequence of bytes, in py2 `str` or `future.types.newbytes.newbytes`,
in py3 `bytes`) as late as possible when passing a string to a third-party
function that only accepts bytes as input (pycurl's curl.setopt is an
example).
py2:
- "errors" can be: "strict", "ignore", "replace", "xmlcharrefreplace"
- ref: https://docs.python.org/2/howto/unicode.html#the-unicode-type
py3:
- "errors" can be: "strict", "ignore", "replace", "backslashreplace",
"xmlcharrefreplace", "namereplace"
- ref: https://docs.python.org/3/howto/unicode.html#the-string-type
"""
if isinstance(value, str):
return value.encode("utf-8", errors)
return value
def encodeUnicodeToBytesConditional(value, errors="ignore", condition=True):
"""
if *condition*, then call encodeUnicodeToBytes(*value*, *errors*),
else return *value*
This may be useful when we want to conditionally apply encodeUnicodeToBytes,
maintaining brevity.
Parameters
----------
value : any
passed to encodeUnicodeToBytes
errors: str
passed to encodeUnicodeToBytes
condition: boolean or object with attribute __bool__()
if True, then we run encodeUnicodeToBytes. Usually PY2/PY3
"""
if condition:
return encodeUnicodeToBytes(value, errors)
return value
|
PypiClean
|
/ckanext-query-dois-4.0.0.tar.gz/ckanext-query-dois-4.0.0/ckanext/query_dois/lib/query.py
|
import copy
import hashlib
import json
import time
from collections import defaultdict
from ckan.plugins import toolkit
class DatastoreQuery(object):
"""
This models datastore queries passed to datastore_search, not the DOIs created from
them.
"""
@staticmethod
def _parse_from_query_dict(query_dict):
'''
Parse a dict of query string parameters which represents the data dict for the
datastore_search action in the URL format used by CKAN. The query_dict parameter is expected
to look something like this (for example):
{
"q": "banana",
"filters": "colour:yellow|length:200|colour:brown|type:tasty",
etc
}
If a version is present, either as the version parameter or as the __version__ filter, it
is extracted with preference given to the version parameter if both are provided.
:param query_dict: the query string dict
:return: the query dict (defaults to {} if nothing can be extracted from the query_dict) and
the requested version (defaults to None, if not provided in the query_dict)
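For the example above, this would (illustratively) yield
query = {'q': 'banana', 'filters': {'colour': ['yellow', 'brown'],
'length': ['200'], 'type': ['tasty']}} and requested_version = None.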
'''
query = {}
requested_version = None
for param, param_value in query_dict.items():
if param == 'version':
requested_version = int(param_value)
elif param == 'filters':
filters = defaultdict(list)
for filter_pair in param_value.split('|'):
filter_field, filter_value = filter_pair.split(':', 1)
filters[filter_field].append(filter_value)
if requested_version is None:
popped_version = filters.pop('__version__', None)
if popped_version:
requested_version = int(popped_version[0])
if filters:
query[param] = filters
else:
query[param] = param_value
return query, requested_version
@staticmethod
def _parse_from_data_dict(data_dict):
'''
Parse a dict of parameters which represents the data dict for the
datastore_search action, in the data dict form that it expects. The data_dict parameter is expected to
look something like this (for example):
{
"q": "banana",
"filters": {
"colour": ["yellow", "brown"],
"length": "200",
"type": ["tasty"],
}
etc
}
If a version is present, either as the version parameter or as the __version__ filter, it
is extracted with preference given to the version parameter if both are provided.
:param data_dict: the data dict
:return: the query dict (defaults to {} if nothing can be extracted from the data_dict) and
the requested version (defaults to None, if not provided in the query_dict)
'''
query = {}
requested_version = None
for param, param_value in data_dict.items():
if param == 'version':
requested_version = int(param_value)
elif param == 'filters':
filters = {}
for filter_field, filter_value in param_value.items():
if not isinstance(filter_value, list):
filter_value = [filter_value]
filters[filter_field] = filter_value
if requested_version is None:
popped_version = filters.pop('__version__', None)
if popped_version:
requested_version = int(popped_version[0])
if filters:
query[param] = filters
else:
query[param] = param_value
return query, requested_version
def __init__(self, query_dict=None, data_dict=None):
"""
Provide one of the two parameters depending on the format you have the query in.
:param query_dict: a dict of query string parameters in the CKAN URL format - i.e. the
filters are split with colons and pipes etc
:param data_dict: a dict of data dict parameters - i.e. the typical action data_dict format
"""
if query_dict is not None:
self.query, self.requested_version = self._parse_from_query_dict(query_dict)
elif data_dict is not None:
self.query, self.requested_version = self._parse_from_data_dict(data_dict)
else:
self.query = {}
self.requested_version = None
if self.requested_version is None:
# default the requested time to now
self.requested_version = int(time.time() * 1000)
self.query_hash = self._generate_query_hash()
def _generate_query_hash(self):
"""
Create a unique hash for this query. To do this we have to make sure that
details like the order of the filters are ignored, so that it is the meaning of the
query that we're capturing.
:return: a unique hash of the query
"""
query = {}
for key, value in self.query.items():
if key == 'filters':
filters = {}
for filter_field, filter_value in value.items():
# to ensure the order doesn't matter we have to convert everything to unicode
# and then sort it
filters[str(filter_field)] = sorted(map(str, filter_value))
query['filters'] = filters
else:
query[str(key)] = str(value)
# sort_keys=True is used otherwise the key ordering would change between python versions
# and the hash wouldn't match even if the query was the same
dumped_query = json.dumps(query, ensure_ascii=False, sort_keys=True).encode(
'utf8'
)
return hashlib.sha1(dumped_query).hexdigest()
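# A brief sketch of the property this hash gives us (hypothetical values): the
# order of filter values does not affect the digest, so these two queries share a hash.
#
#   a = DatastoreQuery(data_dict={'filters': {'colour': ['yellow', 'brown']}})
#   b = DatastoreQuery(data_dict={'filters': {'colour': ['brown', 'yellow']}})
#   # a.query_hash == b.query_hash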
def get_rounded_version(self, resource_id):
"""
Round the requested version of this query down to the nearest actual version of
the resource. See the versioned-search plugin for more details.
:param resource_id: the id of the resource being searched
:return: the rounded version or None if no versions are available for the given resource id
"""
# first retrieve the rounded version to use
data_dict = {'resource_id': resource_id, 'version': self.requested_version}
return toolkit.get_action('datastore_get_rounded_version')({}, data_dict)
def get_count(self, resource_id):
"""
Retrieve the number of records matched by this query, resource id and version
combination.
:param resource_id: the resource id
:return: an integer value
"""
data_dict = copy.deepcopy(self.query)
data_dict.update(
{
'resource_id': resource_id,
# use the version parameter because it's nicer than having to go in and modify the filters
'version': self.get_rounded_version(resource_id),
# we don't need the results, just the total
'limit': 0,
}
)
result = toolkit.get_action('datastore_search')({}, data_dict)
return result['total']
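# Hedged end-to-end sketch (assumes a running CKAN instance with the datastore
# and versioned-search plugins available; the resource id is hypothetical):
#
#   dq = DatastoreQuery(query_dict={'q': 'banana', 'version': '1562677200000'})
#   rounded = dq.get_rounded_version('some-resource-id')
#   total = dq.get_count('some-resource-id')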
|
PypiClean
|
/discordia-0.2.tar.gz/discordia-0.2/discord/embeds.py
|
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Final, Mapping, Protocol, TypeVar, Union
from . import utils
from .colour import Colour
__all__ = (
"Embed",
"EmbedField",
"EmbedAuthor",
"EmbedFooter",
)
class _EmptyEmbed:
def __bool__(self) -> bool:
return False
def __repr__(self) -> str:
return "Embed.Empty"
def __len__(self) -> int:
return 0
EmptyEmbed: Final = _EmptyEmbed()
class EmbedProxy:
def __init__(self, layer: dict[str, Any]):
self.__dict__.update(layer)
def __len__(self) -> int:
return len(self.__dict__)
def __repr__(self) -> str:
inner = ", ".join(
(f"{k}={v!r}" for k, v in self.__dict__.items() if not k.startswith("_"))
)
return f"{type(self).__name__}({inner})"
def __getattr__(self, attr: str) -> _EmptyEmbed:
return EmptyEmbed
E = TypeVar("E", bound="Embed")
if TYPE_CHECKING:
from discord.types.embed import Embed as EmbedData
from discord.types.embed import EmbedType
T = TypeVar("T")
MaybeEmpty = Union[T, _EmptyEmbed]
class _EmbedFooterProxy(Protocol):
text: MaybeEmpty[str]
icon_url: MaybeEmpty[str]
class _EmbedMediaProxy(Protocol):
url: MaybeEmpty[str]
proxy_url: MaybeEmpty[str]
height: MaybeEmpty[int]
width: MaybeEmpty[int]
class _EmbedVideoProxy(Protocol):
url: MaybeEmpty[str]
height: MaybeEmpty[int]
width: MaybeEmpty[int]
class _EmbedProviderProxy(Protocol):
name: MaybeEmpty[str]
url: MaybeEmpty[str]
class _EmbedAuthorProxy(Protocol):
name: MaybeEmpty[str]
url: MaybeEmpty[str]
icon_url: MaybeEmpty[str]
proxy_icon_url: MaybeEmpty[str]
class EmbedAuthor(EmbedProxy):
"""Represents the author on the :class:`Embed` object.
.. versionadded:: 2.5
Attributes
----------
name: :class:`str`
The name of the author.
url: :class:`str`
The URL of the hyperlink created in the author's name.
icon_url: :class:`str`
The URL of the author icon image.
"""
def __init__(
self,
name: str,
url: MaybeEmpty[str] = EmptyEmbed,
icon_url: MaybeEmpty[str] = EmptyEmbed,
proxy_icon_url: MaybeEmpty[str] = EmptyEmbed,
) -> None:
layer = {
k: v
for k, v in locals().items()
if k in {"name", "url", "icon_url", "proxy_icon_url"}
and v is not EmptyEmbed
}
super().__init__(layer)
class EmbedFooter(EmbedProxy):
"""Represents the footer on the :class:`Embed` object.
.. versionadded:: 2.5
Attributes
----------
text: :class:`str`
The text inside the footer.
icon_url: :class:`str`
The URL of the footer icon image.
"""
def __init__(
self,
text: str,
icon_url: MaybeEmpty[str] = EmptyEmbed,
proxy_icon_url: MaybeEmpty[str] = EmptyEmbed,
) -> None:
layer = {
k: v
for k, v in locals().items()
if k in {"text", "icon_url", "proxy_icon_url"} and v is not EmptyEmbed
}
super().__init__(layer)
class EmbedField:
"""Represents a field on the :class:`Embed` object.
.. versionadded:: 2.0
Attributes
----------
name: :class:`str`
The name of the field.
value: :class:`str`
The value of the field.
inline: :class:`bool`
Whether the field should be displayed inline.
"""
def __init__(self, name: str, value: str, inline: bool | None = False):
self.name = name
self.value = value
self.inline = inline
@classmethod
def from_dict(cls: type[E], data: Mapping[str, Any]) -> E:
"""Converts a :class:`dict` to a :class:`EmbedField` provided it is in the
format that Discord expects it to be in.
You can find out about this format in the `official Discord documentation`__.
.. _DiscordDocsEF: https://discord.com/developers/docs/resources/channel#embed-object-embed-field-structure
__ DiscordDocsEF_
Parameters
----------
data: :class:`dict`
The dictionary to convert into an EmbedField object.
"""
self: E = cls.__new__(cls)
self.name = data["name"]
self.value = data["value"]
self.inline = data.get("inline", False)
return self
def to_dict(self) -> dict[str, str | bool]:
"""Converts this EmbedField object into a dict.
Returns
-------
Dict[:class:`str`, Union[:class:`str`, :class:`bool`]]
A dictionary of :class:`str` embed field keys bound to the respective value.
"""
return {
"name": self.name,
"value": self.value,
"inline": self.inline,
}
class Embed:
"""Represents a Discord embed.
.. container:: operations
.. describe:: len(x)
Returns the total size of the embed.
Useful for checking if it's within the 6000 character limit.
.. describe:: bool(b)
Returns whether the embed has any data set.
.. versionadded:: 2.0
Certain properties return an ``EmbedProxy``, a type
that acts similar to a regular :class:`dict` except using dotted access,
e.g. ``embed.author.icon_url``. If the attribute
is invalid or empty, then a special sentinel value is returned,
:attr:`Embed.Empty`.
For ease of use, all parameters that expect a :class:`str` are implicitly
cast to :class:`str` for you.
Attributes
----------
title: :class:`str`
The title of the embed.
This can be set during initialisation.
Must be 256 characters or fewer.
type: :class:`str`
The type of embed. Usually "rich".
This can be set during initialisation.
Possible strings for embed types can be found on discord's
`api docs <https://discord.com/developers/docs/resources/channel#embed-object-embed-types>`_
description: :class:`str`
The description of the embed.
This can be set during initialisation.
Must be 4096 characters or fewer.
url: :class:`str`
The URL of the embed.
This can be set during initialisation.
timestamp: :class:`datetime.datetime`
The timestamp of the embed content. This is an aware datetime.
If a naive datetime is passed, it is converted to an aware
datetime with the local timezone.
colour: Union[:class:`Colour`, :class:`int`]
The colour code of the embed. Aliased to ``color`` as well.
This can be set during initialisation.
Empty
A special sentinel value used by ``EmbedProxy`` and this class
to denote that the value or attribute is empty.
"""
__slots__ = (
"title",
"url",
"type",
"_timestamp",
"_colour",
"_footer",
"_image",
"_thumbnail",
"_video",
"_provider",
"_author",
"_fields",
"description",
)
Empty: Final = EmptyEmbed
def __init__(
self,
*,
colour: int | Colour | _EmptyEmbed = EmptyEmbed,
color: int | Colour | _EmptyEmbed = EmptyEmbed,
title: MaybeEmpty[Any] = EmptyEmbed,
type: EmbedType = "rich",
url: MaybeEmpty[Any] = EmptyEmbed,
description: MaybeEmpty[Any] = EmptyEmbed,
timestamp: datetime.datetime | None = None,
fields: list[EmbedField] | None = None,
author: MaybeEmpty[EmbedAuthor] = EmptyEmbed,
footer: MaybeEmpty[EmbedFooter] = EmptyEmbed,
image: MaybeEmpty[str] = EmptyEmbed,
thumbnail: MaybeEmpty[str] = EmptyEmbed,
):
self.colour = colour if colour is not EmptyEmbed else color
self.title = title
self.type = type
self.url = url
self.description = description
if self.title is not EmptyEmbed and self.title is not None:
self.title = str(self.title)
if self.description is not EmptyEmbed and self.description is not None:
self.description = str(self.description)
if self.url is not EmptyEmbed and self.url is not None:
self.url = str(self.url)
if timestamp:
self.timestamp = timestamp
self._fields: list[EmbedField] = fields or []
if author is not EmptyEmbed:
self.set_author(**author.__dict__)
if footer is not EmptyEmbed:
self.set_footer(**footer.__dict__)
if image is not EmptyEmbed:
self.set_image(url=image)
if thumbnail is not EmptyEmbed:
self.set_thumbnail(url=thumbnail)
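# Illustrative sketch (not part of the upstream module, values are hypothetical):
# building an embed with the keyword-only constructor and the fluent setters below.
#
#   embed = Embed(title="Server status", colour=0x2ECC71,
#                 description="All systems operational")
#   embed.add_field(name="Uptime", value="42 days", inline=True)
#   embed.set_footer(text="Checked just now")
#   len(embed)  # counts the title, description, field names/values and footer text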
@classmethod
def from_dict(cls: type[E], data: Mapping[str, Any]) -> E:
"""Converts a :class:`dict` to a :class:`Embed` provided it is in the
format that Discord expects it to be in.
You can find out about this format in the `official Discord documentation`__.
.. _DiscordDocs: https://discord.com/developers/docs/resources/channel#embed-object
__ DiscordDocs_
Parameters
----------
data: :class:`dict`
The dictionary to convert into an embed.
Returns
-------
:class:`Embed`
The converted embed object.
"""
# we are bypassing __init__ here since it doesn't apply here
self: E = cls.__new__(cls)
# fill in the basic fields
self.title = data.get("title", EmptyEmbed)
self.type = data.get("type", EmptyEmbed)
self.description = data.get("description", EmptyEmbed)
self.url = data.get("url", EmptyEmbed)
if self.title is not EmptyEmbed:
self.title = str(self.title)
if self.description is not EmptyEmbed:
self.description = str(self.description)
if self.url is not EmptyEmbed:
self.url = str(self.url)
# try to fill in the more rich fields
try:
self._colour = Colour(value=data["color"])
except KeyError:
pass
try:
self._timestamp = utils.parse_time(data["timestamp"])
except KeyError:
pass
for attr in (
"thumbnail",
"video",
"provider",
"author",
"fields",
"image",
"footer",
):
if attr == "fields":
value = data.get(attr, [])
self._fields = [EmbedField.from_dict(d) for d in value] if value else []
else:
try:
value = data[attr]
except KeyError:
continue
else:
setattr(self, f"_{attr}", value)
return self
def copy(self: E) -> E:
"""Creates a shallow copy of the :class:`Embed` object.
Returns
-------
:class:`Embed`
The copied embed object.
"""
return self.__class__.from_dict(self.to_dict())
def __len__(self) -> int:
total = len(self.title) + len(self.description)
for field in getattr(self, "_fields", []):
total += len(field.name) + len(field.value)
try:
footer_text = self._footer["text"]
except (AttributeError, KeyError):
pass
else:
total += len(footer_text)
try:
author = self._author
except AttributeError:
pass
else:
total += len(author["name"])
return total
def __bool__(self) -> bool:
return any(
(
self.title,
self.url,
self.description,
self.colour,
self.fields,
self.timestamp,
self.author,
self.thumbnail,
self.footer,
self.image,
self.provider,
self.video,
)
)
@property
def colour(self) -> MaybeEmpty[Colour]:
return getattr(self, "_colour", EmptyEmbed)
@colour.setter
def colour(self, value: int | Colour | _EmptyEmbed): # type: ignore
if isinstance(value, (Colour, _EmptyEmbed)):
self._colour = value
elif isinstance(value, int):
self._colour = Colour(value=value)
else:
raise TypeError(
"Expected discord.Colour, int, or Embed.Empty but received"
f" {value.__class__.__name__} instead."
)
color = colour
@property
def timestamp(self) -> MaybeEmpty[datetime.datetime]:
return getattr(self, "_timestamp", EmptyEmbed)
@timestamp.setter
def timestamp(self, value: MaybeEmpty[datetime.datetime]):
if isinstance(value, datetime.datetime):
if value.tzinfo is None:
value = value.astimezone()
self._timestamp = value
elif isinstance(value, _EmptyEmbed):
self._timestamp = value
else:
raise TypeError(
"Expected datetime.datetime or Embed.Empty received"
f" {value.__class__.__name__} instead"
)
@property
def footer(self) -> EmbedFooter:
"""Returns an ``EmbedProxy`` denoting the footer contents.
See :meth:`set_footer` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedFooter(**getattr(self, "_footer", {}))
def set_footer(
self: E,
*,
text: MaybeEmpty[Any] = EmptyEmbed,
icon_url: MaybeEmpty[Any] = EmptyEmbed,
) -> E:
"""Sets the footer for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
----------
text: :class:`str`
The footer text.
Must be 2048 characters or fewer.
icon_url: :class:`str`
The URL of the footer icon. Only HTTP(S) is supported.
"""
self._footer = {}
if text is not EmptyEmbed:
self._footer["text"] = str(text)
if icon_url is not EmptyEmbed:
self._footer["icon_url"] = str(icon_url)
return self
def remove_footer(self: E) -> E:
"""Clears embed's footer information.
This function returns the class instance to allow for fluent-style
chaining.
.. versionadded:: 2.0
"""
try:
del self._footer
except AttributeError:
pass
return self
@property
def image(self) -> _EmbedMediaProxy:
"""Returns an ``EmbedProxy`` denoting the image contents.
Possible attributes you can access are:
- ``url``
- ``proxy_url``
- ``width``
- ``height``
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, "_image", {})) # type: ignore
def set_image(self: E, *, url: MaybeEmpty[Any]) -> E:
"""Sets the image for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
.. versionchanged:: 1.4
Passing :attr:`Empty` removes the image.
Parameters
----------
url: :class:`str`
The source URL for the image. Only HTTP(S) is supported.
"""
if url is EmptyEmbed:
try:
del self._image
except AttributeError:
pass
else:
self._image = {
"url": str(url),
}
return self
def remove_image(self: E) -> E:
"""Removes the embed's image.
This function returns the class instance to allow for fluent-style
chaining.
.. versionadded:: 2.0
"""
try:
del self._image
except AttributeError:
pass
return self
@property
def thumbnail(self) -> _EmbedMediaProxy:
"""Returns an ``EmbedProxy`` denoting the thumbnail contents.
Possible attributes you can access are:
- ``url``
- ``proxy_url``
- ``width``
- ``height``
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, "_thumbnail", {})) # type: ignore
def set_thumbnail(self: E, *, url: MaybeEmpty[Any]) -> E:
"""Sets the thumbnail for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
.. versionchanged:: 1.4
Passing :attr:`Empty` removes the thumbnail.
Parameters
----------
url: :class:`str`
The source URL for the thumbnail. Only HTTP(S) is supported.
"""
if url is EmptyEmbed:
try:
del self._thumbnail
except AttributeError:
pass
else:
self._thumbnail = {
"url": str(url),
}
return self
def remove_thumbnail(self: E) -> E:
"""Removes the embed's thumbnail.
This function returns the class instance to allow for fluent-style
chaining.
.. versionadded:: 2.0
"""
try:
del self._thumbnail
except AttributeError:
pass
return self
@property
def video(self) -> _EmbedVideoProxy:
"""Returns an ``EmbedProxy`` denoting the video contents.
Possible attributes include:
- ``url`` for the video URL.
- ``height`` for the video height.
- ``width`` for the video width.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, "_video", {})) # type: ignore
@property
def provider(self) -> _EmbedProviderProxy:
"""Returns an ``EmbedProxy`` denoting the provider contents.
The only attributes that might be accessed are ``name`` and ``url``.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, "_provider", {})) # type: ignore
@property
def author(self) -> EmbedAuthor:
"""Returns an ``EmbedProxy`` denoting the author contents.
See :meth:`set_author` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedAuthor(**getattr(self, "_author", {})) # type: ignore
def set_author(
self: E,
*,
name: Any,
url: MaybeEmpty[Any] = EmptyEmbed,
icon_url: MaybeEmpty[Any] = EmptyEmbed,
) -> E:
"""Sets the author for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
----------
name: :class:`str`
The name of the author.
Must be 256 characters or fewer.
url: :class:`str`
The URL for the author.
icon_url: :class:`str`
The URL of the author icon. Only HTTP(S) is supported.
"""
self._author = {
"name": str(name),
}
if url is not EmptyEmbed:
self._author["url"] = str(url)
if icon_url is not EmptyEmbed:
self._author["icon_url"] = str(icon_url)
return self
def remove_author(self: E) -> E:
"""Clears embed's author information.
This function returns the class instance to allow for fluent-style
chaining.
.. versionadded:: 1.4
"""
try:
del self._author
except AttributeError:
pass
return self
@property
def fields(self) -> list[EmbedField]:
"""Returns a :class:`list` of :class:`EmbedField` objects denoting the field contents.
See :meth:`add_field` for possible values you can access.
If the attribute has no value then ``None`` is returned.
"""
return self._fields
@fields.setter
def fields(self, value: list[EmbedField]) -> None:
"""Sets the fields for the embed. This overwrites any existing fields.
Parameters
----------
value: List[:class:`EmbedField`]
The list of :class:`EmbedField` objects to include in the embed.
"""
if not all(isinstance(x, EmbedField) for x in value):
raise TypeError("Expected a list of EmbedField objects.")
self._fields = value
def append_field(self, field: EmbedField) -> None:
"""Appends an :class:`EmbedField` object to the embed.
.. versionadded:: 2.0
Parameters
----------
field: :class:`EmbedField`
The field to add.
"""
if not isinstance(field, EmbedField):
raise TypeError("Expected an EmbedField object.")
self._fields.append(field)
def add_field(self: E, *, name: str, value: str, inline: bool = True) -> E:
"""Adds a field to the embed object.
This function returns the class instance to allow for fluent-style
chaining. There must be 25 fields or fewer.
Parameters
----------
name: :class:`str`
The name of the field.
Must be 256 characters or fewer.
value: :class:`str`
The value of the field.
Must be 1024 characters or fewer.
inline: :class:`bool`
Whether the field should be displayed inline.
"""
self._fields.append(EmbedField(name=str(name), value=str(value), inline=inline))
return self
def insert_field_at(
self: E, index: int, *, name: Any, value: Any, inline: bool = True
) -> E:
"""Inserts a field before a specified index to the embed.
This function returns the class instance to allow for fluent-style
chaining. There must be 25 fields or fewer.
.. versionadded:: 1.2
Parameters
----------
index: :class:`int`
The index of where to insert the field.
name: :class:`str`
The name of the field.
Must be 256 characters or fewer.
value: :class:`str`
The value of the field.
Must be 1024 characters or fewer.
inline: :class:`bool`
Whether the field should be displayed inline.
"""
field = EmbedField(name=str(name), value=str(value), inline=inline)
self._fields.insert(index, field)
return self
def clear_fields(self) -> None:
"""Removes all fields from this embed."""
self._fields.clear()
def remove_field(self, index: int) -> None:
"""Removes a field at a specified index.
If the index is invalid or out of bounds then the error is
silently swallowed.
.. note::
When deleting a field by index, the index of the other fields
shift to fill the gap just like a regular list.
Parameters
----------
index: :class:`int`
The index of the field to remove.
"""
try:
del self._fields[index]
except IndexError:
pass
def set_field_at(
self: E, index: int, *, name: Any, value: Any, inline: bool = True
) -> E:
"""Modifies a field to the embed object.
The index must point to a valid pre-existing field. There must be 25 fields or fewer.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
----------
index: :class:`int`
The index of the field to modify.
name: :class:`str`
The name of the field.
Must be 256 characters or fewer.
value: :class:`str`
The value of the field.
Must be 1024 characters or fewer.
inline: :class:`bool`
Whether the field should be displayed inline.
Raises
------
IndexError
An invalid index was provided.
"""
try:
field = self._fields[index]
except (TypeError, IndexError):
raise IndexError("field index out of range")
field.name = str(name)
field.value = str(value)
field.inline = inline
return self
def to_dict(self) -> EmbedData:
"""Converts this embed object into a dict.
Returns
-------
Dict[:class:`str`, Union[:class:`str`, :class:`int`, :class:`bool`]]
A dictionary of :class:`str` embed keys bound to the respective value.
"""
# add in the raw data into the dict
result = {
key[1:]: getattr(self, key)
for key in self.__slots__
if key != "_fields" and key[0] == "_" and hasattr(self, key)
}
# add in the fields
result["fields"] = [field.to_dict() for field in self._fields]
# deal with basic convenience wrappers
try:
colour = result.pop("colour")
except KeyError:
pass
else:
if colour:
result["color"] = colour.value
try:
timestamp = result.pop("timestamp")
except KeyError:
pass
else:
if timestamp:
if timestamp.tzinfo:
result["timestamp"] = timestamp.astimezone(
tz=datetime.timezone.utc
).isoformat()
else:
result["timestamp"] = timestamp.replace(
tzinfo=datetime.timezone.utc
).isoformat()
# add in the non-raw attribute ones
if self.type:
result["type"] = self.type
if self.description:
result["description"] = self.description
if self.url:
result["url"] = self.url
if self.title:
result["title"] = self.title
return result # type: ignore
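# Hedged usage sketch (not part of the upstream module): to_dict/from_dict are
# intended to round-trip, which is also how copy() is implemented above.
#
#   embed = Embed(title="Changelog", description="Bug fixes", colour=0x5865F2)
#   payload = embed.to_dict()
#   clone = Embed.from_dict(payload)
#   # clone.title == "Changelog" and clone.to_dict() == payload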
|
PypiClean
|
/nflgame3-0.1-py3-none-any.whl/nflgame/statmap.py
|
def values(category_id, yards):
"""
Returns a dictionary of field names to statistical values for a
particular category id defined in idmap.
"""
assert category_id in idmap, \
'Category identifier %d is not known.' % category_id
info = idmap[category_id]
try:
yards = int(yards)
except ValueError:
yards = 0
except TypeError:
# Catch errors if yards is a NoneType
yards = 0
vals = {}
if info['yds']:
vals[info['yds']] = yards
for f in info['fields']:
vals[f] = info.get('value', 1)
return vals
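# Illustrative sketch (hypothetical call): statistic id 10 is "Rushing yards",
# so the helper credits one rushing attempt plus the yardage from the play.
#
#   values(10, '15')
#   # -> {'rushing_att': 1, 'rushing_yds': 15}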
categories = ("passing", "rushing", "receiving",
"fumbles", "kicking", "punting", "kickret", "puntret",
"defense", "penalty")
"""
categories is a list of all statistical categories reported by NFL's
GameCenter.
"""
idmap = {
2: {
'cat': 'punting',
'fields': ['punting_blk'],
'yds': '',
'desc': 'Punt blocked (offense)',
'long': 'Punt was blocked. A blocked punt is a punt that is touched '
'behind the line of scrimmage, and is recovered, or goes '
'out of bounds, behind the line of scrimmage. If the '
'impetus of the punt takes it beyond the line of scrimmage, '
'it is not a blocked punt.',
},
3: {
'cat': 'team',
'fields': ['first_down', 'rushing_first_down'],
'yds': '',
'desc': '1st down (rushing)',
'long': 'A first down or TD occurred due to a rush.',
},
4: {
'cat': 'team',
'fields': ['first_down', 'passing_first_down'],
'yds': '',
'desc': '1st down (passing)',
'long': 'A first down or TD occurred due to a pass.',
},
5: {
'cat': 'team',
'fields': ['first_down', 'penalty_first_down'],
'yds': '',
'desc': '1st down (penalty)',
'long': 'A first down or TD occurred due to a penalty. A play can '
'have a first down from a pass or rush and from a penalty.',
},
6: {
'cat': 'team',
'fields': ['third_down_att', 'third_down_conv'],
'yds': '',
'desc': '3rd down attempt converted',
'long': '3rd down play resulted in a first down or touchdown.',
},
7: {
'cat': 'team',
'fields': ['third_down_att', 'third_down_failed'],
'yds': '',
'desc': '3rd down attempt failed',
'long': '3rd down play did not result in a first down or touchdown.',
},
8: {
'cat': 'team',
'fields': ['fourth_down_att', 'fourth_down_conv'],
'yds': '',
'desc': '4th down attempt converted',
'long': '4th down play resulted in a first down or touchdown.',
},
9: {
'cat': 'team',
'fields': ['fourth_down_att', 'fourth_down_failed'],
'yds': '',
'desc': '4th down attempt failed',
'long': '4th down play did not result in a first down or touchdown.',
},
10: {
'cat': 'rushing',
'fields': ['rushing_att'],
'yds': 'rushing_yds',
'desc': 'Rushing yards',
'long': 'Rushing yards and credit for a rushing attempt.',
},
11: {
'cat': 'rushing',
'fields': ['rushing_att', 'rushing_tds'],
'yds': 'rushing_yds',
'desc': 'Rushing yards, TD',
'long': 'Rushing yards and credit for a rushing attempt where the '
'result of the play was a touchdown.',
},
12: {
'cat': 'rushing',
'fields': [],
'yds': 'rushing_yds',
'desc': 'Rushing yards, No rush',
'long': 'Rushing yards with no rushing attempt. This will occur when '
'the initial runner laterals to a second runner, and the '
'second runner possesses the lateral beyond the line of '
'scrimmage. Both players get rushing yards, but only the '
'first player gets a rushing attempt.',
},
13: {
'cat': 'rushing',
'fields': ['rushing_tds'],
'yds': 'rushing_yds',
'desc': 'Rushing yards, TD, No rush',
'long': 'Rushing yards and no rushing attempt, where the result of '
'the play was a touchdown. (See id 12.)',
},
14: {
'cat': 'passing',
'fields': ['passing_att', 'passing_incmp'],
'yds': '',
'desc': 'Pass incomplete',
'long': 'Pass attempt, incomplete.',
},
15: {
'cat': 'passing',
'fields': ['passing_att', 'passing_cmp'],
'yds': 'passing_yds',
'desc': 'Passing yards',
'long': 'Passing yards and a pass attempt completed.',
},
16: {
'cat': 'passing',
'fields': ['passing_att', 'passing_cmp', 'passing_tds'],
'yds': 'passing_yds',
'desc': 'Passing yards, TD',
'long': 'Passing yards and a pass attempt completed that resulted in '
'a touchdown.',
},
# 17: Passing Yards, No Pass
# In SuperStat, this code was used when the initial pass receiver lateraled
# to a teammate. It was later combined with the "Passing Yards" code to
# determine the passer's (quarterback's) total passing yardage on the play.
# This stat is not in use at this time.
# 18: Passing Yards, TD, No pass
# Passing yards, no pass attempt, with a result of touchdown. This stat
# is not in use at this time.
19: {
'cat': 'passing',
'fields': ['passing_att', 'passing_incmp', 'passing_int'],
'yds': '',
'desc': 'Interception (by passer)',
'long': 'Pass attempt that resulted in an interception.',
},
20: {
'cat': 'passing',
'fields': ['passing_sk'],
'yds': 'passing_sk_yds',
'desc': 'Sack yards (offense)',
'long': 'Number of yards lost on a pass play that resulted in a sack.',
},
21: {
'cat': 'receiving',
'fields': ['receiving_rec'],
'yds': 'receiving_yds',
'desc': 'Pass reception yards',
'long': 'Pass reception and yards.',
},
22: {
'cat': 'receiving',
'fields': ['receiving_rec', 'receiving_tds'],
'yds': 'receiving_yds',
'desc': 'Pass reception yards, TD',
'long': 'Same as previous (21), except when the play results in a '
'touchdown.',
},
23: {
'cat': 'receiving',
'fields': [],
'yds': 'receiving_yds',
'desc': 'Pass reception yards, No reception',
'long': 'Pass reception yards, no pass reception. This will occur '
'when the pass receiver laterals to a teammate. The teammate '
'gets pass reception yards, but no credit for a pass '
'reception.',
},
24: {
'cat': 'receiving',
'fields': ['receiving_tds'],
'yds': 'receiving_yds',
'desc': 'Pass reception yards, TD, No reception',
'long': 'Same as previous (23), except when the play results in a '
'touchdown.',
},
25: {
'cat': 'defense',
'fields': ['defense_int'],
'yds': 'defense_int_yds',
'desc': 'Interception yards',
'long': 'Interception and return yards.',
},
26: {
'cat': 'defense',
'fields': ['defense_int', 'defense_tds', 'defense_int_tds'],
'yds': 'defense_int_yds',
'desc': 'Interception yards, TD',
'long': 'Same as previous (25), except when the play results in a '
'touchdown.',
},
27: {
'cat': 'defense',
'fields': [],
'yds': 'defense_int_yds',
'also': [],
'desc': 'Interception yards, No interception',
'long': 'Interception yards, with no credit for an interception. This '
'will occur when the player who intercepted the pass laterals '
'to a teammate. The teammate gets interception return yards, '
'but no credit for a pass interception.',
},
28: {
'cat': 'defense',
'fields': ['defense_tds', 'defense_int_tds'],
'yds': 'defense_int_yds',
'also': [],
'desc': 'Interception yards, TD, No interception',
'long': 'Same as previous (27), except when the play results in a '
'touchdown.',
},
29: {
'cat': 'punting',
'fields': ['punting_tot'],
'yds': 'punting_yds',
'desc': 'Punting yards',
'long': 'Punt and length of the punt. This stat is not used if '
'the punt results in a touchback; or the punt is received '
'in the endzone and run out; or the punt is blocked. This '
'stat is used exclusively of the PU_EZ, PU_TB and PU_BK '
'stats.',
},
30: {
'cat': 'punting',
'fields': ['punting_i20'],
'yds': '',
'desc': 'Punt inside 20',
'long': 'This stat is recorded when the punt return ended inside the '
'opponent\'s 20 yard line. This is not counted as a punt or '
'towards punting yards. This stat is used solely to calculate '
'"inside 20" stats. This stat is used in addition to either a '
'PU or PU_EZ stat.',
},
31: {
'cat': 'punting',
'fields': ['punting_tot'],
'yds': 'punting_yds',
'desc': 'Punt into endzone',
'long': 'SuperStat records this stat when the punt is received in '
'the endzone, and then run out of the endzone. If the play '
'ends in the endzone for a touchback, the stat is not '
'recorded. This stat is used exclusively of the PU, PU_TB and '
'PU_BK stats.',
},
32: {
'cat': 'punting',
'fields': ['punting_tot', 'punting_touchback'],
'yds': 'punting_yds',
'desc': 'Punt with touchback',
'long': 'Punt and length of the punt when the play results in a '
'touchback. This stat is used exclusively of the PU, PU_EZ '
'and PU_BK stats.',
},
33: {
'cat': 'puntret',
'fields': ['puntret_tot'],
'yds': 'puntret_yds',
'desc': 'Punt return yards',
'long': 'Punt return and yards.',
},
34: {
'cat': 'puntret',
'fields': ['puntret_tot', 'puntret_tds'],
'yds': 'puntret_yds',
'desc': 'Punt return yards, TD',
'long': 'Same as previous (33), except when the play results in a '
'touchdown.',
},
35: {
'cat': 'puntret',
'fields': [],
'yds': 'puntret_yds',
'desc': 'Punt return yards, No return',
'long': 'Punt return yards with no credit for a punt return. This '
'will occur when the player who received the punt laterals '
'to a teammate. The teammate gets punt return yards, but no '
'credit for a return.',
},
36: {
'cat': 'puntret',
'fields': ['puntret_tds'],
'yds': 'puntret_yds',
'desc': 'Punt return yards, TD, No return',
'long': 'Same as previous (35), except when the play results in a '
'touchdown.',
},
37: {
'cat': 'team',
'fields': ['puntret_oob'],
'yds': '',
'desc': 'Punt out of bounds',
'long': 'Punt went out of bounds, no return on the play.',
},
38: {
'cat': 'team',
'fields': ['puntret_downed'],
'yds': '',
'also': [],
'value': 1,
'desc': 'Punt downed (no return)',
'long': 'Punt was downed by kicking team, no return on the play. '
'The player column this stat will always be NULL.',
},
39: {
'cat': 'puntret',
'fields': ['puntret_fair'],
'yds': '',
'desc': 'Punt - fair catch',
'long': 'Punt resulted in a fair catch.',
},
40: {
'cat': 'team',
'fields': ['puntret_touchback'],
'yds': '',
'desc': 'Punt - touchback (no return)',
'long': 'Punt resulted in a touchback. This is the receiving team\'s '
'version of code 1504/28 (32) above. Both are needed for stat '
'calculations, especially in season cumulative analysis.',
},
41: {
'cat': 'kicking',
'fields': ['kicking_tot'],
'yds': 'kicking_yds',
'desc': 'Kickoff yards',
'long': 'Kickoff and length of kick.',
},
42: {
'cat': 'kicking',
'fields': ['kicking_i20'],
'yds': '',
'desc': 'Kickoff inside 20',
'long': 'Kickoff and length of kick, where return ended inside '
'opponent\'s 20 yard line. This is not counted as a kick or '
'towards kicking yards. This code is used solely to calculate '
'"inside 20" stats. used in addition to a 1701 code.',
},
43: {
'cat': 'kicking',
'fields': ['kicking_tot'],
'yds': 'kicking_yds',
'desc': 'Kickoff into endzone',
'long': 'SuperStat records this stat when the kickoff is received '
'in the endzone, and then run out of the endzone. If the play '
'ends in the endzone for a touchback, the stat is not '
'recorded. Compare to "Punt into endzone."',
},
44: {
'cat': 'kicking',
'fields': ['kicking_tot', 'kicking_touchback'],
'yds': 'kicking_yds',
'desc': 'Kickoff with touchback',
'long': 'Kickoff resulted in a touchback.',
},
45: {
'cat': 'kickret',
'fields': ['kickret_ret'],
'yds': 'kickret_yds',
'desc': 'Kickoff return yards',
'long': 'Kickoff return and yards.',
},
46: {
'cat': 'kickret',
'fields': ['kickret_ret', 'kickret_tds'],
'yds': 'kickret_yds',
'desc': 'Kickoff return yards, TD',
'long': 'Same as previous (45), except when the play results in a '
'touchdown.',
},
47: {
'cat': 'kickret',
'fields': [],
'yds': 'kickret_yds',
'desc': 'Kickoff return yards, No return',
'long': 'Kickoff yards with no return. This will occur when the '
'player who is credited with the return laterals to a '
'teammate. The teammate gets kickoff return yards, but no '
'credit for a kickoff return.',
},
48: {
'cat': 'kickret',
'fields': ['kickret_tds'],
'yds': 'kickret_yds',
'desc': 'Kickoff return yards, TD, No return',
'long': 'Same as previous (47), except when the play results in a '
'touchdown.',
},
49: {
'cat': 'team',
'fields': ['kickret_oob'],
'yds': '',
'desc': 'Kickoff out of bounds',
'long': 'Kicked ball went out of bounds.',
},
50: {
'cat': 'kickret',
'fields': ['kickret_fair'],
'yds': '',
'desc': 'Kickoff - fair catch',
'long': 'Kick resulted in a fair catch (no return).',
},
51: {
'cat': 'team',
'fields': ['kickret_touchback'],
'yds': '',
'desc': 'Kickoff - touchback',
'long': 'Kick resulted in a touchback. A touchback implies that '
'there is no return.',
},
52: {
'cat': 'fumbles',
'fields': ['fumbles_tot', 'fumbles_forced'],
'yds': '',
'desc': 'Fumble - forced',
'long': 'Player fumbled the ball, fumble was forced by another '
'player.',
},
53: {
'cat': 'fumbles',
'fields': ['fumbles_tot', 'fumbles_notforced'],
'yds': '',
'desc': 'Fumble - not forced',
'long': 'Player fumbled the ball, fumble was not forced by another '
'player.',
},
54: {
'cat': 'fumbles',
'fields': ['fumbles_oob'],
'yds': '',
'desc': 'Fumble - out of bounds',
'long': 'Player fumbled the ball, and the ball went out of bounds.',
},
55: {
'cat': 'fumbles',
'fields': ['fumbles_rec'],
'yds': 'fumbles_rec_yds',
'desc': 'Own recovery yards',
'long': 'Yardage gained/lost by a player after he recovered a fumble '
'by his own team.',
},
56: {
'cat': 'fumbles',
'fields': ['fumbles_rec', 'fumbles_rec_tds'],
'yds': 'fumbles_rec_yds',
'desc': 'Own recovery yards, TD',
'long': 'Same as previous (55), except when the play results in a '
'touchdown.',
},
57: {
'cat': 'fumbles',
'fields': [],
'yds': 'fumbles_rec_yds',
'desc': 'Own recovery yards, No recovery',
'long': 'If a player recovered a fumble by his own team, then '
'lateraled to a teammate, the yardage gained/lost by teammate '
'would be recorded with this stat.',
},
58: {
'cat': 'fumbles',
'fields': ['fumbles_rec_tds'],
'yds': 'fumbles_rec_yds',
'desc': 'Own recovery yards, TD, No recovery',
'long': 'Same as previous (57), except when the play results in a '
'touchdown.',
},
59: {
'cat': 'defense',
'fields': ['defense_frec'],
'yds': 'defense_frec_yds',
'desc': 'Opponent recovery yards',
'long': 'Yardage gained/lost by a player after he recovered a fumble '
'by the opposing team.',
},
60: {
'cat': 'defense',
'fields': ['defense_frec', 'defense_tds', 'defense_frec_tds'],
'yds': 'defense_frec_yds',
'desc': 'Opponent recovery yards, TD',
'long': 'Same as previous (59), except when the play results in a '
'touchdown.',
},
61: {
'cat': 'defense',
'fields': [],
'yds': 'defense_frec_yds',
'desc': 'Opponent recovery yards, No recovery',
'long': 'If a player recovered a fumble by the opposing team, then '
'lateraled to a teammate, the yardage gained/lost by the '
'teammate would be recorded with this stat.',
},
62: {
'cat': 'defense',
'fields': ['defense_tds', 'defense_frec_tds'],
'yds': 'defense_frec_yds',
'desc': 'Opponent recovery yards, TD, No recovery',
'long': 'Same as previous, except when the play results in a '
'touchdown.',
},
63: {
'cat': 'defense',
'fields': [],
'yds': 'defense_misc_yds',
'desc': 'Miscellaneous yards',
'long': 'This is sort of a catch-all for yardage that doesn\'t '
'fall into any other category. According to Elias, it does '
'not include loose ball yardage. Examples are yardage on '
'missed field goal, blocked punt. This stat is not used '
'to "balance the books."',
},
64: {
'cat': 'defense',
'fields': ['defense_tds', 'defense_misc_tds'],
'yds': 'defense_misc_yds',
'desc': 'Miscellaneous yards, TD',
'long': 'Same as previous (63), except when the play results in a '
'touchdown.',
},
68: {
'cat': 'team',
'fields': ['timeout'],
'yds': '',
'desc': 'Timeout',
'long': 'Team took a time out.',
},
69: {
'cat': 'kicking',
'fields': ['kicking_fga', 'kicking_fgmissed'],
'yds': 'kicking_fgmissed_yds',
'desc': 'Field goal missed yards',
'long': 'The length of a missed field goal.',
},
70: {
'cat': 'kicking',
'fields': ['kicking_fga', 'kicking_fgm'],
'yds': 'kicking_fgm_yds',
'desc': 'Field goal yards',
'long': 'The length of a successful field goal.',
},
71: {
'cat': 'kicking',
'fields': ['kicking_fga', 'kicking_fgmissed', 'kicking_fgb'],
'yds': 'kicking_fgmissed_yds',
'desc': 'Field goal blocked (offense)',
'long': 'The length of an attempted field goal that was blocked. '
'Unlike a punt, a field goal is statistically blocked even '
'if the ball does go beyond the line of scrimmage.',
},
72: {
'cat': 'kicking',
'fields': ['kicking_xpa', 'kicking_xpmade'],
'yds': '',
'desc': 'Extra point - good',
'long': 'Extra point good. SuperStat uses one code for both '
'successful and unsuccessful extra points. I think it might '
'be better to use 2 codes.',
},
73: {
'cat': 'kicking',
'fields': ['kicking_xpa', 'kicking_xpmissed'],
'yds': '',
'desc': 'Extra point - failed',
'long': 'Extra point failed.',
},
74: {
'cat': 'kicking',
'fields': ['kicking_xpa', 'kicking_xpmissed', 'kicking_xpb'],
'yds': '',
'desc': 'Extra point - blocked',
'long': 'Extra point blocked. Exclusive of the extra point failed '
'stat.'
},
75: {
'cat': 'rushing',
'fields': ['rushing_twopta', 'rushing_twoptm'],
'yds': '',
'desc': '2 point rush - good',
'long': 'Extra points by run good (old version has 0/1 in yards '
'for failed/good).',
},
76: {
'cat': 'rushing',
'fields': ['rushing_twopta', 'rushing_twoptmissed'],
'yds': '',
'desc': '2 point rush - failed',
'long': '',
},
77: {
'cat': 'passing',
'fields': ['passing_twopta', 'passing_twoptm'],
'yds': '',
'desc': '2 point pass - good',
'long': 'Extra points by pass good (old version has 0/1 in yards '
'for failed/good).',
},
78: {
'cat': 'passing',
'fields': ['passing_twopta', 'passing_twoptmissed'],
'yds': '',
'desc': '2 point pass - failed',
'long': 'Extra point by pass failed.',
},
79: {
'cat': 'defense',
'fields': ['defense_tkl'],
'yds': '',
'desc': 'Solo tackle',
'long': 'Tackle with no assists. Note: There are no official '
'defensive statistics except for sacks.',
},
80: {
'cat': 'defense',
'fields': ['defense_tkl', 'defense_tkl_primary'],
'yds': '',
'desc': 'Assisted tackle',
'long': 'Tackle with one or more assists.',
},
# 81: 1/2 tackle
# Tackle split equally between two players. This stat is not in use at
# this time.
82: {
'cat': 'defense',
'fields': ['defense_ast'],
'yds': '',
'desc': 'Tackle assist',
'long': 'Assist to a tackle.',
},
83: {
'cat': 'defense',
'fields': ['defense_sk'],
'yds': 'defense_sk_yds',
'value': 1.0,
'desc': 'Sack yards (defense)',
'long': 'Unassisted sack.',
},
84: {
'cat': 'defense',
'fields': ['defense_sk'],
'yds': 'defense_sk_yds',
'value': 0.5,
'desc': '1/2 sack yards (defense)',
'long': 'Sack split equally between two players.',
},
85: {
'cat': 'defense',
'fields': ['defense_pass_def'],
'yds': '',
'desc': 'Pass defensed',
'long': 'Incomplete pass was due primarily to the player\'s action.',
},
86: {
'cat': 'defense',
'fields': ['defense_puntblk'],
'yds': '',
'desc': 'Punt blocked (defense)',
'long': 'Player blocked a punt.',
},
87: {
'cat': 'defense',
'fields': ['defense_xpblk'],
'yds': '',
'desc': 'Extra point blocked (defense)',
'long': 'Player blocked the extra point.',
},
88: {
'cat': 'defense',
'fields': ['defense_fgblk'],
'yds': '',
'desc': 'Field goal blocked (defense)',
'long': '',
},
89: {
'cat': 'defense',
'fields': ['defense_safe'],
'yds': '',
'desc': 'Safety (defense)',
'long': 'Tackle that resulted in a safety. This is in addition to '
'a tackle.',
},
# 90: 1/2 safety (defense)
# This stat was used by SuperStat when a 1/2 tackle resulted in a safety.
# This stat is not in use at this time.
91: {
'cat': 'defense',
'fields': ['defense_ffum'],
'yds': '',
'desc': 'Forced fumble (defense)',
'long': 'Player forced a fumble.',
},
93: {
'cat': 'penalty',
'fields': ['penalty'],
'yds': 'penalty_yds',
'desc': 'Penalty',
'long': '',
},
95: {
'cat': 'team',
'fields': ['rushing_loss'],
'yds': 'rushing_loss_yds',
'desc': 'Tackled for a loss',
'long': 'Tackled for a loss (TFL) is an offensive stat. A team is '
'charged with a TFL if its rush ends behind the line of '
'scrimmage, and at least one defensive player is credited '
'with ending the rush with a tackle, or tackle assist. The '
'stat will contain yardage.',
},
# I'm not sure how to classify these...
# 96: Extra point - safety
# If there is a fumble on an extra point attempt, and the loose ball goes
# into the endzone from impetus provided by the defensive team, and
# becomes dead in the endzone, the offense is awarded 1 point.
# 99: 2 point rush - safety
# See "Extra point - safety".
# 100: 2 point pass - safety
# See "Extra point - safety".
102: {
'cat': 'team',
'fields': ['kicking_downed'],
'yds': '',
'desc': 'Kickoff - kick downed',
'long': 'SuperStat didn\'t have this code. A kickoff is "downed" when '
'touched by an offensive player within the 10 yard free zone, '
'and the ball is awarded to the receivers at the spot of the '
'touch.',
},
103: {
'cat': 'passing',
'fields': [],
'yds': 'passing_sk_yds',
'desc': 'Sack yards (offense), No sack',
'long': 'This stat will be used when the passer fumbles, then '
'recovers, then laterals. The receiver of the lateral gets '
'sack yardage but no sack.',
},
104: {
'cat': 'receiving',
'fields': ['receiving_twopta', 'receiving_twoptm'],
'yds': '',
'desc': '2 point pass reception - good',
'long': '',
},
105: {
'cat': 'receiving',
'fields': ['receiving_twopta', 'receiving_twoptmissed'],
'yds': '',
'desc': '2 point pass reception - failed',
'long': '',
},
106: {
'cat': 'fumbles',
'fields': ['fumbles_lost'],
'yds': '',
'desc': 'Fumble - lost',
'long': '',
},
107: {
'cat': 'kicking',
'fields': ['kicking_rec'],
'yds': '',
'desc': 'Own kickoff recovery',
'long': 'Direct recovery of own kickoff, whether or not the kickoff '
'is onside',
},
108: {
'cat': 'kicking',
'fields': ['kicking_rec', 'kicking_rec_tds'],
'yds': '',
'desc': 'Own kickoff recovery, TD',
'long': 'Direct recovery in endzone of own kickoff, whether or not '
'the kickoff is onside.',
},
110: {
'cat': 'defense',
'fields': ['defense_qbhit'],
'yds': '',
'desc': 'Quarterback hit',
'long': 'Player knocked the quarterback to the ground, quarterback '
'was not the ball carrier. Not available for games before '
'2006 season.',
},
111: {
'cat': 'passing',
'fields': [],
'yds': 'passing_cmp_air_yds',
'desc': 'Pass length, completion',
'long': 'Length of the pass, not including the yards gained by the '
'receiver after the catch. Unofficial stat. Not available for '
'games before 2006 season.',
},
112: {
'cat': 'passing',
'fields': [],
'yds': 'passing_incmp_air_yds',
'desc': 'Pass length, No completion',
'long': 'Length of the pass, if it would have been a completion. '
'Unofficial stat. Not available for games before 2006 season.',
},
113: {
'cat': 'receiving',
'fields': [],
'yds': 'receiving_yac_yds',
'desc': 'Yardage gained after the catch',
'long': 'Yardage from where the ball was caught until the player\'s '
'action was over. Unofficial stat. Not available for games '
'before 2006 season.',
},
115: {
'cat': 'receiving',
'fields': ['receiving_tar'],
'yds': '',
'desc': 'Pass target',
'long': 'Player was the target of a pass attempt. Unofficial stat. '
'Not available for games before 2009 season.',
},
120: {
'cat': 'defense',
'fields': ['defense_tkl_loss'],
'yds': '',
'desc': 'Tackle for a loss',
'long': 'Player tackled the runner behind the line of scrimmage. '
'Play must have ended, player must have received a tackle '
'stat, has to be an offensive player tackled. Unofficial '
'stat. Not available for games before 2008 season.',
},
# 201, 211, 212 and 213 are for NFL Europe.
301: {
'cat': 'team',
'fields': ['xp_aborted'],
'yds': '',
'desc': 'Extra point - aborted',
'long': '',
},
402: {
'cat': 'defense',
'fields': [],
'yds': 'defense_tkl_loss_yds',
'desc': 'Tackle for a loss yards',
'long': '',
},
410: {
'cat': 'kicking',
'fields': [],
'yds': 'kicking_all_yds',
'desc': 'Kickoff and length of kick',
'long': 'Kickoff and length of kick. Includes end zone yards '
'for all kicks into the end zone, including kickoffs '
'ending in a touchback.',
},
}
|
PypiClean
|
/django-oauth-plus-2.2.9.tar.gz/django-oauth-plus-2.2.9/oauth_provider/models.py
|
import uuid
import urllib
import urlparse
from time import time
import warnings
import oauth2 as oauth
from django.db import models
from oauth_provider.compat import AUTH_USER_MODEL, get_random_string
from oauth_provider.managers import TokenManager
from oauth_provider.consts import KEY_SIZE, SECRET_SIZE, CONSUMER_KEY_SIZE, CONSUMER_STATES,\
PENDING, VERIFIER_SIZE, MAX_URL_LENGTH, OUT_OF_BAND
from oauth_provider.utils import check_valid_callback
class Nonce(models.Model):
token_key = models.CharField(max_length=KEY_SIZE)
consumer_key = models.CharField(max_length=CONSUMER_KEY_SIZE)
key = models.CharField(max_length=255)
timestamp = models.PositiveIntegerField(db_index=True)
def __unicode__(self):
return u"Nonce %s for %s" % (self.key, self.consumer_key)
class Scope(models.Model):
name = models.CharField(max_length=255)
url = models.TextField(max_length=MAX_URL_LENGTH)
is_readonly = models.BooleanField(default=True)
def __unicode__(self):
return u"Resource %s with url %s" % (self.name, self.url)
class Resource(Scope):
def __init__(self, *args, **kwargs):
warnings.warn("oauth_provider.Resource model is deprecated, use oauth_provider.Scope instead", DeprecationWarning)
super(Resource, self).__init__(*args, **kwargs)
class Meta:
proxy = True
class Consumer(models.Model):
name = models.CharField(max_length=255)
description = models.TextField(blank=True)
key = models.CharField(max_length=CONSUMER_KEY_SIZE)
secret = models.CharField(max_length=SECRET_SIZE, blank=True)
status = models.SmallIntegerField(choices=CONSUMER_STATES, default=PENDING)
user = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True)
xauth_allowed = models.BooleanField("Allow xAuth", default=False)
def __unicode__(self):
return u"Consumer %s with key %s" % (self.name, self.key)
def generate_random_codes(self):
"""
Used to generate random key/secret pairings.
Use this after you've added the other data in place of save().
"""
self.key = uuid.uuid4().hex
self.secret = get_random_string(length=SECRET_SIZE)
self.save()
def default_token_timestamp():
return long(time())
class Token(models.Model):
REQUEST = 1
ACCESS = 2
TOKEN_TYPES = ((REQUEST, u'Request'), (ACCESS, u'Access'))
key = models.CharField(max_length=KEY_SIZE, null=True, blank=True)
secret = models.CharField(max_length=SECRET_SIZE, null=True, blank=True)
token_type = models.SmallIntegerField(choices=TOKEN_TYPES)
timestamp = models.IntegerField(default=default_token_timestamp)
is_approved = models.BooleanField(default=False)
user = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True, related_name='tokens')
consumer = models.ForeignKey(Consumer)
scope = models.ForeignKey(Scope, null=True, blank=True)
@property
def resource(self):
return self.scope
@resource.setter
def resource(self, value):
self.scope = value
## OAuth 1.0a stuff
verifier = models.CharField(max_length=VERIFIER_SIZE)
callback = models.CharField(max_length=MAX_URL_LENGTH, null=True, blank=True)
callback_confirmed = models.BooleanField(default=False)
objects = TokenManager()
def __unicode__(self):
return u"%s Token %s for %s" % (self.get_token_type_display(), self.key, self.consumer)
def to_string(self, only_key=False):
token_dict = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
'oauth_callback_confirmed': self.callback_confirmed and 'true' or 'error'
}
if self.verifier:
token_dict['oauth_verifier'] = self.verifier
if only_key:
del token_dict['oauth_token_secret']
del token_dict['oauth_callback_confirmed']
return urllib.urlencode(token_dict)
def generate_random_codes(self):
"""
Used to generate random key/secret pairings.
Use this after you've added the other data in place of save().
"""
self.key = uuid.uuid4().hex
self.secret = get_random_string(length=SECRET_SIZE)
self.save()
def get_callback_url(self, args=None):
"""
OAuth 1.0a, append the oauth_verifier.
"""
if self.callback and self.verifier:
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
# workaround for non-http scheme urlparse problem in py2.6 (issue #2)
if "?" in path:
query = "%s&%s" % (path.split("?")[-1], query)
path = "?".join(path[:-1])
if args is not None:
query += "&%s" % urllib.urlencode(args)
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
args = args is not None and "?%s" % urllib.urlencode(args) or ""
return self.callback and self.callback + args
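# Hedged example (hypothetical values): with a confirmed callback carrying its own
# query string, the verifier is appended to the existing parameters.
#
#   token.callback = 'https://example.com/cb?state=1'
#   token.verifier = 'abc123'
#   token.get_callback_url()
#   # -> 'https://example.com/cb?state=1&oauth_verifier=abc123'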
def set_callback(self, callback):
if callback != OUT_OF_BAND: # out of band, says "we can't do this!"
if check_valid_callback(callback):
self.callback = callback
self.callback_confirmed = True
self.save()
else:
raise oauth.Error('Invalid callback URL.')
|
PypiClean
|
/Ximpia-0.2.1.tar.gz/Ximpia-0.2.1/ximpia/xpsite/choices.py
|
import constants as K
from django.utils.translation import ugettext as _
class Choices(object):
# MSG_MEDIA
MSG_MEDIA = (
(K.XIMPIA,'Ximpia'),
(K.TWITTER,'Twitter'),
(K.FACEBOOK,'Facebook'),
(K.LINKEDIN,'LinkedIn'),
(K.EMAIL,'Email'),
(K.SMS,'SMS'),
)
# MSG_PREFERRED
MSG_PREFERRED = (
(K.XIMPIA,'Ximpia'),
(K.TWITTER,'Twitter'),
(K.FACEBOOK,'Facebook'),
(K.LINKEDIN,'LinkedIn'),
(K.EMAIL,'Email'),
)
# SOCIAL_NETS
SOCIAL_NETS = (
(K.TWITTER,'Twitter'),
(K.FACEBOOK,'Facebook'),
(K.LINKEDIN,'LinkedIn'),
(K.XING,'Xing'),
(K.GOOGLE,'Google'),
)
# COUNTRY
COUNTRY = (
('fr', _('France')),
('es', _('Spain')),
('us', _('United States')),
('ag', _('Antigua and Barbuda')),
('ai', _('Anguilla')),
('al', _('Albania')),
('am', _('Armenia')),
('an', _('Netherlands Antilles')),
('ao', _('Angola')),
('aq', _('Antarctica')),
('ar', _('Argentina')),
('as', _('American Samoa')),
('at', _('Austria')),
('aw', _('Aruba')),
('ax', _('Aland Islands')),
('az', _('Azerbaijan')),
('ba', _('Bosnia and Herzegovina')),
('bb', _('Barbados')),
('bd', _('Bangladesh')),
('be', _('Belgium')),
('bf', _('Burkina Faso')),
)
MSG_LOG_ACTION_READ = 'read'
MSG_LOG_ACTION_DOWNLOAD = 'download'
MSG_LOG_ACTION = (
(MSG_LOG_ACTION_READ,_('Read')),
(MSG_LOG_ACTION_DOWNLOAD, _('File Download')),
)
# FOLLOW_STATUS
FOLLOW_STATUS = (
(K.OK,'Ok'),
(K.BLOCKED, _('Blocked')),
(K.UNBLOCKED, _('UnBlocked')),
)
# CONTACT_SOURCE
CONTACT_SOURCE = (
(K.TWITTER,'Twitter'),
(K.FACEBOOK,'Facebook'),
(K.LINKEDIN,'LinkedIn'),
(K.IMPORT, _('Imported')),
(K.GMAIL,'Gmail'),
(K.YAHOO,'Yahoo'),
(K.MSN,'MSN'),
)
# ADDRESS_TYPE
ADDRESS_TYPE_BILL = 'bill'
ADDRESS_TYPE_SHIP = 'ship'
ADDRESS_TYPE_HOME = 'home'
ADDRESS_TYPE_OTHER = 'other'
ADDRESS_TYPE = (
(ADDRESS_TYPE_BILL, _('Billing')),
(ADDRESS_TYPE_SHIP, _('Shipping & Handling')),
(ADDRESS_TYPE_HOME, _('Home')),
(ADDRESS_TYPE_OTHER, _('Other')),
)
# SCHEDULE
SCHEDULE_FULL = 'full'
SCHEDULE_PART = 'part'
SCHEDULE = (
(SCHEDULE_FULL, _('Full Time')),
(SCHEDULE_PART, _('Part Time')),
)
# CONTRACT_TYPE
CONTRACT_TYPE_REGULAR = 'regular'
CONTRACT_TYPE_TEMP = 'temporary'
CONTRACT_TYPE = (
(CONTRACT_TYPE_REGULAR, _('Regular')),
(CONTRACT_TYPE_TEMP, _('Temporary')),
)
# STATUS
STATUS_SELF = 'self'
STATUS_EMPLOYEE = 'employee'
STATUS_DIRECTOR = 'director'
STATUS_CONTRACTOR = 'contractor'
STATUS = (
(STATUS_SELF, _('Self Employed')),
(STATUS_EMPLOYEE, _('Employee')),
(STATUS_DIRECTOR, _('Director')),
(STATUS_CONTRACTOR, _('Contractor')),
)
# SALUTATION
SALUTATION_MR = 'mr'
SALUTATION_MS = 'ms'
SALUTATION_DR = 'dr'
SALUTATION = (
(SALUTATION_MR, _('Mr.')),
(SALUTATION_MS, _('Miss')),
(SALUTATION_DR, _('Dr.')),
)
# CONTACT_COMM
CONTACT_COMM = (
(K.EMAIL, 'Email'),
(K.HOME, _('Home Phone')),
(K.WORK, _('Work Phone')),
(K.MOBILE, _('Mobile')),
(K.WORK_MOBILE, _('Work Mobile')),
(K.FAX, 'Fax'),
(K.NETWORK, _('Social Network')),
(K.SITE, _('Site')),
(K.BLOG, _('Blog')),
(K.FACEBOOK_PAGE, _('Facebook Page')),
(K.IM, _('Instant Messenger')),
)
# MILITARY
MILITARY_NA = 'na'
MILITARY_COMPLETED = 'completed'
MILITARY_PENDING = 'pending'
MILITARY = (
(MILITARY_NA, 'N/A'),
(MILITARY_COMPLETED, _('Completed')),
(MILITARY_PENDING, _('Pending')),
)
# ETHNIC
ETHNIC_WHITE = 'white'
ETHNIC = (
(ETHNIC_WHITE, _('White/Caucasian')),
)
# SUBSCRIPTION
SUBSCRIPTION_TRIAL = 'trial'
SUBSCRIPTION_VALID = 'valid'
SUBSCRIPTION_NONE = 'None'
SUBSCRIPTION = (
(SUBSCRIPTION_TRIAL, _('30-Day Free Trial')),
(SUBSCRIPTION_VALID, _('Valid')),
(SUBSCRIPTION_NONE, _('None')),
)
# EDUCATION
EDUCATION_DEGREE = 'degree'
EDUCATION_COURSE = 'course'
EDUCATION_CERT = 'cert'
EDUCATION = (
(EDUCATION_DEGREE, _('Degree')),
(EDUCATION_COURSE, _('Course')),
(EDUCATION_CERT, _('Certification')),
)
# COURSE_MEDIA
COURSE_MEDIA_VIDEO = 'video'
COURSE_MEDIA_PRESENTATION = 'slides'
COURSE_MEDIA = (
(COURSE_MEDIA_VIDEO, 'Video'),
(COURSE_MEDIA_PRESENTATION, _('Presentation')),
)
# SEX
SEX_MAN = 'male'
SEX_WOMAN = 'female'
SEX = (
(SEX_MAN, _('Male')),
(SEX_WOMAN, _('Female'))
)
# RELATIONSHIP
SINGLE = 'single'
IN_RELATIONSHIP = 'in_relationship'
MARRIED = 'married'
RELATIONSHIP = (
(SINGLE, _('Single')),
(IN_RELATIONSHIP, _('In a Relationship')),
(MARRIED, _('Married')))
# CUSTOM_TYPE
CUSTOM_TYPE_INPUT = 'input'
CUSTOM_TYPE_COMBO = 'combo'
CUSTOM_TYPE = (
(CUSTOM_TYPE_INPUT, 'Input'),
(CUSTOM_TYPE_COMBO, 'Combo'),
)
# SKILL_TYPE
SKILL_TYPE_TECH = 'tech'
SKILL_TYPE_TEAM_WORK = 'team_work'
SKILL_TYPE = (
(SKILL_TYPE_TECH, _('Technical')),
(SKILL_TYPE_TEAM_WORK, _('Team Work')),
)
# INDUSTRY
INDUSTRY = (
(101, _('Accounting')),
(102, _('Airlines/Aviation')),
(103, _('Alternative Dispute Resolution')),
(104, _('Alternative Medicine')),
(105, _('Animation')),
(106, _('Apparel & Fashion')),
(107, _('Architecture & Planning')),
(108, _('Arts and Crafts')),
(109, _('Automotive')),
(110, _('Aviation & Aerospace')),
(111, _('Banking')),
(112, _('Biotechnology')),
(113, _('Broadcast Media')),
(114, _('Building Materials')),
(115, _('Business Supplies and Equipment')),
(116, _('Capital Markets')),
(117, _('Chemicals')),
(118, _('Civic & Social Organization')),
(119, _('Civil Engineering')),
(120, _('Commercial Real Estate')),
(121, _('Computer & Network Security')),
(122, _('Computer Games')),
(123, _('Computer Hardware')),
(124, _('Computer Networking')),
(125, _('Computer Software')),
(126, _('Construction')),
(127, _('Consumer Electronics')),
(128, _('Consumer Goods')),
(129, _('Consumer Services')),
(130, _('Cosmetics')),
(131, _('Dairy')),
(132, _('Defense & Space')),
(133, _('Design')),
(134, _('Education Management')),
(135, _('E-Learning')),
(136, _('Electrical/Electronic Manufacturing')),
(137, _('Entertainment')),
(138, _('Environmental Services')),
(139, _('Events Services')),
(140, _('Executive Office')),
(141, _('Facilities Services')),
(142, _('Farming')),
(143, _('Financial Services')),
(144, _('Fine Art')),
(145, _('Fishery')),
(146, _('Food & Beverages')),
(147, _('Food Production')),
(148, _('Fund-Raising')),
(149, _('Furniture')),
(150, _('Gambling & Casinos')),
(151, _('Glass, Ceramics & Concrete')),
(152, _('Government Administration')),
(153, _('Government Relations')),
(154, _('Graphic Design')),
(155, _('Health, Wellness and Fitness')),
(156, _('Higher Education')),
(157, _('Hospital & Health Care')),
(158, _('Hospitality')),
(159, _('Human Resources')),
(160, _('Import and Export')),
(161, _('Individual & Family Services')),
(162, _('Industrial Automation')),
(163, _('Information Services')),
(164, _('Information Technology and Services')),
(165, _('Insurance')),
(166, _('International Affairs')),
(167, _('International Trade and Development')),
(168, _('Internet')),
(169, _('Investment Banking')),
(170, _('Investment Management')),
(171, _('Judiciary')),
(172, _('Law Enforcement')),
(173, _('Law Practice')),
(174, _('Legal Services')),
(175, _('Legislative Office')),
(176, _('Leisure, Travel & Tourism')),
(177, _('Libraries')),
(178, _('Logistics and Supply Chain')),
(179, _('Luxury Goods & Jewelry')),
(180, _('Machinery')),
(181, _('Management Consulting')),
(182, _('Maritime')),
(183, _('Marketing and Advertising')),
(184, _('Market Research')),
(185, _('Mechanical or Industrial Engineering')),
(186, _('Media Production')),
(187, _('Medical Devices')),
(188, _('Medical Practice')),
(189, _('Mental Health Care')),
(190, _('Military')),
(191, _('Mining & Metals')),
(192, _('Motion Pictures and Film')),
(193, _('Museums and Institutions')),
(194, _('Music')),
(195, _('Nanotechnology')),
(196, _('Newspapers')),
(197, _('Non-Profit Organization Management')),
(198, _('Oil & Energy')),
(199, _('Online Media')),
(200, _('Outsourcing/Offshoring')),
(201, _('Package/Freight Delivery')),
(202, _('Packaging and Containers')),
(203, _('Paper & Forest Products')),
(204, _('Performing Arts')),
(205, _('Pharmaceuticals')),
(206, _('Philanthropy')),
(207, _('Photography')),
(208, _('Plastics')),
(209, _('Political Organization')),
(210, _('Primary/Secondary Education')),
(211, _('Printing')),
(212, _('Professional Training & Coaching')),
(213, _('Program Development')),
(214, _('Public Policy')),
(215, _('Public Relations and Communications')),
(216, _('Public Safety')),
(217, _('Publishing')),
(218, _('Railroad Manufacture')),
(219, _('Ranching')),
(220, _('Real Estate')),
(221, _('Recreational Facilities and Services')),
(222, _('Religious Institutions')),
(223, _('Renewables & Environment')),
(224, _('Research')),
(225, _('Restaurants')),
(226, _('Retail')),
(227, _('Security and Investigations')),
(228, _('Semiconductors')),
(229, _('Shipbuilding')),
(230, _('Sporting Goods')),
(240, _('Sports')),
(250, _('Staffing and Recruiting')),
(251, _('Supermarkets')),
(252, _('Telecommunications')),
(253, _('Textiles')),
(254, _('Think Tanks')),
(255, _('Tobacco')),
(256, _('Translation and Localization')),
(257, _('Transportation/Trucking/Railroad')),
(258, _('Utilities')),
(259, _('Venture Capital & Private Equity')),
(260, _('Veterinary')),
(261, _('Warehousing')),
(262, _('Wholesale')),
(263, _('Wine and Spirits')),
(264, _('Wireless')),
(265, _('Writing and Editing')),
)
JOB_CANDIDATE_PENDING = 'pending'
JOB_CANDIDATE_STATUS = (
(JOB_CANDIDATE_PENDING, _('Pending')),
)
JOB_CONTRACT = ()
# INVITATION STATUS
INVITATION_STATUS = (
(K.PENDING, _('Pending')),
(K.USED, _('Used')),
)
# CALENDAR TYPE
CALENDAR_TYPE_BIRTHDAY = 'birthday'
CALENDAR_TYPE_ANNIVERSARY = 'anniversary'
CALENDAR_TYPE = (
('meeting', _('Meeting')),
('event', _('Event')),
('birthday', _('Birthday')),
('anniversary', _('Anniversary')),
('appointment', _('Appointment')),
)
# CALENDAR REPEAT
CALENDAR_REPEAT_YEARLY = 'yearly'
CALENDAR_REPEAT = (
('daily', _('Daily')),
('weekly', _('Weekly')),
('monthly', _('Monthly')),
('yearly', _('Yearly')),
)
# CALENDAR_INVITE_STATUS
CALENDAR_INVITE_STATUS_PENDING = K.PENDING
CALENDAR_INVITE_STATUS = (
(K.PENDING, _('Pending')),
('accepted', _('Accepted')),
('declined', _('Declined')),
)
# TASK_FINISH_TYPE
TASK_FINISH_TYPE_DEFAULT = 'all'
TASK_FINISH_TYPE = (
('all', _('All')),
('any', _('Any')),
)
# TASK_STATUS
TASK_STATUS_DEFAULT = 'notStarted'
TASK_STATUS = (
('notStarted', _('Not Started')),
('inProgress', _('In Progress')),
('completed', _('Completed')),
('waiting', _('Waiting')),
('deferred', _('Deferred')),
)
# SUBSCRIPTION ITEMS
SUBSCRIPTION_ITEMS = (
('sms', 'SMS'),
)
# FILE TYPES
FILE_TYPE = (
('pdf', 'Pdf'),
('word', 'Word')
)
# INVITATION TYPE
"""INVITATION_TYPE_ORDINARY = 'ordinary'
INVITATION_TYPE_PROMOTION = 'promotion'
INVITATION_TYPE = (
(INVITATION_TYPE_ORDINARY, _('Ordinary')),
(INVITATION_TYPE_PROMOTION, _('Promotion'))
)"""
# INVITATION_ACC_TYPE
INVITATION_ACC_TYPE_USER = 'user'
INVITATION_ACC_TYPE_ORG = 'organization'
INVITATION_ACC_TYPE = (
(INVITATION_ACC_TYPE_USER, _('User')),
(INVITATION_ACC_TYPE_ORG, _('Organization'))
)
# INVITATION_PAY_TYPE
INVITATION_PAY_TYPE_FREE = 'free'
INVITATION_PAY_TYPE_PAY = 'pay'
INVITATION_PAY_TYPE_PROMOTION = 'promotion'
INVITATION_PAY_TYPE = (
(INVITATION_PAY_TYPE_FREE, _('Free')),
(INVITATION_PAY_TYPE_PAY, _('Pay')),
(INVITATION_PAY_TYPE_PROMOTION, _('Promotion'))
)
# ACCOUNT_TYPE
ACCOUNT_TYPE_ORDINARY = 'ordinary'
ACCOUNT_TYPE_PROMOTION = 'promotion'
ACCOUNT_TYPE = (
    (ACCOUNT_TYPE_ORDINARY, _('Ordinary')),
    (ACCOUNT_TYPE_PROMOTION, _('Promotion'))
)
# LANG
LANG_ENGLISH = 'en'
LANG = (
('en', _('English')),
('es', _('Spanish')))
# ORG GROUPS
ORG_GROUPS = (
(1, _('Management')),
(2, _('Engineering')),
(3, _('Sales')),
(4, _('Customer Support')),
(5, _('Finance')),
(6, _('Software Development')),
(7, _('Legal')),
)
# JOB TITLES
JOB_TITLES = (
(1, _('Project Manager')),
(2, _('CEO')),
(3, _('Founder')),
(4, _('CFO')),
(5, _('Director')),
(6, _('Manager')),
(7, _('Sales Director')),
(8, _('Marketing Director')),
)
# CATEGORY_TYPE
CATEGORY_TYPE_DEFAULT = 'default'
CATEGORY_TYPE = (
(CATEGORY_TYPE_DEFAULT, _('Default')),
)
# ACCESS_RELATIONSHIP
ACCESS_RELATIONSHIP_OWNER = 'owner'
ACCESS_RELATIONSHIP_ADMIN = 'admin'
ACCESS_RELATIONSHIP_MANAGER = 'manager'
ACCESS_RELATIONSHIP_USER = 'user'
ACCESS_RELATIONSHIP = (
(ACCESS_RELATIONSHIP_ADMIN, _('Admin')),
(ACCESS_RELATIONSHIP_OWNER, _('Owner')),
(ACCESS_RELATIONSHIP_MANAGER, _('Manager')),
(ACCESS_RELATIONSHIP_USER, _('User')),
)
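# --- Hedged usage sketch (not part of the original module) ---
# The tuples above follow Django's ``choices`` convention: (stored value, human-readable label).
# A choices tuple can be passed directly to a model field, e.g.
# ``models.CharField(max_length=32, choices=CONTACT_COMM, default=K.EMAIL)``; that field and
# default are illustrative assumptions. For plain lookups, a dict view is often handy:
CONTACT_COMM_LABELS = dict(CONTACT_COMM)  # e.g. CONTACT_COMM_LABELS[K.EMAIL] -> 'Email'
ACCESS_RELATIONSHIP_LABELS = dict(ACCESS_RELATIONSHIP)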
/FAST_OAD_CS23-1.2.0-py3-none-any.whl/fastga/models/geometry/geom_components/wing/compute_wing.py
# This file is part of FAST-OAD_CS23 : A framework for rapid Overall Aircraft Design
# Copyright (C) 2022 ONERA & ISAE-SUPAERO
# FAST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from openmdao.api import Group
import fastoad.api as oad
from .constants import (
SUBMODEL_WING_THICKNESS_RATIO,
SUBMODEL_WING_SPAN,
SUBMODEL_WING_HEIGHT,
SUBMODEL_WING_L1_L4,
SUBMODEL_WING_L2_L3,
SUBMODEL_WING_X_LOCAL,
SUBMODEL_WING_X_ABSOLUTE,
SUBMODEL_WING_B50,
SUBMODEL_WING_MAC,
SUBMODEL_WING_SWEEP,
SUBMODEL_WING_WET_AREA,
)
from ...constants import SUBMODEL_WING_GEOMETRY
@oad.RegisterSubmodel(SUBMODEL_WING_GEOMETRY, "fastga.submodel.geometry.wing.legacy")
class ComputeWingGeometry(Group):
# TODO: Document equations. Cite sources
"""Wing geometry estimation."""
def setup(self):
self.add_subsystem(
"wing_toc",
oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_THICKNESS_RATIO),
promotes=["*"],
)
self.add_subsystem(
"wing_y", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_SPAN), promotes=["*"]
)
self.add_subsystem(
"wing_l1l4", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_L1_L4), promotes=["*"]
)
self.add_subsystem(
"wing_l2l3", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_L2_L3), promotes=["*"]
)
self.add_subsystem(
"wing_z", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_HEIGHT), promotes=["*"]
)
self.add_subsystem(
"wing_x", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_X_LOCAL), promotes=["*"]
)
self.add_subsystem(
"wing_b50", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_B50), promotes=["*"]
)
self.add_subsystem(
"wing_mac", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_MAC), promotes=["*"]
)
self.add_subsystem(
"wing_xabsolute",
oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_X_ABSOLUTE),
promotes=["*"],
)
self.add_subsystem(
"wing_sweep", oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_SWEEP), promotes=["*"]
)
self.add_subsystem(
"wing_wet_area",
oad.RegisterSubmodel.get_submodel(SUBMODEL_WING_WET_AREA),
promotes=["*"],
)
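# --- Hedged usage sketch (not part of the original module) ---
# ComputeWingGeometry is normally assembled and fed by FAST-OAD from a configuration file and
# an input data file; the standalone driver below only illustrates the OpenMDAO call pattern
# and assumes all registered wing submodels are available.
if __name__ == "__main__":
    import openmdao.api as om
    problem = om.Problem(model=ComputeWingGeometry())
    problem.setup()
    # Planform inputs (area, aspect ratio, taper ratio, ...) must be provided with
    # problem.set_val(...) before running; the exact variable names depend on the submodels.
    problem.run_model()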
/fast_oad_cs25-0.3.0.tar.gz/fast_oad_cs25-0.3.0/src/fastoad_cs25/models/loops/wing_area_component/update_wing_area_geom.py
import numpy as np
import openmdao.api as om
from fastoad.module_management.service_registry import RegisterSubmodel
from fastoad_cs25.models.loops.constants import (
SERVICE_WING_AREA_CONSTRAINT_GEOM,
SERVICE_WING_AREA_LOOP_GEOM,
)
@RegisterSubmodel(
SERVICE_WING_AREA_LOOP_GEOM, "fastoad.submodel.loops.wing.area.update.geom.legacy"
)
class UpdateWingAreaGeom(om.ExplicitComponent):
"""Computes wing area for being able to load enough fuel to achieve the sizing mission."""
def setup(self):
self.add_input("data:geometry:wing:aspect_ratio", val=np.nan)
self.add_input("data:geometry:wing:root:thickness_ratio", val=np.nan)
self.add_input("data:geometry:wing:tip:thickness_ratio", val=np.nan)
self.add_input("data:weight:aircraft:sizing_block_fuel", val=np.nan, units="kg")
# Same remark on the naming and connection as in the aero component
self.add_output("wing_area:geom", val=100.0, units="m**2")
self.declare_partials(of="*", wrt="*", method="exact")
def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
lambda_wing = inputs["data:geometry:wing:aspect_ratio"]
root_thickness_ratio = inputs["data:geometry:wing:root:thickness_ratio"]
tip_thickness_ratio = inputs["data:geometry:wing:tip:thickness_ratio"]
mfw_mission = inputs["data:weight:aircraft:sizing_block_fuel"]
avg_thickness_ratio = 0.6 * root_thickness_ratio + 0.4 * tip_thickness_ratio
wing_area_mission = (
max(1000.0, mfw_mission - 1570.0) / (224 * lambda_wing**-0.4 * avg_thickness_ratio)
) ** (1.0 / 1.5)
outputs["wing_area:geom"] = wing_area_mission
def compute_partials(self, inputs, partials, discrete_inputs=None):
lambda_wing = inputs["data:geometry:wing:aspect_ratio"]
root_thickness_ratio = inputs["data:geometry:wing:root:thickness_ratio"]
tip_thickness_ratio = inputs["data:geometry:wing:tip:thickness_ratio"]
mfw_mission = inputs["data:weight:aircraft:sizing_block_fuel"]
avg_thickness_ratio = 0.6 * root_thickness_ratio + 0.4 * tip_thickness_ratio
# Derivative of wing area wrt average thickness ratio
d_wing_area_d_avg_toc = (
-1.0
/ 1.5
* (
max(1000.0, mfw_mission - 1570.0)
/ (224.0 * lambda_wing**-0.4 * avg_thickness_ratio ** (5.0 / 2.0))
)
** (1.0 / 1.5)
)
partials["wing_area:geom", "data:geometry:wing:aspect_ratio"] = (
0.4
/ 1.5
* (
max(1000.0, mfw_mission - 1570.0)
/ (224 * avg_thickness_ratio * lambda_wing ** (11.0 / 10.0))
)
** (1.0 / 1.5)
)
partials["wing_area:geom", "data:geometry:wing:root:thickness_ratio"] = (
d_wing_area_d_avg_toc * 0.6
)
partials["wing_area:geom", "data:geometry:wing:tip:thickness_ratio"] = (
d_wing_area_d_avg_toc * 0.4
)
        # compute() clamps mfw_mission - 1570 to a minimum of 1000 kg, so below that threshold
        # the wing area no longer depends on the block fuel and the partial is zero.
        if mfw_mission - 1570.0 < 1000.0:
partials["wing_area:geom", "data:weight:aircraft:sizing_block_fuel"] = 0.0
else:
partials["wing_area:geom", "data:weight:aircraft:sizing_block_fuel"] = (
(1.0 / (224 * lambda_wing**-0.4 * avg_thickness_ratio)) ** (1.0 / 1.5)
* (1.0 / 1.5)
* (mfw_mission - 1570.0) ** (-1.0 / 3.0)
)
@RegisterSubmodel(
SERVICE_WING_AREA_CONSTRAINT_GEOM, "fastoad.submodel.loops.wing.area.constraint.geom.legacy"
)
class WingAreaConstraintsGeom(om.ExplicitComponent):
def setup(self):
self.add_input("data:weight:aircraft:sizing_block_fuel", val=np.nan, units="kg")
self.add_input("data:weight:aircraft:MFW", val=np.nan, units="kg")
self.add_output("data:weight:aircraft:additional_fuel_capacity", units="kg")
        # Values are easy to compute, so they are given here directly
self.declare_partials(
of="data:weight:aircraft:additional_fuel_capacity",
wrt="data:weight:aircraft:sizing_block_fuel",
val=-1,
)
self.declare_partials(
of="data:weight:aircraft:additional_fuel_capacity",
wrt="data:weight:aircraft:MFW",
val=1,
)
def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
mfw = inputs["data:weight:aircraft:MFW"]
mission_fuel = inputs["data:weight:aircraft:sizing_block_fuel"]
outputs["data:weight:aircraft:additional_fuel_capacity"] = mfw - mission_fuel
/python_modulr_client-0.0.11-py3-none-any.whl/modulr_client/api/payments/send_payment.py
from http import HTTPStatus
from typing import Any, Dict, Optional
import httpx
from ... import errors
from ...client import Client
from ...models.payment_payment_out_request import PaymentPaymentOutRequest
from ...models.payment_payment_response import PaymentPaymentResponse
from ...types import Response
def _get_kwargs(
*,
client: Client,
json_body: PaymentPaymentOutRequest,
) -> Dict[str, Any]:
url = f"{client.base_url}/payments"
headers: Dict[str, str] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
json_json_body = json_body.to_dict()
return {
"method": "post",
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"follow_redirects": client.follow_redirects,
"json": json_json_body,
}
def _parse_response(
*, client: Client, response: httpx.Response
) -> Optional[PaymentPaymentResponse]:
if response.status_code == HTTPStatus.CREATED:
response_201 = PaymentPaymentResponse.from_dict(response.json())
return response_201
if client.raise_on_unexpected_status:
raise errors.UnexpectedStatus(response.status_code, response.content)
else:
return None
def _build_response(
*, client: Client, response: httpx.Response
) -> Response[PaymentPaymentResponse]:
return Response(
status_code=HTTPStatus(response.status_code),
content=response.content,
headers=response.headers,
parsed=_parse_response(client=client, response=response),
)
def sync_detailed(
*,
client: Client,
json_body: PaymentPaymentOutRequest,
) -> Response[PaymentPaymentResponse]:
"""Create a payment
Supports both Payments to external bank accounts via Faster Payments and transfers to other Modulr
accounts. Requests to Payments are asynchronous.
Args:
json_body (PaymentPaymentOutRequest): Details of Payment request
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[PaymentPaymentResponse]
"""
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
response = httpx.request(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(client=client, response=response)
def sync(
*,
client: Client,
json_body: PaymentPaymentOutRequest,
) -> Optional[PaymentPaymentResponse]:
"""Create a payment
Supports both Payments to external bank accounts via Faster Payments and transfers to other Modulr
accounts. Requests to Payments are asynchronous.
Args:
json_body (PaymentPaymentOutRequest): Details of Payment request
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
        PaymentPaymentResponse
"""
return sync_detailed(
client=client,
json_body=json_body,
).parsed
async def asyncio_detailed(
*,
client: Client,
json_body: PaymentPaymentOutRequest,
) -> Response[PaymentPaymentResponse]:
"""Create a payment
Supports both Payments to external bank accounts via Faster Payments and transfers to other Modulr
accounts. Requests to Payments are asynchronous.
Args:
json_body (PaymentPaymentOutRequest): Details of Payment request
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[PaymentPaymentResponse]
"""
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.request(**kwargs)
return _build_response(client=client, response=response)
async def asyncio(
*,
client: Client,
json_body: PaymentPaymentOutRequest,
) -> Optional[PaymentPaymentResponse]:
"""Create a payment
Supports both Payments to external bank accounts via Faster Payments and transfers to other Modulr
accounts. Requests to Payments are asynchronous.
Args:
json_body (PaymentPaymentOutRequest): Details of Payment request
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
        PaymentPaymentResponse
"""
return (
await asyncio_detailed(
client=client,
json_body=json_body,
)
).parsed
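# --- Hedged usage sketch (not part of the generated module) ---
# The base URL, API key, and payload below are placeholders, not verified Modulr values;
# the real field names for PaymentPaymentOutRequest are defined by that model's schema.
if __name__ == "__main__":
    api_client = Client(
        base_url="https://api-sandbox.modulrfinance.com/api-sandbox",
        headers={"Authorization": "<api-key>"},
    )
    payment_fields: Dict[str, Any] = {}  # placeholder: fill in per the Modulr payment schema
    request_body = PaymentPaymentOutRequest.from_dict(payment_fields)
    result = sync_detailed(client=api_client, json_body=request_body)
    print(result.status_code, result.parsed)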
/globus_nexus_client-0.4.1-py3-none-any.whl/globus_nexus_client/__init__.py
import logging
import typing as t
from globus_sdk import BaseClient, GlobusHTTPResponse, exc
from globus_sdk.authorizers import (
BasicAuthorizer,
GlobusAuthorizer,
StaticGlobusAuthorizer,
)
log = logging.getLogger(__name__)
ACTIVE_IDENTITY_HEADER = "X-Globus-Active-Identity"
class NexusArrayResponse(GlobusHTTPResponse):
"""
super-simple response class for data where the top-level JSON entity is an
Array, so __iter__ can be defined naturally on that array
"""
def __iter__(self):
return iter(self.data)
class LegacyGOAuthAuthorizer(StaticGlobusAuthorizer):
def __init__(self, legacy_token: str):
self.header_val = f"Globus-Goauthtoken {legacy_token}"
class NexusClient(BaseClient):
"""
Client for Globus Nexus API.
    Basic usage looks something like:
>>> import getpass
>>> from globus_nexus_client import NexusClient
>>> from globus_sdk import BasicAuthorizer
>>> nc = NexusClient(authorizer=BasicAuthorizer("username", getpass.getpass()))
followed by whatever actions you want to perform using basic auth, e.g.
>>> from globus_nexus_client import LegacyGOAuthAuthorizer
>>> token = nc.get_goauth_token()
>>> nc2 = NexusClient(authorizer=LegacyGOAuthAuthorizer(token))
Alternatively, you can use Globus Auth tokens with Nexus Client, as in
>>> from globus_nexus_client import NexusClient
>>> from globus_sdk import AccessTokenAuthorizer
>>> nc = NexusClient(authorizer=AccessTokenAuthorizer(my_access_token))
"""
service_name = "nexus"
def __init__(
self,
legacy_token: t.Optional[str] = None,
*,
authorizer: t.Optional[GlobusAuthorizer] = None,
**kwargs,
):
self._active_identity: t.Optional[str] = None
if legacy_token:
authorizer = LegacyGOAuthAuthorizer(legacy_token)
super().__init__(authorizer=authorizer, **kwargs)
@property
def active_identity(self) -> t.Optional[str]:
return self._active_identity
@active_identity.setter
def active_identity(self, val: t.Optional[str]):
self._active_identity = val
def request(self, *args, headers=None, **kwargs) -> GlobusHTTPResponse:
headers = headers or {}
if self._active_identity is not None:
headers[ACTIVE_IDENTITY_HEADER] = self._active_identity
headers["Content-Type"] = "application/json"
return super().request(*args, headers=headers, **kwargs)
def get_goauth_token(self) -> str:
"""
Note that these tokens have a long lifetime and should be
saved and re-used.
"""
log.debug("NexusClient.get_goauth_token() called")
if not isinstance(self.authorizer, BasicAuthorizer):
raise exc.GlobusError("get_goauth_token() requires basic auth")
r = self.get("/goauth/token", query_params={"grant_type": "client_credentials"})
try:
tok = r["access_token"]
log.debug("NexusClient.get_goauth_token() success")
return tok
except KeyError:
            log.warning(
"NexusClient.get_goauth_token() failed somehow, raising an "
"exception now"
)
raise exc.GlobusAPIError(r)
def get_user(self, username: str) -> GlobusHTTPResponse:
if not isinstance(self.authorizer, LegacyGOAuthAuthorizer):
raise exc.GlobusError(
"get_user() requires LegacyGOAuthAuthorizer "
"based authorization (a.k.a. Nexus Tokens)"
)
log.debug(f"NexusClient.get_user({username})")
return self.get(f"/users/{username}")
def get_user_groups_profile(
self, group_id: str, username: str
) -> GlobusHTTPResponse:
log.debug(f"NexusClient.get_user_profile({group_id}, {username})")
return self.get(f"/groups/{group_id}/members/{username}/user")
def get_group(self, group_id: str) -> GlobusHTTPResponse:
log.debug(f"NexusClient.get_group({group_id})")
return self.get(f"/groups/{group_id}")
def create_group(
self,
name: str,
description: str,
body_params: t.Optional[t.Dict[str, t.Any]] = None,
) -> GlobusHTTPResponse:
body_params = body_params or {}
body_params["name"] = name
body_params["description"] = description
log.debug("NexusClient.create_group(%s)", body_params)
return self.post("/groups", data=body_params)
def update_group(
self, group_id: str, group_doc: t.Dict[str, t.Any]
) -> GlobusHTTPResponse:
log.debug(f"NexusClient.update_group({group_id})")
return self.put(f"/groups/{group_id}", data=group_doc)
def delete_group(self, group_id: str) -> GlobusHTTPResponse:
log.debug(f"NexusClient.delete_group({group_id})")
return self.delete(f"/groups/{group_id}")
def list_groups(
self,
for_all_identities: t.Optional[bool] = None,
fields: t.Optional[str] = None,
my_roles: t.Union[None, str, t.List[str]] = None,
my_statuses: t.Optional[str] = None,
query_params: t.Optional[t.Dict[str, t.Any]] = None,
) -> NexusArrayResponse:
query_params = query_params or {}
# if not string, assume iterable
if my_roles is not None and not isinstance(my_roles, str):
my_roles = ",".join(my_roles)
# if not string, assume iterable
if my_statuses is not None and not isinstance(my_statuses, str):
my_statuses = ",".join(my_statuses)
# either string "true" (lowercase) or None (remove from params)
for_all_identities_ = "true" if for_all_identities else None
if for_all_identities_ is not None:
query_params["for_all_identities"] = for_all_identities_
if fields is not None:
query_params["fields"] = fields
if my_roles is not None:
query_params["my_roles"] = my_roles
if my_statuses is not None:
query_params["my_statuses"] = my_statuses
log.debug("NexusClient.list_groups(%s)", query_params)
return NexusArrayResponse(self.get("/groups", query_params=query_params))
def get_group_tree(
self,
group_id: str,
depth: t.Optional[int] = None,
my_roles: t.Union[None, str, t.List[str]] = None,
my_statuses: t.Union[None, str, t.List[str]] = None,
query_params: t.Optional[t.Dict[str, t.Any]] = None,
) -> NexusArrayResponse:
query_params = query_params or {}
# if not string, assume iterable
if my_roles is not None and not isinstance(my_roles, str):
my_roles = ",".join(my_roles)
# if not string, assume iterable
if my_statuses is not None and not isinstance(my_statuses, str):
my_statuses = ",".join(my_statuses)
if depth is not None:
query_params["depth"] = depth
if my_roles is not None:
query_params["my_roles"] = my_roles
if my_statuses is not None:
query_params["my_statuses"] = my_statuses
log.debug("NexusClient.get_group_tree(%s,%s)", group_id, query_params)
return NexusArrayResponse(
self.get(f"/groups/{group_id}/tree", query_params=query_params)
)
def get_group_memberships(self, group_id: str) -> NexusArrayResponse:
log.debug(f"NexusClient.get_group_members({group_id})")
return NexusArrayResponse(self.get(f"/groups/{group_id}/members"))
def get_group_membership(self, group_id: str, username: str) -> GlobusHTTPResponse:
log.debug(f"NexusClient.get_group_membership({group_id}, {username})")
return self.get(f"/groups/{group_id}/members/{username}")
def create_group_memberships(
self,
group_id: str,
usernames: t.Sequence[str],
emails: t.Optional[t.Sequence[str]] = None,
) -> GlobusHTTPResponse:
if isinstance(usernames, str):
usernames = [usernames]
if isinstance(emails, str):
emails = [emails]
body = {"users": list(usernames)}
if emails:
body["emails"] = list(emails)
log.debug(f"NexusClient.create_group_memberships({group_id}, {usernames})")
return self.post(f"/groups/{group_id}/members", data=body)
def update_group_membership(
self, group_id: str, username: str, membership_doc: t.Dict[str, t.Any]
) -> GlobusHTTPResponse:
log.debug(f"NexusClient.update_group_membership({membership_doc})")
return self.put(f"/groups/{group_id}/members/{username}", data=membership_doc)
/google-cloud-network-security-0.9.3.tar.gz/google-cloud-network-security-0.9.3/google/cloud/network_security_v1/services/network_security/transports/rest.py
import dataclasses
import json # type: ignore
import re
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import (
gapic_v1,
operations_v1,
path_template,
rest_helpers,
rest_streaming,
)
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.transport.requests import AuthorizedSession # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.protobuf import json_format
import grpc # type: ignore
from requests import __version__ as requests_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.cloud.network_security_v1.types import (
authorization_policy as gcn_authorization_policy,
)
from google.cloud.network_security_v1.types import (
client_tls_policy as gcn_client_tls_policy,
)
from google.cloud.network_security_v1.types import (
server_tls_policy as gcn_server_tls_policy,
)
from google.cloud.network_security_v1.types import authorization_policy
from google.cloud.network_security_v1.types import client_tls_policy
from google.cloud.network_security_v1.types import server_tls_policy
from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
from .base import NetworkSecurityTransport
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class NetworkSecurityRestInterceptor:
"""Interceptor for NetworkSecurity.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the NetworkSecurityRestTransport.
.. code-block:: python
class MyCustomNetworkSecurityInterceptor(NetworkSecurityRestInterceptor):
def pre_create_authorization_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_create_authorization_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_create_client_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_create_client_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_create_server_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_create_server_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_delete_authorization_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete_authorization_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_delete_client_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete_client_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_delete_server_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete_server_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_get_authorization_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get_authorization_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_get_client_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get_client_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_get_server_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get_server_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_authorization_policies(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_authorization_policies(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_client_tls_policies(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_client_tls_policies(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_server_tls_policies(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_server_tls_policies(self, response):
logging.log(f"Received response: {response}")
return response
def pre_update_authorization_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update_authorization_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_update_client_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update_client_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
def pre_update_server_tls_policy(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update_server_tls_policy(self, response):
logging.log(f"Received response: {response}")
return response
transport = NetworkSecurityRestTransport(interceptor=MyCustomNetworkSecurityInterceptor())
client = NetworkSecurityClient(transport=transport)
"""
def pre_create_authorization_policy(
self,
request: gcn_authorization_policy.CreateAuthorizationPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
gcn_authorization_policy.CreateAuthorizationPolicyRequest,
Sequence[Tuple[str, str]],
]:
"""Pre-rpc interceptor for create_authorization_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_create_authorization_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for create_authorization_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_create_client_tls_policy(
self,
request: gcn_client_tls_policy.CreateClientTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
gcn_client_tls_policy.CreateClientTlsPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for create_client_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_create_client_tls_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for create_client_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_create_server_tls_policy(
self,
request: gcn_server_tls_policy.CreateServerTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
gcn_server_tls_policy.CreateServerTlsPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for create_server_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_create_server_tls_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for create_server_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_delete_authorization_policy(
self,
request: authorization_policy.DeleteAuthorizationPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
authorization_policy.DeleteAuthorizationPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for delete_authorization_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_delete_authorization_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for delete_authorization_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_delete_client_tls_policy(
self,
request: client_tls_policy.DeleteClientTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
client_tls_policy.DeleteClientTlsPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for delete_client_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_delete_client_tls_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for delete_client_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_delete_server_tls_policy(
self,
request: server_tls_policy.DeleteServerTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
server_tls_policy.DeleteServerTlsPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for delete_server_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_delete_server_tls_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for delete_server_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_get_authorization_policy(
self,
request: authorization_policy.GetAuthorizationPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
authorization_policy.GetAuthorizationPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for get_authorization_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_get_authorization_policy(
self, response: authorization_policy.AuthorizationPolicy
) -> authorization_policy.AuthorizationPolicy:
"""Post-rpc interceptor for get_authorization_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_get_client_tls_policy(
self,
request: client_tls_policy.GetClientTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[client_tls_policy.GetClientTlsPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_client_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_get_client_tls_policy(
self, response: client_tls_policy.ClientTlsPolicy
) -> client_tls_policy.ClientTlsPolicy:
"""Post-rpc interceptor for get_client_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_get_server_tls_policy(
self,
request: server_tls_policy.GetServerTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[server_tls_policy.GetServerTlsPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_server_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_get_server_tls_policy(
self, response: server_tls_policy.ServerTlsPolicy
) -> server_tls_policy.ServerTlsPolicy:
"""Post-rpc interceptor for get_server_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_list_authorization_policies(
self,
request: authorization_policy.ListAuthorizationPoliciesRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
authorization_policy.ListAuthorizationPoliciesRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for list_authorization_policies
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_list_authorization_policies(
self, response: authorization_policy.ListAuthorizationPoliciesResponse
) -> authorization_policy.ListAuthorizationPoliciesResponse:
"""Post-rpc interceptor for list_authorization_policies
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_list_client_tls_policies(
self,
request: client_tls_policy.ListClientTlsPoliciesRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
client_tls_policy.ListClientTlsPoliciesRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for list_client_tls_policies
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_list_client_tls_policies(
self, response: client_tls_policy.ListClientTlsPoliciesResponse
) -> client_tls_policy.ListClientTlsPoliciesResponse:
"""Post-rpc interceptor for list_client_tls_policies
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_list_server_tls_policies(
self,
request: server_tls_policy.ListServerTlsPoliciesRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
server_tls_policy.ListServerTlsPoliciesRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for list_server_tls_policies
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_list_server_tls_policies(
self, response: server_tls_policy.ListServerTlsPoliciesResponse
) -> server_tls_policy.ListServerTlsPoliciesResponse:
"""Post-rpc interceptor for list_server_tls_policies
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_update_authorization_policy(
self,
request: gcn_authorization_policy.UpdateAuthorizationPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
gcn_authorization_policy.UpdateAuthorizationPolicyRequest,
Sequence[Tuple[str, str]],
]:
"""Pre-rpc interceptor for update_authorization_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_update_authorization_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for update_authorization_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_update_client_tls_policy(
self,
request: gcn_client_tls_policy.UpdateClientTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
gcn_client_tls_policy.UpdateClientTlsPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for update_client_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_update_client_tls_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for update_client_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_update_server_tls_policy(
self,
request: gcn_server_tls_policy.UpdateServerTlsPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
gcn_server_tls_policy.UpdateServerTlsPolicyRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for update_server_tls_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_update_server_tls_policy(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for update_server_tls_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_get_location(
self,
request: locations_pb2.GetLocationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_location
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_get_location(
self, response: locations_pb2.Location
) -> locations_pb2.Location:
"""Post-rpc interceptor for get_location
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_list_locations(
self,
request: locations_pb2.ListLocationsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_locations
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_list_locations(
self, response: locations_pb2.ListLocationsResponse
) -> locations_pb2.ListLocationsResponse:
"""Post-rpc interceptor for list_locations
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_get_iam_policy(
self,
request: iam_policy_pb2.GetIamPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_iam_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
"""Post-rpc interceptor for get_iam_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_set_iam_policy(
self,
request: iam_policy_pb2.SetIamPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for set_iam_policy
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
"""Post-rpc interceptor for set_iam_policy
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_test_iam_permissions(
self,
request: iam_policy_pb2.TestIamPermissionsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for test_iam_permissions
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_test_iam_permissions(
self, response: iam_policy_pb2.TestIamPermissionsResponse
) -> iam_policy_pb2.TestIamPermissionsResponse:
"""Post-rpc interceptor for test_iam_permissions
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_cancel_operation(
self,
request: operations_pb2.CancelOperationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for cancel_operation
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_cancel_operation(self, response: None) -> None:
"""Post-rpc interceptor for cancel_operation
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_delete_operation(
self,
request: operations_pb2.DeleteOperationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete_operation
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_delete_operation(self, response: None) -> None:
"""Post-rpc interceptor for delete_operation
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_get_operation(
self,
request: operations_pb2.GetOperationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_operation
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_get_operation(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for get_operation
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
def pre_list_operations(
self,
request: operations_pb2.ListOperationsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_operations
Override in a subclass to manipulate the request or metadata
before they are sent to the NetworkSecurity server.
"""
return request, metadata
def post_list_operations(
self, response: operations_pb2.ListOperationsResponse
) -> operations_pb2.ListOperationsResponse:
"""Post-rpc interceptor for list_operations
Override in a subclass to manipulate the response
after it is returned by the NetworkSecurity server but before
it is returned to user code.
"""
return response
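# --- Hedged illustration (not part of the generated module) ---
# A minimal concrete subclass of the interceptor above that logs a single RPC; the class and
# logger names are arbitrary. An instance would be passed to the transport via
# NetworkSecurityRestTransport(interceptor=LoggingNetworkSecurityInterceptor()).
import logging
_interceptor_log = logging.getLogger(__name__)
class LoggingNetworkSecurityInterceptor(NetworkSecurityRestInterceptor):
    def pre_create_authorization_policy(self, request, metadata):
        _interceptor_log.debug("CreateAuthorizationPolicy request: %s", request)
        return request, metadata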
@dataclasses.dataclass
class NetworkSecurityRestStub:
_session: AuthorizedSession
_host: str
_interceptor: NetworkSecurityRestInterceptor
class NetworkSecurityRestTransport(NetworkSecurityTransport):
"""REST backend transport for NetworkSecurity.
Network Security API provides resources to configure
authentication and authorization policies. Refer to per API
resource documentation for more information.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
def __init__(
self,
*,
host: str = "networksecurity.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[NetworkSecurityRestInterceptor] = None,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or NetworkSecurityRestInterceptor()
self._prep_wrapped_messages(client_info)
@property
def operations_client(self) -> operations_v1.AbstractOperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Only create a new client if we do not already have one.
if self._operations_client is None:
http_options: Dict[str, List[Dict[str, str]]] = {
"google.longrunning.Operations.CancelOperation": [
{
"method": "post",
"uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel",
"body": "*",
},
],
"google.longrunning.Operations.DeleteOperation": [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
],
"google.longrunning.Operations.GetOperation": [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
],
"google.longrunning.Operations.ListOperations": [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*}/operations",
},
],
}
rest_transport = operations_v1.OperationsRestTransport(
host=self._host,
# use the credentials which are saved
credentials=self._credentials,
scopes=self._scopes,
http_options=http_options,
path_prefix="v1",
)
self._operations_client = operations_v1.AbstractOperationsClient(
transport=rest_transport
)
# Return the client from cache.
return self._operations_client
class _CreateAuthorizationPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("CreateAuthorizationPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
"authorizationPolicyId": "",
}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: gcn_authorization_policy.CreateAuthorizationPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the create authorization
policy method over HTTP.
Args:
request (~.gcn_authorization_policy.CreateAuthorizationPolicyRequest):
The request object. Request used by the
CreateAuthorizationPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*}/authorizationPolicies",
"body": "authorization_policy",
},
]
request, metadata = self._interceptor.pre_create_authorization_policy(
request, metadata
)
pb_request = gcn_authorization_policy.CreateAuthorizationPolicyRequest.pb(
request
)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_create_authorization_policy(resp)
return resp
class _CreateClientTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("CreateClientTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
"clientTlsPolicyId": "",
}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: gcn_client_tls_policy.CreateClientTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the create client tls policy method over HTTP.
Args:
request (~.gcn_client_tls_policy.CreateClientTlsPolicyRequest):
The request object. Request used by the
CreateClientTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*}/clientTlsPolicies",
"body": "client_tls_policy",
},
]
request, metadata = self._interceptor.pre_create_client_tls_policy(
request, metadata
)
pb_request = gcn_client_tls_policy.CreateClientTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_create_client_tls_policy(resp)
return resp
class _CreateServerTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("CreateServerTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
"serverTlsPolicyId": "",
}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: gcn_server_tls_policy.CreateServerTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the create server tls policy method over HTTP.
Args:
request (~.gcn_server_tls_policy.CreateServerTlsPolicyRequest):
The request object. Request used by the
CreateServerTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*}/serverTlsPolicies",
"body": "server_tls_policy",
},
]
request, metadata = self._interceptor.pre_create_server_tls_policy(
request, metadata
)
pb_request = gcn_server_tls_policy.CreateServerTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_create_server_tls_policy(resp)
return resp
class _DeleteAuthorizationPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("DeleteAuthorizationPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: authorization_policy.DeleteAuthorizationPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the delete authorization
policy method over HTTP.
Args:
request (~.authorization_policy.DeleteAuthorizationPolicyRequest):
The request object. Request used by the
DeleteAuthorizationPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/authorizationPolicies/*}",
},
]
request, metadata = self._interceptor.pre_delete_authorization_policy(
request, metadata
)
pb_request = authorization_policy.DeleteAuthorizationPolicyRequest.pb(
request
)
transcoded_request = path_template.transcode(http_options, pb_request)
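            # The DELETE binding above declares no "body" key, so no request
            # payload is serialized here; only the transcoded URI and query
            # parameters are sent.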
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete_authorization_policy(resp)
return resp
class _DeleteClientTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("DeleteClientTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: client_tls_policy.DeleteClientTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the delete client tls policy method over HTTP.
Args:
request (~.client_tls_policy.DeleteClientTlsPolicyRequest):
The request object. Request used by the
DeleteClientTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/clientTlsPolicies/*}",
},
]
request, metadata = self._interceptor.pre_delete_client_tls_policy(
request, metadata
)
pb_request = client_tls_policy.DeleteClientTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete_client_tls_policy(resp)
return resp
class _DeleteServerTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("DeleteServerTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: server_tls_policy.DeleteServerTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the delete server tls policy method over HTTP.
Args:
request (~.server_tls_policy.DeleteServerTlsPolicyRequest):
The request object. Request used by the
DeleteServerTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/serverTlsPolicies/*}",
},
]
request, metadata = self._interceptor.pre_delete_server_tls_policy(
request, metadata
)
pb_request = server_tls_policy.DeleteServerTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete_server_tls_policy(resp)
return resp
class _GetAuthorizationPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("GetAuthorizationPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: authorization_policy.GetAuthorizationPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> authorization_policy.AuthorizationPolicy:
r"""Call the get authorization policy method over HTTP.
Args:
request (~.authorization_policy.GetAuthorizationPolicyRequest):
The request object. Request used by the
GetAuthorizationPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.authorization_policy.AuthorizationPolicy:
AuthorizationPolicy is a resource
that specifies how a server should
authorize incoming connections. This
resource in itself does not change the
configuration unless it's attached to a
target https proxy or endpoint config
selector resource.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/authorizationPolicies/*}",
},
]
request, metadata = self._interceptor.pre_get_authorization_policy(
request, metadata
)
pb_request = authorization_policy.GetAuthorizationPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = authorization_policy.AuthorizationPolicy()
pb_resp = authorization_policy.AuthorizationPolicy.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
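            # ``AuthorizationPolicy.pb(resp)`` exposes the raw protobuf message
            # backing ``resp``, so parsing the JSON payload into ``pb_resp``
            # populates ``resp`` in place before it is handed to the interceptor.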
resp = self._interceptor.post_get_authorization_policy(resp)
return resp
class _GetClientTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("GetClientTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: client_tls_policy.GetClientTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> client_tls_policy.ClientTlsPolicy:
r"""Call the get client tls policy method over HTTP.
Args:
request (~.client_tls_policy.GetClientTlsPolicyRequest):
The request object. Request used by the
GetClientTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.client_tls_policy.ClientTlsPolicy:
ClientTlsPolicy is a resource that
specifies how a client should
authenticate connections to backends of
a service. This resource itself does not
affect configuration unless it is
attached to a backend service resource.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/clientTlsPolicies/*}",
},
]
request, metadata = self._interceptor.pre_get_client_tls_policy(
request, metadata
)
pb_request = client_tls_policy.GetClientTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = client_tls_policy.ClientTlsPolicy()
pb_resp = client_tls_policy.ClientTlsPolicy.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get_client_tls_policy(resp)
return resp
class _GetServerTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("GetServerTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: server_tls_policy.GetServerTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> server_tls_policy.ServerTlsPolicy:
r"""Call the get server tls policy method over HTTP.
Args:
request (~.server_tls_policy.GetServerTlsPolicyRequest):
The request object. Request used by the
GetServerTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.server_tls_policy.ServerTlsPolicy:
ServerTlsPolicy is a resource that
specifies how a server should
authenticate incoming requests. This
resource itself does not affect
configuration unless it is attached to a
target https proxy or endpoint config
selector resource.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/serverTlsPolicies/*}",
},
]
request, metadata = self._interceptor.pre_get_server_tls_policy(
request, metadata
)
pb_request = server_tls_policy.GetServerTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = server_tls_policy.ServerTlsPolicy()
pb_resp = server_tls_policy.ServerTlsPolicy.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get_server_tls_policy(resp)
return resp
class _ListAuthorizationPolicies(NetworkSecurityRestStub):
def __hash__(self):
return hash("ListAuthorizationPolicies")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: authorization_policy.ListAuthorizationPoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> authorization_policy.ListAuthorizationPoliciesResponse:
r"""Call the list authorization
policies method over HTTP.
Args:
request (~.authorization_policy.ListAuthorizationPoliciesRequest):
The request object. Request used with the
ListAuthorizationPolicies method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.authorization_policy.ListAuthorizationPoliciesResponse:
Response returned by the
ListAuthorizationPolicies method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{parent=projects/*/locations/*}/authorizationPolicies",
},
]
request, metadata = self._interceptor.pre_list_authorization_policies(
request, metadata
)
pb_request = authorization_policy.ListAuthorizationPoliciesRequest.pb(
request
)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = authorization_policy.ListAuthorizationPoliciesResponse()
pb_resp = authorization_policy.ListAuthorizationPoliciesResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list_authorization_policies(resp)
return resp
class _ListClientTlsPolicies(NetworkSecurityRestStub):
def __hash__(self):
return hash("ListClientTlsPolicies")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: client_tls_policy.ListClientTlsPoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> client_tls_policy.ListClientTlsPoliciesResponse:
r"""Call the list client tls policies method over HTTP.
Args:
request (~.client_tls_policy.ListClientTlsPoliciesRequest):
The request object. Request used by the
ListClientTlsPolicies method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.client_tls_policy.ListClientTlsPoliciesResponse:
Response returned by the
ListClientTlsPolicies method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{parent=projects/*/locations/*}/clientTlsPolicies",
},
]
request, metadata = self._interceptor.pre_list_client_tls_policies(
request, metadata
)
pb_request = client_tls_policy.ListClientTlsPoliciesRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = client_tls_policy.ListClientTlsPoliciesResponse()
pb_resp = client_tls_policy.ListClientTlsPoliciesResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list_client_tls_policies(resp)
return resp
class _ListServerTlsPolicies(NetworkSecurityRestStub):
def __hash__(self):
return hash("ListServerTlsPolicies")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: server_tls_policy.ListServerTlsPoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> server_tls_policy.ListServerTlsPoliciesResponse:
r"""Call the list server tls policies method over HTTP.
Args:
request (~.server_tls_policy.ListServerTlsPoliciesRequest):
The request object. Request used by the
ListServerTlsPolicies method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.server_tls_policy.ListServerTlsPoliciesResponse:
Response returned by the
ListServerTlsPolicies method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{parent=projects/*/locations/*}/serverTlsPolicies",
},
]
request, metadata = self._interceptor.pre_list_server_tls_policies(
request, metadata
)
pb_request = server_tls_policy.ListServerTlsPoliciesRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = server_tls_policy.ListServerTlsPoliciesResponse()
pb_resp = server_tls_policy.ListServerTlsPoliciesResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list_server_tls_policies(resp)
return resp
class _UpdateAuthorizationPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("UpdateAuthorizationPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: gcn_authorization_policy.UpdateAuthorizationPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the update authorization
policy method over HTTP.
Args:
request (~.gcn_authorization_policy.UpdateAuthorizationPolicyRequest):
The request object. Request used by the
UpdateAuthorizationPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/v1/{authorization_policy.name=projects/*/locations/*/authorizationPolicies/*}",
"body": "authorization_policy",
},
]
request, metadata = self._interceptor.pre_update_authorization_policy(
request, metadata
)
pb_request = gcn_authorization_policy.UpdateAuthorizationPolicyRequest.pb(
request
)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update_authorization_policy(resp)
return resp
class _UpdateClientTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("UpdateClientTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: gcn_client_tls_policy.UpdateClientTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the update client tls policy method over HTTP.
Args:
request (~.gcn_client_tls_policy.UpdateClientTlsPolicyRequest):
                    The request object. Request used by the
                    UpdateClientTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/v1/{client_tls_policy.name=projects/*/locations/*/clientTlsPolicies/*}",
"body": "client_tls_policy",
},
]
request, metadata = self._interceptor.pre_update_client_tls_policy(
request, metadata
)
pb_request = gcn_client_tls_policy.UpdateClientTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update_client_tls_policy(resp)
return resp
class _UpdateServerTlsPolicy(NetworkSecurityRestStub):
def __hash__(self):
return hash("UpdateServerTlsPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: gcn_server_tls_policy.UpdateServerTlsPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the update server tls policy method over HTTP.
Args:
request (~.gcn_server_tls_policy.UpdateServerTlsPolicyRequest):
                    The request object. Request used by the
                    UpdateServerTlsPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/v1/{server_tls_policy.name=projects/*/locations/*/serverTlsPolicies/*}",
"body": "server_tls_policy",
},
]
request, metadata = self._interceptor.pre_update_server_tls_policy(
request, metadata
)
pb_request = gcn_server_tls_policy.UpdateServerTlsPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update_server_tls_policy(resp)
return resp
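    # Each property below exposes one RPC as a callable stub. Accessing the
    # property constructs a stub instance bound to the shared requests session,
    # host and interceptor, so e.g. ``transport.create_authorization_policy``
    # behaves like a plain callable that takes the request message.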
@property
def create_authorization_policy(
self,
) -> Callable[
[gcn_authorization_policy.CreateAuthorizationPolicyRequest],
operations_pb2.Operation,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._CreateAuthorizationPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def create_client_tls_policy(
self,
) -> Callable[
[gcn_client_tls_policy.CreateClientTlsPolicyRequest], operations_pb2.Operation
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._CreateClientTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def create_server_tls_policy(
self,
) -> Callable[
[gcn_server_tls_policy.CreateServerTlsPolicyRequest], operations_pb2.Operation
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._CreateServerTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def delete_authorization_policy(
self,
) -> Callable[
[authorization_policy.DeleteAuthorizationPolicyRequest],
operations_pb2.Operation,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._DeleteAuthorizationPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def delete_client_tls_policy(
self,
) -> Callable[
[client_tls_policy.DeleteClientTlsPolicyRequest], operations_pb2.Operation
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._DeleteClientTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def delete_server_tls_policy(
self,
) -> Callable[
[server_tls_policy.DeleteServerTlsPolicyRequest], operations_pb2.Operation
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._DeleteServerTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def get_authorization_policy(
self,
) -> Callable[
[authorization_policy.GetAuthorizationPolicyRequest],
authorization_policy.AuthorizationPolicy,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._GetAuthorizationPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def get_client_tls_policy(
self,
) -> Callable[
[client_tls_policy.GetClientTlsPolicyRequest], client_tls_policy.ClientTlsPolicy
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._GetClientTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def get_server_tls_policy(
self,
) -> Callable[
[server_tls_policy.GetServerTlsPolicyRequest], server_tls_policy.ServerTlsPolicy
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._GetServerTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def list_authorization_policies(
self,
) -> Callable[
[authorization_policy.ListAuthorizationPoliciesRequest],
authorization_policy.ListAuthorizationPoliciesResponse,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ListAuthorizationPolicies(self._session, self._host, self._interceptor) # type: ignore
@property
def list_client_tls_policies(
self,
) -> Callable[
[client_tls_policy.ListClientTlsPoliciesRequest],
client_tls_policy.ListClientTlsPoliciesResponse,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ListClientTlsPolicies(self._session, self._host, self._interceptor) # type: ignore
@property
def list_server_tls_policies(
self,
) -> Callable[
[server_tls_policy.ListServerTlsPoliciesRequest],
server_tls_policy.ListServerTlsPoliciesResponse,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ListServerTlsPolicies(self._session, self._host, self._interceptor) # type: ignore
@property
def update_authorization_policy(
self,
) -> Callable[
[gcn_authorization_policy.UpdateAuthorizationPolicyRequest],
operations_pb2.Operation,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._UpdateAuthorizationPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def update_client_tls_policy(
self,
) -> Callable[
[gcn_client_tls_policy.UpdateClientTlsPolicyRequest], operations_pb2.Operation
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._UpdateClientTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def update_server_tls_policy(
self,
) -> Callable[
[gcn_server_tls_policy.UpdateServerTlsPolicyRequest], operations_pb2.Operation
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._UpdateServerTlsPolicy(self._session, self._host, self._interceptor) # type: ignore
@property
def get_location(self):
return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore
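    # The handlers below implement the mixin RPCs (locations, IAM and
    # operations). Unlike the stubs above, they work on plain ``*_pb2`` request
    # types, so they transcode via ``json_format.MessageToDict`` plus keyword
    # arguments instead of the ``.pb()``/``MessageToJson`` path.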
class _GetLocation(NetworkSecurityRestStub):
def __call__(
self,
request: locations_pb2.GetLocationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.Location:
r"""Call the get location method over HTTP.
Args:
request (locations_pb2.GetLocationRequest):
The request object for GetLocation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
locations_pb2.Location: Response from GetLocation method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*}",
},
]
request, metadata = self._interceptor.pre_get_location(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
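            # The dumps/loads round trip above copies the transcoded query
            # params into a plain JSON-compatible dict before they are
            # flattened into the URL.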
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = locations_pb2.Location()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_get_location(resp)
return resp
@property
def list_locations(self):
return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore
class _ListLocations(NetworkSecurityRestStub):
def __call__(
self,
request: locations_pb2.ListLocationsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.ListLocationsResponse:
r"""Call the list locations method over HTTP.
Args:
request (locations_pb2.ListLocationsRequest):
The request object for ListLocations method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
locations_pb2.ListLocationsResponse: Response from ListLocations method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*}/locations",
},
]
request, metadata = self._interceptor.pre_list_locations(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = locations_pb2.ListLocationsResponse()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_list_locations(resp)
return resp
@property
def get_iam_policy(self):
return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore
class _GetIamPolicy(NetworkSecurityRestStub):
def __call__(
self,
request: iam_policy_pb2.GetIamPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Call the get iam policy method over HTTP.
Args:
request (iam_policy_pb2.GetIamPolicyRequest):
The request object for GetIamPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
policy_pb2.Policy: Response from GetIamPolicy method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{resource=projects/*/locations/*/authorizationPolicies/*}:getIamPolicy",
},
{
"method": "get",
"uri": "/v1/{resource=projects/*/locations/*/serverTlsPolicies/*}:getIamPolicy",
},
{
"method": "get",
"uri": "/v1/{resource=projects/*/locations/*/clientTlsPolicies/*}:getIamPolicy",
},
]
request, metadata = self._interceptor.pre_get_iam_policy(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = policy_pb2.Policy()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_get_iam_policy(resp)
return resp
@property
def set_iam_policy(self):
return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore
class _SetIamPolicy(NetworkSecurityRestStub):
def __call__(
self,
request: iam_policy_pb2.SetIamPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Call the set iam policy method over HTTP.
Args:
request (iam_policy_pb2.SetIamPolicyRequest):
The request object for SetIamPolicy method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
policy_pb2.Policy: Response from SetIamPolicy method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{resource=projects/*/locations/*/authorizationPolicies/*}:setIamPolicy",
"body": "*",
},
{
"method": "post",
"uri": "/v1/{resource=projects/*/locations/*/serverTlsPolicies/*}:setIamPolicy",
"body": "*",
},
{
"method": "post",
"uri": "/v1/{resource=projects/*/locations/*/clientTlsPolicies/*}:setIamPolicy",
"body": "*",
},
]
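            # ``"body": "*"`` in these bindings means the entire request message
            # (not a single field) is serialized as the HTTP body, which is why
            # ``transcoded_request["body"]`` below is dumped wholesale.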
request, metadata = self._interceptor.pre_set_iam_policy(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
body = json.dumps(transcoded_request["body"])
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = policy_pb2.Policy()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_set_iam_policy(resp)
return resp
@property
def test_iam_permissions(self):
return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore
class _TestIamPermissions(NetworkSecurityRestStub):
def __call__(
self,
request: iam_policy_pb2.TestIamPermissionsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Call the test iam permissions method over HTTP.
Args:
request (iam_policy_pb2.TestIamPermissionsRequest):
The request object for TestIamPermissions method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{resource=projects/*/locations/*/authorizationPolicies/*}:testIamPermissions",
"body": "*",
},
{
"method": "post",
"uri": "/v1/{resource=projects/*/locations/*/serverTlsPolicies/*}:testIamPermissions",
"body": "*",
},
{
"method": "post",
"uri": "/v1/{resource=projects/*/locations/*/clientTlsPolicies/*}:testIamPermissions",
"body": "*",
},
]
request, metadata = self._interceptor.pre_test_iam_permissions(
request, metadata
)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
body = json.dumps(transcoded_request["body"])
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = iam_policy_pb2.TestIamPermissionsResponse()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_test_iam_permissions(resp)
return resp
@property
def cancel_operation(self):
return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore
class _CancelOperation(NetworkSecurityRestStub):
def __call__(
self,
request: operations_pb2.CancelOperationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Call the cancel operation method over HTTP.
Args:
request (operations_pb2.CancelOperationRequest):
The request object for CancelOperation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel",
"body": "*",
},
]
request, metadata = self._interceptor.pre_cancel_operation(
request, metadata
)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
body = json.dumps(transcoded_request["body"])
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
return self._interceptor.post_cancel_operation(None)
@property
def delete_operation(self):
return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore
class _DeleteOperation(NetworkSecurityRestStub):
def __call__(
self,
request: operations_pb2.DeleteOperationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Call the delete operation method over HTTP.
Args:
request (operations_pb2.DeleteOperationRequest):
The request object for DeleteOperation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
]
request, metadata = self._interceptor.pre_delete_operation(
request, metadata
)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
return self._interceptor.post_delete_operation(None)
@property
def get_operation(self):
return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore
class _GetOperation(NetworkSecurityRestStub):
def __call__(
self,
request: operations_pb2.GetOperationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the get operation method over HTTP.
Args:
request (operations_pb2.GetOperationRequest):
The request object for GetOperation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
operations_pb2.Operation: Response from GetOperation method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
]
request, metadata = self._interceptor.pre_get_operation(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = operations_pb2.Operation()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_get_operation(resp)
return resp
@property
def list_operations(self):
return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore
class _ListOperations(NetworkSecurityRestStub):
def __call__(
self,
request: operations_pb2.ListOperationsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.ListOperationsResponse:
r"""Call the list operations method over HTTP.
Args:
request (operations_pb2.ListOperationsRequest):
The request object for ListOperations method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
operations_pb2.ListOperationsResponse: Response from ListOperations method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*}/operations",
},
]
request, metadata = self._interceptor.pre_list_operations(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = operations_pb2.ListOperationsResponse()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_list_operations(resp)
return resp
@property
def kind(self) -> str:
return "rest"
def close(self):
self._session.close()
__all__ = ("NetworkSecurityRestTransport",)
|
PypiClean
|
/casai-home-frontend-20220503.0.tar.gz/casai-home-frontend-20220503.0/hass_frontend/frontend_es5/b8e9916a.js
|
"use strict";(self.webpackChunkhome_assistant_frontend=self.webpackChunkhome_assistant_frontend||[]).push([[23466],{92685:function(e,t,r){r.d(t,{a:function(){return P}});var n,i=r(87480),o=r(72774),a={ROOT:"mdc-form-field"},s={LABEL_SELECTOR:".mdc-form-field > label"},c=function(e){function t(r){var n=e.call(this,(0,i.__assign)((0,i.__assign)({},t.defaultAdapter),r))||this;return n.click=function(){n.handleClick()},n}return(0,i.__extends)(t,e),Object.defineProperty(t,"cssClasses",{get:function(){return a},enumerable:!1,configurable:!0}),Object.defineProperty(t,"strings",{get:function(){return s},enumerable:!1,configurable:!0}),Object.defineProperty(t,"defaultAdapter",{get:function(){return{activateInputRipple:function(){},deactivateInputRipple:function(){},deregisterInteractionHandler:function(){},registerInteractionHandler:function(){}}},enumerable:!1,configurable:!0}),t.prototype.init=function(){this.adapter.registerInteractionHandler("click",this.click)},t.prototype.destroy=function(){this.adapter.deregisterInteractionHandler("click",this.click)},t.prototype.handleClick=function(){var e=this;this.adapter.activateInputRipple(),requestAnimationFrame((function(){e.adapter.deactivateInputRipple()}))},t}(o.K),l=r(78220),f=r(18601),d=r(14114),u=r(37500),p=r(33310),h=r(8636);function m(e){return m="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},m(e)}function y(e,t,r,n,i,o,a){try{var s=e[o](a),c=s.value}catch(l){return void r(l)}s.done?t(c):Promise.resolve(c).then(n,i)}function v(e){return function(){var t=this,r=arguments;return new Promise((function(n,i){var o=e.apply(t,r);function a(e){y(o,n,i,a,s,"next",e)}function s(e){y(o,n,i,a,s,"throw",e)}a(void 0)}))}}function b(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function g(e,t){for(var r=0;r<t.length;r++){var n=t[r];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(e,n.key,n)}}function w(e,t){return w=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},w(e,t)}function k(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var r,n=_(e);if(t){var i=_(this).constructor;r=Reflect.construct(n,arguments,i)}else r=n.apply(this,arguments);return E(this,r)}}function E(e,t){if(t&&("object"===m(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return function(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}(e)}function _(e){return _=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},_(e)}var P=function(e){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&w(e,t)}(a,e);var t,r,i,o=k(a);function a(){var e;return b(this,a),(e=o.apply(this,arguments)).alignEnd=!1,e.spaceBetween=!1,e.nowrap=!1,e.label="",e.mdcFoundationClass=c,e}return t=a,r=[{key:"createAdapter",value:function(){var 
e,t,r=this;return{registerInteractionHandler:function(e,t){r.labelEl.addEventListener(e,t)},deregisterInteractionHandler:function(e,t){r.labelEl.removeEventListener(e,t)},activateInputRipple:(t=v(regeneratorRuntime.mark((function e(){var t,n;return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(!((t=r.input)instanceof f.Wg)){e.next=6;break}return e.next=4,t.ripple;case 4:(n=e.sent)&&n.startPress();case 6:case"end":return e.stop()}}),e)}))),function(){return t.apply(this,arguments)}),deactivateInputRipple:(e=v(regeneratorRuntime.mark((function e(){var t,n;return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(!((t=r.input)instanceof f.Wg)){e.next=6;break}return e.next=4,t.ripple;case 4:(n=e.sent)&&n.endPress();case 6:case"end":return e.stop()}}),e)}))),function(){return e.apply(this,arguments)})}}},{key:"input",get:function(){var e,t;return null!==(t=null===(e=this.slottedInputs)||void 0===e?void 0:e[0])&&void 0!==t?t:null}},{key:"render",value:function(){var e={"mdc-form-field--align-end":this.alignEnd,"mdc-form-field--space-between":this.spaceBetween,"mdc-form-field--nowrap":this.nowrap};return(0,u.dy)(n||(n=function(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}(['\n <div class="mdc-form-field ','">\n <slot></slot>\n <label class="mdc-label"\n @click="','">',"</label>\n </div>"])),(0,h.$)(e),this._labelClick,this.label)}},{key:"click",value:function(){this._labelClick()}},{key:"_labelClick",value:function(){var e=this.input;e&&(e.focus(),e.click())}}],r&&g(t.prototype,r),i&&g(t,i),a}(l.H);(0,i.__decorate)([(0,p.Cb)({type:Boolean})],P.prototype,"alignEnd",void 0),(0,i.__decorate)([(0,p.Cb)({type:Boolean})],P.prototype,"spaceBetween",void 0),(0,i.__decorate)([(0,p.Cb)({type:Boolean})],P.prototype,"nowrap",void 0),(0,i.__decorate)([(0,p.Cb)({type:String}),(0,d.P)(function(){var e=v(regeneratorRuntime.mark((function e(t){var r;return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:null===(r=this.input)||void 0===r||r.setAttribute("aria-label",t);case 1:case"end":return e.stop()}}),e,this)})));return function(t){return e.apply(this,arguments)}}())],P.prototype,"label",void 0),(0,i.__decorate)([(0,p.IO)(".mdc-form-field")],P.prototype,"mdcRoot",void 0),(0,i.__decorate)([(0,p.vZ)("",!0,"*")],P.prototype,"slottedInputs",void 0),(0,i.__decorate)([(0,p.IO)("label")],P.prototype,"labelEl",void 0)},92038:function(e,t,r){var n;r.d(t,{W:function(){return a}});var i,o,a=(0,r(37500).iv)(n||(i=[".mdc-form-field{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;font-family:Roboto, sans-serif;font-family:var(--mdc-typography-body2-font-family, var(--mdc-typography-font-family, Roboto, sans-serif));font-size:0.875rem;font-size:var(--mdc-typography-body2-font-size, 0.875rem);line-height:1.25rem;line-height:var(--mdc-typography-body2-line-height, 1.25rem);font-weight:400;font-weight:var(--mdc-typography-body2-font-weight, 400);letter-spacing:0.0178571429em;letter-spacing:var(--mdc-typography-body2-letter-spacing, 0.0178571429em);text-decoration:inherit;text-decoration:var(--mdc-typography-body2-text-decoration, inherit);text-transform:inherit;text-transform:var(--mdc-typography-body2-text-transform, inherit);color:rgba(0, 0, 0, 0.87);color:var(--mdc-theme-text-primary-on-background, rgba(0, 0, 0, 
0.87));display:inline-flex;align-items:center;vertical-align:middle}.mdc-form-field>label{margin-left:0;margin-right:auto;padding-left:4px;padding-right:0;order:0}[dir=rtl] .mdc-form-field>label,.mdc-form-field>label[dir=rtl]{margin-left:auto;margin-right:0}[dir=rtl] .mdc-form-field>label,.mdc-form-field>label[dir=rtl]{padding-left:0;padding-right:4px}.mdc-form-field--nowrap>label{text-overflow:ellipsis;overflow:hidden;white-space:nowrap}.mdc-form-field--align-end>label{margin-left:auto;margin-right:0;padding-left:0;padding-right:4px;order:-1}[dir=rtl] .mdc-form-field--align-end>label,.mdc-form-field--align-end>label[dir=rtl]{margin-left:0;margin-right:auto}[dir=rtl] .mdc-form-field--align-end>label,.mdc-form-field--align-end>label[dir=rtl]{padding-left:4px;padding-right:0}.mdc-form-field--space-between{justify-content:space-between}.mdc-form-field--space-between>label{margin:0}[dir=rtl] .mdc-form-field--space-between>label,.mdc-form-field--space-between>label[dir=rtl]{margin:0}:host{display:inline-flex}.mdc-form-field{width:100%}::slotted(*){-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;font-family:Roboto, sans-serif;font-family:var(--mdc-typography-body2-font-family, var(--mdc-typography-font-family, Roboto, sans-serif));font-size:0.875rem;font-size:var(--mdc-typography-body2-font-size, 0.875rem);line-height:1.25rem;line-height:var(--mdc-typography-body2-line-height, 1.25rem);font-weight:400;font-weight:var(--mdc-typography-body2-font-weight, 400);letter-spacing:0.0178571429em;letter-spacing:var(--mdc-typography-body2-letter-spacing, 0.0178571429em);text-decoration:inherit;text-decoration:var(--mdc-typography-body2-text-decoration, inherit);text-transform:inherit;text-transform:var(--mdc-typography-body2-text-transform, inherit);color:rgba(0, 0, 0, 0.87);color:var(--mdc-theme-text-primary-on-background, rgba(0, 0, 0, 0.87))}::slotted(mwc-switch){margin-right:10px}[dir=rtl] ::slotted(mwc-switch),::slotted(mwc-switch[dir=rtl]){margin-left:10px}"],o||(o=i.slice(0)),n=Object.freeze(Object.defineProperties(i,{raw:{value:Object.freeze(o)}}))))},83927:function(e,t,r){var n,i=r(92685),o=r(92038),a=r(37500),s=r(33310),c=r(47181);function l(e){return l="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},l(e)}function f(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function d(e,t){return d=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},d(e,t)}function u(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var r,n=m(e);if(t){var i=m(this).constructor;r=Reflect.construct(n,arguments,i)}else r=n.apply(this,arguments);return p(this,r)}}function p(e,t){if(t&&("object"===l(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return h(e)}function h(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function m(e){return m=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},m(e)}function y(){y=function(){return e};var 
e={elementsDefinitionOrder:[["method"],["field"]],initializeInstanceElements:function(e,t){["method","field"].forEach((function(r){t.forEach((function(t){t.kind===r&&"own"===t.placement&&this.defineClassElement(e,t)}),this)}),this)},initializeClassElements:function(e,t){var r=e.prototype;["method","field"].forEach((function(n){t.forEach((function(t){var i=t.placement;if(t.kind===n&&("static"===i||"prototype"===i)){var o="static"===i?e:r;this.defineClassElement(o,t)}}),this)}),this)},defineClassElement:function(e,t){var r=t.descriptor;if("field"===t.kind){var n=t.initializer;r={enumerable:r.enumerable,writable:r.writable,configurable:r.configurable,value:void 0===n?void 0:n.call(e)}}Object.defineProperty(e,t.key,r)},decorateClass:function(e,t){var r=[],n=[],i={static:[],prototype:[],own:[]};if(e.forEach((function(e){this.addElementPlacement(e,i)}),this),e.forEach((function(e){if(!g(e))return r.push(e);var t=this.decorateElement(e,i);r.push(t.element),r.push.apply(r,t.extras),n.push.apply(n,t.finishers)}),this),!t)return{elements:r,finishers:n};var o=this.decorateConstructor(r,t);return n.push.apply(n,o.finishers),o.finishers=n,o},addElementPlacement:function(e,t,r){var n=t[e.placement];if(!r&&-1!==n.indexOf(e.key))throw new TypeError("Duplicated element ("+e.key+")");n.push(e.key)},decorateElement:function(e,t){for(var r=[],n=[],i=e.decorators,o=i.length-1;o>=0;o--){var a=t[e.placement];a.splice(a.indexOf(e.key),1);var s=this.fromElementDescriptor(e),c=this.toElementFinisherExtras((0,i[o])(s)||s);e=c.element,this.addElementPlacement(e,t),c.finisher&&n.push(c.finisher);var l=c.extras;if(l){for(var f=0;f<l.length;f++)this.addElementPlacement(l[f],t);r.push.apply(r,l)}}return{element:e,finishers:n,extras:r}},decorateConstructor:function(e,t){for(var r=[],n=t.length-1;n>=0;n--){var i=this.fromClassDescriptor(e),o=this.toClassDescriptor((0,t[n])(i)||i);if(void 0!==o.finisher&&r.push(o.finisher),void 0!==o.elements){e=o.elements;for(var a=0;a<e.length-1;a++)for(var s=a+1;s<e.length;s++)if(e[a].key===e[s].key&&e[a].placement===e[s].placement)throw new TypeError("Duplicated element ("+e[a].key+")")}}return{elements:e,finishers:r}},fromElementDescriptor:function(e){var t={kind:e.kind,key:e.key,placement:e.placement,descriptor:e.descriptor};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),"field"===e.kind&&(t.initializer=e.initializer),t},toElementDescriptors:function(e){var t;if(void 0!==e)return(t=e,function(e){if(Array.isArray(e))return e}(t)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(t)||function(e,t){if(e){if("string"==typeof e)return _(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);return"Object"===r&&e.constructor&&(r=e.constructor.name),"Map"===r||"Set"===r?Array.from(e):"Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r)?_(e,t):void 0}}(t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()).map((function(e){var t=this.toElementDescriptor(e);return this.disallowProperty(e,"finisher","An element descriptor"),this.disallowProperty(e,"extras","An element descriptor"),t}),this)},toElementDescriptor:function(e){var t=String(e.kind);if("method"!==t&&"field"!==t)throw new TypeError('An element descriptor\'s .kind property must be either "method" or "field", but a decorator created an element descriptor with .kind "'+t+'"');var 
r=E(e.key),n=String(e.placement);if("static"!==n&&"prototype"!==n&&"own"!==n)throw new TypeError('An element descriptor\'s .placement property must be one of "static", "prototype" or "own", but a decorator created an element descriptor with .placement "'+n+'"');var i=e.descriptor;this.disallowProperty(e,"elements","An element descriptor");var o={kind:t,key:r,placement:n,descriptor:Object.assign({},i)};return"field"!==t?this.disallowProperty(e,"initializer","A method descriptor"):(this.disallowProperty(i,"get","The property descriptor of a field descriptor"),this.disallowProperty(i,"set","The property descriptor of a field descriptor"),this.disallowProperty(i,"value","The property descriptor of a field descriptor"),o.initializer=e.initializer),o},toElementFinisherExtras:function(e){return{element:this.toElementDescriptor(e),finisher:k(e,"finisher"),extras:this.toElementDescriptors(e.extras)}},fromClassDescriptor:function(e){var t={kind:"class",elements:e.map(this.fromElementDescriptor,this)};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),t},toClassDescriptor:function(e){var t=String(e.kind);if("class"!==t)throw new TypeError('A class descriptor\'s .kind property must be "class", but a decorator created a class descriptor with .kind "'+t+'"');this.disallowProperty(e,"key","A class descriptor"),this.disallowProperty(e,"placement","A class descriptor"),this.disallowProperty(e,"descriptor","A class descriptor"),this.disallowProperty(e,"initializer","A class descriptor"),this.disallowProperty(e,"extras","A class descriptor");var r=k(e,"finisher");return{elements:this.toElementDescriptors(e.elements),finisher:r}},runClassFinishers:function(e,t){for(var r=0;r<t.length;r++){var n=(0,t[r])(e);if(void 0!==n){if("function"!=typeof n)throw new TypeError("Finishers must return a constructor.");e=n}}return e},disallowProperty:function(e,t,r){if(void 0!==e[t])throw new TypeError(r+" can't have a ."+t+" property.")}};return e}function v(e){var t,r=E(e.key);"method"===e.kind?t={value:e.value,writable:!0,configurable:!0,enumerable:!1}:"get"===e.kind?t={get:e.value,configurable:!0,enumerable:!1}:"set"===e.kind?t={set:e.value,configurable:!0,enumerable:!1}:"field"===e.kind&&(t={configurable:!0,writable:!0,enumerable:!0});var n={kind:"field"===e.kind?"field":"method",key:r,placement:e.static?"static":"field"===e.kind?"own":"prototype",descriptor:t};return e.decorators&&(n.decorators=e.decorators),"field"===e.kind&&(n.initializer=e.value),n}function b(e,t){void 0!==e.descriptor.get?t.descriptor.get=e.descriptor.get:t.descriptor.set=e.descriptor.set}function g(e){return e.decorators&&e.decorators.length}function w(e){return void 0!==e&&!(void 0===e.value&&void 0===e.writable)}function k(e,t){var r=e[t];if(void 0!==r&&"function"!=typeof r)throw new TypeError("Expected '"+t+"' to be a function");return r}function E(e){var t=function(e,t){if("object"!==l(e)||null===e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!==l(n))return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===l(t)?t:String(t)}function _(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r<t;r++)n[r]=e[r];return n}!function(e,t,r,n){var i=y();if(n)for(var o=0;o<n.length;o++)i=n[o](i);var a=t((function(e){i.initializeInstanceElements(e,s.elements)}),r),s=i.decorateClass(function(e){for(var 
t=[],r=function(e){return"method"===e.kind&&e.key===o.key&&e.placement===o.placement},n=0;n<e.length;n++){var i,o=e[n];if("method"===o.kind&&(i=t.find(r)))if(w(o.descriptor)||w(i.descriptor)){if(g(o)||g(i))throw new ReferenceError("Duplicated methods ("+o.key+") can't be decorated.");i.descriptor=o.descriptor}else{if(g(o)){if(g(i))throw new ReferenceError("Decorators can't be placed on different accessors with for the same property ("+o.key+").");i.decorators=o.decorators}b(o,i)}else t.push(o)}return t}(a.d.map(v)),e);i.initializeClassElements(a.F,s.elements),i.runClassFinishers(a.F,s.finishers)}([(0,s.Mo)("ha-formfield")],(function(e,t){var r=function(t){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&d(e,t)}(n,t);var r=u(n);function n(){var t;f(this,n);for(var i=arguments.length,o=new Array(i),a=0;a<i;a++)o[a]=arguments[a];return t=r.call.apply(r,[this].concat(o)),e(h(t)),t}return n}(t);return{F:r,d:[{kind:"method",key:"_labelClick",value:function(){var e=this.input;if(e)switch(e.focus(),e.tagName){case"HA-CHECKBOX":case"HA-RADIO":e.checked=!e.checked,(0,c.B)(e,"change");break;default:e.click()}}},{kind:"field",static:!0,key:"styles",value:function(){return[o.W,(0,a.iv)(n||(e=['\n :host(:not([alignEnd])) ::slotted(ha-switch) {\n margin-right: 10px;\n }\n :host([dir="rtl"]:not([alignEnd])) ::slotted(ha-switch) {\n margin-left: 10px;\n margin-right: auto;\n }\n '],t||(t=e.slice(0)),n=Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))))];var e,t}}]}}),i.a)},43709:function(e,t,r){var n,i=r(11581),o=r(4301),a=r(37500),s=r(33310),c=r(62359);function l(e){return l="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},l(e)}function f(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function d(e,t){return d=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},d(e,t)}function u(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var r,n=P(e);if(t){var i=P(this).constructor;r=Reflect.construct(n,arguments,i)}else r=n.apply(this,arguments);return p(this,r)}}function p(e,t){if(t&&("object"===l(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return h(e)}function h(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function m(){m=function(){return e};var e={elementsDefinitionOrder:[["method"],["field"]],initializeInstanceElements:function(e,t){["method","field"].forEach((function(r){t.forEach((function(t){t.kind===r&&"own"===t.placement&&this.defineClassElement(e,t)}),this)}),this)},initializeClassElements:function(e,t){var r=e.prototype;["method","field"].forEach((function(n){t.forEach((function(t){var i=t.placement;if(t.kind===n&&("static"===i||"prototype"===i)){var o="static"===i?e:r;this.defineClassElement(o,t)}}),this)}),this)},defineClassElement:function(e,t){var r=t.descriptor;if("field"===t.kind){var 
n=t.initializer;r={enumerable:r.enumerable,writable:r.writable,configurable:r.configurable,value:void 0===n?void 0:n.call(e)}}Object.defineProperty(e,t.key,r)},decorateClass:function(e,t){var r=[],n=[],i={static:[],prototype:[],own:[]};if(e.forEach((function(e){this.addElementPlacement(e,i)}),this),e.forEach((function(e){if(!b(e))return r.push(e);var t=this.decorateElement(e,i);r.push(t.element),r.push.apply(r,t.extras),n.push.apply(n,t.finishers)}),this),!t)return{elements:r,finishers:n};var o=this.decorateConstructor(r,t);return n.push.apply(n,o.finishers),o.finishers=n,o},addElementPlacement:function(e,t,r){var n=t[e.placement];if(!r&&-1!==n.indexOf(e.key))throw new TypeError("Duplicated element ("+e.key+")");n.push(e.key)},decorateElement:function(e,t){for(var r=[],n=[],i=e.decorators,o=i.length-1;o>=0;o--){var a=t[e.placement];a.splice(a.indexOf(e.key),1);var s=this.fromElementDescriptor(e),c=this.toElementFinisherExtras((0,i[o])(s)||s);e=c.element,this.addElementPlacement(e,t),c.finisher&&n.push(c.finisher);var l=c.extras;if(l){for(var f=0;f<l.length;f++)this.addElementPlacement(l[f],t);r.push.apply(r,l)}}return{element:e,finishers:n,extras:r}},decorateConstructor:function(e,t){for(var r=[],n=t.length-1;n>=0;n--){var i=this.fromClassDescriptor(e),o=this.toClassDescriptor((0,t[n])(i)||i);if(void 0!==o.finisher&&r.push(o.finisher),void 0!==o.elements){e=o.elements;for(var a=0;a<e.length-1;a++)for(var s=a+1;s<e.length;s++)if(e[a].key===e[s].key&&e[a].placement===e[s].placement)throw new TypeError("Duplicated element ("+e[a].key+")")}}return{elements:e,finishers:r}},fromElementDescriptor:function(e){var t={kind:e.kind,key:e.key,placement:e.placement,descriptor:e.descriptor};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),"field"===e.kind&&(t.initializer=e.initializer),t},toElementDescriptors:function(e){var t;if(void 0!==e)return(t=e,function(e){if(Array.isArray(e))return e}(t)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(t)||function(e,t){if(e){if("string"==typeof e)return E(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);return"Object"===r&&e.constructor&&(r=e.constructor.name),"Map"===r||"Set"===r?Array.from(e):"Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r)?E(e,t):void 0}}(t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()).map((function(e){var t=this.toElementDescriptor(e);return this.disallowProperty(e,"finisher","An element descriptor"),this.disallowProperty(e,"extras","An element descriptor"),t}),this)},toElementDescriptor:function(e){var t=String(e.kind);if("method"!==t&&"field"!==t)throw new TypeError('An element descriptor\'s .kind property must be either "method" or "field", but a decorator created an element descriptor with .kind "'+t+'"');var r=k(e.key),n=String(e.placement);if("static"!==n&&"prototype"!==n&&"own"!==n)throw new TypeError('An element descriptor\'s .placement property must be one of "static", "prototype" or "own", but a decorator created an element descriptor with .placement "'+n+'"');var i=e.descriptor;this.disallowProperty(e,"elements","An element descriptor");var o={kind:t,key:r,placement:n,descriptor:Object.assign({},i)};return"field"!==t?this.disallowProperty(e,"initializer","A method descriptor"):(this.disallowProperty(i,"get","The property descriptor of a field 
descriptor"),this.disallowProperty(i,"set","The property descriptor of a field descriptor"),this.disallowProperty(i,"value","The property descriptor of a field descriptor"),o.initializer=e.initializer),o},toElementFinisherExtras:function(e){return{element:this.toElementDescriptor(e),finisher:w(e,"finisher"),extras:this.toElementDescriptors(e.extras)}},fromClassDescriptor:function(e){var t={kind:"class",elements:e.map(this.fromElementDescriptor,this)};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),t},toClassDescriptor:function(e){var t=String(e.kind);if("class"!==t)throw new TypeError('A class descriptor\'s .kind property must be "class", but a decorator created a class descriptor with .kind "'+t+'"');this.disallowProperty(e,"key","A class descriptor"),this.disallowProperty(e,"placement","A class descriptor"),this.disallowProperty(e,"descriptor","A class descriptor"),this.disallowProperty(e,"initializer","A class descriptor"),this.disallowProperty(e,"extras","A class descriptor");var r=w(e,"finisher");return{elements:this.toElementDescriptors(e.elements),finisher:r}},runClassFinishers:function(e,t){for(var r=0;r<t.length;r++){var n=(0,t[r])(e);if(void 0!==n){if("function"!=typeof n)throw new TypeError("Finishers must return a constructor.");e=n}}return e},disallowProperty:function(e,t,r){if(void 0!==e[t])throw new TypeError(r+" can't have a ."+t+" property.")}};return e}function y(e){var t,r=k(e.key);"method"===e.kind?t={value:e.value,writable:!0,configurable:!0,enumerable:!1}:"get"===e.kind?t={get:e.value,configurable:!0,enumerable:!1}:"set"===e.kind?t={set:e.value,configurable:!0,enumerable:!1}:"field"===e.kind&&(t={configurable:!0,writable:!0,enumerable:!0});var n={kind:"field"===e.kind?"field":"method",key:r,placement:e.static?"static":"field"===e.kind?"own":"prototype",descriptor:t};return e.decorators&&(n.decorators=e.decorators),"field"===e.kind&&(n.initializer=e.value),n}function v(e,t){void 0!==e.descriptor.get?t.descriptor.get=e.descriptor.get:t.descriptor.set=e.descriptor.set}function b(e){return e.decorators&&e.decorators.length}function g(e){return void 0!==e&&!(void 0===e.value&&void 0===e.writable)}function w(e,t){var r=e[t];if(void 0!==r&&"function"!=typeof r)throw new TypeError("Expected '"+t+"' to be a function");return r}function k(e){var t=function(e,t){if("object"!==l(e)||null===e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!==l(n))return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===l(t)?t:String(t)}function E(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r<t;r++)n[r]=e[r];return n}function _(e,t,r){return _="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(e,t,r){var n=function(e,t){for(;!Object.prototype.hasOwnProperty.call(e,t)&&null!==(e=P(e)););return e}(e,t);if(n){var i=Object.getOwnPropertyDescriptor(n,t);return i.get?i.get.call(r):i.value}},_(e,t,r||e)}function P(e){return P=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},P(e)}!function(e,t,r,n){var i=m();if(n)for(var o=0;o<n.length;o++)i=n[o](i);var a=t((function(e){i.initializeInstanceElements(e,s.elements)}),r),s=i.decorateClass(function(e){for(var t=[],r=function(e){return"method"===e.kind&&e.key===o.key&&e.placement===o.placement},n=0;n<e.length;n++){var 
i,o=e[n];if("method"===o.kind&&(i=t.find(r)))if(g(o.descriptor)||g(i.descriptor)){if(b(o)||b(i))throw new ReferenceError("Duplicated methods ("+o.key+") can't be decorated.");i.descriptor=o.descriptor}else{if(b(o)){if(b(i))throw new ReferenceError("Decorators can't be placed on different accessors with for the same property ("+o.key+").");i.decorators=o.decorators}v(o,i)}else t.push(o)}return t}(a.d.map(y)),e);i.initializeClassElements(a.F,s.elements),i.runClassFinishers(a.F,s.finishers)}([(0,s.Mo)("ha-switch")],(function(e,t){var r=function(t){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&d(e,t)}(n,t);var r=u(n);function n(){var t;f(this,n);for(var i=arguments.length,o=new Array(i),a=0;a<i;a++)o[a]=arguments[a];return t=r.call.apply(r,[this].concat(o)),e(h(t)),t}return n}(t);return{F:r,d:[{kind:"field",decorators:[(0,s.Cb)({type:Boolean})],key:"haptic",value:function(){return!1}},{kind:"method",key:"firstUpdated",value:function(){var e=this;_(P(r.prototype),"firstUpdated",this).call(this),this.addEventListener("change",(function(){e.haptic&&(0,c.j)("light")}))}},{kind:"field",static:!0,key:"styles",value:function(){return[o.W,(0,a.iv)(n||(e=["\n :host {\n --mdc-theme-secondary: var(--switch-checked-color);\n }\n .mdc-switch.mdc-switch--checked .mdc-switch__thumb {\n background-color: var(--switch-checked-button-color);\n border-color: var(--switch-checked-button-color);\n }\n .mdc-switch.mdc-switch--checked .mdc-switch__track {\n background-color: var(--switch-checked-track-color);\n border-color: var(--switch-checked-track-color);\n }\n .mdc-switch:not(.mdc-switch--checked) .mdc-switch__thumb {\n background-color: var(--switch-unchecked-button-color);\n border-color: var(--switch-unchecked-button-color);\n }\n .mdc-switch:not(.mdc-switch--checked) .mdc-switch__track {\n background-color: var(--switch-unchecked-track-color);\n border-color: var(--switch-unchecked-track-color);\n }\n "],t||(t=e.slice(0)),n=Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))))];var e,t}}]}}),i.H)},23466:function(e,t,r){r.r(t);r(51187);var n,i,o,a,s,c,l,f=r(37500),d=r(33310),u=r(47181),p=r(87744),h=(r(34821),r(83927),r(43709),r(81582)),m=r(11654),y=r(26765);function v(e){return v="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},v(e)}function b(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function g(e,t,r,n,i,o,a){try{var s=e[o](a),c=s.value}catch(l){return void r(l)}s.done?t(c):Promise.resolve(c).then(n,i)}function w(e){return function(){var t=this,r=arguments;return new Promise((function(n,i){var o=e.apply(t,r);function a(e){g(o,n,i,a,s,"next",e)}function s(e){g(o,n,i,a,s,"throw",e)}a(void 0)}))}}function k(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function E(e,t){return E=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},E(e,t)}function _(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var r,n=x(e);if(t){var 
i=x(this).constructor;r=Reflect.construct(n,arguments,i)}else r=n.apply(this,arguments);return P(this,r)}}function P(e,t){if(t&&("object"===v(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return O(e)}function O(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function x(e){return x=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},x(e)}function j(){j=function(){return e};var e={elementsDefinitionOrder:[["method"],["field"]],initializeInstanceElements:function(e,t){["method","field"].forEach((function(r){t.forEach((function(t){t.kind===r&&"own"===t.placement&&this.defineClassElement(e,t)}),this)}),this)},initializeClassElements:function(e,t){var r=e.prototype;["method","field"].forEach((function(n){t.forEach((function(t){var i=t.placement;if(t.kind===n&&("static"===i||"prototype"===i)){var o="static"===i?e:r;this.defineClassElement(o,t)}}),this)}),this)},defineClassElement:function(e,t){var r=t.descriptor;if("field"===t.kind){var n=t.initializer;r={enumerable:r.enumerable,writable:r.writable,configurable:r.configurable,value:void 0===n?void 0:n.call(e)}}Object.defineProperty(e,t.key,r)},decorateClass:function(e,t){var r=[],n=[],i={static:[],prototype:[],own:[]};if(e.forEach((function(e){this.addElementPlacement(e,i)}),this),e.forEach((function(e){if(!A(e))return r.push(e);var t=this.decorateElement(e,i);r.push(t.element),r.push.apply(r,t.extras),n.push.apply(n,t.finishers)}),this),!t)return{elements:r,finishers:n};var o=this.decorateConstructor(r,t);return n.push.apply(n,o.finishers),o.finishers=n,o},addElementPlacement:function(e,t,r){var n=t[e.placement];if(!r&&-1!==n.indexOf(e.key))throw new TypeError("Duplicated element ("+e.key+")");n.push(e.key)},decorateElement:function(e,t){for(var r=[],n=[],i=e.decorators,o=i.length-1;o>=0;o--){var a=t[e.placement];a.splice(a.indexOf(e.key),1);var s=this.fromElementDescriptor(e),c=this.toElementFinisherExtras((0,i[o])(s)||s);e=c.element,this.addElementPlacement(e,t),c.finisher&&n.push(c.finisher);var l=c.extras;if(l){for(var f=0;f<l.length;f++)this.addElementPlacement(l[f],t);r.push.apply(r,l)}}return{element:e,finishers:n,extras:r}},decorateConstructor:function(e,t){for(var r=[],n=t.length-1;n>=0;n--){var i=this.fromClassDescriptor(e),o=this.toClassDescriptor((0,t[n])(i)||i);if(void 0!==o.finisher&&r.push(o.finisher),void 0!==o.elements){e=o.elements;for(var a=0;a<e.length-1;a++)for(var s=a+1;s<e.length;s++)if(e[a].key===e[s].key&&e[a].placement===e[s].placement)throw new TypeError("Duplicated element ("+e[a].key+")")}}return{elements:e,finishers:r}},fromElementDescriptor:function(e){var t={kind:e.kind,key:e.key,placement:e.placement,descriptor:e.descriptor};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),"field"===e.kind&&(t.initializer=e.initializer),t},toElementDescriptors:function(e){var t;if(void 0!==e)return(t=e,function(e){if(Array.isArray(e))return e}(t)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(t)||function(e,t){if(e){if("string"==typeof e)return R(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);return"Object"===r&&e.constructor&&(r=e.constructor.name),"Map"===r||"Set"===r?Array.from(e):"Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r)?R(e,t):void 0}}(t)||function(){throw new TypeError("Invalid attempt to 
destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()).map((function(e){var t=this.toElementDescriptor(e);return this.disallowProperty(e,"finisher","An element descriptor"),this.disallowProperty(e,"extras","An element descriptor"),t}),this)},toElementDescriptor:function(e){var t=String(e.kind);if("method"!==t&&"field"!==t)throw new TypeError('An element descriptor\'s .kind property must be either "method" or "field", but a decorator created an element descriptor with .kind "'+t+'"');var r=T(e.key),n=String(e.placement);if("static"!==n&&"prototype"!==n&&"own"!==n)throw new TypeError('An element descriptor\'s .placement property must be one of "static", "prototype" or "own", but a decorator created an element descriptor with .placement "'+n+'"');var i=e.descriptor;this.disallowProperty(e,"elements","An element descriptor");var o={kind:t,key:r,placement:n,descriptor:Object.assign({},i)};return"field"!==t?this.disallowProperty(e,"initializer","A method descriptor"):(this.disallowProperty(i,"get","The property descriptor of a field descriptor"),this.disallowProperty(i,"set","The property descriptor of a field descriptor"),this.disallowProperty(i,"value","The property descriptor of a field descriptor"),o.initializer=e.initializer),o},toElementFinisherExtras:function(e){return{element:this.toElementDescriptor(e),finisher:z(e,"finisher"),extras:this.toElementDescriptors(e.extras)}},fromClassDescriptor:function(e){var t={kind:"class",elements:e.map(this.fromElementDescriptor,this)};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),t},toClassDescriptor:function(e){var t=String(e.kind);if("class"!==t)throw new TypeError('A class descriptor\'s .kind property must be "class", but a decorator created a class descriptor with .kind "'+t+'"');this.disallowProperty(e,"key","A class descriptor"),this.disallowProperty(e,"placement","A class descriptor"),this.disallowProperty(e,"descriptor","A class descriptor"),this.disallowProperty(e,"initializer","A class descriptor"),this.disallowProperty(e,"extras","A class descriptor");var r=z(e,"finisher");return{elements:this.toElementDescriptors(e.elements),finisher:r}},runClassFinishers:function(e,t){for(var r=0;r<t.length;r++){var n=(0,t[r])(e);if(void 0!==n){if("function"!=typeof n)throw new TypeError("Finishers must return a constructor.");e=n}}return e},disallowProperty:function(e,t,r){if(void 0!==e[t])throw new TypeError(r+" can't have a ."+t+" property.")}};return e}function S(e){var t,r=T(e.key);"method"===e.kind?t={value:e.value,writable:!0,configurable:!0,enumerable:!1}:"get"===e.kind?t={get:e.value,configurable:!0,enumerable:!1}:"set"===e.kind?t={set:e.value,configurable:!0,enumerable:!1}:"field"===e.kind&&(t={configurable:!0,writable:!0,enumerable:!0});var n={kind:"field"===e.kind?"field":"method",key:r,placement:e.static?"static":"field"===e.kind?"own":"prototype",descriptor:t};return e.decorators&&(n.decorators=e.decorators),"field"===e.kind&&(n.initializer=e.value),n}function C(e,t){void 0!==e.descriptor.get?t.descriptor.get=e.descriptor.get:t.descriptor.set=e.descriptor.set}function A(e){return e.decorators&&e.decorators.length}function D(e){return void 0!==e&&!(void 0===e.value&&void 0===e.writable)}function z(e,t){var r=e[t];if(void 0!==r&&"function"!=typeof r)throw new TypeError("Expected '"+t+"' to be a function");return r}function T(e){var t=function(e,t){if("object"!==v(e)||null===e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var 
n=r.call(e,t||"default");if("object"!==v(n))return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===v(t)?t:String(t)}function R(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r<t;r++)n[r]=e[r];return n}!function(e,t,r,n){var i=j();if(n)for(var o=0;o<n.length;o++)i=n[o](i);var a=t((function(e){i.initializeInstanceElements(e,s.elements)}),r),s=i.decorateClass(function(e){for(var t=[],r=function(e){return"method"===e.kind&&e.key===o.key&&e.placement===o.placement},n=0;n<e.length;n++){var i,o=e[n];if("method"===o.kind&&(i=t.find(r)))if(D(o.descriptor)||D(i.descriptor)){if(A(o)||A(i))throw new ReferenceError("Duplicated methods ("+o.key+") can't be decorated.");i.descriptor=o.descriptor}else{if(A(o)){if(A(i))throw new ReferenceError("Decorators can't be placed on different accessors with for the same property ("+o.key+").");i.decorators=o.decorators}C(o,i)}else t.push(o)}return t}(a.d.map(S)),e);i.initializeClassElements(a.F,s.elements),i.runClassFinishers(a.F,s.finishers)}([(0,d.Mo)("dialog-config-entry-system-options")],(function(e,t){var r,v,g=function(t){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&E(e,t)}(n,t);var r=_(n);function n(){var t;k(this,n);for(var i=arguments.length,o=new Array(i),a=0;a<i;a++)o[a]=arguments[a];return t=r.call.apply(r,[this].concat(o)),e(O(t)),t}return n}(t);return{F:g,d:[{kind:"field",decorators:[(0,d.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[(0,d.SB)()],key:"_disableNewEntities",value:void 0},{kind:"field",decorators:[(0,d.SB)()],key:"_disablePolling",value:void 0},{kind:"field",decorators:[(0,d.SB)()],key:"_error",value:void 0},{kind:"field",decorators:[(0,d.SB)()],key:"_params",value:void 0},{kind:"field",decorators:[(0,d.SB)()],key:"_submitting",value:function(){return!1}},{kind:"method",key:"showDialog",value:(v=w(regeneratorRuntime.mark((function e(t){return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:this._params=t,this._error=void 0,this._disableNewEntities=t.entry.pref_disable_new_entities,this._disablePolling=t.entry.pref_disable_polling;case 4:case"end":return e.stop()}}),e,this)}))),function(e){return v.apply(this,arguments)})},{kind:"method",key:"closeDialog",value:function(){this._error="",this._params=void 0,(0,u.B)(this,"dialog-closed",{dialog:this.localName})}},{kind:"method",key:"render",value:function(){return this._params?(0,f.dy)(i||(i=b(["\n <ha-dialog\n open\n @closed=","\n .heading=","\n >\n ","\n <ha-formfield\n .label=","\n .dir=","\n >\n <ha-switch\n .checked=","\n @change=","\n .disabled=","\n dialogInitialFocus\n ></ha-switch>\n </ha-formfield>\n ",'\n <mwc-button\n slot="secondaryAction"\n @click=',"\n .disabled=","\n >\n ",'\n </mwc-button>\n <mwc-button\n slot="primaryAction"\n @click=',"\n .disabled=","\n >\n ","\n </mwc-button>\n </ha-dialog>\n "])),this.closeDialog,this.hass.localize("ui.dialogs.config_entry_system_options.title","integration",this.hass.localize("component.".concat(this._params.entry.domain,".title"))||this._params.entry.domain),this._error?(0,f.dy)(o||(o=b([' <div class="error">',"</div> "])),this._error):"",(0,f.dy)(a||(a=b(["<p>\n ",'\n </p>\n <p class="secondary">\n ',"\n 
</p>"])),this.hass.localize("ui.dialogs.config_entry_system_options.enable_new_entities_label"),this.hass.localize("ui.dialogs.config_entry_system_options.enable_new_entities_description","integration",this.hass.localize("component.".concat(this._params.entry.domain,".title"))||this._params.entry.domain)),(0,p.Zu)(this.hass),!this._disableNewEntities,this._disableNewEntitiesChanged,this._submitting,this._allowUpdatePolling()?(0,f.dy)(s||(s=b(["\n <ha-formfield\n .label=","\n .dir=","\n >\n <ha-switch\n .checked=","\n @change=","\n .disabled=","\n ></ha-switch>\n </ha-formfield>\n "])),(0,f.dy)(c||(c=b(["<p>\n ",'\n </p>\n <p class="secondary">\n ',"\n </p>"])),this.hass.localize("ui.dialogs.config_entry_system_options.enable_polling_label"),this.hass.localize("ui.dialogs.config_entry_system_options.enable_polling_description","integration",this.hass.localize("component.".concat(this._params.entry.domain,".title"))||this._params.entry.domain)),(0,p.Zu)(this.hass),!this._disablePolling,this._disablePollingChanged,this._submitting):"",this.closeDialog,this._submitting,this.hass.localize("ui.common.cancel"),this._updateEntry,this._submitting,this.hass.localize("ui.dialogs.config_entry_system_options.update")):(0,f.dy)(n||(n=b([""])))}},{kind:"method",key:"_allowUpdatePolling",value:function(){return this._params.manifest&&("local_polling"===this._params.manifest.iot_class||"cloud_polling"===this._params.manifest.iot_class)}},{kind:"method",key:"_disableNewEntitiesChanged",value:function(e){this._error=void 0,this._disableNewEntities=!e.target.checked}},{kind:"method",key:"_disablePollingChanged",value:function(e){this._error=void 0,this._disablePolling=!e.target.checked}},{kind:"method",key:"_updateEntry",value:(r=w(regeneratorRuntime.mark((function e(){var t,r;return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return this._submitting=!0,t={pref_disable_new_entities:this._disableNewEntities},this._allowUpdatePolling()&&(t.pref_disable_polling=this._disablePolling),e.prev=3,e.next=6,(0,h.SO)(this.hass,this._params.entry.entry_id,t);case 6:if(!(r=e.sent).require_restart){e.next=10;break}return e.next=10,(0,y.Ys)(this,{text:this.hass.localize("ui.dialogs.config_entry_system_options.restart_home_assistant")});case 10:this._params.entryUpdated(r.config_entry),this.closeDialog(),e.next=17;break;case 14:e.prev=14,e.t0=e.catch(3),this._error=e.t0.message||"Unknown error";case 17:return e.prev=17,this._submitting=!1,e.finish(17);case 20:case"end":return e.stop()}}),e,this,[[3,14,17,20]])}))),function(){return r.apply(this,arguments)})},{kind:"get",static:!0,key:"styles",value:function(){return[m.yu,(0,f.iv)(l||(l=b(["\n .error {\n color: var(--error-color);\n }\n "])))]}}]}}),f.oi)}}]);
|
PypiClean
|
/django-sequence-0.0.0.tar.gz/django-sequence-0.0.0/src/django_sequence/models.py
|
import django
from django.db import models
from django.db import IntegrityError
def get_default_stage_state():
""" get a default value for confirmation status; create new state if not available """
return StageState.objects.get_or_create(name='pending')[0].id
def get_default_stage_result():
""" get a default value for confirmation result; create new result if not available """
return StageResult.objects.get_or_create(name='unknown')[0].id
class ReferenceTable(models.Model):
""" abstract model for enum-based 'lookup' tables """
name = models.CharField(max_length=64, blank=True, null=True, help_text='human readable representation of value')
description = models.CharField(max_length=255, blank=True, null=True, help_text='description of this entry')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
ordering = ['name']
def __str__(self):
return self.name
class StageState(ReferenceTable):
"""
Values to reflect the operational activity of a stage.
default states:
- pending = activity on this stage is expected but has not yet started
- started = activity on this stage has started
- completed = activity on this stage has completed
- blocked = activity on this stage is blocked by a previous stage
"""
class StageResult(ReferenceTable):
"""
Values to reflect the outcome of a stage.
default results:
- success = stage completed successfully with no issues
- fail = stage completed with a well-defined failure condition
- error = stage encountered an error and could not complete
- unknown = stage has not completed, or completion can not be determined
"""
class Sequence(models.Model):
""" individual instance of a model_sequence entry """
is_complete = models.BooleanField(default=False, help_text='set to true when all stages have a state of completed')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def advance(self):
""" complete the current stage with a result of success; if not the last stage, start the next stage """
current_stage = self.get_current_stage()
if current_stage:
current_stage.succeed_stage()
try:
                next_stage = self.stage_set.get(order=current_stage.order + 1)
next_stage.start_stage()
            except Stage.DoesNotExist:
return None
def get_stages(self) -> models.query.QuerySet:
""" get all the stages of this model_sequence """
return self.stage_set.all()
def get_completion_percentage(self) -> float:
""" get the current completion, as a percentage, of this model_sequence """
        return 100 * self.stage_set.filter(state__name='completed').count() / self.stage_set.count()
def get_current_stage(self):
""" get the current stage of this model_sequence """
remaining_stages = self.get_remaining_stages()
if remaining_stages:
return remaining_stages.first()
def get_completed_stages(self) -> models.query.QuerySet:
""" get completed stages of this model_sequence """
return self.stage_set.filter(state__name='completed')
def get_remaining_stages(self) -> models.query.QuerySet:
""" get remaining (non-completed) stages of this model_sequence """
return self.stage_set.exclude(state__name='completed')
def add_stage(self, name: str, description: str = None, blocking: bool = None):
""" add a stage to this model_sequence
Parameters:
name - (str) name of this stage
description - (str) description of this stage
blocking - (bool) set this stage to blocking
"""
data = dict(sequence=self, name=name)
if description:
data['description'] = description
        if blocking is not None:
data['blocking'] = blocking
Stage.objects.create(**data)
def add_stages(self, stage_list: list):
""" add all the stages passed as a list of dictionaries such as:
[
{'name': 'my_stage', 'description': 'description of my stage', },
...
]
Parameters:
stage_list - (list) list of dictionaries
"""
for stage in stage_list:
stage['sequence'] = self
Stage.objects.create(**stage)
@property
def stages(self):
return self.get_stages()
class Stage(models.Model):
""" individual stage(s) for a model_sequence item; identifies where in the overall model_sequence a thing is;
includes a state and a result """
sequence = models.ForeignKey(Sequence, on_delete=models.CASCADE, help_text='model_sequence this stage belongs to')
name = models.CharField(max_length=64, unique=True, help_text='short reference for stage')
description = models.CharField(max_length=255, blank=True, null=True, help_text='detailed description of stage')
order = models.IntegerField(help_text='operational order of stage (where 1 is the first)')
state = models.ForeignKey(StageState, default=get_default_stage_state, on_delete=models.CASCADE,
help_text='current activity of model_sequence stage')
result = models.ForeignKey(StageResult, default=get_default_stage_result, on_delete=models.CASCADE,
help_text='outcome of model_sequence stage')
details = models.CharField(max_length=255, blank=True, null=True,
help_text='additional details, such as incomplete reason')
blocking = models.BooleanField(default=True,
help_text='if True, do not continue to next stage if a failure or error occurs')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['order']
def __str__(self):
return self.name
def get_duration(self):
""" """
return self.updated_at - self.created_at
def update_sequence(self):
""" when this stage is updated (state or result) update the parent LifeCycle accordingly """
pass
def start_stage(self):
""" set state of stage to 'started' """
self.state = StageState.objects.get_or_create(name='started')[0]
self.save()
def fail_stage(self):
""" set state of stage to 'started' and result to 'fail' """
self.state = StageState.objects.get_or_create(name='completed')[0]
self.result = StageResult.objects.get_or_create(name='fail')[0]
self.save()
def error_stage(self):
""" set result of stage to 'error' """
self.result = StageResult.objects.get_or_create(name='error')[0]
self.save()
def succeed_stage(self):
""" set state of stage to 'started' and result to 'success' """
self.state = StageState.objects.get_or_create(name='completed')[0]
self.result = StageResult.objects.get_or_create(name='success')[0]
self.save()
def save(self, *args, **kwargs):
if not self.pk and not self.order:
stage_count = self.sequence.stage_set.count()
self.order = stage_count + 1
else:
if self == self.sequence.stage_set.last():
if self.sequence.stage_set.filter(state__name='completed').count() == self.sequence.stage_set.count():
self.sequence.is_complete = True
self.sequence.save()
super(Stage, self).save(*args, **kwargs)
duration = property(get_duration)
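A minimal usage sketch of the models above, assuming the package's default import path (django_sequence.models) and a Django project in which the app is installed and migrated; the stage names and descriptions are illustrative only.

from django_sequence.models import Sequence

# Create a sequence and describe its stages in order.
seq = Sequence.objects.create()
seq.add_stages([
    {'name': 'collect', 'description': 'gather inputs'},
    {'name': 'validate', 'description': 'check inputs', 'blocking': True},
    {'name': 'publish', 'description': 'write results'},
])

# Work through the stages: each advance() completes the current stage with a
# result of 'success' and, if there is a following stage, starts it.
seq.get_current_stage().start_stage()
seq.advance()
print(seq.get_completion_percentage())      # percentage of stages completed so far
print([str(stage) for stage in seq.stages])  # remaining and completed stages, in order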
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/apimanagement/v20191201/policy.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['Policy']
class Policy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
format: Optional[pulumi.Input[Union[str, 'PolicyContentFormat']]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Policy Contract details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'PolicyContentFormat']] format: Format of the policyContent.
:param pulumi.Input[str] policy_id: The identifier of the Policy.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] value: Contents of the Policy as defined by the format.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if format is None:
format = 'xml'
__props__['format'] = format
if policy_id is None and not opts.urn:
raise TypeError("Missing required property 'policy_id'")
__props__['policy_id'] = policy_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__['value'] = value
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Policy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Policy, __self__).__init__(
'azure-nextgen:apimanagement/v20191201:Policy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Policy':
"""
Get an existing Policy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Policy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def format(self) -> pulumi.Output[Optional[str]]:
"""
Format of the policyContent.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Contents of the Policy as defined by the format.
"""
return pulumi.get(self, "value")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
PypiClean
|
/aws-cdk.aws-appsync-1.204.0.tar.gz/aws-cdk.aws-appsync-1.204.0/README.md
|
# AWS AppSync Construct Library
<!--BEGIN STABILITY BANNER-->---

> AWS CDK v1 has reached End-of-Support on 2023-06-01.
> This package is no longer being updated, and users should migrate to AWS CDK v2.
>
> For more information on how to migrate, see the [*Migrating to AWS CDK v2* guide](https://docs.aws.amazon.com/cdk/v2/guide/migrating-v2.html).
---
<!--END STABILITY BANNER-->
The `@aws-cdk/aws-appsync` package contains constructs for building flexible
APIs that use GraphQL.
```python
import aws_cdk.aws_appsync as appsync
```
## Example
### DynamoDB
Example of a GraphQL API with `AWS_IAM` [authorization](#authorization) resolving into a DynamoDb
backend data source.
GraphQL schema file `schema.graphql`:
```gql
type demo {
id: String!
version: String!
}
type Query {
getDemos: [ demo! ]
}
input DemoInput {
version: String!
}
type Mutation {
addDemo(input: DemoInput!): demo
}
```
CDK stack file `app-stack.ts`:
```python
api = appsync.GraphqlApi(self, "Api",
name="demo",
schema=appsync.Schema.from_asset(path.join(__dirname, "schema.graphql")),
authorization_config=appsync.AuthorizationConfig(
default_authorization=appsync.AuthorizationMode(
authorization_type=appsync.AuthorizationType.IAM
)
),
xray_enabled=True
)
demo_table = dynamodb.Table(self, "DemoTable",
partition_key=dynamodb.Attribute(
name="id",
type=dynamodb.AttributeType.STRING
)
)
demo_dS = api.add_dynamo_db_data_source("demoDataSource", demo_table)
# Resolver for the Query "getDemos" that scans the DynamoDb table and returns the entire list.
demo_dS.create_resolver(
type_name="Query",
field_name="getDemos",
request_mapping_template=appsync.MappingTemplate.dynamo_db_scan_table(),
response_mapping_template=appsync.MappingTemplate.dynamo_db_result_list()
)
# Resolver for the Mutation "addDemo" that puts the item into the DynamoDb table.
demo_dS.create_resolver(
type_name="Mutation",
field_name="addDemo",
request_mapping_template=appsync.MappingTemplate.dynamo_db_put_item(
appsync.PrimaryKey.partition("id").auto(),
appsync.Values.projecting("input")),
response_mapping_template=appsync.MappingTemplate.dynamo_db_result_item()
)
```
### Aurora Serverless
AppSync provides a data source for executing SQL commands against Amazon Aurora
Serverless clusters. You can use AppSync resolvers to execute SQL statements
against the Data API with GraphQL queries, mutations, and subscriptions.
```python
# Build a data source for AppSync to access the database.
# api: appsync.GraphqlApi
# Create username and password secret for DB Cluster
secret = rds.DatabaseSecret(self, "AuroraSecret",
username="clusteradmin"
)
# The VPC to place the cluster in
vpc = ec2.Vpc(self, "AuroraVpc")
# Create the serverless cluster, provide all values needed to customise the database.
cluster = rds.ServerlessCluster(self, "AuroraCluster",
engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
vpc=vpc,
credentials={"username": "clusteradmin"},
cluster_identifier="db-endpoint-test",
default_database_name="demos"
)
rds_dS = api.add_rds_data_source("rds", cluster, secret, "demos")
# Set up a resolver for an RDS query.
rds_dS.create_resolver(
type_name="Query",
field_name="getDemosRds",
request_mapping_template=appsync.MappingTemplate.from_string("""
{
"version": "2018-05-29",
"statements": [
"SELECT * FROM demos"
]
}
"""),
response_mapping_template=appsync.MappingTemplate.from_string("""
$utils.toJson($utils.rds.toJsonObject($ctx.result)[0])
""")
)
# Set up a resolver for an RDS mutation.
rds_dS.create_resolver(
type_name="Mutation",
field_name="addDemoRds",
request_mapping_template=appsync.MappingTemplate.from_string("""
{
"version": "2018-05-29",
"statements": [
"INSERT INTO demos VALUES (:id, :version)",
"SELECT * WHERE id = :id"
],
"variableMap": {
":id": $util.toJson($util.autoId()),
":version": $util.toJson($ctx.args.version)
}
}
"""),
response_mapping_template=appsync.MappingTemplate.from_string("""
$utils.toJson($utils.rds.toJsonObject($ctx.result)[1][0])
""")
)
```
### HTTP Endpoints
GraphQL schema file `schema.graphql`:
```gql
type job {
id: String!
version: String!
}
input DemoInput {
version: String!
}
type Mutation {
callStepFunction(input: DemoInput!): job
}
```
GraphQL request mapping template `request.vtl`:
```json
{
"version": "2018-05-29",
"method": "POST",
"resourcePath": "/",
"params": {
"headers": {
"content-type": "application/x-amz-json-1.0",
"x-amz-target":"AWSStepFunctions.StartExecution"
},
"body": {
"stateMachineArn": "<your step functions arn>",
"input": "{ \"id\": \"$context.arguments.id\" }"
}
}
}
```
GraphQL response mapping template `response.vtl`:
```json
{
"id": "${context.result.id}"
}
```
CDK stack file `app-stack.ts`:
```python
api = appsync.GraphqlApi(self, "api",
name="api",
schema=appsync.Schema.from_asset(path.join(__dirname, "schema.graphql"))
)
http_ds = api.add_http_data_source("ds", "https://states.amazonaws.com",
name="httpDsWithStepF",
description="from appsync to StepFunctions Workflow",
authorization_config=appsync.AwsIamConfig(
signing_region="us-east-1",
signing_service_name="states"
)
)
http_ds.create_resolver(
type_name="Mutation",
field_name="callStepFunction",
request_mapping_template=appsync.MappingTemplate.from_file("request.vtl"),
response_mapping_template=appsync.MappingTemplate.from_file("response.vtl")
)
```
### Amazon OpenSearch Service
AppSync has builtin support for Amazon OpenSearch Service (successor to Amazon
Elasticsearch Service) from domains that are provisioned through your AWS account. You can
use AppSync resolvers to perform GraphQL operations such as queries, mutations, and
subscriptions.
```python
import json
import aws_cdk.aws_opensearchservice as opensearch
# api: appsync.GraphqlApi
user = iam.User(self, "User")
domain = opensearch.Domain(self, "Domain",
version=opensearch.EngineVersion.OPENSEARCH_1_2,
removal_policy=RemovalPolicy.DESTROY,
fine_grained_access_control=opensearch.AdvancedSecurityOptions(master_user_arn=user.user_arn),
encryption_at_rest=opensearch.EncryptionAtRestOptions(enabled=True),
node_to_node_encryption=True,
enforce_https=True
)
ds = api.add_open_search_data_source("ds", domain)
ds.create_resolver(
type_name="Query",
field_name="getTests",
    request_mapping_template=appsync.MappingTemplate.from_string(json.dumps({
"version": "2017-02-28",
"operation": "GET",
"path": "/id/post/_search",
"params": {
"headers": {},
"query_string": {},
"body": {"from": 0, "size": 50}
}
})),
response_mapping_template=appsync.MappingTemplate.from_string("""[
#foreach($entry in $context.result.hits.hits)
#if( $velocityCount > 1 ) , #end
$utils.toJson($entry.get("_source"))
#end
]""")
)
```
## Custom Domain Names
For many use cases you may want to associate a custom domain name with your
GraphQL API. This can be done during the API creation.
```python
import aws_cdk.aws_certificatemanager as acm
import aws_cdk.aws_route53 as route53
# hosted zone and route53 features
# hosted_zone_id: str
zone_name = "example.com"
my_domain_name = "api.example.com"
certificate = acm.Certificate(self, "cert", domain_name=my_domain_name)
api = appsync.GraphqlApi(self, "api",
name="myApi",
domain_name=appsync.DomainOptions(
certificate=certificate,
domain_name=my_domain_name
)
)
# hosted zone for adding appsync domain
zone = route53.HostedZone.from_hosted_zone_attributes(self, "HostedZone",
hosted_zone_id=hosted_zone_id,
zone_name=zone_name
)
# create a cname to the appsync domain. will map to something like xxxx.cloudfront.net
route53.CnameRecord(self, "CnameApiRecord",
record_name="api",
zone=zone,
domain_name=my_domain_name
)
```
## Schema
Every GraphQL Api needs a schema to define the Api. CDK offers `appsync.Schema`,
which provides static convenience methods for various types of schema declaration: code-first
or schema-first.
### Code-First
When declaring your GraphQL Api, CDK defaults to a code-first approach if the
`schema` property is not configured.
```python
api = appsync.GraphqlApi(self, "api", name="myApi")
```
CDK will declare a `Schema` class that will give your Api access functions to
define your schema code-first: `addType`, `addToSchema`, etc.
You can also declare your `Schema` class outside of your CDK stack, to define
your schema externally.
```python
schema = appsync.Schema()
schema.add_type(appsync.ObjectType("demo",
definition={"id": appsync.GraphqlType.id()}
))
api = appsync.GraphqlApi(self, "api",
name="myApi",
schema=schema
)
```
See the [code-first schema](#Code-First-Schema) section for more details.
### Schema-First
You can define your GraphQL Schema from a file on disk. For convenience, use
the `appsync.Schema.fromAsset` to specify the file representing your schema.
```python
api = appsync.GraphqlApi(self, "api",
name="myApi",
    schema=appsync.Schema.from_asset(path.join(__dirname, "schema.graphql"))
)
```
## Imports
Any GraphQL Api that has been created outside the stack can be imported from
another stack into your CDK app. Utilizing the `fromXxx` function, you can
add data sources and resolvers through an `IGraphqlApi` interface.
```python
# api: appsync.GraphqlApi
# table: dynamodb.Table
imported_api = appsync.GraphqlApi.from_graphql_api_attributes(self, "IApi",
graphql_api_id=api.api_id,
graphql_api_arn=api.arn
)
imported_api.add_dynamo_db_data_source("TableDataSource", table)
```
If you don't specify `graphqlArn` in `fromXxxAttributes`, CDK will autogenerate
the expected `arn` for the imported api, given the `apiId`. For creating data
sources and resolvers, an `apiId` is sufficient.
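As a quick sketch of this behavior, the import below passes only the `apiId` and lets CDK derive the `arn`; the id string and construct name are placeholders.
```python
imported_by_id = appsync.GraphqlApi.from_graphql_api_attributes(self, "IApiById",
    graphql_api_id="myApiId"
)
imported_by_id.add_none_data_source("none")
```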
## Authorization
There are multiple authorization types available for GraphQL API to cater to different
access use cases. They are:
* API Keys (`AuthorizationType.API_KEY`)
* Amazon Cognito User Pools (`AuthorizationType.USER_POOL`)
* OpenID Connect (`AuthorizationType.OPENID_CONNECT`)
* AWS Identity and Access Management (`AuthorizationType.AWS_IAM`)
* AWS Lambda (`AuthorizationType.AWS_LAMBDA`)
These types can be used simultaneously in a single API, allowing different types of clients to
access data. When you specify an authorization type, you can also specify the corresponding
authorization mode to finish defining your authorization. For example, this is a GraphQL API
with AWS Lambda Authorization.
```python
import aws_cdk.aws_lambda as lambda_
# auth_function: lambda.Function
appsync.GraphqlApi(self, "api",
name="api",
schema=appsync.Schema.from_asset(path.join(__dirname, "appsync.test.graphql")),
authorization_config=appsync.AuthorizationConfig(
default_authorization=appsync.AuthorizationMode(
authorization_type=appsync.AuthorizationType.LAMBDA,
lambda_authorizer_config=appsync.LambdaAuthorizerConfig(
handler=auth_function
)
)
)
)
```
## Permissions
When using `AWS_IAM` as the authorization type for GraphQL API, an IAM Role
with correct permissions must be used for access to the API.
When configuring permissions, you can specify specific resources to only be
accessible by `IAM` authorization. For example, if you want to only allow mutability
for `IAM` authorized access you would configure the following.
In `schema.graphql`:
```gql
type Mutation {
updateExample(...): ...
@aws_iam
}
```
In `IAM`:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"appsync:GraphQL"
],
"Resource": [
"arn:aws:appsync:REGION:ACCOUNT_ID:apis/GRAPHQL_ID/types/Mutation/fields/updateExample"
]
}
]
}
```
See [documentation](https://docs.aws.amazon.com/appsync/latest/devguide/security.html#aws-iam-authorization) for more details.
To make this easier, CDK provides the `grant` API.
Use the `grant` function for more granular authorization.
```python
# api: appsync.GraphqlApi
role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com")
)
api.grant(role, appsync.IamResource.custom("types/Mutation/fields/updateExample"), "appsync:GraphQL")
```
### IamResource
In order to use the `grant` functions, you need to use the class `IamResource`.
* `IamResource.custom(...arns)` permits custom ARNs and requires an argument.
* `IamResource.ofType(type, ...fields)` permits ARNs for types and their fields.
* `IamResource.all()` permits ALL resources.
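As a short sketch (the role and field path are placeholders), the three helpers combine with `grant` as follows:
```python
# api: appsync.GraphqlApi
# role: iam.Role
# custom ARN suffix for a single field
api.grant(role, appsync.IamResource.custom("types/Query/fields/getDemos"), "appsync:GraphQL")
# every field of a given type
api.grant(role, appsync.IamResource.of_type("Query"), "appsync:GraphQL")
# every resource in the API
api.grant(role, appsync.IamResource.all(), "appsync:GraphQL")
```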
### Generic Permissions
Alternatively, you can use more generic `grant` functions to accomplish the same usage.
These include:
* grantMutation (use to grant access to Mutation fields)
* grantQuery (use to grant access to Query fields)
* grantSubscription (use to grant access to Subscription fields)
```python
# api: appsync.GraphqlApi
# role: iam.Role
# For generic types
api.grant_mutation(role, "updateExample")
# For custom types and granular design
api.grant(role, appsync.IamResource.of_type("Mutation", "updateExample"), "appsync:GraphQL")
```
## Pipeline Resolvers and AppSync Functions
AppSync Functions are local functions that perform certain operations onto a
backend data source. Developers can compose operations (Functions) and execute
them in sequence with Pipeline Resolvers.
```python
# api: appsync.GraphqlApi
appsync_function = appsync.AppsyncFunction(self, "function",
name="appsync_function",
api=api,
data_source=api.add_none_data_source("none"),
request_mapping_template=appsync.MappingTemplate.from_file("request.vtl"),
response_mapping_template=appsync.MappingTemplate.from_file("response.vtl")
)
```
AppSync Functions are used in tandem with pipeline resolvers to compose multiple
operations.
```python
# api: appsync.GraphqlApi
# appsync_function: appsync.AppsyncFunction
pipeline_resolver = appsync.Resolver(self, "pipeline",
api=api,
data_source=api.add_none_data_source("none"),
type_name="typeName",
field_name="fieldName",
request_mapping_template=appsync.MappingTemplate.from_file("beforeRequest.vtl"),
pipeline_config=[appsync_function],
response_mapping_template=appsync.MappingTemplate.from_file("afterResponse.vtl")
)
```
Learn more about Pipeline Resolvers and AppSync Functions [here](https://docs.aws.amazon.com/appsync/latest/devguide/pipeline-resolvers.html).
## Code-First Schema
CDK offers the ability to generate your schema in a code-first approach.
A code-first approach offers a developer workflow with:
* **modularity**: organizing schema type definitions into different files
* **reusability**: reducing boilerplate/repetitive code
* **consistency**: resolvers and schema definition will always be synced
The code-first approach allows for **dynamic** schema generation. You can generate your schema based on variables and templates to reduce code duplication.
### Code-First Example
To showcase the code-first approach, let's try to model the following schema segment.
```gql
interface Node {
id: String
}
type Query {
allFilms(after: String, first: Int, before: String, last: Int): FilmConnection
}
type FilmNode implements Node {
filmName: String
}
type FilmConnection {
edges: [FilmEdge]
films: [Film]
totalCount: Int
}
type FilmEdge {
node: Film
cursor: String
}
```
Above we see a schema that allows for generating paginated responses. For example,
we can query `allFilms(first: 100)`: since `FilmConnection` acts as an intermediary
holding the `FilmEdges`, we can write a resolver that returns the first 100 films.
In a separate file, we can declare our object types and related functions.
We will call this file `object-types.ts` and we will have created it in a way that
allows us to generate other `XxxConnection` and `XxxEdges` in the future.
```python
import aws_cdk.aws_appsync as appsync
pluralize = require("pluralize")
args = {
"after": appsync.GraphqlType.string(),
"first": appsync.GraphqlType.int(),
"before": appsync.GraphqlType.string(),
"last": appsync.GraphqlType.int()
}
Node = appsync.InterfaceType("Node",
definition={"id": appsync.GraphqlType.string()}
)
FilmNode = appsync.ObjectType("FilmNode",
interface_types=[Node],
definition={"film_name": appsync.GraphqlType.string()}
)
def generate_edge_and_connection(base):
edge = appsync.ObjectType(f"{base.name}Edge",
definition={"node": base.attribute(), "cursor": appsync.GraphqlType.string()}
)
connection = appsync.ObjectType(f"{base.name}Connection",
definition={
"edges": edge.attribute(is_list=True),
"pluralize(base.name)": base.attribute(is_list=True),
"total_count": appsync.GraphqlType.int()
}
)
return {"edge": edge, "connection": connection}
```
Finally, we will go to our `cdk-stack` and combine everything together
to generate our schema.
```python
# dummy_request: appsync.MappingTemplate
# dummy_response: appsync.MappingTemplate
api = appsync.GraphqlApi(self, "Api",
name="demo"
)
object_types = [Node, FilmNode]
film_connections = generate_edge_and_connection(FilmNode)
api.add_query("allFilms", appsync.ResolvableField(
return_type=film_connections.connection.attribute(),
args=args,
data_source=api.add_none_data_source("none"),
request_mapping_template=dummy_request,
response_mapping_template=dummy_response
))
api.add_type(Node)
api.add_type(FilmNode)
api.add_type(film_connections.edge)
api.add_type(film_connections.connection)
```
Notice how we can utilize the `generateEdgeAndConnection` function to generate
Object Types. In the future, if we wanted to create more Object Types, we can simply
create the base Object Type (i.e. Film) and from there we can generate its respective
`Connections` and `Edges`.
Check out a more in-depth example [here](https://github.com/BryanPan342/starwars-code-first).
## GraphQL Types
One of the benefits of GraphQL is its strongly typed nature. We define the
types within an object, query, mutation, interface, etc. as **GraphQL Types**.
GraphQL Types are the building blocks of types, whether they are scalar, objects,
interfaces, etc. GraphQL Types can be:
* [**Scalar Types**](https://docs.aws.amazon.com/appsync/latest/devguide/scalars.html): Id, Int, String, AWSDate, etc.
* [**Object Types**](#Object-Types): types that you generate (i.e. `demo` from the example above)
* [**Interface Types**](#Interface-Types): abstract types that define the base implementation of other
Intermediate Types
More concretely, GraphQL Types are simply the types appended to variables.
Referencing the object type `Demo` in the previous example, the GraphQL Type
is `String!` and is applied to both the fields `id` and `version`.
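For instance, scalar GraphQL Types are created through static helpers on `GraphqlType`, optionally with modifiers such as `is_required` and `is_list`; the snippet below is a small standalone sketch.
```python
required_id = appsync.GraphqlType.id(is_required=True)  # ID!
name = appsync.GraphqlType.string()                     # String
scores = appsync.GraphqlType.int(is_list=True)          # [Int]
```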
### Directives
`Directives` are attached to a field or type and affect the execution of queries,
mutations, and types. With AppSync, we use `Directives` to configure authorization.
CDK provides static functions to add directives to your Schema.
* `Directive.iam()` sets a type or field's authorization to be validated through `Iam`
* `Directive.apiKey()` sets a type or field's authorization to be validated through an `Api Key`
* `Directive.oidc()` sets a type or field's authorization to be validated through `OpenID Connect`
* `Directive.cognito(...groups: string[])` sets a type or field's authorization to be validated
through `Cognito User Pools`
* `groups` the name of the cognito groups to give access
To learn more about authorization and directives, read these docs [here](https://docs.aws.amazon.com/appsync/latest/devguide/security.html).
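As a brief sketch (the type name and Cognito group are placeholders), directives can be attached when declaring an Object Type:
```python
demo = appsync.ObjectType("Demo",
    definition={"id": appsync.GraphqlType.id(is_required=True)},
    directives=[appsync.Directive.iam(), appsync.Directive.cognito("admins")]
)
```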
### Field and Resolvable Fields
While `GraphqlType` is a base implementation for GraphQL fields, we have abstractions
on top of `GraphqlType` that provide finer grain support.
### Field
`Field` extends `GraphqlType` and allows you to define arguments. Since [**Interface Types**](#Interface-Types) are not resolvable, this class lets you define arguments,
but not resolvers.
For example, if we want to create the following type:
```gql
type Node {
test(argument: string): String
}
```
The CDK code required would be:
```python
field = appsync.Field(
return_type=appsync.GraphqlType.string(),
args={
"argument": appsync.GraphqlType.string()
}
)
type = appsync.InterfaceType("Node",
definition={"test": field}
)
```
### Resolvable Fields
`ResolvableField` extends `Field` and will allow you to define arguments and its resolvers.
[**Object Types**](#Object-Types) can have fields that resolve and perform operations on
your backend.
You can also create resolvable fields for object types.
```gql
type Info {
node(id: String): String
}
```
The CDK code required would be:
```python
# api: appsync.GraphqlApi
# dummy_request: appsync.MappingTemplate
# dummy_response: appsync.MappingTemplate
info = appsync.ObjectType("Info",
definition={
"node": appsync.ResolvableField(
return_type=appsync.GraphqlType.string(),
args={
"id": appsync.GraphqlType.string()
},
data_source=api.add_none_data_source("none"),
request_mapping_template=dummy_request,
response_mapping_template=dummy_response
)
}
)
```
To nest resolvers, we can also create top level query types that call upon
other types. Building off the previous example, if we want the following graphql
type definition:
```gql
type Query {
get(argument: string): Info
}
```
The CDK code required would be:
```python
# api: appsync.GraphqlApi
# dummy_request: appsync.MappingTemplate
# dummy_response: appsync.MappingTemplate
query = appsync.ObjectType("Query",
definition={
"get": appsync.ResolvableField(
return_type=appsync.GraphqlType.string(),
args={
"argument": appsync.GraphqlType.string()
},
data_source=api.add_none_data_source("none"),
request_mapping_template=dummy_request,
response_mapping_template=dummy_response
)
}
)
```
Learn more about fields and resolvers [here](https://docs.aws.amazon.com/appsync/latest/devguide/resolver-mapping-template-reference-overview.html).
### Intermediate Types
Intermediate Types are defined by Graphql Types and Fields. They have a set of defined
fields, where each field corresponds to another type in the system. Intermediate
Types will be the meat of your GraphQL Schema as they are the types defined by you.
Intermediate Types include:
* [**Interface Types**](#Interface-Types)
* [**Object Types**](#Object-Types)
* [**Enum Types**](#Enum-Types)
* [**Input Types**](#Input-Types)
* [**Union Types**](#Union-Types)
#### Interface Types
**Interface Types** are abstract types that define the implementation of other
intermediate types. They are useful for eliminating duplication and can be used
to generate Object Types with less work.
You can create Interface Types ***externally***.
```python
node = appsync.InterfaceType("Node",
definition={
"id": appsync.GraphqlType.string(is_required=True)
}
)
```
To learn more about **Interface Types**, read the docs [here](https://graphql.org/learn/schema/#interfaces).
#### Object Types
**Object Types** are types that you declare. For example, in the [code-first example](#code-first-example)
the `demo` variable is an **Object Type**. **Object Types** are defined by
GraphQL Types and are only usable when linked to a GraphQL Api.
You can create Object Types in two ways:
1. Object Types can be created ***externally***.
```python
api = appsync.GraphqlApi(self, "Api",
name="demo"
)
demo = appsync.ObjectType("Demo",
definition={
"id": appsync.GraphqlType.string(is_required=True),
"version": appsync.GraphqlType.string(is_required=True)
}
)
api.add_type(demo)
```
> This method allows for reusability and modularity, ideal for larger projects.
> For example, imagine moving all Object Type definition outside the stack.
`object-types.ts` - a file for object type definitions
```python
import aws_cdk.aws_appsync as appsync
demo = appsync.ObjectType("Demo",
definition={
"id": appsync.GraphqlType.string(is_required=True),
"version": appsync.GraphqlType.string(is_required=True)
}
)
```
`cdk-stack.ts` - a file containing our cdk stack
```python
# api: appsync.GraphqlApi
api.add_type(demo)
```
2. Object Types can be created ***externally*** from an Interface Type.
```python
node = appsync.InterfaceType("Node",
definition={
"id": appsync.GraphqlType.string(is_required=True)
}
)
demo = appsync.ObjectType("Demo",
interface_types=[node],
definition={
"version": appsync.GraphqlType.string(is_required=True)
}
)
```
> This method allows for reusability and modularity, ideal for reducing code duplication.
To learn more about **Object Types**, read the docs [here](https://graphql.org/learn/schema/#object-types-and-fields).
#### Enum Types
**Enum Types** are a special type of Intermediate Type. They restrict a particular
set of allowed values for other Intermediate Types.
```gql
enum Episode {
NEWHOPE
EMPIRE
JEDI
}
```
> This means that wherever we use the type Episode in our schema, we expect it to
> be exactly one of NEWHOPE, EMPIRE, or JEDI.
The above GraphQL Enumeration Type can be expressed in CDK as the following:
```python
# api: appsync.GraphqlApi
episode = appsync.EnumType("Episode",
definition=["NEWHOPE", "EMPIRE", "JEDI"
]
)
api.add_type(episode)
```
To learn more about **Enum Types**, read the docs [here](https://graphql.org/learn/schema/#enumeration-types).
#### Input Types
**Input Types** are special types of Intermediate Types. They give users an
easy way to pass complex objects for top level Mutation and Queries.
```gql
input Review {
stars: Int!
commentary: String
}
```
The above GraphQL Input Type can be expressed in CDK as the following:
```python
# api: appsync.GraphqlApi
review = appsync.InputType("Review",
definition={
"stars": appsync.GraphqlType.int(is_required=True),
"commentary": appsync.GraphqlType.string()
}
)
api.add_type(review)
```
To learn more about **Input Types**, read the docs [here](https://graphql.org/learn/schema/#input-types).
#### Union Types
**Union Types** are a special type of Intermediate Type. They are similar to
Interface Types, but they cannot specify any common fields between types.
**Note:** the fields of a union type need to be `Object Types`. In other words, you
can't create a union type out of interfaces, other unions, or inputs.
```gql
union Search = Human | Droid | Starship
```
The above GraphQL Union Type encompasses the Object Types of Human, Droid and Starship. It
can be expressed in CDK as the following:
```python
# api: appsync.GraphqlApi
string = appsync.GraphqlType.string()
human = appsync.ObjectType("Human", definition={"name": string})
droid = appsync.ObjectType("Droid", definition={"name": string})
starship = appsync.ObjectType("Starship", definition={"name": string})
search = appsync.UnionType("Search",
definition=[human, droid, starship]
)
api.add_type(search)
```
To learn more about **Union Types**, read the docs [here](https://graphql.org/learn/schema/#union-types).
### Query
Every schema requires a top level Query type. By default, the schema will look
for the `Object Type` named `Query`. The top level `Query` is the **only** exposed
type that users can access to perform `GET` operations on your Api.
To add fields for these queries, we can simply run the `addQuery` function to add
to the schema's `Query` type.
```python
# api: appsync.GraphqlApi
# film_connection: appsync.InterfaceType
# dummy_request: appsync.MappingTemplate
# dummy_response: appsync.MappingTemplate
string = appsync.GraphqlType.string()
int = appsync.GraphqlType.int()
api.add_query("allFilms", appsync.ResolvableField(
return_type=film_connection.attribute(),
args={"after": string, "first": int, "before": string, "last": int},
data_source=api.add_none_data_source("none"),
request_mapping_template=dummy_request,
response_mapping_template=dummy_response
))
```
To learn more about top level operations, check out the docs [here](https://docs.aws.amazon.com/appsync/latest/devguide/graphql-overview.html).
### Mutation
Every schema **can** have a top level Mutation type. By default, the schema will look
for the `ObjectType` named `Mutation`. The top level `Mutation` Type is the only exposed
type that users can access to perform `mutable` operations on your Api.
To add fields for these mutations, we can simply run the `addMutation` function to add
to the schema's `Mutation` type.
```python
# api: appsync.GraphqlApi
# film_node: appsync.ObjectType
# dummy_request: appsync.MappingTemplate
# dummy_response: appsync.MappingTemplate
string = appsync.GraphqlType.string()
int = appsync.GraphqlType.int()
api.add_mutation("addFilm", appsync.ResolvableField(
return_type=film_node.attribute(),
args={"name": string, "film_number": int},
data_source=api.add_none_data_source("none"),
request_mapping_template=dummy_request,
response_mapping_template=dummy_response
))
```
To learn more about top level operations, check out the docs [here](https://docs.aws.amazon.com/appsync/latest/devguide/graphql-overview.html).
### Subscription
Every schema **can** have a top level Subscription type. The top level `Subscription` Type
is the only exposed type that users can access to invoke a response to a mutation. `Subscriptions`
notify users when a specific mutation is called. This means you can make any data source
real-time by specifying a GraphQL Schema directive on a mutation.
**Note**: The AWS AppSync client SDK automatically handles subscription connection management.
To add fields for these subscriptions, we can simply run the `addSubscription` function to add
to the schema's `Subscription` type.
```python
# api: appsync.GraphqlApi
# film: appsync.InterfaceType
api.add_subscription("addedFilm", appsync.Field(
return_type=film.attribute(),
args={"id": appsync.GraphqlType.id(is_required=True)},
directives=[appsync.Directive.subscribe("addFilm")]
))
```
To learn more about top level operations, check out the docs [here](https://docs.aws.amazon.com/appsync/latest/devguide/real-time-data.html).
|
PypiClean
|
/maproxy-0.0.12.zip/maproxy-0.0.12/demos/all.py
|
import os
import sys
from maproxy.iomanager import IOManager
from maproxy.proxyserver import ProxyServer
import signal
g_IOManager=IOManager()
if __name__ == '__main__':
# Add standard signal handlers -
# call the "sto()" method when the user hits Ctrl-C
signal.signal(signal.SIGINT, lambda sig,frame: g_IOManager.stop())
signal.signal(signal.SIGTERM, lambda sig,frame: g_IOManager.stop())
bUseSSL=True
ssl_certs={ "certfile": os.path.join(os.path.dirname(sys.argv[0]), "certificate.pem"),
"keyfile": os.path.join(os.path.dirname(sys.argv[0]), "privatekey.pem") }
if not os.path.isfile(ssl_certs["certfile"]) or \
not os.path.isfile(ssl_certs["keyfile"]):
print("Warning: SSL-Proxy is disabled . certificate file(s) not found")
bUseSSL=False
# HTTP->HTTP
# On your computer, browse to "http://127.0.0.1:81/" and you'll get http://www.google.com
server = ProxyServer("www.google.com",80)
server.listen(81)
g_IOManager.add(server)
print("http://127.0.0.1:81 -> http://www.google.com")
# HTTP->HTTPS
# "server_ssl_options=True" simply means "connect to server with SSL"
server = ProxyServer("www.google.com",443, server_ssl_options=True)
server.listen(82)
g_IOManager.add(server)
print("http://127.0.0.1:82 -> https://www.google.com:443")
if bUseSSL:
# HTTPS->HTTP
# "client_ssl_options=ssl_certs" simply means "listen using SSL"
server = ProxyServer("www.google.com",80, client_ssl_options=ssl_certs)
server.listen(83)
g_IOManager.add(server)
print("https://127.0.0.1:83 -> http://www.google.com")
# HTTPS->HTTPS
# (Listens on SSL , connect to SSL)
server = ProxyServer("www.google.com",443, client_ssl_options=ssl_certs,server_ssl_options=True)
server.listen(84)
g_IOManager.add(server)
print("https://127.0.0.1:84 -> https://www.google.com:443 ")
# HTTP->HTTPS , use specific client-certificate
# "server_ssl_options=ssl_certs" means "connect to SSL-server using the provided client-certificates"
server = ProxyServer("www.google.com",443, server_ssl_options=ssl_certs)
server.listen(85)
g_IOManager.add(server)
print("http://127.0.0.1:85 -> https://www.google.com:443 (Connect with client-certificates)")
print("Starting...")
    # the next call to start() is blocking (thread=False)
# So we simply wait for Ctrl-C
g_IOManager.start(thread=False)
print("Stopping...")
g_IOManager.stop(gracefully=True,wait=False)
print("Stopped...")
print("Done")
|
PypiClean
|
/esi_client-0.1.0a1.tar.gz/esi_client-0.1.0a1/esi_client/docs/GetFleetsFleetIdMembers200Ok.md
|
# GetFleetsFleetIdMembers200Ok
200 ok object
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**character_id** | **int** | character_id integer |
**join_time** | **datetime** | join_time string |
**role** | **str** | Member’s role in fleet |
**role_name** | **str** | Localized role names |
**ship_type_id** | **int** | ship_type_id integer |
**solar_system_id** | **int** | Solar system the member is located in |
**squad_id** | **int** | ID of the squad the member is in. If not applicable, will be set to -1 |
**takes_fleet_warp** | **bool** | Whether the member takes fleet warps |
**wing_id** | **int** | ID of the wing the member is in. If not applicable, will be set to -1 |
**station_id** | **int** | Station in which the member is docked in, if applicable | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
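A minimal construction sketch follows; the import path, module layout, and field values are illustrative assumptions rather than part of the generated documentation.
```python
from datetime import datetime, timezone
# NOTE: the import path is an assumption based on a typical generated-client layout
from esi_client.model.get_fleets_fleet_id_members200_ok import GetFleetsFleetIdMembers200Ok
member = GetFleetsFleetIdMembers200Ok(
    character_id=90000001,                  # placeholder character
    join_time=datetime(2021, 1, 1, tzinfo=timezone.utc),
    role="squad_member",
    role_name="Squad Member",
    ship_type_id=670,                       # placeholder ship type
    solar_system_id=30000142,               # placeholder solar system
    squad_id=-1,                            # -1 when not applicable
    takes_fleet_warp=True,
    wing_id=-1,                             # -1 when not applicable
)
```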
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
PypiClean
|
/smartai-1.0.tar.gz/smartai-1.0/smartui/static/admin/simpleui-x/elementui/locale/lang/hu.js
|
'use strict';
exports.__esModule = true;
exports.default = {
el: {
colorpicker: {
confirm: 'OK',
clear: 'Törlés'
},
datepicker: {
now: 'Most',
today: 'Ma',
cancel: 'Mégse',
clear: 'Törlés',
confirm: 'OK',
selectDate: 'Dátum',
selectTime: 'Időpont',
startDate: 'Dátum-tól',
startTime: 'Időpont-tól',
endDate: 'Dátum-ig',
endTime: 'Időpont-ig',
prevYear: 'Előző év',
nextYear: 'Következő év',
prevMonth: 'Előző hónap',
nextMonth: 'Következő hónap',
year: '',
month1: 'Január',
month2: 'Február',
month3: 'Március',
month4: 'Április',
month5: 'Május',
month6: 'Június',
month7: 'Július',
month8: 'Augusztus',
month9: 'Szeptember',
month10: 'Október',
month11: 'November',
month12: 'December',
weeks: {
sun: 'Vas',
mon: 'Hét',
tue: 'Ked',
wed: 'Sze',
thu: 'Csü',
fri: 'Pén',
sat: 'Szo'
},
months: {
jan: 'Jan',
feb: 'Feb',
mar: 'Már',
apr: 'Ápr',
may: 'Máj',
jun: 'Jún',
jul: 'Júl',
aug: 'Aug',
sep: 'Szep',
oct: 'Okt',
nov: 'Nov',
dec: 'Dec'
}
},
select: {
loading: 'Betöltés',
noMatch: 'Nincs találat',
noData: 'Nincs adat',
placeholder: 'Válassz'
},
cascader: {
noMatch: 'Nincs találat',
loading: 'Betöltés',
placeholder: 'Válassz',
noData: 'Nincs adat'
},
pagination: {
goto: 'Ugrás',
pagesize: '/oldal',
total: 'Össz {total}',
pageClassifier: ''
},
messagebox: {
title: 'Üzenet',
confirm: 'OK',
cancel: 'Mégse',
error: 'Hibás adat'
},
upload: {
deleteTip: 'kattints a törléshez',
delete: 'Törlés',
preview: 'Előnézet',
continue: 'Tovább'
},
table: {
emptyText: 'Nincs adat',
confirmFilter: 'Megerősít',
      resetFilter: 'Alaphelyzet',
clearFilter: 'Mind',
sumText: 'Összeg'
},
tree: {
emptyText: 'Nincs adat'
},
transfer: {
noMatch: 'Nincs találat',
noData: 'Nincs adat',
titles: ['Lista 1', 'Lista 2'],
filterPlaceholder: 'Kulcsszó',
noCheckedFormat: '{total} elem',
hasCheckedFormat: '{checked}/{total} kiválasztva'
},
image: {
error: 'FAILED' // to be translated
},
pageHeader: {
title: 'Back' // to be translated
}
}
};
|
PypiClean
|
/p_cmd_runr-0.1.2-py3-none-any.whl/gp_cmd_runr/__main__.py
|
import re
import sys
import os
import argparse
from p_cmd_runr import p_cmd_runr as pcr
from p_cmd_runr import file_manip as fm
def main():
"""
Description:
Implements the functionality of the general purpose command runner (gp_cmd_runr.py) executable script/module.
If you create a tmp folder, the script will attempt to move the output files to that tmp folder.
"""
parser = argparse.ArgumentParser(prog="gp_cmd_runr.py", description="general purpose command runner", \
epilog="""*** NO RESPONSIBILITY OR LIABILITY DISCLAIMER ***
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO YOU OR ANY THIRD PARTIES FOR ANY SPECIAL,
PUNITIVE, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
OR ANY DAMAGES WHATSOEVER, INCLUDING, WITHOUT LIMITATION,
THOSE RESULTING FROM LOSS OF USE, LOST DATA OR PROFITS, OR ANY LIABILITY,
ARISING OUT OF OR IN CONNECTION WITH THE USE OF THIS SCRIPT.""")
group = parser.add_mutually_exclusive_group()
group.add_argument("-r", "--raw", dest="filemanip", const=None, action='store_const', help="no manipulation of output file(s)")
group.add_argument("-n", "--normal", dest="filemanip", const=fm.deflate_file, action='store_const', help="remove duplicate '\\n' between lines in output file(s). this normal appearance is the default behavior")
group.add_argument("-f", "--flatten", dest="filemanip", const=fm.flatten_file, action='store_const', help="only have output file(s) containing a single '\\n' between lines")
parser.add_argument("-d", "--dry_run", help="display loaded configuration, but do not execute", action="store_true")
parser.add_argument("-t", "--timeout", help="sets command execution to non-blocking. default is blocking", action="store_true")
parser.add_argument("-p", "--print_output", action="store_true", help="flag to print command output to the screen. default is not to print")
parser.add_argument("-c", "--config", nargs="+", help="one or more local configuration files. default is config.txt")
parser.set_defaults(filemanip=fm.deflate_file)
args = parser.parse_args()
cgol = []
if args.config:
if len(args.config) > 1:
print(f"\nusing configuration files {args.config}")
else:
print(f"\nusing configuration file {args.config}")
for c in args.config:
cgol.append(pcr.ConfigGrabber(c))
else:
cgol.append(pcr.ConfigGrabber())
if not args.dry_run:
for cgo in cgol:
nodes = pcr.get_all_nodes(cgo)
fp = pcr.boxjumper(cgo, len(cgo), print_output=args.print_output, blocking= not args.timeout)
if fp:
fp.close()
pcr.move_to_tmp(nodes, args.filemanip)
else:
print(f"blocking is {not args.timeout}")
print(f"print output is {args.print_output}")
if args.filemanip == fm.deflate_file:
print("normal output\n")
elif args.filemanip == fm.flatten_file:
print("flattened output\n")
elif args.filemanip == None:
print("raw output\n")
else:
print("unkown file manipulation option")
for cgo in cgol:
print(cgo.filename)
print(cgo)
if __name__ == "__main__":
main()
|
PypiClean
|
/mxnet_cu113-1.9.1-py3-none-manylinux2014_x86_64.whl/mxnet/numpy_extension/utils.py
|
import ctypes
from .. util import is_np_array, is_np_shape
from .. base import _LIB, check_call, string_types, c_str_array
from .. base import c_handle_array, c_str, mx_uint, NDArrayHandle, py_str
from ..numpy import ndarray
__all__ = ['save', 'load']
def save(file, arr):
"""Saves a list of `ndarray`s or a dict of `str`->`ndarray` to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
file : str
Filename to which the data is saved.
arr : `ndarray` or list of `ndarray`s or dict of `str` to `ndarray`
The data to be saved.
Notes
-----
This function can only be called within numpy semantics, i.e., `npx.is_np_shape()`
and `npx.is_np_array()` must both return true.
"""
if not (is_np_shape() and is_np_array()):
raise ValueError('Cannot save `mxnet.numpy.ndarray` in legacy mode. Please activate'
' numpy semantics by calling `npx.set_np()` in the global scope'
' before calling this function.')
if isinstance(arr, ndarray):
arr = [arr]
if isinstance(arr, dict):
str_keys = arr.keys()
nd_vals = arr.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, ndarray) for v in nd_vals):
raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(arr, list):
if any(not isinstance(v, ndarray) for v in arr):
raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
keys = None
handles = c_handle_array(arr)
else:
raise ValueError("data needs to either be a ndarray, dict of (str, ndarray) pairs "
"or a list of ndarrays.")
check_call(_LIB.MXNDArraySave(c_str(file),
mx_uint(len(handles)),
handles,
keys))
def load(file):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
file : str
The filename.
Returns
-------
result : list of ndarrays or dict of str -> ndarray
Data stored in the file.
Notes
-----
This function can only be called within numpy semantics, i.e., `npx.is_np_shape()`
and `npx.is_np_array()` must both return true.
"""
if not (is_np_shape() and is_np_array()):
raise ValueError('Cannot load `mxnet.numpy.ndarray` in legacy mode. Please activate'
' numpy semantics by calling `npx.set_np()` in the global scope'
' before calling this function.')
if not isinstance(file, string_types):
raise TypeError('file required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(file),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [ndarray(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), ndarray(NDArrayHandle(handles[i])))
for i in range(out_size.value))
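# Minimal usage sketch (illustrative only; numpy semantics must be active):
#   from mxnet import np, npx
#   npx.set_np()
#   npx.save('params.bin', {'w': np.ones((2, 3))})
#   loaded = npx.load('params.bin')   # {'w': ndarray of shape (2, 3)}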
|
PypiClean
|
/remark-1.7.9.tar.gz/remark-1.7.9/Remark/DocumentType_Registry.py
|
# Description: Document type registry
# Documentation: data_structures.txt
from Remark.FileSystem import fileExtension, unixDirectoryName
import six
_associationSet = dict()
_defaultDocumentType = None
_documentTypeSet = dict()
def setDefaultDocumentType(documentType):
'''
Sets the default document-type.
This document-type will be returned by documentType()
in case an associated document-type can not be found.
documentType (DocumentType or string):
The document-type object to use as a default
document-type.
See also:
documentType()
'''
if isinstance(documentType, six.string_types):
documentType = findDocumentType(documentType)
global _defaultDocumentType
_defaultDocumentType = documentType
def registerDocumentType(name, documentType):
_documentTypeSet[name] = documentType
def findDocumentType(name):
return _documentTypeSet.get(name)
def associateDocumentType(inputExtension, documentType):
'''
Associates the given filename-extension, or a set of filename-extensions,
to the given document-type object. The filename-extension-key is always
stored lower-case, so that we can be case-insensitive for it.
inputExtension (string or list-of-strings):
The file-extensions to associate to the given document-type.
documentType (DocumentType or string):
The document-type object to associate to the file-extensions.
'''
if isinstance(documentType, six.string_types):
documentType = findDocumentType(documentType)
global _associationSet
if isinstance(inputExtension, six.string_types):
_associationSet[inputExtension.lower()] = documentType
return
for extension in inputExtension:
associateDocumentType(extension, documentType)
def strictDocumentType(inputExtension):
'''
Returns the document-type object associated to a given
filename-extension.
The filename-extension comparison is case-insensitive.
inputExtension (string):
The file-extension for which to retrieve the document-type.
returns (DocumentType):
The associated document-type object, if such can be
found. Otherwise None.
'''
return _associationSet.get(inputExtension.lower())
def documentType(inputExtension):
'''
Returns the document-type object associated to a given
filename-extension.
The filename-extension comparison is case-insensitive.
inputExtension (string):
The file-extension for which to retrieve the document-type.
returns (DocumentType):
The associated document-type object, if such can be
found. Otherwise the default document-type object.
'''
return _associationSet.get(inputExtension.lower(), _defaultDocumentType)
def outputDocumentName(inputPath):
'''
Returns the name of the output-document filename given the
input-document filename.
The output-document filename is decided by the document-type
associated to the file-extension of the input-document filename.
name (string):
The path to the input-document.
returns (string):
The path to the output-document.
'''
inputExtension = fileExtension(inputPath)
type = documentType(inputExtension)
outputPath = type.outputName(inputPath)
return unixDirectoryName(outputPath)
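# Minimal usage sketch (assumes a DocumentType instance `markdownType` defined elsewhere):
#   registerDocumentType('Markdown', markdownType)
#   associateDocumentType(['.md', '.markdown'], 'Markdown')
#   setDefaultDocumentType('Markdown')
#   documentType('.TXT')        # unknown extension falls back to the default document-type
#   strictDocumentType('.MD')   # case-insensitive lookup returns markdownType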
|
PypiClean
|
/waymo_open_dataset_tf_2.11.0-1.5.0-py3-none-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl/waymo_open_dataset/protos/box_pb2.py
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from waymo_open_dataset.protos import vector_pb2 as waymo__open__dataset_dot_protos_dot_vector__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='waymo_open_dataset/protos/box.proto',
package='waymo.open_dataset',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n#waymo_open_dataset/protos/box.proto\x12\x12waymo.open_dataset\x1a&waymo_open_dataset/protos/vector.proto\"r\n\x05\x42ox2d\x12,\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x1c.waymo.open_dataset.Vector2d\x12*\n\x04size\x18\x02 \x01(\x0b\x32\x1c.waymo.open_dataset.Vector2d\x12\x0f\n\x07heading\x18\x03 \x01(\x01\"r\n\x05\x42ox3d\x12,\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x1c.waymo.open_dataset.Vector3d\x12*\n\x04size\x18\x02 \x01(\x0b\x32\x1c.waymo.open_dataset.Vector3d\x12\x0f\n\x07heading\x18\x03 \x01(\x01')
,
dependencies=[waymo__open__dataset_dot_protos_dot_vector__pb2.DESCRIPTOR,])
_BOX2D = _descriptor.Descriptor(
name='Box2d',
full_name='waymo.open_dataset.Box2d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='center', full_name='waymo.open_dataset.Box2d.center', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size', full_name='waymo.open_dataset.Box2d.size', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='heading', full_name='waymo.open_dataset.Box2d.heading', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=213,
)
_BOX3D = _descriptor.Descriptor(
name='Box3d',
full_name='waymo.open_dataset.Box3d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='center', full_name='waymo.open_dataset.Box3d.center', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size', full_name='waymo.open_dataset.Box3d.size', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='heading', full_name='waymo.open_dataset.Box3d.heading', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=215,
serialized_end=329,
)
_BOX2D.fields_by_name['center'].message_type = waymo__open__dataset_dot_protos_dot_vector__pb2._VECTOR2D
_BOX2D.fields_by_name['size'].message_type = waymo__open__dataset_dot_protos_dot_vector__pb2._VECTOR2D
_BOX3D.fields_by_name['center'].message_type = waymo__open__dataset_dot_protos_dot_vector__pb2._VECTOR3D
_BOX3D.fields_by_name['size'].message_type = waymo__open__dataset_dot_protos_dot_vector__pb2._VECTOR3D
DESCRIPTOR.message_types_by_name['Box2d'] = _BOX2D
DESCRIPTOR.message_types_by_name['Box3d'] = _BOX3D
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Box2d = _reflection.GeneratedProtocolMessageType('Box2d', (_message.Message,), {
'DESCRIPTOR' : _BOX2D,
'__module__' : 'waymo_open_dataset.protos.box_pb2'
# @@protoc_insertion_point(class_scope:waymo.open_dataset.Box2d)
})
_sym_db.RegisterMessage(Box2d)
Box3d = _reflection.GeneratedProtocolMessageType('Box3d', (_message.Message,), {
'DESCRIPTOR' : _BOX3D,
'__module__' : 'waymo_open_dataset.protos.box_pb2'
# @@protoc_insertion_point(class_scope:waymo.open_dataset.Box3d)
})
_sym_db.RegisterMessage(Box3d)
# @@protoc_insertion_point(module_scope)
|
PypiClean
|
/advanced-pid-0.0.8.tar.gz/advanced-pid-0.0.8/advanced_pid/pid.py
|
from warnings import warn
from math import exp
class PID:
"""An advanced PID controller with first-order filter on derivative term.
Parameters
----------
Kp : float
Proportional gain.
Ki: float
Integral gain.
Kd : float
Derivative gain.
Tf : float
Time constant of the first-order derivative filter.
"""
def __init__(self, Kp, Ki, Kd, Tf):
self.set_gains(Kp, Ki, Kd, Tf)
self.set_output_limits(None, None)
self.set_initial_value(None, None, None)
def __call__(self, t, e):
"""Call integrate method.
Parameters
----------
t : float
Current time.
e : float
Error signal.
Returns
-------
float
Control signal.
"""
return self.integrate(t, e)
def set_gains(self, Kp, Ki, Kd, Tf):
"""Set PID controller gains.
Parameters
----------
Kp : float
Proportional gain.
Ki: float
Integral gain.
Kd : float
Derivative gain.
Tf : float
Time constant of the first-order derivative filter.
"""
self.Kp, self.Ki, self.Kd, self.Tf = Kp, Ki, Kd, Tf
def get_gains(self):
"""Get PID controller gains.
Returns
-------
tuple
Gains of PID controller (Kp, Ki, Kd, Tf).
"""
return self.Kp, self.Ki, self.Kd, self.Tf
def set_output_limits(self, lower, upper):
"""Set PID controller output limits for anti-windup.
Parameters
----------
lower : float or None
Lower limit for anti-windup,
        upper : float or None
Upper limit for anti-windup.
"""
self.lower, self.upper = lower, upper
if lower is None:
self.lower = -float('inf')
if upper is None:
self.upper = +float('inf')
def get_output_limits(self):
"""Get PID controller output limits for anti-windup.
Returns
-------
tuple
Output limits (lower, upper).
"""
return self.lower, self.upper
def set_initial_value(self, t0, e0, i0):
"""Set PID controller states.
Parameters
----------
t0 : float or None
Initial time. None will reset time.
e0 : float or None
Initial error. None will reset error.
i0 : float or None
            Initial integral. None will reset integral.
"""
self.t0, self.e0, self.i0 = t0, e0, i0
def get_initial_value(self):
"""Get PID controller states.
Returns
-------
tuple
Initial states of PID controller (t0, e0, i0)
"""
return self.t0, self.e0, self.i0
def __set_none_value(self, t, e):
"""Set None states for first cycle."""
t0, e0, i0 = self.get_initial_value()
if t0 is None:
t0 = t
if e0 is None:
e0 = e
if i0 is None:
i0 = 0.0
self.set_initial_value(t0, e0, i0)
def __check_monotonic_timestamp(self, t0, t):
"""Check timestamp is monotonic."""
if t < t0:
msg = 'Current timestamp is smaller than initial timestamp.'
warn(msg, RuntimeWarning)
return False
return True
def integrate(self, t, e):
"""Calculates PID controller output.
Parameters
----------
t : float
Current time.
e : float
Error signal.
Returns
-------
float
Control signal.
"""
self.__set_none_value(t, e)
t0, e0, i0 = self.get_initial_value()
# Check monotonic timestamp
if not self.__check_monotonic_timestamp(t0, t):
t0 = t
# Calculate time step
dt = t - t0
# Calculate proportional term
p = self.Kp * e
# Calculate integral term
i = i0 + dt * self.Ki * e
i = min(max(i, self.lower), self.upper)
        # Calculate derivative term
d = 0.0
if self.Kd != 0.0 and self.Tf > 0.0:
Kn = 1.0 / self.Tf
x = -Kn * self.Kd * e0
x = exp(-Kn*dt) * x - Kn * (1.0 - exp(-Kn*dt)) * self.Kd * e
d = x + Kn * self.Kd * e
e = -(self.Tf/self.Kd) * x
# Set initial value for next cycle
self.set_initial_value(t, e, i)
return min(max(p+i+d, self.lower), self.upper)
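# Minimal usage sketch (gains, limits, and samples are illustrative only):
#   pid = PID(Kp=2.0, Ki=0.5, Kd=0.1, Tf=0.05)
#   pid.set_output_limits(-10.0, 10.0)
#   for t, measurement in [(0.0, 0.0), (0.1, 0.2), (0.2, 0.35)]:
#       error = 1.0 - measurement       # setpoint of 1.0
#       control = pid(t, error)         # equivalent to pid.integrate(t, error)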
|
PypiClean
|
/custom-awscli-1.27.51.tar.gz/custom-awscli-1.27.51/awscli/examples/workmail/delete-mailbox-permissions.rst
|
**To delete mailbox permissions**
The following ``delete-mailbox-permissions`` command deletes mailbox permissions that were previously granted to a user or group. The entity represents the user that owns the mailbox, and the grantee represents the user or group for whom to delete permissions. ::
aws workmail delete-mailbox-permissions \
--organization-id m-d281d0a2fd824be5b6cd3d3ce909fd27 \
--entity-id S-1-1-11-1122222222-2222233333-3333334444-4444 \
--grantee-id S-1-1-11-1111111111-2222222222-3333333333-3333
This command produces no output.
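To confirm the change, you could list the permissions that remain on the mailbox, reusing the organization and entity IDs from the example above::
    aws workmail list-mailbox-permissions \
        --organization-id m-d281d0a2fd824be5b6cd3d3ce909fd27 \
        --entity-id S-1-1-11-1122222222-2222233333-3333334444-4444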
|
PypiClean
|
/panopticon-single-cell-0.3.tar.gz/panopticon-single-cell-0.3/panopticon/commands/bookem.py
|
import click
import os
import numpy as np
import pandas as pd
from scipy import sparse
from panopticon.dna import segmentation_to_copy_ratio_dict
from panopticon.utilities import get_valid_gene_info
# import loompy after the main packages, because sometimes it breaks packages that are imported further:
import loompy
def scrna_wizard_main():
""" """
filepath = click.prompt("Location of .loom file", type=click.Path('wb'))
filename = click.prompt("Name of .loom file")
if not filename.endswith('.loom'):
filename += '.loom'
matrixpath = click.prompt("Data/counts matrix (sparse npz or dense txt)",
type=click.File('rb'))
if matrixpath.name.endswith('.npz'):
matrix = sparse.load_npz(matrixpath)
elif (matrixpath.name.endswith('.csv')) or (matrixpath.name.endswith(
'.tsv')) or (matrixpath.name.endswith('.txt')):
hasheader = click.prompt('Does that file have a header?',
type=click.Choice(['n', 'y']),
default='n')
if (matrixpath.name.endswith('.csv')):
sep = ','
else:
sep = '\t'
if hasheader == 'n':
matrix = pd.read_table(matrixpath, header=None, sep=sep)
elif hasheader == 'y':
matrix = pd.read_table(matrixpath, sep=sep)
        if (matrix.dtypes == object).sum() > 0:
            print("Number of str columns:", (matrix.dtypes == object).sum())
potentialgenes = matrix.iloc[:, (matrix.dtypes == object).values]
print("Potential gene list:")
print(potentialgenes.head())
savepotentialgenes = click.prompt(
'Potential gene list identified. Would you like to save?',
type=click.Choice(['n', 'y']))
            if savepotentialgenes == 'y':
potentialgenesfile = click.prompt('Gene list filename:',
default='genelist_' +
matrixpath.name)
np.savetxt(potentialgenesfile,
potentialgenes,
delimiter=',',
fmt='%s')
potentialcells = matrix.columns
savepotentialcells = click.prompt(
'Potential cell list identified. Would you like to save?',
type=click.Choice(['n', 'y']))
            if savepotentialcells == 'y':
potentialcellsfile = click.prompt('Cell list filename:',
default='celllist_' +
matrixpath.name)
print("assuming first column is genelist...")
potentialcells = pd.DataFrame(potentialcells[1::])
potentialcells.columns = ['cellname']
potentialcells.to_csv(potentialcellsfile)
# np.savetxt(potentialcellsfile, potentialcells, delimiter=',', fmt='%s')
matrix = matrix.iloc[:, (matrix.dtypes != object).values]
matrix = matrix.values
else:
hasheader = click.prompt('Does that file have a header?',
type=click.Choice(['n', 'y']),
default='n')
if hasheader == 'n':
matrix = pd.read_table(matrixpath, header=None)
elif hasheader == 'y':
matrix = pd.read_table(matrixpath)
matrix = matrix.iloc[:, (matrix.dtypes != object).values]
matrix = matrix.values
metadatapath = click.prompt(
"Cell metadata (pandas-loadable) (if no provided cell metadata, just use cell list)",
type=click.File('rb'))
if metadatapath.name.endswith('.csv'):
metadata = pd.read_table(metadatapath, sep=',')
else:
metadata = pd.read_table(metadatapath)
iscomplexity = click.prompt(
"Does this cell metadata file have a column corresponding to complexity?",
type=click.Choice(['n', 'y']),
default='n')
if iscomplexity == 'y':
complexity_col = click.prompt(
"Which of these columns corresponds to the cell complexity?",
type=click.Choice(metadata.columns))
if complexity_col != 'complexity':
metadata['complexity'] = metadata[complexity_col]
metadata.drop(complexity_col, inplace=True, axis=1)
ispatientid = click.prompt(
"Does this cell metadata file have a column corresponding to patient identity?",
type=click.Choice(['n', 'y']),
default='n')
if ispatientid == 'y':
patient_col = click.prompt(
"Which of these columns corresponds to the patient identity?",
type=click.Choice(metadata.columns))
if patient_col != 'patient_ID':
metadata['patient_ID'] = metadata[patient_col]
metadata.drop(patient_col, inplace=True, axis=1)
iscelltype = click.prompt(
"Does this cell metadata file have a column corresponding to cell type?",
type=click.Choice(['n', 'y']),
default='n')
if iscelltype == 'y':
cell_type_col = click.prompt(
"Which of these columns corresponds to the cell type?",
type=click.Choice(metadata.columns))
if cell_type_col != 'cell_type':
metadata['cell_type'] = metadata[cell_type_col]
metadata.drop(cell_type_col, inplace=True, axis=1)
genepath = click.prompt("Gene metadata (or simply genelist)",
type=click.File('rb'))
isheader = click.prompt("Does this file have a header?",
type=click.Choice(['n', 'y']),
default='n')
if isheader == 'n':
genes = pd.read_table(genepath, header=None)
genes.columns = ['gene']
else:
genes = pd.read_table(genepath)
gene_col = click.prompt(
"Which of these columns corresponds to the gene name?",
type=click.Choice(genes.columns))
if gene_col != 'gene':
genes['gene'] = genes[gene_col]
genes.drop(gene_col, inplace=True, axis=1)
loompy.create(filepath + '/' + filename, matrix, genes.to_dict("list"),
metadata.to_dict("list"))
print("Loom file creation complete.")
def cnv_wizard_main():
""" """
loomfile = click.prompt(
"Loom file that you would like to augment with cnv/segmentation data: "
)
while not (os.path.isfile(loomfile) and loomfile.endswith('.loom')):
loomfile = click.prompt(
"Not a loom file. Please select loom file that you would like to augment with cnv/segmentation data: "
)
segmentation = None
segmentationfile = click.prompt(
"Segmentation file that you would like to add to loom file: ")
while not (os.path.isfile(segmentationfile)):
segmentationfile = click.prompt(
"Not a valid file. Please select segmentation file that you would like to the loom file: "
)
segmentation = pd.read_table(segmentationfile)
with loompy.connect(loomfile, validate=False) as loom:
chromosome = click.prompt(
"Which column of this segmentation corresponds to the chromosome?",
type=click.Choice(segmentation.columns))
chromosome_start = click.prompt(
"Which column of this segmentation corresponds to the chromosome start?",
type=click.Choice(segmentation.columns))
chromosome_end = click.prompt(
"Which column of this segmentation corresponds to the chromosome end?",
type=click.Choice(segmentation.columns))
chromosome_tcr = click.prompt(
"Which column of this segmentation corresponds to the copy ratio (or log_2(copy ratio))?",
type=click.Choice(segmentation.columns))
log2 = click.prompt(
"Was that the log2(copy ratio) (i.e., is the copy ratio 2^(value given))?",
type=click.Choice(['n', 'y']),
default='n')
segmentation['chrom'] = segmentation[chromosome]
segmentation['chromStart'] = segmentation[chromosome_start]
segmentation['chromEnd'] = segmentation[chromosome_end]
segmentation['copyRatio'] = segmentation[chromosome_tcr]
if log2 == 'y':
segmentation['copyRatio'] = segmentation['copyRatio'].apply(
lambda x: 2**x)
gene_to_cnv = segmentation_to_copy_ratio_dict(loom.ra['gene'],
segmentation)
segmentation_name = click.prompt(
"How would you like to label this segmentation?")
loom.ra[segmentation_name] = [
gene_to_cnv[gene] if gene in gene_to_cnv.keys() else np.nan
for gene in loom.ra['gene']
]
print("CNV Segmentation addition complete.")
def gene_position_augmentation_main(loomfile):
"""
Parameters
----------
loomfile :
Returns
-------
"""
with loompy.connect(loomfile, validate=False) as loom:
gene_names, gene_contigs, gene_starts, gene_ends = get_valid_gene_info(
loom.ra['gene'])
gene_to_contig = {
gene: contig
for gene, contig in zip(gene_names, gene_contigs)
}
gene_to_start = {
gene: start
for gene, start in zip(gene_names, gene_starts)
}
gene_to_end = {gene: end for gene, end in zip(gene_names, gene_ends)}
loom.ra.chromosome = [
gene_to_contig[gene] if gene in gene_to_contig.keys() else np.nan
for gene in loom.ra['gene']
]
loom.ra.start = [
gene_to_start[gene] if gene in gene_to_start.keys() else np.nan
for gene in loom.ra['gene']
]
loom.ra.end = [
gene_to_end[gene] if gene in gene_to_end.keys() else np.nan
for gene in loom.ra['gene']
]
def gene_signature_wizard_main(loomfile=None, signaturefile=None):
"""
Parameters
----------
loomfile :
(Default value = None)
signaturefile :
(Default value = None)
Returns
-------
"""
print(loomfile)
if loomfile is None:
loomfile = click.prompt(
"Loom file that you would like to augment with a gene signature: ")
while not (os.path.isfile(loomfile) and loomfile.endswith('.loom')):
loomfile = click.prompt(
"Not a loom file. Please select loom file that you would like to augment with cnv/segmentation data: "
)
if signaturefile is None:
signaturefile = click.prompt(
"Gene list that you would like to add as a gene signature (headerless file, single column): "
)
signature = np.genfromtxt(signaturefile, dtype=str)
with loompy.connect(loomfile, validate=False) as loom:
proceed = 'y'
if len(np.intersect1d(signature, loom.ra['gene'])) < len(signature):
proceed = click.prompt(
"The following genes ({} in total) in the given signature\n{}\nare not in the loom file. Would you like to proceed with those that are ({} genes in total)?"
.format(len(np.setdiff1d(signature, loom.ra['gene'])),
", ".join(np.setdiff1d(signature, loom.ra['gene'])),
len(np.intersect1d(signature, loom.ra['gene']))),
type=click.Choice(['n', 'y']),
default='y')
if proceed == 'y':
signature_name = click.prompt(
"What would you like to name this signature?",
default=signaturefile.split('/')[-1].split('.')[0::-1][0])
loom.ra[signature_name] = np.isin(loom.ra['gene'], signature)
|
PypiClean
|
/qiime-1.9.1.tar.gz/qiime-1.9.1/doc/tutorials/source_tracking.rst
|
.. _source_tracking:
==================================================
Tracking the source of microbes with SourceTracker
==================================================
Introduction
------------
This tutorial illustrates how to use the `SourceTracker <http://sourceforge.net/projects/sourcetracker/>`_ 0.9.5 software with QIIME. SourceTracker is designed to predict the source of microbial communities in a set of input samples (i.e., the sink samples). See `Knights (2011) <http://www.nature.com/nmeth/journal/v8/n9/full/nmeth.1650.html>`_ for the original paper on SourceTracker.
This tutorial does not attempt to cover every possible usage of SourceTracker. Instead, it provides an example of how to use the basic framework in your own analyses.
Tutorial data
-------------
You can obtain the files used in this tutorial `here <ftp://ftp.microbio.me/qiime/tutorial_files/sourcetracker_tutorial_files.tgz>`_. These are derived from `Hewitt et al., 2013 <http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0054703>`_, where samples were collected from various surfaces in two different Neonatal Intensive Care Units (NICUs). The 16S rRNA was sequenced from these samples, and compared against pre-existing data sets using SourceTracker in order to predict the likely origin of the microbial contaminants on each NICU surface.
This tutorial begins with an OTU table. For information on various ways to generate OTU tables with QIIME, see :ref:`otu_picking`.
Test Usage
----------
Before running SourceTracker, check to see if it is installed and accessible to QIIME::
print_qiime_config.py -t
You should see the following line in the output if SourceTracker is properly installed::
sourcetracker is installed ... ok
For information on interacting with SourceTracker, run the following command to get SourceTracker's help text::
R --slave --vanilla --args -h < $SOURCETRACKER_PATH/sourcetracker_for_qiime.r
(The ``$SOURCETRACKER_PATH`` environment variable referenced here will be defined if SourceTracker is correctly installed.)
Filter OTUs present in less than 1% of the samples from the OTU table
---------------------------------------------------------------------
First we filter OTUs that are present in very few samples, as we consider these unlikely to provide useful source tracking information. We define *very few samples* here as less than 1% of the samples, which in our case is roughly 7 samples. This value should be determined on a per-study basis (so you shouldn't just use 7 on your own data). You can find the total number of samples in your OTU table by running the ``biom summarize-table`` command on it.
To filter the OTU table, run the following command::
filter_otus_from_otu_table.py -i otu_table.biom -o filtered_otu_table.biom -s 7
This command will create an output file named ``filtered_otu_table.biom``, which only contains OTUs that appear in at least 7 samples. To see how many OTUs were filtered in this process, you can run the ``biom summarize-table`` command and compare *Num observations* for ``otu_table.biom`` and ``filtered_otu_table.biom``. It's not uncommon to filter a large percentage (e.g., greater than 50%) of your OTUs using this process.
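For reference, such a summary can be generated for either table as follows (the output file name here is arbitrary)::
    biom summarize-table -i filtered_otu_table.biom -o filtered_otu_table_summary.txt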
Convert table from BIOM to tab-separated text format
----------------------------------------------------
SourceTracker does not work with the `BIOM format <http://www.biom-format.org>`_, so the OTU table needs to be converted to tab-separated text format. You can do that with the following command::
biom convert -i filtered_otu_table.biom -o filtered_otu_table.txt -b
This creates a file named ``filtered_otu_table.txt``.
Run SourceTracker
-----------------
To use SourceTracker, your mapping file must contain two columns titled ``SourceSink`` and ``Env``, which define whether each sample should be treated as a source or a sink, and describe the sample type, respectively. These columns can be added to your mapping file by opening it in Excel or as a Google Spreadsheet and adding the new columns.
In the tutorial data set the sources include ``Outdoor Air``, ``Human Skin``, and ``Human Mouth``. Each row that represents a sample taken from one of these should be labeled ``source`` in the ``SourceSink`` column. Likewise the sink samples (including ``NICU Incubator``, ``NICU BabyBedside`` here) should be labeled ``sink`` in the ``SourceSink`` column. Each row in the ``Env`` column should contain a description of the corresponding sample type, for example ``Outdoor Air``, ``Human Skin``, and ``NICU BabyBedside``. Any sample that should not be used in the SourceTracker analysis should contain ``NA`` in the ``SourceSink`` and ``Env`` columns. These steps have all been completed in the mapping file used in this tutorial (``map.txt``). You can review that file as you prepare a mapping file for your own analyses.
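For illustration, the relevant columns of such a mapping file might look like the excerpt below (the sample IDs are made up; see ``map.txt`` for the actual tutorial data)::
    #SampleID        SourceSink    Env
    skin.sample.1    source        Human Skin
    air.sample.1     source        Outdoor Air
    nicu.sample.1    sink          NICU BabyBedside
    excluded.sample  NA            NA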
You can run SourceTracker with the following command::
R --slave --vanilla --args -i filtered_otu_table.txt -m map.txt -o sourcetracker_out < $SOURCETRACKER_PATH/sourcetracker_for_qiime.r
This will take a few minutes to run. Once it's complete, you can open ``sourcetracker_out/sink_predictions_pie_NICU BabyBedside.pdf`` to view an example of the output. That file should look like this:
.. image:: ../images/sink_predictions_pie_NICU_BabyBedside.png
:align: center
These pie charts represent the likely origin of microbial communities from each sample taken from the ``BabyBedside``. Each of the colors in a pie chart represent one of the sources that were denoted in the mapping file. The ``sourcetracker_out/sink_predictions.txt`` file contains the raw data for these pie charts.
References
----------
Knights, Dan et al. "Bayesian community-wide culture-independent microbial source tracking." Nature Methods (2011) (`link <http://www.nature.com/nmeth/journal/v8/n9/full/nmeth.1650.html>`_)
Hewitt, Krissi M et al. "Bacterial Diversity in Two Neonatal Intensive Care Units (NICUs)." PLOS ONE (2013) (`link <http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0054703>`_)
|
PypiClean
|
/django-amazon-price-monitor-0.7.tar.gz/django-amazon-price-monitor-0.7/price_monitor/utils.py
|
import logging
from django.core.mail import send_mail as django_send_mail
from django.utils.translation import ugettext as _
from price_monitor import app_settings
logger = logging.getLogger('price_monitor.utils')
def get_offer_url(asin):
"""
Returns the offer url for an ASIN.
:param asin: the asin
:type asin: basestring
:return: the url to the offer
:rtype: basestring
"""
return app_settings.PRICE_MONITOR_OFFER_URL.format(**{
'domain': app_settings.PRICE_MONITOR_AMAZON_REGION_DOMAINS[app_settings.PRICE_MONITOR_AMAZON_PRODUCT_API_REGION],
'asin': asin,
'assoc_tag': app_settings.PRICE_MONITOR_AMAZON_PRODUCT_API_ASSOC_TAG,
})
def get_product_detail_url(asin):
"""
Returns the url to a product detail view.
As the frontend is AngularJS, we cannot use any Django reverse functionality.
:param asin: the asin to use
:return: the link
"""
return '{base_url:s}/#/products/{asin:s}'.format(
base_url=app_settings.PRICE_MONITOR_BASE_URL,
asin=asin,
)
def send_mail(product, subscription, price, additional_text=''):
"""
    Sends an email using the appropriate settings for formatting and so on.
:param product: the product
:type product: price_monitor.models.Product
:param subscription: the subscription
:type subscription: price_monitor.models.Subscription
:param price: the current price
:type price: price_monitor.models.Price
:param additional_text: additional text to include in mail
:type additional_text: str
"""
django_send_mail(
_(app_settings.PRICE_MONITOR_I18N_EMAIL_NOTIFICATION_SUBJECT) % {'product': product.title},
_(app_settings.PRICE_MONITOR_I18N_EMAIL_NOTIFICATION_BODY).format(
price_limit=subscription.price_limit,
currency=price.currency,
price=price.value,
price_date=price.date_seen.strftime('%b %d, %Y %H:%M %p %Z'),
product_title=product.get_title(),
url_product_amazon=product.offer_url,
url_product_detail=product.get_detail_url(),
additional_text=additional_text,
),
app_settings.PRICE_MONITOR_EMAIL_SENDER,
[subscription.email_notification.email],
fail_silently=False,
)
def chunk_list(the_list, chunk_size):
"""
Chunks a list.
:param the_list: list to chunk
:type the_list: list
:param chunk_size: number of elements to be contained in each created chunk list
:type chunk_size: int
:return: generator object with the chunked lists
:rtype: generator
"""
for i in range(0, len(the_list), chunk_size):
yield the_list[i:i + chunk_size]
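# Illustrative usage sketch (not part of the original module): chunk_list is a
# generator, so wrap it in list() to materialise the batches. The ASINs below
# are dummy values.
if __name__ == '__main__':
    asins = ['B000000001', 'B000000002', 'B000000003', 'B000000004', 'B000000005']
    # Splitting five items into batches of two yields batch sizes 2, 2 and 1
    print(list(chunk_list(asins, 2)))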
|
PypiClean
|
/cs18-api-client-999.0.2.4.tar.gz/cs18-api-client-999.0.2.4/gateways/common/cs18_api_classes.py
|
import abc
import dateutil.parser
class AccessLink:
def __init__(self, protocol: str, link: str):
self.link = link
self.protocol = protocol
class Commit:
def __init__(self, data: dict):
self.sha = data["sha"]
self.author = data["commit"]["author"]["name"]
self.date = dateutil.parser.parse(data["commit"]["author"]["date"])
self.comment = data["commit"]["message"]
def __str__(self):
return "{0}: [{1}] {2}".format(self.date, self.sha[:7], self.comment)
class ColonyAccount:
def __init__(
self, account: str, email: str, password: str, first_name: str, last_name: str
):
self.account = account
self.default_space = "Trial"
self.sample_space = "Sample"
self.email = email
self.password = password
self.first_name = first_name
self.last_name = last_name
class BlueprintRepositoryDetails:
def __init__(self, repository_url: str, access_token: str, repository_type: str, branch: str = None):
self.repository_url = repository_url
self.repository_type = repository_type
self.access_token = access_token
self.branch = branch
class BitbucketBlueprintRepositoryDetails:
def __init__(self, auth_code: str, redirect_url: str, blueprint_repository_details: BlueprintRepositoryDetails):
self.blueprint_repository_details = blueprint_repository_details
self.auth_code = auth_code
self.redirect_url = redirect_url
class AddAccountBlueprintRepositoryRequest(abc.ABC):
def __init__(self, name: str, repository_url: str, allow_sharing: bool,
open_access: bool):
self.name = name
self.repository_url = repository_url
self.open_access = open_access
self.allow_sharing = allow_sharing
class AddBlueprintUsingTokenRepositoryRequest(AddAccountBlueprintRepositoryRequest):
def __init__(self, name: str, repository_url: str, allow_sharing: bool,
open_access: bool, access_token: str, repository_type: str):
super().__init__(name, repository_url, allow_sharing, open_access)
self.repository_type = repository_type
self.access_token = access_token
class AddBlueprintGithubRepositoryRequest(AddAccountBlueprintRepositoryRequest):
def __init__(self, name: str, repository_url: str, allow_sharing: bool,
open_access: bool, code: str, state: str):
super().__init__(name, repository_url, allow_sharing, open_access)
self.code = code
self.state = state
class AddBlueprintBitbucketRepositoryRequest(AddAccountBlueprintRepositoryRequest):
def __init__(self, name: str, repository_url: str, allow_sharing: bool,
open_access: bool, code: str, redirection_url: str):
super().__init__(name, repository_url, allow_sharing, open_access)
self.code = code
self.redirection_url = redirection_url
|
PypiClean
|
/ironic-python-agent-builder-5.2.0.tar.gz/ironic-python-agent-builder-5.2.0/README.rst
|
===========================
Ironic Python Agent Builder
===========================
Tools and scripts to build a deployment, cleaning or inspection ramdisk
based on `Ironic Python Agent`_.
* Free software: Apache license
* Documentation: https://docs.openstack.org/ironic-python-agent-builder
* Source: https://opendev.org/openstack/ironic-python-agent-builder
* Bugs: https://storyboard.openstack.org/#!/project/948
* Release Notes: https://docs.openstack.org/releasenotes/ironic-python-agent-builder/
.. _Ironic Python Agent: https://docs.openstack.org/ironic-python-agent
|
PypiClean
|
/liwc_text_analysis-1.0.2-py3-none-any.whl/liwc/liwc.py
|
import collections
class Liwc():
"""
    Class for the Linguistic Inquiry and Word Count (LIWC) dictionary.
    The dictionary files are proprietary and can be obtained from liwc.net.
"""
def __init__(self, filepath):
"""
:param filepath: path to the LIWC .dic file.
"""
self.categories, self.lexicon = self._load_dict_file(filepath)
self._trie = self._build_char_trie(self.lexicon)
def search(self, word):
"""
        Search a word in the LIWC dictionary.
        :param word: the word to look up.
        :return: a list of the LIWC categories the word belongs to;
        an empty list if the word is not found in the dictionary.
"""
return self._search_trie(self._trie, word)
def parse(self, tokens):
"""
Parses a document and extracts raw counts of words that fall into the
various LIWC categories.
        :param tokens: a list of tokens, a tokenised document
:return: a counter with the linguistic categories found in the doc,
and the raw count of words that fall in each category.
"""
cat_counter = collections.Counter()
for token in tokens:
# Find in which categories this token falls, if any
cats = self.search(token)
for cat in cats:
cat_counter[cat] += 1
return cat_counter
def _load_dict_file(self, filepath):
liwc_file = open(filepath)
# Key, category dict
categories = {}
# Word, cat_name dict
lexicon = {}
# '%' signals a change in the .dic file.
# (0-1) Cats, ids
# (>1) Words, cat_ids
percent_sign_count = 0
for line in liwc_file:
stp = line.strip()
if stp:
parts = stp.split('\t')
if parts[0] == '%':
percent_sign_count += 1
else:
# If the percent sign counter equals 1, parse the LIWC
# categories
if percent_sign_count == 1:
categories[parts[0]] = parts[1]
# Else, parse lexicon
else:
lexicon[parts[0]] = [categories[cat_id]
for cat_id in parts[1:]]
return categories, lexicon
@staticmethod
def _build_char_trie(lexicon):
"""
Builds a char trie, to cater for wildcard ('*') matches.
"""
trie = {}
for pattern, cat_names in lexicon.items():
cursor = trie
for char in pattern:
if char == '*':
cursor['*'] = cat_names
break
if char not in cursor:
cursor[char] = {}
cursor = cursor[char]
# $ signifies end of token
cursor['$'] = cat_names
return trie
@staticmethod
def _search_trie(trie, token, i=0):
"""
Search the given char trie for paths that match the token.
"""
if '*' in trie:
return trie['*']
elif '$' in trie and i == len(token):
return trie['$']
elif i < len(token):
char = token[i]
if char in trie:
return Liwc._search_trie(trie[char], token, i + 1)
return []
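# Illustrative usage sketch (not part of the original module). The dictionary
# path below is a placeholder: LIWC .dic files are proprietary and must be
# obtained separately.
if __name__ == '__main__':
    liwc = Liwc('LIWC2007.dic')  # hypothetical path to a LIWC dictionary file
    tokens = 'this is a small example document'.split()
    # Raw counts of the LIWC categories found in the token list
    print(liwc.parse(tokens))
    # Categories for a single word (empty list if it is not in the lexicon)
    print(liwc.search('example'))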
|
PypiClean
|
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/standard/lang/de-ch.js
|
/*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['de-ch']={"editor":"WYSIWYG-Editor","editorPanel":"WYSIWYG-Editor-Leiste","common":{"editorHelp":"Drücken Sie ALT 0 für Hilfe","browseServer":"Server durchsuchen","url":"URL","protocol":"Protokoll","upload":"Hochladen","uploadSubmit":"Zum Server senden","image":"Bild","flash":"Flash","form":"Formular","checkbox":"Kontrollbox","radio":"Optionsfeld","textField":"Textfeld","textarea":"Textfeld","hiddenField":"Verstecktes Feld","button":"Schaltfläche","select":"Auswahlfeld","imageButton":"Bildschaltfläche","notSet":"<nicht festgelegt>","id":"Kennung","name":"Name","langDir":"Schreibrichtung","langDirLtr":"Links nach Rechts (LTR)","langDirRtl":"Rechts nach Links (RTL)","langCode":"Sprachcode","longDescr":"Langbeschreibungs-URL","cssClass":"Formatvorlagenklassen","advisoryTitle":"Titel Beschreibung","cssStyle":"Stil","ok":"OK","cancel":"Abbrechen","close":"Schliessen","preview":"Vorschau","resize":"Grösse ändern","generalTab":"Allgemein","advancedTab":"Erweitert","validateNumberFailed":"Dieser Wert ist keine Nummer.","confirmNewPage":"Alle nicht gespeicherten Änderungen gehen verlohren. Sind Sie sicher die neue Seite zu laden?","confirmCancel":"Einige Optionen wurden geändert. Wollen Sie den Dialog dennoch schliessen?","options":"Optionen","target":"Zielseite","targetNew":"Neues Fenster (_blank)","targetTop":"Oberstes Fenster (_top)","targetSelf":"Gleiches Fenster (_self)","targetParent":"Oberes Fenster (_parent)","langDirLTR":"Links nach Rechts (LNR)","langDirRTL":"Rechts nach Links (RNL)","styles":"Style","cssClasses":"Stylesheet Klasse","width":"Breite","height":"Höhe","align":"Ausrichtung","left":"Links","right":"Rechts","center":"Zentriert","justify":"Blocksatz","alignLeft":"Linksbündig","alignRight":"Rechtsbündig","alignCenter":"Align Center","alignTop":"Oben","alignMiddle":"Mitte","alignBottom":"Unten","alignNone":"Keine","invalidValue":"Ungültiger Wert.","invalidHeight":"Höhe muss eine Zahl sein.","invalidWidth":"Breite muss eine Zahl sein.","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"Wert spezifiziert für \"%1\" Feld muss ein positiver numerischer Wert sein mit oder ohne korrekte CSS Messeinheit (px, %, in, cm, mm, em, ex, pt oder pc).","invalidHtmlLength":"Wert spezifiziert für \"%1\" Feld muss ein positiver numerischer Wert sein mit oder ohne korrekte HTML Messeinheit (px oder %).","invalidInlineStyle":"Wert spezifiziert für inline Stilart muss enthalten ein oder mehr Tupels mit dem Format \"Name : Wert\" getrennt mit Semikolons.","cssLengthTooltip":"Gebe eine Zahl ein für ein Wert in pixels oder eine Zahl mit einer korrekten CSS Messeinheit (px, %, in, cm, mm, em, ex, pt oder pc).","unavailable":"%1<span class=\"cke_accessibility\">, nicht verfügbar</span>","keyboard":{"8":"Rücktaste","13":"Eingabe","16":"Umschalt","17":"Strg","18":"Alt","32":"Space","35":"Ende","36":"Pos1","46":"Entfernen","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Keyboard shortcut","optionDefault":"Default"},"about":{"copy":"Copyright © $1. 
Alle Rechte vorbehalten.","dlgTitle":"Über CKEditor 4","moreInfo":"Für Informationen über unsere Lizenzbestimmungen besuchen sie bitte unsere Webseite:"},"basicstyles":{"bold":"Fett","italic":"Kursiv","strike":"Durchgestrichen","subscript":"Tiefgestellt","superscript":"Hochgestellt","underline":"Unterstrichen"},"blockquote":{"toolbar":"Zitatblock"},"notification":{"closed":"Benachrichtigung geschlossen."},"toolbar":{"toolbarCollapse":"Werkzeugleiste einklappen","toolbarExpand":"Werkzeugleiste ausklappen","toolbarGroups":{"document":"Dokument","clipboard":"Zwischenablage/Rückgängig","editing":"Editieren","forms":"Formulare","basicstyles":"Grundstile","paragraph":"Absatz","links":"Links","insert":"Einfügen","styles":"Stile","colors":"Farben","tools":"Werkzeuge"},"toolbars":"Editor Werkzeugleisten"},"clipboard":{"copy":"Kopieren","copyError":"Die Sicherheitseinstellungen Ihres Browsers lassen es nicht zu, den Text automatisch kopieren. Bitte benutzen Sie die System-Zwischenablage über STRG-C (kopieren).","cut":"Ausschneiden","cutError":"Die Sicherheitseinstellungen Ihres Browsers lassen es nicht zu, den Text automatisch auszuschneiden. Bitte benutzen Sie die System-Zwischenablage über STRG-X (ausschneiden) und STRG-V (einfügen).","paste":"Einfügen","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Einfügebereich","pasteMsg":"Paste your content inside the area below and press OK."},"contextmenu":{"options":"Kontextmenüoptionen"},"elementspath":{"eleLabel":"Elementepfad","eleTitle":"%1 Element"},"filetools":{"loadError":"Während dem Lesen der Datei ist ein Fehler aufgetreten.","networkError":"Während dem Hochladen der Datei ist ein Netzwerkfehler aufgetreten.","httpError404":"Während dem Hochladen der Datei ist ein HTTP-Fehler aufgetreten (404: Datei nicht gefunden).","httpError403":"Während dem Hochladen der Datei ist ein HTTP-Fehler aufgetreten (403: Verboten).","httpError":"Während dem Hochladen der Datei ist ein HTTP-Fehler aufgetreten (Fehlerstatus: %1).","noUrlError":"Hochlade-URL ist nicht definiert.","responseError":"Falsche Antwort des Servers."},"format":{"label":"Format","panelTitle":"Absatzformat","tag_address":"Adresse","tag_div":"Normal (DIV)","tag_h1":"Überschrift 1","tag_h2":"Überschrift 2","tag_h3":"Überschrift 3","tag_h4":"Überschrift 4","tag_h5":"Überschrift 5","tag_h6":"Überschrift 6","tag_p":"Normal","tag_pre":"Formatiert"},"horizontalrule":{"toolbar":"Horizontale Linie einfügen"},"image":{"alt":"Alternativer Text","border":"Rahmen","btnUpload":"Zum Server senden","button2Img":"Möchten Sie die ausgewählte Bildschaltfläche in ein einfaches Bild umwandeln?","hSpace":"Horizontal-Abstand","img2Button":"Möchten Sie das ausgewählte Bild in eine Bildschaltfläche umwandeln?","infoTab":"Bildinfo","linkTab":"Link","lockRatio":"Grössenverhältnis beibehalten","menu":"Bildeigenschaften","resetSize":"Grösse zurücksetzen","title":"Bildeigenschaften","titleButton":"Bildschaltflächeneigenschaften","upload":"Hochladen","urlMissing":"Bildquellen-URL fehlt.","vSpace":"Vertikal-Abstand","validateBorder":"Rahmen muss eine ganze Zahl sein.","validateHSpace":"Horizontal-Abstand muss eine ganze Zahl sein.","validateVSpace":"Vertikal-Abstand muss eine ganze Zahl sein."},"indent":{"indent":"Einzug erhöhen","outdent":"Einzug verringern"},"fakeobjects":{"anchor":"Anker","flash":"Flash-Animation","hiddenfield":"Verstecktes Feld","iframe":"IFrame","unknown":"Unbekanntes 
Objekt"},"link":{"acccessKey":"Zugriffstaste","advanced":"Erweitert","advisoryContentType":"Inhaltstyp","advisoryTitle":"Titel Beschreibung","anchor":{"toolbar":"Anker","menu":"Anker bearbeiten","title":"Ankereigenschaften","name":"Ankername","errorName":"Bitte geben Sie den Namen des Ankers ein","remove":"Anker entfernen"},"anchorId":"Nach Elementkennung","anchorName":"Nach Ankername","charset":"Verknüpfter Ressourcenzeichensatz","cssClasses":"Formatvorlagenklasse","download":"Force Download","displayText":"Display Text","emailAddress":"E-Mail-Adresse","emailBody":"Nachrichtentext","emailSubject":"Betreffzeile","id":"Kennung","info":"Linkinfo","langCode":"Sprachcode","langDir":"Schreibrichtung","langDirLTR":"Links nach Rechts (LTR)","langDirRTL":"Rechts nach Links (RTL)","menu":"Link bearbeiten","name":"Name","noAnchors":"(Keine Anker im Dokument vorhanden)","noEmail":"Bitte geben Sie E-Mail-Adresse an","noUrl":"Bitte geben Sie die Link-URL an","noTel":"Please type the phone number","other":"<andere>","phoneNumber":"Phone number","popupDependent":"Abhängig (Netscape)","popupFeatures":"Pop-up Fenstereigenschaften","popupFullScreen":"Vollbild (IE)","popupLeft":"Linke Position","popupLocationBar":"Adressleiste","popupMenuBar":"Menüleiste","popupResizable":"Grösse änderbar","popupScrollBars":"Rollbalken","popupStatusBar":"Statusleiste","popupToolbar":"Werkzeugleiste","popupTop":"Obere Position","rel":"Beziehung","selectAnchor":"Anker auswählen","styles":"Style","tabIndex":"Tab-Index","target":"Zielseite","targetFrame":"<Frame>","targetFrameName":"Ziel-Fenster-Name","targetPopup":"<Pop-up Fenster>","targetPopupName":"Pop-up Fenster-Name","title":"Link","toAnchor":"Anker in dieser Seite","toEmail":"E-Mail","toUrl":"URL","toPhone":"Phone","toolbar":"Link einfügen/editieren","type":"Link-Typ","unlink":"Link entfernen","upload":"Hochladen"},"list":{"bulletedlist":"Liste","numberedlist":"Nummerierte Liste einfügen/entfernen"},"magicline":{"title":"Absatz hier einfügen"},"maximize":{"maximize":"Maximieren","minimize":"Minimieren"},"pastetext":{"button":"Als Klartext einfügen","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Als Klartext einfügen"},"pastefromword":{"confirmCleanup":"Der Text, den Sie einfügen möchten, scheint aus MS-Word kopiert zu sein. 
Möchten Sie ihn zuvor bereinigen lassen?","error":"Aufgrund eines internen Fehlers war es nicht möglich die eingefügten Daten zu bereinigen","title":"Aus Word einfügen","toolbar":"Aus Word einfügen"},"removeformat":{"toolbar":"Formatierung entfernen"},"sourcearea":{"toolbar":"Quellcode"},"specialchar":{"options":"Sonderzeichenoptionen","title":"Sonderzeichen auswählen","toolbar":"Sonderzeichen einfügen"},"scayt":{"btn_about":"About SCAYT","btn_dictionaries":"Dictionaries","btn_disable":"Disable SCAYT","btn_enable":"Enable SCAYT","btn_langs":"Languages","btn_options":"Options","text_title":"Spell Check As You Type"},"stylescombo":{"label":"Stil","panelTitle":"Formatierungsstile","panelTitle1":"Blockstile","panelTitle2":"Inline Stilart","panelTitle3":"Objektstile"},"table":{"border":"Rahmengrösse","caption":"Überschrift","cell":{"menu":"Zelle","insertBefore":"Zelle davor einfügen","insertAfter":"Zelle danach einfügen","deleteCell":"Zelle löschen","merge":"Zellen verbinden","mergeRight":"Nach rechts verbinden","mergeDown":"Nach unten verbinden","splitHorizontal":"Zelle horizontal teilen","splitVertical":"Zelle vertikal teilen","title":"Zelleneigenschaften","cellType":"Zellart","rowSpan":"Anzahl Zeilen verbinden","colSpan":"Anzahl Spalten verbinden","wordWrap":"Zeilenumbruch","hAlign":"Horizontale Ausrichtung","vAlign":"Vertikale Ausrichtung","alignBaseline":"Grundlinie","bgColor":"Hintergrundfarbe","borderColor":"Rahmenfarbe","data":"Daten","header":"Überschrift","yes":"Ja","no":"Nein","invalidWidth":"Zellenbreite muss eine Zahl sein.","invalidHeight":"Zellenhöhe muss eine Zahl sein.","invalidRowSpan":"\"Anzahl Zeilen verbinden\" muss eine Ganzzahl sein.","invalidColSpan":"\"Anzahl Spalten verbinden\" muss eine Ganzzahl sein.","chooseColor":"Wählen"},"cellPad":"Zellenabstand innen","cellSpace":"Zellenabstand aussen","column":{"menu":"Spalte","insertBefore":"Spalte links davor einfügen","insertAfter":"Spalte rechts danach einfügen","deleteColumn":"Spalte löschen"},"columns":"Spalte","deleteTable":"Tabelle löschen","headers":"Kopfzeile","headersBoth":"Beide","headersColumn":"Erste Spalte","headersNone":"Keine","headersRow":"Erste Zeile","heightUnit":"height unit","invalidBorder":"Die Rahmenbreite muss eine Zahl sein.","invalidCellPadding":"Der Zellenabstand innen muss eine positive Zahl sein.","invalidCellSpacing":"Der Zellenabstand aussen muss eine positive Zahl sein.","invalidCols":"Die Anzahl der Spalten muß grösser als 0 sein..","invalidHeight":"Die Tabellenbreite muss eine Zahl sein.","invalidRows":"Die Anzahl der Zeilen muß grösser als 0 sein.","invalidWidth":"Die Tabellenbreite muss eine Zahl sein.","menu":"Tabellen-Eigenschaften","row":{"menu":"Zeile","insertBefore":"Zeile oberhalb einfügen","insertAfter":"Zeile unterhalb einfügen","deleteRow":"Zeile entfernen"},"rows":"Zeile","summary":"Inhaltsübersicht","title":"Tabellen-Eigenschaften","toolbar":"Tabelle","widthPc":"%","widthPx":"Pixel","widthUnit":"Breite Einheit"},"undo":{"redo":"Wiederherstellen","undo":"Rückgängig"},"widget":{"move":"Zum Verschieben anwählen und ziehen","label":"%1 widget"},"uploadwidget":{"abort":"Hochladen durch den Benutzer abgebrochen.","doneOne":"Datei erfolgreich hochgeladen.","doneMany":"%1 Dateien erfolgreich hochgeladen.","uploadOne":"Datei wird hochgeladen ({percentage}%)...","uploadMany":"Dateien werden hochgeladen, {current} von {max} fertig ({percentage}%)..."},"wsc":{"btnIgnore":"Ignore","btnIgnoreAll":"Ignore All","btnReplace":"Replace","btnReplaceAll":"Replace 
All","btnUndo":"Undo","changeTo":"Change to","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"Spell checker not installed. Do you want to download it now?","manyChanges":"Spell check complete: %1 words changed","noChanges":"Spell check complete: No words changed","noMispell":"Spell check complete: No misspellings found","noSuggestions":"- No suggestions -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"Not in dictionary","oneChange":"Spell check complete: One word changed","progress":"Spell check in progress...","title":"Spell Checker","toolbar":"Check Spelling"}};
|
PypiClean
|
/dsr_shelx-241-py3-none-any.whl/dsr_shelx/networkx/generators/degree_seq.py
|
import heapq
import math
from itertools import chain, combinations, zip_longest
from operator import itemgetter
import networkx as nx
from networkx.utils import py_random_state, random_weighted_sample
__all__ = [
"configuration_model",
"directed_configuration_model",
"expected_degree_graph",
"havel_hakimi_graph",
"directed_havel_hakimi_graph",
"degree_sequence_tree",
"random_degree_sequence_graph",
]
chaini = chain.from_iterable
def _to_stublist(degree_sequence):
"""Returns a list of degree-repeated node numbers.
``degree_sequence`` is a list of nonnegative integers representing
the degrees of nodes in a graph.
This function returns a list of node numbers with multiplicities
according to the given degree sequence. For example, if the first
element of ``degree_sequence`` is ``3``, then the first node number,
``0``, will appear at the head of the returned list three times. The
node numbers are assumed to be the numbers zero through
``len(degree_sequence) - 1``.
Examples
--------
>>> degree_sequence = [1, 2, 3]
>>> _to_stublist(degree_sequence)
[0, 1, 1, 2, 2, 2]
If a zero appears in the sequence, that means the node exists but
has degree zero, so that number will be skipped in the returned
list::
>>> degree_sequence = [2, 0, 1]
>>> _to_stublist(degree_sequence)
[0, 0, 2]
"""
return list(chaini([n] * d for n, d in enumerate(degree_sequence)))
def _configuration_model(
deg_sequence, create_using, directed=False, in_deg_sequence=None, seed=None
):
"""Helper function for generating either undirected or directed
configuration model graphs.
``deg_sequence`` is a list of nonnegative integers representing the
degree of the node whose label is the index of the list element.
``create_using`` see :func:`~networkx.empty_graph`.
``directed`` and ``in_deg_sequence`` are required if you want the
returned graph to be generated using the directed configuration
model algorithm. If ``directed`` is ``False``, then ``deg_sequence``
is interpreted as the degree sequence of an undirected graph and
``in_deg_sequence`` is ignored. Otherwise, if ``directed`` is
``True``, then ``deg_sequence`` is interpreted as the out-degree
sequence and ``in_deg_sequence`` as the in-degree sequence of a
directed graph.
.. note::
``deg_sequence`` and ``in_deg_sequence`` need not be the same
length.
``seed`` is a random.Random or numpy.random.RandomState instance
This function returns a graph, directed if and only if ``directed``
is ``True``, generated according to the configuration model
algorithm. For more information on the algorithm, see the
:func:`configuration_model` or :func:`directed_configuration_model`
functions.
"""
n = len(deg_sequence)
G = nx.empty_graph(n, create_using)
# If empty, return the null graph immediately.
if n == 0:
return G
# Build a list of available degree-repeated nodes. For example,
# for degree sequence [3, 2, 1, 1, 1], the "stub list" is
# initially [0, 0, 0, 1, 1, 2, 3, 4], that is, node 0 has degree
# 3 and thus is repeated 3 times, etc.
#
# Also, shuffle the stub list in order to get a random sequence of
# node pairs.
if directed:
pairs = zip_longest(deg_sequence, in_deg_sequence, fillvalue=0)
# Unzip the list of pairs into a pair of lists.
out_deg, in_deg = zip(*pairs)
out_stublist = _to_stublist(out_deg)
in_stublist = _to_stublist(in_deg)
seed.shuffle(out_stublist)
seed.shuffle(in_stublist)
else:
stublist = _to_stublist(deg_sequence)
# Choose a random balanced bipartition of the stublist, which
# gives a random pairing of nodes. In this implementation, we
# shuffle the list and then split it in half.
n = len(stublist)
half = n // 2
seed.shuffle(stublist)
out_stublist, in_stublist = stublist[:half], stublist[half:]
G.add_edges_from(zip(out_stublist, in_stublist))
return G
@py_random_state(2)
def configuration_model(deg_sequence, create_using=None, seed=None):
"""Returns a random graph with the given degree sequence.
The configuration model generates a random pseudograph (graph with
parallel edges and self loops) by randomly assigning edges to
match the given degree sequence.
Parameters
----------
deg_sequence : list of nonnegative integers
Each list entry corresponds to the degree of a node.
create_using : NetworkX graph constructor, optional (default MultiGraph)
Graph type to create. If graph instance, then cleared before populated.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : MultiGraph
A graph with the specified degree sequence.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence.
Raises
------
NetworkXError
If the degree sequence does not have an even sum.
See Also
--------
is_graphical
Notes
-----
As described by Newman [1]_.
A non-graphical degree sequence (not realizable by some simple
graph) is allowed since this function returns graphs with self
loops and parallel edges. An exception is raised if the degree
sequence does not have an even sum.
This configuration model construction process can lead to
duplicate edges and loops. You can remove the self-loops and
parallel edges (see below) which will likely result in a graph
that doesn't have the exact degree sequence specified.
The density of self-loops and parallel edges tends to decrease as
the number of nodes increases. However, typically the number of
self-loops will approach a Poisson distribution with a nonzero mean,
and similarly for the number of parallel edges. Consider a node
with *k* stubs. The probability of being joined to another stub of
the same node is basically (*k* - *1*) / *N*, where *k* is the
degree and *N* is the number of nodes. So the probability of a
self-loop scales like *c* / *N* for some constant *c*. As *N* grows,
this means we expect *c* self-loops. Similarly for parallel edges.
References
----------
.. [1] M.E.J. Newman, "The structure and function of complex networks",
SIAM REVIEW 45-2, pp 167-256, 2003.
Examples
--------
You can create a degree sequence following a particular distribution
by using the one of the distribution functions in
:mod:`~networkx.utils.random_sequence` (or one of your own). For
example, to create an undirected multigraph on one hundred nodes
with degree sequence chosen from the power law distribution:
>>> sequence = nx.random_powerlaw_tree_sequence(100, tries=5000)
>>> G = nx.configuration_model(sequence)
>>> len(G)
100
>>> actual_degrees = [d for v, d in G.degree()]
>>> actual_degrees == sequence
True
The returned graph is a multigraph, which may have parallel
edges. To remove any parallel edges from the returned graph:
>>> G = nx.Graph(G)
Similarly, to remove self-loops:
>>> G.remove_edges_from(nx.selfloop_edges(G))
"""
if sum(deg_sequence) % 2 != 0:
msg = "Invalid degree sequence: sum of degrees must be even, not odd"
raise nx.NetworkXError(msg)
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
raise nx.NetworkXNotImplemented("not implemented for directed graphs")
G = _configuration_model(deg_sequence, G, seed=seed)
return G
@py_random_state(3)
def directed_configuration_model(
in_degree_sequence, out_degree_sequence, create_using=None, seed=None
):
"""Returns a directed_random graph with the given degree sequences.
The configuration model generates a random directed pseudograph
(graph with parallel edges and self loops) by randomly assigning
edges to match the given degree sequences.
Parameters
----------
in_degree_sequence : list of nonnegative integers
Each list entry corresponds to the in-degree of a node.
out_degree_sequence : list of nonnegative integers
Each list entry corresponds to the out-degree of a node.
create_using : NetworkX graph constructor, optional (default MultiDiGraph)
Graph type to create. If graph instance, then cleared before populated.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : MultiDiGraph
A graph with the specified degree sequences.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence.
Raises
------
NetworkXError
If the degree sequences do not have the same sum.
See Also
--------
configuration_model
Notes
-----
Algorithm as described by Newman [1]_.
A non-graphical degree sequence (not realizable by some simple
graph) is allowed since this function returns graphs with self
loops and parallel edges. An exception is raised if the degree
    sequences do not have the same sum.
This configuration model construction process can lead to
duplicate edges and loops. You can remove the self-loops and
parallel edges (see below) which will likely result in a graph
that doesn't have the exact degree sequence specified. This
"finite-size effect" decreases as the size of the graph increases.
References
----------
.. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
Random graphs with arbitrary degree distributions and their applications
Phys. Rev. E, 64, 026118 (2001)
Examples
--------
One can modify the in- and out-degree sequences from an existing
directed graph in order to create a new directed graph. For example,
here we modify the directed path graph:
>>> D = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
>>> din = list(d for n, d in D.in_degree())
>>> dout = list(d for n, d in D.out_degree())
>>> din.append(1)
>>> dout[0] = 2
>>> # We now expect an edge from node 0 to a new node, node 3.
... D = nx.directed_configuration_model(din, dout)
The returned graph is a directed multigraph, which may have parallel
edges. To remove any parallel edges from the returned graph:
>>> D = nx.DiGraph(D)
Similarly, to remove self-loops:
>>> D.remove_edges_from(nx.selfloop_edges(D))
"""
if sum(in_degree_sequence) != sum(out_degree_sequence):
msg = "Invalid degree sequences: sequences must have equal sums"
raise nx.NetworkXError(msg)
if create_using is None:
create_using = nx.MultiDiGraph
G = _configuration_model(
out_degree_sequence,
create_using,
directed=True,
in_deg_sequence=in_degree_sequence,
seed=seed,
)
name = "directed configuration_model {} nodes {} edges"
return G
@py_random_state(1)
def expected_degree_graph(w, seed=None, selfloops=True):
r"""Returns a random graph with given expected degrees.
Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$
of length $n$ this algorithm assigns an edge between node $u$ and
node $v$ with probability
.. math::
p_{uv} = \frac{w_u w_v}{\sum_k w_k} .
Parameters
----------
w : list
The list of expected degrees.
selfloops: bool (default=True)
Set to False to remove the possibility of self-loop edges.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
Graph
Examples
--------
>>> z = [10 for i in range(100)]
>>> G = nx.expected_degree_graph(z)
Notes
-----
The nodes have integer labels corresponding to index of expected degrees
input sequence.
The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the
number of nodes and $m$ is the expected number of edges.
The model in [1]_ includes the possibility of self-loop edges.
Set selfloops=False to produce a graph without self loops.
For finite graphs this model doesn't produce exactly the given
expected degree sequence. Instead the expected degrees are as
follows.
For the case without self loops (selfloops=False),
.. math::
E[deg(u)] = \sum_{v \ne u} p_{uv}
= w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) .
NetworkX uses the standard convention that a self-loop edge counts 2
in the degree of a node, so with self loops (selfloops=True),
.. math::
E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu}
= w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) .
References
----------
.. [1] Fan Chung and L. Lu, Connected components in random graphs with
given expected degree sequences, Ann. Combinatorics, 6,
pp. 125-145, 2002.
.. [2] Joel Miller and Aric Hagberg,
Efficient generation of networks with given expected degrees,
in Algorithms and Models for the Web-Graph (WAW 2011),
Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,
pp. 115-126, 2011.
"""
n = len(w)
G = nx.empty_graph(n)
    # If there are no nodes or no edges in the graph, return the empty graph.
if n == 0 or max(w) == 0:
return G
rho = 1 / sum(w)
# Sort the weights in decreasing order. The original order of the
# weights dictates the order of the (integer) node labels, so we
# need to remember the permutation applied in the sorting.
order = sorted(enumerate(w), key=itemgetter(1), reverse=True)
mapping = {c: u for c, (u, v) in enumerate(order)}
seq = [v for u, v in order]
last = n
if not selfloops:
last -= 1
for u in range(last):
v = u
if not selfloops:
v += 1
factor = seq[u] * rho
p = min(seq[v] * factor, 1)
while v < n and p > 0:
if p != 1:
r = seed.random()
v += math.floor(math.log(r, 1 - p))
if v < n:
q = min(seq[v] * factor, 1)
if seed.random() < q / p:
G.add_edge(mapping[u], mapping[v])
v += 1
p = q
return G
def havel_hakimi_graph(deg_sequence, create_using=None):
"""Returns a simple graph with given degree sequence constructed
using the Havel-Hakimi algorithm.
Parameters
----------
deg_sequence: list of integers
Each integer corresponds to the degree of a node (need not be sorted).
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Directed graphs are not allowed.
Raises
------
NetworkXException
For a non-graphical degree sequence (i.e. one
not realizable by some simple graph).
Notes
-----
The Havel-Hakimi algorithm constructs a simple graph by
successively connecting the node of highest degree to other nodes
of highest degree, resorting remaining nodes by degree, and
repeating the process. The resulting graph has a high
    degree-assortativity. Nodes are labeled 0, ..., len(deg_sequence) - 1,
    corresponding to their position in deg_sequence.
The basic algorithm is from Hakimi [1]_ and was generalized by
Kleitman and Wang [2]_.
References
----------
.. [1] Hakimi S., On Realizability of a Set of Integers as
Degrees of the Vertices of a Linear Graph. I,
Journal of SIAM, 10(3), pp. 496-506 (1962)
.. [2] Kleitman D.J. and Wang D.L.
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors Discrete Mathematics, 6(1), pp. 79-88 (1973)
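    Examples
    --------
    A small graphical degree sequence is realized exactly:
    >>> G = nx.havel_hakimi_graph([3, 3, 2, 2, 1, 1])
    >>> sorted(d for n, d in G.degree())
    [1, 1, 2, 2, 3, 3]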
"""
if not nx.is_graphical(deg_sequence):
raise nx.NetworkXError("Invalid degree sequence")
p = len(deg_sequence)
G = nx.empty_graph(p, create_using)
if G.is_directed():
raise nx.NetworkXError("Directed graphs are not supported")
num_degs = [[] for i in range(p)]
dmax, dsum, n = 0, 0, 0
for d in deg_sequence:
# Process only the non-zero integers
if d > 0:
num_degs[d].append(n)
dmax, dsum, n = max(dmax, d), dsum + d, n + 1
# Return graph if no edges
if n == 0:
return G
modstubs = [(0, 0)] * (dmax + 1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while len(num_degs[dmax]) == 0:
dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n - 1:
raise nx.NetworkXError("Non-graphical integer sequence")
# Remove largest stub in list
source = num_degs[dmax].pop()
n -= 1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while len(num_degs[k]) == 0:
k -= 1
target = num_degs[k].pop()
G.add_edge(source, target)
n -= 1
if k > 1:
modstubs[mslen] = (k - 1, target)
mslen += 1
# Add back to the list any nonzero stubs that were removed
for i in range(mslen):
(stubval, stubtarget) = modstubs[i]
num_degs[stubval].append(stubtarget)
n += 1
return G
def directed_havel_hakimi_graph(in_deg_sequence, out_deg_sequence, create_using=None):
"""Returns a directed graph with the given degree sequences.
Parameters
----------
in_deg_sequence : list of integers
Each list entry corresponds to the in-degree of a node.
out_deg_sequence : list of integers
Each list entry corresponds to the out-degree of a node.
create_using : NetworkX graph constructor, optional (default DiGraph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : DiGraph
A graph with the specified degree sequences.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence
Raises
------
NetworkXError
If the degree sequences are not digraphical.
See Also
--------
configuration_model
Notes
-----
Algorithm as described by Kleitman and Wang [1]_.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = nx.utils.make_list_of_ints(in_deg_sequence)
out_deg_sequence = nx.utils.make_list_of_ints(out_deg_sequence)
# Process the sequences and form two heaps to store degree pairs with
# either zero or nonzero out degrees
sumin, sumout = 0, 0
nin, nout = len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
G = nx.empty_graph(maxn, create_using, default=nx.DiGraph)
if maxn == 0:
return G
maxin = 0
stubheap, zeroheap = [], []
for n in range(maxn):
in_deg, out_deg = 0, 0
if n < nout:
out_deg = out_deg_sequence[n]
if n < nin:
in_deg = in_deg_sequence[n]
if in_deg < 0 or out_deg < 0:
raise nx.NetworkXError(
"Invalid degree sequences. Sequence values must be positive."
)
sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1 * out_deg, -1 * in_deg, n))
elif out_deg > 0:
zeroheap.append((-1 * out_deg, n))
if sumin != sumout:
raise nx.NetworkXError(
"Invalid degree sequences. Sequences must have equal sums."
)
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0, 0, 0)] * (maxin + 1)
# Successively reduce degree sequence by removing the maximum
while stubheap:
# Remove first value in the sequence with a non-zero in degree
(freeout, freein, target) = heapq.heappop(stubheap)
freein *= -1
if freein > len(stubheap) + len(zeroheap):
raise nx.NetworkXError("Non-digraphical integer sequence")
# Attach arcs from the nodes with the most stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]):
(stubout, stubsource) = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin, stubsource) = heapq.heappop(stubheap)
if stubout == 0:
raise nx.NetworkXError("Non-digraphical integer sequence")
G.add_edge(stubsource, target)
# Check if source is now totally connected
if stubout + 1 < 0 or stubin < 0:
modstubs[mslen] = (stubout + 1, stubin, stubsource)
mslen += 1
# Add the nodes back to the heaps that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, (stub[0], stub[2]))
if freeout < 0:
heapq.heappush(zeroheap, (freeout, target))
return G
def degree_sequence_tree(deg_sequence, create_using=None):
"""Make a tree for the given degree sequence.
A tree has #nodes-#edges=1 so
the degree sequence must have
len(deg_sequence)-sum(deg_sequence)/2=1
"""
# The sum of the degree sequence must be even (for any undirected graph).
degree_sum = sum(deg_sequence)
if degree_sum % 2 != 0:
msg = "Invalid degree sequence: sum of degrees must be even, not odd"
raise nx.NetworkXError(msg)
if len(deg_sequence) - degree_sum // 2 != 1:
msg = (
"Invalid degree sequence: tree must have number of nodes equal"
" to one less than the number of edges"
)
raise nx.NetworkXError(msg)
G = nx.empty_graph(0, create_using)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
# Sort all degrees greater than 1 in decreasing order.
#
# TODO Does this need to be sorted in reverse order?
deg = sorted((s for s in deg_sequence if s > 1), reverse=True)
# make path graph as backbone
n = len(deg) + 2
nx.add_path(G, range(n))
last = n
# add the leaves
for source in range(1, n - 1):
nedges = deg.pop() - 2
for target in range(last, last + nedges):
G.add_edge(source, target)
last += nedges
# in case we added one too many
if len(G) > len(deg_sequence):
G.remove_node(0)
return G
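# A minimal usage sketch (not part of the original source). The sequence
# [2, 1, 1] satisfies len(seq) - sum(seq)/2 = 1, so it yields a 3-node path:
#
#   >>> T = degree_sequence_tree([2, 1, 1])
#   >>> sorted(d for _, d in T.degree())
#   [1, 1, 2]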
@py_random_state(1)
def random_degree_sequence_graph(sequence, seed=None, tries=10):
r"""Returns a simple random graph with the given degree sequence.
If the maximum degree $d_m$ in the sequence is $O(m^{1/4})$ then the
algorithm produces almost uniform random graphs in $O(m d_m)$ time
where $m$ is the number of edges.
Parameters
----------
sequence : list of integers
Sequence of degrees
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
tries : int, optional
Maximum number of tries to create a graph
Returns
-------
G : Graph
A graph with the specified degree sequence.
Nodes are labeled starting at 0 with an index
corresponding to the position in the sequence.
Raises
------
NetworkXUnfeasible
If the degree sequence is not graphical.
NetworkXError
If a graph is not produced in specified number of tries
See Also
--------
is_graphical, configuration_model
Notes
-----
The generator algorithm [1]_ is not guaranteed to produce a graph.
References
----------
.. [1] Mohsen Bayati, Jeong Han Kim, and Amin Saberi,
A sequential algorithm for generating random graphs.
Algorithmica, Volume 58, Number 4, 860-910,
DOI: 10.1007/s00453-009-9340-1
Examples
--------
>>> sequence = [1, 2, 2, 3]
>>> G = nx.random_degree_sequence_graph(sequence, seed=42)
>>> sorted(d for n, d in G.degree())
[1, 2, 2, 3]
"""
DSRG = DegreeSequenceRandomGraph(sequence, seed)
for try_n in range(tries):
try:
return DSRG.generate()
except nx.NetworkXUnfeasible:
pass
raise nx.NetworkXError(f"failed to generate graph in {tries} tries")
class DegreeSequenceRandomGraph:
# class to generate random graphs with a given degree sequence
# use random_degree_sequence_graph()
def __init__(self, degree, rng):
if not nx.is_graphical(degree):
raise nx.NetworkXUnfeasible("degree sequence is not graphical")
self.rng = rng
self.degree = list(degree)
# node labels are integers 0,...,n-1
self.m = sum(self.degree) / 2.0 # number of edges
try:
self.dmax = max(self.degree) # maximum degree
except ValueError:
self.dmax = 0
def generate(self):
# remaining_degree is mapping from int->remaining degree
self.remaining_degree = dict(enumerate(self.degree))
# add all nodes to make sure we get isolated nodes
self.graph = nx.Graph()
self.graph.add_nodes_from(self.remaining_degree)
# remove zero degree nodes
for n, d in list(self.remaining_degree.items()):
if d == 0:
del self.remaining_degree[n]
if len(self.remaining_degree) > 0:
# build graph in three phases according to how many unmatched edges
self.phase1()
self.phase2()
self.phase3()
return self.graph
def update_remaining(self, u, v, aux_graph=None):
# decrement remaining nodes, modify auxiliary graph if in phase3
if aux_graph is not None:
# remove edges from auxiliary graph
aux_graph.remove_edge(u, v)
if self.remaining_degree[u] == 1:
del self.remaining_degree[u]
if aux_graph is not None:
aux_graph.remove_node(u)
else:
self.remaining_degree[u] -= 1
if self.remaining_degree[v] == 1:
del self.remaining_degree[v]
if aux_graph is not None:
aux_graph.remove_node(v)
else:
self.remaining_degree[v] -= 1
def p(self, u, v):
# degree probability
return 1 - self.degree[u] * self.degree[v] / (4.0 * self.m)
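# Worked example (illustrative, not from the original source): for the
# degree sequence [3, 2, 2, 1] we have m = 4, so a proposed edge between
# the degree-3 node and a degree-2 node is accepted in phase 1 with
# probability 1 - (3 * 2) / (4 * 4) = 0.625.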
def q(self, u, v):
# remaining degree probability
norm = max(self.remaining_degree.values()) ** 2
return self.remaining_degree[u] * self.remaining_degree[v] / norm
def suitable_edge(self):
"""Returns True if and only if an arbitrary remaining node can
potentially be joined with some other remaining node.
"""
nodes = iter(self.remaining_degree)
u = next(nodes)
return any(v not in self.graph[u] for v in nodes)
def phase1(self):
# choose node pairs from (degree) weighted distribution
rem_deg = self.remaining_degree
while sum(rem_deg.values()) >= 2 * self.dmax**2:
u, v = sorted(random_weighted_sample(rem_deg, 2, self.rng))
if self.graph.has_edge(u, v):
continue
if self.rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v)
def phase2(self):
# choose remaining nodes uniformly at random and use rejection sampling
remaining_deg = self.remaining_degree
rng = self.rng
while len(remaining_deg) >= 2 * self.dmax:
while True:
u, v = sorted(rng.sample(list(remaining_deg.keys()), 2))
if self.graph.has_edge(u, v):
continue
if rng.random() < self.q(u, v):
break
if rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v)
def phase3(self):
# build potential remaining edges and choose with rejection sampling
potential_edges = combinations(self.remaining_degree, 2)
# build auxiliary graph of potential edges not already in graph
H = nx.Graph(
[(u, v) for (u, v) in potential_edges if not self.graph.has_edge(u, v)]
)
rng = self.rng
while self.remaining_degree:
if not self.suitable_edge():
raise nx.NetworkXUnfeasible("no suitable edges left")
while True:
u, v = sorted(rng.choice(list(H.edges())))
if rng.random() < self.q(u, v):
break
if rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v, aux_graph=H)
|
PypiClean
|
/edc_adverse_event-0.3.60-py3-none-any.whl/edc_adverse_event/auth_objects.py
|
from .utils import get_adverse_event_app_label, get_hospitalization_model_app_label
AE = "AE"
AE_REVIEW = "AE_REVIEW"
AE_ROLE = "ae_role"
AE_SUPER = "AE_SUPER"
TMG = "TMG"
TMG_REVIEW = "TMG_REVIEW"
TMG_ROLE = "tmg"
ae_codenames = [
"edc_adverse_event.nav_ae_section",
"edc_adverse_event.view_ae_listboard",
"edc_adverse_event.view_aeclassification",
"edc_adverse_event.view_causeofdeath",
"edc_adverse_event.view_saereason",
f"{get_adverse_event_app_label()}.add_aefollowup",
f"{get_adverse_event_app_label()}.add_aeinitial",
f"{get_adverse_event_app_label()}.add_aesusar",
f"{get_adverse_event_app_label()}.add_deathreport",
f"{get_hospitalization_model_app_label()}.add_hospitalization",
f"{get_adverse_event_app_label()}.change_aefollowup",
f"{get_adverse_event_app_label()}.change_aeinitial",
f"{get_adverse_event_app_label()}.change_aesusar",
f"{get_adverse_event_app_label()}.change_deathreport",
f"{get_hospitalization_model_app_label()}.change_hospitalization",
f"{get_adverse_event_app_label()}.delete_aefollowup",
f"{get_adverse_event_app_label()}.delete_aeinitial",
f"{get_adverse_event_app_label()}.delete_aesusar",
f"{get_adverse_event_app_label()}.delete_deathreport",
f"{get_hospitalization_model_app_label()}.delete_hospitalization",
f"{get_adverse_event_app_label()}.view_aefollowup",
f"{get_adverse_event_app_label()}.view_aeinitial",
f"{get_adverse_event_app_label()}.view_aesusar",
f"{get_adverse_event_app_label()}.view_aetmg",
f"{get_adverse_event_app_label()}.view_deathreport",
f"{get_adverse_event_app_label()}.view_deathreporttmg",
f"{get_adverse_event_app_label()}.view_deathreporttmgsecond",
f"{get_hospitalization_model_app_label()}.view_hospitalization",
f"{get_adverse_event_app_label()}.view_historicalaefollowup",
f"{get_adverse_event_app_label()}.view_historicalaeinitial",
f"{get_adverse_event_app_label()}.view_historicalaesusar",
f"{get_adverse_event_app_label()}.view_historicalaetmg",
f"{get_adverse_event_app_label()}.view_historicaldeathreport",
f"{get_adverse_event_app_label()}.view_historicaldeathreporttmg",
f"{get_adverse_event_app_label()}.view_historicaldeathreporttmgsecond",
f"{get_hospitalization_model_app_label()}.view_historicalhospitalization",
]
tmg_codenames = [
"edc_adverse_event.nav_tmg_section",
"edc_adverse_event.view_tmg_listboard",
"edc_adverse_event.view_aeclassification",
"edc_adverse_event.view_causeofdeath",
"edc_adverse_event.view_saereason",
f"{get_adverse_event_app_label()}.add_aetmg",
f"{get_adverse_event_app_label()}.add_deathreporttmg",
f"{get_adverse_event_app_label()}.add_deathreporttmgsecond",
f"{get_adverse_event_app_label()}.change_aetmg",
f"{get_adverse_event_app_label()}.change_deathreporttmg",
f"{get_adverse_event_app_label()}.change_deathreporttmgsecond",
f"{get_adverse_event_app_label()}.delete_aetmg",
f"{get_adverse_event_app_label()}.delete_deathreporttmg",
f"{get_adverse_event_app_label()}.delete_deathreporttmgsecond",
f"{get_adverse_event_app_label()}.view_aefollowup",
f"{get_adverse_event_app_label()}.view_aeinitial",
f"{get_adverse_event_app_label()}.view_aesusar",
f"{get_adverse_event_app_label()}.view_aetmg",
f"{get_adverse_event_app_label()}.view_deathreport",
f"{get_adverse_event_app_label()}.view_deathreporttmg",
f"{get_adverse_event_app_label()}.view_deathreporttmgsecond",
f"{get_hospitalization_model_app_label()}.view_hospitalization",
f"{get_adverse_event_app_label()}.view_historicalaefollowup",
f"{get_adverse_event_app_label()}.view_historicalaeinitial",
f"{get_adverse_event_app_label()}.view_historicalaesusar",
f"{get_adverse_event_app_label()}.view_historicalaetmg",
f"{get_adverse_event_app_label()}.view_historicaldeathreport",
f"{get_adverse_event_app_label()}.view_historicaldeathreporttmg",
f"{get_adverse_event_app_label()}.view_historicaldeathreporttmgsecond",
f"{get_hospitalization_model_app_label()}.view_historicalhospitalization",
]
tmg_view_codenames = [c for c in tmg_codenames if "view_" in c]
ae_dashboard_tuples = (("edc_adverse_event.view_ae_listboard", "Can view AE listboard"),)
ae_navbar_tuples = (("edc_adverse_event.nav_ae_section", "Can view AE section"),)
tmg_dashboard_tuples = (("edc_adverse_event.view_tmg_listboard", "Can view TMG Listboard"),)
tmg_navbar_tuples = (("edc_adverse_event.nav_tmg_section", "Can view TMG section"),)
|
PypiClean
|
/Fortpy-1.7.7.tar.gz/Fortpy-1.7.7/fortpy/parsers/module.py
|
import re
from ..elements import Module
from .docstring import DocStringParser
from .variable import VariableParser
from .types import TypeParser
from .executable import ExecutableParser
from .interface import InterfaceParser
class ModuleParser(object):
"""Extracts modules from fortran code files."""
def __init__(self):
self.setup_regex()
self.vparser = VariableParser()
self.docparser = DocStringParser()
self.iparser = InterfaceParser(self.docparser)
self.tparser = TypeParser(self.vparser, self.docparser)
self.xparser = ExecutableParser(self.vparser, self.docparser)
def setup_regex(self):
"""Sets up compiled regex objects for parsing code elements."""
#Regex for extracting modules from the code
self._RX_MODULE = r"(\n|^)\s*module\s+(?P<name>[a-z0-9_]+)(?P<contents>.+?)end\s*module"
self.RE_MODULE = re.compile(self._RX_MODULE, re.I | re.DOTALL)
self._RX_PROGRAM = r"(\n|^)\s*program\s+(?P<name>[a-z0-9_]+)(?P<contents>.+?)end\s*program"
self.RE_PROGRAM = re.compile(self._RX_PROGRAM, re.I | re.DOTALL)
#Regex for use statements in a module
self._RX_USE = r"^\s*use\s+(?P<name>[^,]+?)(\s*,\s+only\s*:(?P<only>[A-Za-z0-9_\s,]+?))?$"
self.RE_USE = re.compile(self._RX_USE, re.I | re.M)
#Regex for finding if the module is private
self._RX_PRIV = "private.+?(type|contains)"
self.RE_PRIV = re.compile(self._RX_PRIV, re.DOTALL | re.I)
#Regex for finding publicly labeled members declared using the public keyword.
self._RX_PUBLIC = r"\n\s*public\s+(?P<methods>[A-Za-z0-9_,\s&\n]+)"
self.RE_PUBLIC = re.compile(self._RX_PUBLIC, re.I)
#Regex for finding text before type or contains declarations that may contain members.
self._RX_MEMBERS = r"(?P<preamble>.+?)(\s+type[,\s]|contains)"
self.RE_MEMBERS = re.compile(self._RX_MEMBERS, re.DOTALL | re.I)
self._RX_PRECOMP = r"#endif"
self.RE_PRECOMP = re.compile(self._RX_PRECOMP, re.I)
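# Illustrative check of the module regex (not part of the original source):
#
#   >>> parser = ModuleParser()
#   >>> match = parser.RE_MODULE.search("module demo\ninteger :: n\nend module demo")
#   >>> match.group("name")
#   'demo'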
def rt_update(self, statement, element, mode, linenum, lineparser):
"""Performs a real-time update of the specified statement that is in the body of the
module.
:arg statement: the lines of code that were added/removed/changed on the
element after it had already been parsed. The lines together form a single
continuous code statement.
:arg element: the Module instance to update.
:arg mode: 'insert', 'replace', or 'delete'.
"""
#First find out if we are past the CONTAINS section separating executables from
#the type and member definitions. In order for this code to be reached, the lines
#that are being changed are *between* definitions that already exist. The likelihood
#is that they are *new* definitions of members, types or executables.
if linenum <= element.contains_index:
#we only have to look for type and member definitions.
self._rt_parse_members(statement, element, mode)
self._rt_parse_types(statement, element, mode, lineparser)
else:
#we only have to deal with executables.
self._rt_parse_execs(statement, element, mode, lineparser)
def _rt_parse_execs(self, statement, element, mode, lineparser):
"""As part of parse_line(), checks for new executable declarations in the statement."""
#This is the same deal as with _rt_parse_types() below.
if mode == "insert":
enew, start, end = self.xparser.parse_signature(statement, element, element)
if enew is not None:
enew.start, enew.end = lineparser.absolute_charindex(statement, start, end)
enew.incomplete = True
element.executables[enew.name.lower()] = enew
lineparser.additions.append((enew, element))
def _rt_parse_types(self, statement, element, mode, lineparser):
"""As part of parse_line(), checks for new type declarations in the statement."""
if mode == "insert":
#Since we got to this point, there is *no* code element that owns the current
#line which is being replaced; we are merely checking to see if the new line
#being entered matches a type definition, do the same thing as "insert"
tnew, start, end = self.tparser.parse_signature(statement, element, element)
#We need to set sensible boundaries for 'start' and 'end' so that if the lines
#immediately after this one are member definitions (for example) they get
#associated correctly with this type.
if tnew is not None:
tnew.start, tnew.end = lineparser.absolute_charindex(statement, start, end)
tnew.incomplete = True
element.types[tnew.name.lower()] = tnew
lineparser.additions.append((tnew, element))
# elif mode == "delete":
#This line is not part of any existing code element. Pointless to do anything
#with delete since it probably is just a member that got removed.
def _rt_parse_members(self, statement, element, mode):
"""As part of parse_line(), checks for member declarations in the statement."""
if mode == "delete":
self._rt_members_delete(element, statement)
elif mode == "insert":
self._rt_members_add(element, statement)
def _rt_members_add(self, element, statement):
"""Finds all the member declarations in 'statement' and adds the
corresponding instances to element.members."""
members = self.vparser.parse(statement, None)
for member in members:
single = members[member]
single.parent = element
element.members[member] = single
def _rt_members_delete(self, element, statement):
"""Finds all the member declarations in 'statement' and removes the
corresponding instances from element.members."""
removals = self.vparser.parse(statement, None)
for member in removals:
if member in element.members:
del element.members[member]
def parse(self, string, parent, module=True, filepath=None):
"""Extracts modules *and* programs from a fortran code file.
:arg string: the contents of the fortran code file.
:arg parent: the instance of CodeParser that will own the return Module.
:arg module: when true, the code file will be searched for modules; otherwise
it will be searched for programs.
"""
if module:
result = self._parse_modules(string, parent, filepath)
else:
result = self._parse_programs(string, parent, filepath)
return result
def _parse_programs(self, string, parent, filepath=None):
"""Extracts a PROGRAM from the specified fortran code file."""
#First, get hold of the docstrings for all the modules so that we can
#attach them as we parse them.
moddocs = self.docparser.parse_docs(string)
#Now look for modules in the file and then match them to their decorators.
matches = self.RE_PROGRAM.finditer(string)
result = []
for rmodule in matches:
name = rmodule.group("name").lower()
contents = re.sub("&[ ]*\n", "", rmodule.group("contents"))
module = self._process_module(name, contents, parent, rmodule, filepath)
#Check whether the docparser found docstrings for the module.
if name in moddocs:
module.docstring = self.docparser.to_doc(moddocs[name][0], name)
module.docstart, module.docend = module.absolute_charindex(string, moddocs[name][1],
moddocs[name][2])
result.append(module)
return result
def _parse_modules(self, string, parent, filepath=None):
"""Extracts any modules from the specified fortran code file."""
#First, get hold of the docstrings for all the modules so that we can
#attach them as we parse them.
moddocs = self.docparser.parse_docs(string)
#Now look for modules in the file and then match them to their decorators.
matches = self.RE_MODULE.finditer(string)
result = []
for rmodule in matches:
name = rmodule.group("name").lower()
contents = re.sub("&[ ]*\n", "", rmodule.group("contents"))
module = self._process_module(name, contents, parent, rmodule, filepath)
#Check whether the docparser found docstrings for the module.
if name in moddocs:
module.docstring = self.docparser.to_doc(moddocs[name][0], name)
module.docstart, module.docend = module.absolute_charindex(string, moddocs[name][1],
moddocs[name][2])
#Before we append the module to the list, we need to update its list
#of publics if it hasn't explicitly been declared as private.
module.all_to_public()
result.append(module)
return result
def _process_publics(self, contents):
"""Extracts a list of public members, types and executables that were declared using
the public keyword instead of a decoration."""
matches = self.RE_PUBLIC.finditer(contents)
result = {}
start = 0
for public in matches:
methods = public.group("methods")
#We need to keep track of where the public declarations start so that the unit
#testing framework can insert public statements for those procedures being tested
#that are not marked as public
if start == 0:
start = public.start("methods")
for item in re.split(r"[\s&\n,]+", methods.strip()):
if item.lower() in ["interface", "type", "use"]:
#We have obviously reached the end of the actual public
#declaration in this regex match.
break
self._dict_increment(result, item.lower())
return (result, start)
def _process_module(self, name, contents, parent, match, filepath=None):
"""Processes a regex match for a module to create a CodeElement."""
#First, get hold of the name and contents of the module so that we can process the other
#parts of the module.
modifiers = []
#We need to check for the private keyword before any type or contains declarations
if self.RE_PRIV.search(contents):
modifiers.append("private")
#The only other modifier for modules ought to be implicit none
if re.search("implicit\s+none", contents):
modifiers.append("implicit none")
#Next, parse out the dependencies of the module on other modules
dependencies = self._parse_use(contents)
publics, pubstart = self._process_publics(match.string)
#We can now create the CodeElement
result = Module(name, modifiers, dependencies, publics, contents, parent)
if filepath is not None:
result.filepath = filepath.lower()
result.start = match.start()
result.end = match.end()
result.refstring = match.string
result.set_public_start(pubstart)
if self.RE_PRECOMP.search(contents):
result.precompile = True
self.xparser.parse(result)
self.tparser.parse(result)
#It is possible for the module to have members, parse those
self._parse_members(contents, result)
self.iparser.parse(result)
#Now we can update the docstrings for the types. They rely on data
#extracted during parse_members() which is why they have to run
#separately over here.
for t in result.types:
self.tparser.update_docs(result.types[t], result)
return result
def _parse_use(self, string):
"""Extracts use dependencies from the innertext of a module."""
result = {}
for ruse in self.RE_USE.finditer(string):
#We also handle comments for individual use cases, the "only" section
#won't pick up any comments.
name = ruse.group("name").split("!")[0].strip()
if name.lower() == "mpi":
continue
if ruse.group("only"):
only = ruse.group("only").split(",")
for method in only:
key = "{}.{}".format(name, method.strip())
self._dict_increment(result, key)
else:
self._dict_increment(result, name)
return result
def _dict_increment(self, dictionary, key):
"""Increments the value of the dictionary at the specified key."""
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
def _parse_members(self, contents, module):
"""Extracts any module-level members from the code. They must appear before
any type declarations."""
#We need to get hold of the text before the module's main CONTAINS keyword
#so that we don't find variables from executables and claim them as
#belonging to the module.
icontains = module.contains_index
ichar = module.charindex(icontains, 0)
module.preamble = module.refstring[:ichar]
#Get a dictionary of all the members in this module body
#We only want to look at variable definitions before the first type
lowest = ichar
remove = [] #Will use later below, see next comment
for t in module.types:
remove.append((module.types[t].start, module.types[t].end))
if module.types[t].start < lowest:
lowest = module.types[t].start
module.members.update(self.vparser.parse(contents[:lowest-(module.start + 10 + len(module.name))], module))
#The docstrings for these members will appear as member tags in the same
#preamble text. We can't use the entire preamble for this because member
#docs inside of a type declaration will show up as belonging to the
#module, when in fact, they don't.
remove.sort(key=lambda tup: tup[0])
retain = []
cur_end = 0
for rem in remove:
signature = module.refstring[rem[0]+1:rem[1]].index("\n") + 2
keep = module.refstring[cur_end:rem[0] + signature]
cur_end = rem[1]
retain.append(keep)
#If there weren't any types in the module, we still want to get at the docs in
#the preamble.
if len(remove) == 0:
retain = module.preamble
docsearch = "".join(retain)
module.predocs = self.docparser.parse_docs(docsearch, module)
if module.name in module.predocs:
#We can only do member docstrings if the module had internal docstrings
#that map to members.
memdocs = self.docparser.to_doc(module.predocs[module.name][0], module.name)
remainingdocs = self.docparser.process_memberdocs(memdocs, module)
module.predocs[module.name] = remainingdocs
|
PypiClean
|
/ipydrawio-1.3.0.tar.gz/ipydrawio-1.3.0/_/ipydrawio-webpack/static/dio/plugins/replay.js
|
Draw.loadPlugin(function(ui) {
var graph = ui.editor.graph;
var model = graph.model;
function decodeChanges(delta, direct)
{
var codec2 = new mxCodec(delta.ownerDocument);
codec2.lookup = function(id)
{
return model.getCell(id);
};
var changeNode = (direct) ? delta.firstChild : delta.firstChild.firstChild;
var changes = [];
while (changeNode != null)
{
var change = codec2.decode(changeNode);
change.model = model;
change.execute();
changes.push(change);
changeNode = changeNode.nextSibling;
}
return changes;
};
function createUndoableEdit(changes)
{
var edit = new mxUndoableEdit(model);
edit.changes = changes;
edit.notify = function()
{
// LATER: Remove changes property (deprecated)
edit.source.fireEvent(new mxEventObject(mxEvent.CHANGE,
'edit', edit, 'changes', edit.changes));
edit.source.fireEvent(new mxEventObject(mxEvent.NOTIFY,
'edit', edit, 'changes', edit.changes));
};
return edit;
};
function processDelta(delta, direct)
{
var changes = decodeChanges(delta, direct);
if (changes.length > 0)
{
var edit = createUndoableEdit(changes);
if (ui.chromelessResize)
{
// No notify event here to avoid the edit from being encoded and transmitted
// LATER: Remove changes property (deprecated)
model.fireEvent(new mxEventObject(mxEvent.CHANGE,
'edit', edit, 'changes', changes));
model.fireEvent(new mxEventObject(mxEvent.UNDO, 'edit', edit));
ui.chromelessResize();
}
else
{
edit.notify();
}
}
return edit;
};
if (ui.editor.isChromelessView())
{
var replayData = urlParams['replay-data'];
var delay = parseInt(urlParams['replay-delay'] || 1000);
if (replayData != null)
{
var xmlDoc = mxUtils.parseXml(Graph.decompress(replayData));
// LATER: Avoid duplicate parsing
ui.fileLoaded(new LocalFile(ui, mxUtils.getXml(xmlDoc.documentElement.firstChild.firstChild)));
// Process deltas
var delta = xmlDoc.documentElement.firstChild.nextSibling;
function nextStep()
{
if (delta != null)
{
window.setTimeout(function()
{
processDelta(delta);
delta = delta.nextSibling;
nextStep();
}, delay);
}
};
nextStep();
}
}
else
{
var tape = null;
var codec = new mxCodec();
codec.lookup = function(id)
{
return model.getCell(id);
};
model.addListener(mxEvent.CHANGE, function(sender, evt)
{
if (tape != null)
{
var changes = evt.getProperty('changes');
var node = codec.encode(changes);
var delta = codec.document.createElement('delta');
delta.appendChild(node);
tape.push(mxUtils.getXml(delta));
}
});
mxResources.parse('record=Record');
mxResources.parse('replay=Replay');
// Adds actions
var action = ui.actions.addAction('record...', function()
{
if (tape == null)
{
var node = codec.encode(model);
var state = codec.document.createElement('state');
state.appendChild(node);
tape =[mxUtils.getXml(state)];
ui.editor.setStatus('Recording started');
}
else if (tape != null)
{
ui.editor.setStatus('Recording stopped');
var tmp = tape;
tape = null;
var dlg = new FilenameDialog(ui, 1000, mxResources.get('apply'), function(newValue)
{
if (newValue != null)
{
var dlg = new EmbedDialog(ui, 'https://www.draw.io/?p=replay&lightbox=1&replay-delay=' +
parseFloat(newValue) + '&replay-data=' + Graph.compress('<recording>' +
tmp.join('') + '</recording>'));
ui.showDialog(dlg.container, 450, 240, true, true);
dlg.init();
}
}, 'Delay');
ui.showDialog(dlg.container, 300, 80, true, true);
dlg.init();
}
action.label = (tape != null) ? 'Stop recording' : mxResources.get('record') + '...';
});
ui.actions.addAction('replay...', function()
{
var dlg = new TextareaDialog(ui, 'Changes [JSON export, compressed edits or <edit>..</edit>]:', '',
function(newValue)
{
if (newValue.length > 0)
{
try
{
var current = null;
if (newValue.charAt(0) == '{')
{
var temp = JSON.parse(newValue);
current = temp.current;
newValue = temp.edits;
}
if (newValue.charAt(0) != '<')
{
newValue = Graph.decompress(newValue);
}
if (newValue.charAt(0) == '[')
{
newValue = JSON.parse(newValue);
console.log(JSON.stringify(newValue, null, 2));
var pageId = null;
var temp = [];
for (var i = 0; i < newValue.length; i++)
{
if (pageId == null)
{
pageId = newValue[i].pageid;
}
if (pageId == newValue[i].pageid)
{
temp.push(newValue[i].data);
}
else
{
mxLog.debug('edit ignored for page ' + newValue[i].pageid);
mxLog.show();
}
}
newValue = temp.join('');
}
var edits = mxUtils.parseXml('<edits>' + newValue + '</edits>');
var edit = edits.documentElement.firstChild;
function step()
{
console.log(processDelta(edit, true));
edit = edit.nextSibling;
return edit != null;
}
if (ui.buttonContainer != null)
{
console.log(mxUtils.getPrettyXml(edit));
var button = mxUtils.button('Step', function()
{
if (!step())
{
button.parentNode.removeChild(button);
}
else
{
console.log(mxUtils.getPrettyXml(edit));
}
});
button.className = 'geBtn gePrimaryBtn';
ui.buttonContainer.appendChild(button);
}
else
{
while (step())
{
// repeat
}
}
}
catch (e)
{
ui.handleError(e);
console.error(e);
}
}
});
ui.showDialog(dlg.container, 620, 460, true, true);
dlg.init();
});
var menu = ui.menus.get('extras');
var oldFunct = menu.funct;
menu.funct = function(menu, parent)
{
oldFunct.apply(this, arguments);
ui.menus.addMenuItems(menu, ['-', 'record', 'replay'], parent);
};
}
});
|
PypiClean
|
/aifs_nni-1.9.5-py3-none-manylinux1_x86_64.whl/aifs_nni-1.9.5.data/data/nni/node_modules/openid-client/LICENSE.md
|
The MIT License (MIT)
Copyright (c) 2016 Filip Skokan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/modello-0.1.0.post1.tar.gz/modello-0.1.0.post1/modello.py
|
"""Module for symbolic modeling of systems."""
import typing
from sympy import Basic, Dummy, Eq, simplify, solve
class ModelloSentinelClass:
"""This class is used for quick type.mro() checks."""
class InstanceDummy(Dummy):
"""Dummy which will create a bound bummy on Modello instantiation."""
def bound(self, model_name: str) -> "BoundInstanceDummy":
"""Return an dummy bound to a modello instance."""
return BoundInstanceDummy(model_name + "_" + self.name, **self.assumptions0)
# # for debugging
# def _sympystr(self, printer):
# return "%s[%s]" % (self.name, self.dummy_index)
class BoundInstanceDummy(InstanceDummy):
"""Dummy associated with a Modello instance."""
class ModelloMetaNamespace(dict):
"""This is so that Modello class definitions implicitly define symbols."""
def __init__(self, name: str, bases: typing.Tuple[type, ...]) -> None:
"""Create a namespace for a Modello class to use."""
self.name = name
self.attrs: typing.Dict[str, Basic] = {}
self.dummies: typing.Dict[str, Dummy] = {}
self.other_attrs: typing.Dict[str, object] = {}
self.dummy_overrides: typing.Dict[Dummy, Dummy] = {}
for base in bases:
if ModelloSentinelClass not in base.mro():
continue
parent_namespace = getattr(base, "_modello_namespace", None)
# TODO: read the following (regarding python's method resolution order) and make sure all is ok:
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.3910&rep=rep1&type=pdf
if parent_namespace:
for attr in self.dummies.keys() & parent_namespace.dummies.keys():
override_dummy = parent_namespace.dummies[attr]
base_dummy = self.dummies[attr]
self.dummies[attr] = override_dummy
self.dummy_overrides[base_dummy] = override_dummy
self.attrs.update(parent_namespace.attrs)
self.dummies.update(parent_namespace.dummies)
self.other_attrs.update(parent_namespace.other_attrs)
self.update(parent_namespace)
if self.dummy_overrides:
for attr, value in self.attrs.items():
self.attrs[attr] = value.subs(self.dummy_overrides)
def __setitem__(self, key: str, value: object) -> None:
"""Manage modello attributes as values are assigned."""
if isinstance(value, Basic):
if key in self:
dummy = self.dummies[key]
elif isinstance(value, InstanceDummy):
dummy = value
else:
dummy = InstanceDummy(key, **value.assumptions0)
self.attrs[key] = simplify(value).subs(self.dummy_overrides)
self.dummies[key] = dummy
value = dummy
elif key in self.attrs:
# cannot override part of an inherited expression with a non-expression
raise ValueError("Cannot assign %s.%s to a non-expression" % (self.name, key))
else:
self.other_attrs[key] = value
super().__setitem__(key, value)
class ModelloMeta(type):
"""Used to make Modello class definitions use dummies."""
@classmethod
def __prepare__(metacls,
__name: str,
__bases: typing.Tuple[type, ...],
**kwds: typing.Any) -> typing.Mapping[str, typing.Any]:
"""Return a ModelloMetaNamespace instead of a plain dict to accumlate attributes on."""
return ModelloMetaNamespace(__name, __bases)
def __new__(mcs, name: str, bases: typing.Tuple[type, ...], meta_namespace: ModelloMetaNamespace) -> type:
"""Return a new class with modello attributes."""
namespace = dict(meta_namespace)
# could follow django's model of _meta? conflicts?
namespace["_modello_namespace"] = meta_namespace
namespace["_modello_class_constraints"] = {
dummy: meta_namespace.attrs[attr]
for attr, dummy in meta_namespace.dummies.items()
if meta_namespace.attrs[attr] is not dummy
}
return super().__new__(mcs, name, bases, namespace)
class Modello(ModelloSentinelClass, metaclass=ModelloMeta):
"""Base class for building symbolic models."""
_modello_namespace: typing.ClassVar[ModelloMetaNamespace] = ModelloMetaNamespace("", ())
_modello_class_constraints: typing.Dict[InstanceDummy, Basic] = {}
def __init__(self, name: str, **value_map: typing.Dict[str, Basic]) -> None:
"""Initialise a model instance and solve for each attribute."""
instance_dummies = {
class_dummy: class_dummy.bound(name)
for class_dummy in self._modello_namespace.dummies.values()
}
self._modello_instance_dummies = instance_dummies
instance_constraints = {}
for attr, value in value_map.items():
value = simplify(value).subs(instance_dummies)
value_map[attr] = value
class_dummy = getattr(self, attr)
instance_dummy = instance_dummies[class_dummy]
instance_constraints[instance_dummy] = value
self._modello_instance_constraints: typing.Dict[BoundInstanceDummy, Basic] = instance_constraints
constraints = [
Eq(instance_dummies[class_dummy], value.subs(instance_dummies))
for class_dummy, value in self._modello_class_constraints.items()
]
constraints.extend(
Eq(instance_dummy, value)
for instance_dummy, value in instance_constraints.items()
)
# handy for debugging
self._modello_constraints: typing.List[Eq] = constraints
if constraints:
solutions = solve(constraints, particular=True, dict=True)
if len(solutions) != 1:
raise ValueError("%s solutions" % len(solutions))
solution = solutions[0]
else:
solution = {}
for attr, class_dummy in self._modello_namespace.dummies.items():
instance_dummy = instance_dummies[class_dummy]
if instance_dummy in solution:
value = solution[instance_dummy]
elif instance_dummy in instance_constraints:
value = instance_constraints[instance_dummy]
elif class_dummy in self._modello_class_constraints:
value = self._modello_class_constraints[class_dummy].subs(instance_dummies)
else:
value = instance_dummy
setattr(self, attr, value)
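# A minimal usage sketch (not part of the original source; the Rectangle model
# below is a hypothetical illustration of the API defined above):
#
#   >>> class Rectangle(Modello):
#   ...     width = InstanceDummy("width", positive=True)
#   ...     height = InstanceDummy("height", positive=True)
#   ...     area = width * height
#   >>> r = Rectangle("r", width=3, height=4)
#   >>> r.area
#   12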
|
PypiClean
|
/pymars-0.10.0a1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl/mars/learn/metrics/_base.py
|
from itertools import combinations
from ... import tensor as mt
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
def _average_binary_score(
binary_metric,
y_true,
y_score,
average,
sample_weight=None,
session=None,
run_kwargs=None,
):
average_options = (None, "micro", "macro", "weighted", "samples")
if average not in average_options: # pragma: no cover
raise ValueError("average has to be one of {0}".format(average_options))
y_type = type_of_target(y_true).to_numpy(session=session, **(run_kwargs or dict()))
if y_type not in ("binary", "multilabel-indicator"): # pragma: no cover
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(
y_true, y_score, sample_weight, session=session, run_kwargs=run_kwargs
)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None: # pragma: no cover
score_weight = mt.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == "weighted":
if score_weight is not None: # pragma: no cover
average_weight = mt.sum(
mt.multiply(y_true, mt.reshape(score_weight, (-1, 1))), axis=0
)
else:
average_weight = mt.sum(y_true, axis=0)
if mt.isclose(average_weight.sum(), 0.0).to_numpy(
session=session, **(run_kwargs or dict())
):
return 0
elif average == "samples":
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = mt.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)
# Average the results
if average is not None:
if average_weight is not None:
# Scores with 0 weights are forced to be 0, preventing the average
# score from being affected by 0-weighted NaN elements.
average_weight = mt.asarray(average_weight)
score[average_weight == 0] = 0
return mt.average(score, weights=average_weight)
else:
return score
def _average_multiclass_ovo_score(
binary_metric, y_true, y_score, average="macro", session=None, run_kwargs=None
):
check_consistent_length(y_true, y_score, session=session, run_kwargs=run_kwargs)
y_true_unique = mt.unique(y_true).to_numpy()
n_classes = y_true_unique.shape[0]
n_pairs = n_classes * (n_classes - 1) // 2
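# For example (illustrative): with 3 distinct classes there are
# 3 * (3 - 1) // 2 = 3 class pairs; each pair is scored in both directions
# below and the two scores are averaged.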
pair_scores = mt.empty(n_pairs)
is_weighted = average == "weighted"
prevalence = mt.empty(n_pairs) if is_weighted else None
# Compute scores treating a as positive class and b as negative class,
# then b as positive class and a as negative class
for ix, (a, b) in enumerate(combinations(y_true_unique, 2)):
a_mask = y_true == a
b_mask = y_true == b
ab_mask = mt.logical_or(a_mask, b_mask)
if is_weighted:
prevalence[ix] = mt.average(ab_mask)
a_true = a_mask[ab_mask]
b_true = b_mask[ab_mask]
a_true_score = binary_metric(a_true, y_score[ab_mask, a])
b_true_score = binary_metric(b_true, y_score[ab_mask, b])
pair_scores[ix] = (a_true_score + b_true_score) / 2
return mt.average(pair_scores, weights=prevalence)
|
PypiClean
|
/SpiNNMan-2016.001.zip/SpiNNMan-2016.001/spinnman/constants.py
|
from enum import Enum
# The default port of the connection
SCP_SCAMP_PORT = 17893
# The default port of the connection
UDP_BOOT_CONNECTION_DEFAULT_PORT = 54321
# The base address of the system variable structure in System ram
SYSTEM_VARIABLE_BASE_ADDRESS = 0xf5007f00
# The base address of a routers diagnostic filter controls
ROUTER_REGISTER_BASE_ADDRESS = 0xe1000000
# offset for the router filter controls first register (one word each)
ROUTER_FILTER_CONTROLS_OFFSET = 0x200
# point where default filters finish and user set-able ones are available
ROUTER_DEFAULT_FILTERS_MAX_POSITION = 11
# size of a router diagnostic filter control register in bytes
ROUTER_DIAGNOSTIC_FILTER_SIZE = 4
# number of router diagnostic filters
NO_ROUTER_DIAGNOSTIC_FILTERS = 16
# The size of the system variable structure in bytes
SYSTEM_VARIABLE_BYTES = 256
# The max size a UDP packet can be
UDP_MESSAGE_MAX_SIZE = 256
# the amount of size in bytes that the EIEIO command header is
EIEIO_COMMAND_HEADER_SIZE = 3
# The amount of size in bytes the EIEIO data header is
EIEIO_DATA_HEADER_SIZE = 2
# how many bytes the cpu info data takes up
CPU_INFO_BYTES = 128
# the address at which user0 register starts
CPU_USER_0_START_ADDRESS = 112
# the address at which user0 register starts
CPU_USER_1_START_ADDRESS = 116
# the address at which user0 register starts
CPU_USER_2_START_ADDRESS = 120
# default UDP tag
DEFAULT_SDP_TAG = 0xFF
# max user requested tag value
MAX_TAG_ID = 7
# The range of values the BMP's 12-bit ADCs can measure.
BMP_ADC_MAX = 1 << 12
# Multiplier to convert from ADC value to volts for lines less than 2.5 V.
BMP_V_SCALE_2_5 = 2.5 / BMP_ADC_MAX
# Multiplier to convert from ADC value to volts for 3.3 V lines.
BMP_V_SCALE_3_3 = 3.75 / BMP_ADC_MAX
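# e.g. an ADC reading of 2048 on a 3.3 V line corresponds to
# 2048 * (3.75 / 4096) = 1.875 V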
# Multiplier to convert from ADC value to volts for 12 V lines.
BMP_V_SCALE_12 = 15.0 / BMP_ADC_MAX
# Multiplier to convert from temperature probe values to degrees Celsius.
BMP_TEMP_SCALE = 1.0 / 256.0
# Temperature value returned when a probe is not connected.
BMP_MISSING_TEMP = -0x8000
# Fan speed value returned when a fan is absent.
BMP_MISSING_FAN = -1
# Additional timeout for BMP power-on commands to reply.
BMP_POWER_ON_TIMEOUT = 10.0
# number of chips to check are booted fully from the middle
NO_MIDDLE_CHIPS_TO_CHECK = 8
# a listing of what spinnaker specific EIEIO commands there are.
EIEIO_COMMAND_IDS = Enum(
value="EIEIO_COMMAND_IDS",
names=[
# Database handshake with external program
("DATABASE_CONFIRMATION", 1),
# Fill in buffer area with padding
("EVENT_PADDING", 2),
# End of all buffers, stop execution
("EVENT_STOP", 3),
# Stop complaining that there is SDRAM free space for buffers
("STOP_SENDING_REQUESTS", 4),
# Start complaining that there is SDRAM free space for buffers
("START_SENDING_REQUESTS", 5),
# Spinnaker requesting new buffers for spike source population
("SPINNAKER_REQUEST_BUFFERS", 6),
# Buffers being sent from host to SpiNNaker
("HOST_SEND_SEQUENCED_DATA", 7),
# Buffers available to be read from a buffered out vertex
("SPINNAKER_REQUEST_READ_DATA", 8),
# Host confirming data being read form SpiNNaker memory
("HOST_DATA_READ", 9)]
)
# the values used by the SCP iptag time outs. These control how long to wait
# for any message request which requires a response, before raising an error.
# The value is calculated via the following formula:
# 10ms * 2^(tag_timeout_value - 1)
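# e.g. TIMEOUT_80_ms has the value 4, giving 10ms * 2^(4 - 1) = 80ms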
IPTAG_TIME_OUT_WAIT_TIMES = Enum(
value="IPTAG_TIME_OUT_WAIT_TIMES",
names=[
("TIMEOUT_10_ms", 1),
("TIMEOUT_20_ms", 2),
("TIMEOUT_40_ms", 3),
("TIMEOUT_80_ms", 4),
("TIMEOUT_160_ms", 5),
("TIMEOUT_320_ms", 6),
("TIMEOUT_640_ms", 7),
("TIMEOUT_1280_ms", 8),
("TIMEOUT_2560_ms", 9)]
)
ROUTER_REGISTER_REGISTERS = Enum(
value="Registers",
names=[("LOC_MC", 0),
("EXT_MC", 1),
("LOC_PP", 2),
("EXT_PP", 3),
("LOC_NN", 4),
("EXT_NN", 5),
("LOC_FR", 6),
("EXT_FR", 7),
("DUMP_MC", 8),
("DUMP_PP", 9),
("DUMP_NN", 10),
("DUMP_FR", 11),
("USER_0", 12),
("USER_1", 13),
("USER_2", 14),
("USER_3", 15)]
)
# the types of read available from SARK. These values are used to tell SARK how
# to read the data in a time efficient manner.
READ_TYPES = Enum(
value="Read_types",
names=[("BYTE", 0),
("HALF_WORD", 1),
("WORD", 2)]
)
# This is a mapping from (read address % 4, number of bytes to read % 4),
# i.e. the byte position of each within a word, to the most time-efficient
# way to read that amount of data via SARK
address_length_dtype = {
(0, 0): READ_TYPES.WORD,
(0, 1): READ_TYPES.BYTE,
(0, 2): READ_TYPES.HALF_WORD,
(0, 3): READ_TYPES.BYTE,
(1, 0): READ_TYPES.BYTE,
(1, 1): READ_TYPES.BYTE,
(1, 2): READ_TYPES.BYTE,
(1, 3): READ_TYPES.BYTE,
(2, 0): READ_TYPES.HALF_WORD,
(2, 1): READ_TYPES.BYTE,
(2, 2): READ_TYPES.HALF_WORD,
(2, 3): READ_TYPES.BYTE,
(3, 0): READ_TYPES.BYTE,
(3, 1): READ_TYPES.BYTE,
(3, 2): READ_TYPES.BYTE,
(3, 3): READ_TYPES.BYTE}
|
PypiClean
|
/pyinstaller-hooks-contrib-2023.8.tar.gz/pyinstaller-hooks-contrib-2023.8/src/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-clr.py
|
# There is a name clash between pythonnet's clr module/extension (which this hook is for) and the clr package that provides
# the terminal styling library (https://pypi.org/project/clr/). Therefore, we must first check if pythonnet is actually
# available...
from PyInstaller.utils.hooks import is_module_satisfies
from PyInstaller.compat import is_win
if is_module_satisfies("pythonnet"):
# pythonnet requires both clr.pyd and Python.Runtime.dll, but the latter isn't found by PyInstaller.
import ctypes.util
from PyInstaller.log import logger
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
collected_runtime_files = []
# Try finding Python.Runtime.dll via distribution's file list
dist_files = importlib_metadata.files('pythonnet')
if dist_files is not None:
runtime_dll_files = [f for f in dist_files if f.match('Python.Runtime.dll')]
if len(runtime_dll_files) == 1:
runtime_dll_file = runtime_dll_files[0]
collected_runtime_files = [(runtime_dll_file.locate(), runtime_dll_file.parent.as_posix())]
logger.debug("hook-clr: Python.Runtime.dll discovered via metadata.")
elif len(runtime_dll_files) > 1:
logger.warning("hook-clr: multiple instances of Python.Runtime.dll listed in metadata - cannot resolve.")
# Fall back to the legacy way
if not collected_runtime_files:
runtime_dll_file = ctypes.util.find_library('Python.Runtime')
if runtime_dll_file:
collected_runtime_files = [(runtime_dll_file, '.')]
logger.debug('hook-clr: Python.Runtime.dll discovered via legacy method.')
if not collected_runtime_files:
raise Exception('Python.Runtime.dll not found')
# On Windows, collect runtime DLL file(s) as binaries; on other OSes, collect them as data files, to prevent fatal
# errors in binary dependency analysis.
if is_win:
binaries = collected_runtime_files
else:
datas = collected_runtime_files
# These modules are imported inside Python.Runtime.dll
hiddenimports = ["platform", "warnings"]
|
PypiClean
|
/object_detection_tf-0.1.2-py3-none-any.whl/object_detection/protos/string_int_label_map_pb2.py
|
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/string_int_label_map.proto',
package='object_detection.protos',
syntax='proto2',
serialized_options=None,
serialized_pb=b'\n2object_detection/protos/string_int_label_map.proto\x12\x17object_detection.protos\"\xc1\x02\n\x15StringIntLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12M\n\tkeypoints\x18\x04 \x03(\x0b\x32:.object_detection.protos.StringIntLabelMapItem.KeypointMap\x12\x14\n\x0c\x61ncestor_ids\x18\x05 \x03(\x05\x12\x16\n\x0e\x64\x65scendant_ids\x18\x06 \x03(\x05\x12\x39\n\tfrequency\x18\x07 \x01(\x0e\x32&.object_detection.protos.LVISFrequency\x12\x16\n\x0einstance_count\x18\x08 \x01(\x05\x1a(\n\x0bKeypointMap\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05label\x18\x02 \x01(\t\"Q\n\x11StringIntLabelMap\x12<\n\x04item\x18\x01 \x03(\x0b\x32..object_detection.protos.StringIntLabelMapItem*D\n\rLVISFrequency\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0c\n\x08\x46REQUENT\x10\x01\x12\n\n\x06\x43OMMON\x10\x02\x12\x08\n\x04RARE\x10\x03'
)
_LVISFREQUENCY = _descriptor.EnumDescriptor(
name='LVISFrequency',
full_name='object_detection.protos.LVISFrequency',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FREQUENT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMON', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RARE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=486,
serialized_end=554,
)
_sym_db.RegisterEnumDescriptor(_LVISFREQUENCY)
LVISFrequency = enum_type_wrapper.EnumTypeWrapper(_LVISFREQUENCY)
UNSPECIFIED = 0
FREQUENT = 1
COMMON = 2
RARE = 3
_STRINGINTLABELMAPITEM_KEYPOINTMAP = _descriptor.Descriptor(
name='KeypointMap',
full_name='object_detection.protos.StringIntLabelMapItem.KeypointMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='object_detection.protos.StringIntLabelMapItem.KeypointMap.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label', full_name='object_detection.protos.StringIntLabelMapItem.KeypointMap.label', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=361,
serialized_end=401,
)
_STRINGINTLABELMAPITEM = _descriptor.Descriptor(
name='StringIntLabelMapItem',
full_name='object_detection.protos.StringIntLabelMapItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='object_detection.protos.StringIntLabelMapItem.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='object_detection.protos.StringIntLabelMapItem.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_name', full_name='object_detection.protos.StringIntLabelMapItem.display_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keypoints', full_name='object_detection.protos.StringIntLabelMapItem.keypoints', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ancestor_ids', full_name='object_detection.protos.StringIntLabelMapItem.ancestor_ids', index=4,
number=5, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='descendant_ids', full_name='object_detection.protos.StringIntLabelMapItem.descendant_ids', index=5,
number=6, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='frequency', full_name='object_detection.protos.StringIntLabelMapItem.frequency', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_count', full_name='object_detection.protos.StringIntLabelMapItem.instance_count', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_STRINGINTLABELMAPITEM_KEYPOINTMAP, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=401,
)
_STRINGINTLABELMAP = _descriptor.Descriptor(
name='StringIntLabelMap',
full_name='object_detection.protos.StringIntLabelMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='object_detection.protos.StringIntLabelMap.item', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=403,
serialized_end=484,
)
_STRINGINTLABELMAPITEM_KEYPOINTMAP.containing_type = _STRINGINTLABELMAPITEM
_STRINGINTLABELMAPITEM.fields_by_name['keypoints'].message_type = _STRINGINTLABELMAPITEM_KEYPOINTMAP
_STRINGINTLABELMAPITEM.fields_by_name['frequency'].enum_type = _LVISFREQUENCY
_STRINGINTLABELMAP.fields_by_name['item'].message_type = _STRINGINTLABELMAPITEM
DESCRIPTOR.message_types_by_name['StringIntLabelMapItem'] = _STRINGINTLABELMAPITEM
DESCRIPTOR.message_types_by_name['StringIntLabelMap'] = _STRINGINTLABELMAP
DESCRIPTOR.enum_types_by_name['LVISFrequency'] = _LVISFREQUENCY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StringIntLabelMapItem = _reflection.GeneratedProtocolMessageType('StringIntLabelMapItem', (_message.Message,), {
'KeypointMap' : _reflection.GeneratedProtocolMessageType('KeypointMap', (_message.Message,), {
'DESCRIPTOR' : _STRINGINTLABELMAPITEM_KEYPOINTMAP,
'__module__' : 'object_detection.protos.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMapItem.KeypointMap)
})
,
'DESCRIPTOR' : _STRINGINTLABELMAPITEM,
'__module__' : 'object_detection.protos.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMapItem)
})
_sym_db.RegisterMessage(StringIntLabelMapItem)
_sym_db.RegisterMessage(StringIntLabelMapItem.KeypointMap)
StringIntLabelMap = _reflection.GeneratedProtocolMessageType('StringIntLabelMap', (_message.Message,), {
'DESCRIPTOR' : _STRINGINTLABELMAP,
'__module__' : 'object_detection.protos.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMap)
})
_sym_db.RegisterMessage(StringIntLabelMap)
# @@protoc_insertion_point(module_scope)
|
PypiClean
|
/scikit_multiflow-0.5.3-cp37-cp37m-macosx_10_9_x86_64.whl/skmultiflow/data/led_generator_drift.py
|
import numpy as np
from skmultiflow.data.led_generator import LEDGenerator
class LEDGeneratorDrift(LEDGenerator):
""" LED stream generator with concept drift.
This class is an extension from the LEDGenerator. The purpose of this generator is to
add concept drift to the stream.
Parameters
----------
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by `np.random`.
noise_percentage: float (Default: 0.0)
The probability that noise will happen in the generation. At each
new sample generated, a random probability is generated, and if that
probability is equal or less than the noise_percentage, the selected data will
be switched.
has_noise: bool (Default: False)
Adds 17 non relevant attributes to the stream.
n_drift_features : int (Default : 0)
The number of attributes that have drift.
Examples
--------
>>> # Imports
>>> from skmultiflow.data.led_generator_drift import LEDGeneratorDrift
>>> # Setting up the stream
>>> stream = LEDGeneratorDrift(random_state = 112, noise_percentage = 0.28,has_noise= True,
... n_drift_features=4)
>>> # Retrieving one sample
>>> stream.next_sample()
(array([[0., 1., 1., 1., 0., 1., 1., 0., 1., 0., 0., 0., 1., 0., 1., 1.,
1., 0., 0., 0., 0., 0., 1., 1.]]), array([4]))
>>> # Retrieving 10 samples
>>> stream.next_sample(10)
(array([[0., 0., 1., 0., 1., 0., 0., 1., 0., 0., 1., 1., 0., 0., 0., 0.,
1., 1., 0., 0., 0., 0., 1., 1.],
[0., 1., 1., 0., 0., 0., 1., 1., 1., 0., 1., 0., 0., 0., 1., 1.,
1., 1., 1., 0., 1., 1., 1., 0.],
[1., 1., 1., 0., 0., 1., 1., 1., 0., 0., 0., 0., 1., 0., 0., 0.,
0., 1., 0., 1., 1., 0., 1., 1.],
[0., 1., 0., 0., 1., 0., 0., 1., 0., 1., 1., 0., 1., 1., 0., 0.,
1., 1., 0., 1., 1., 1., 1., 0.],
[0., 1., 1., 0., 1., 0., 1., 0., 1., 1., 0., 1., 1., 0., 1., 0.,
0., 0., 0., 1., 0., 1., 0., 0.],
[1., 1., 1., 0., 1., 0., 1., 0., 1., 1., 0., 1., 0., 1., 1., 1.,
0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 1., 0., 1., 0., 1., 0., 1., 0., 1., 0.,
1., 1., 1., 0., 1., 0., 0., 1.],
[1., 0., 0., 0., 1., 1., 0., 1., 1., 1., 0., 0., 0., 0., 0., 1.,
1., 1., 0., 1., 0., 0., 1., 1.],
[0., 1., 1., 0., 1., 0., 0., 1., 1., 1., 0., 1., 1., 1., 1., 1.,
0., 1., 0., 1., 0., 1., 0., 1.],
[0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,
1., 1., 0., 1., 1., 1., 1., 0.]]),
array([1, 0, 7, 9, 7, 1, 3, 1, 4, 1]))
>>> # Generators will have infinite remaining instances, so it returns -1
>>> stream.n_remaining_samples()
-1
>>> stream.has_more_samples()
True
"""
_numberAttribute = np.zeros((24,), dtype=int)
_NUM_IRRELEVANT_ATTRIBUTES = 17
def __init__(self, random_state=None, noise_percentage=0.0, has_noise=False,
n_drift_features=0):
super().__init__(random_state=random_state, noise_percentage=noise_percentage,
has_noise=has_noise)
self.n_drift_features = n_drift_features
self.name = "Led Generator with drift"
for i in range(self._TOTAL_ATTRIBUTES_INCLUDING_NOISE):
self._numberAttribute[i] = i
if self.has_noise and self.n_drift_features > 0:
random_int = self._random_state.randint(7)
offset = self._random_state.randint(self._NUM_IRRELEVANT_ATTRIBUTES)
for i in range(self.n_drift_features):
value1 = (i + random_int) % 7
value2 = 7 + (i + offset) % self._NUM_IRRELEVANT_ATTRIBUTES
self._numberAttribute[value1] = value2
self._numberAttribute[value2] = value1
self._prepare_for_use()
def next_sample(self, batch_size=1):
""" Returns next sample from the stream.
An instance is generated based on the parameters passed. If noise
is included the total number of attributes will be 24, if it's not
included there will be 7 attributes.
Parameters
----------
batch_size: int (optional, default=1)
The number of samples to return.
Returns
-------
tuple or tuple list
Return a tuple with the features matrix and the targets array
for the batch_size samples that were requested.
"""
data = np.zeros([batch_size, self.n_features + 1])
target = np.zeros(batch_size, dtype=int)
for j in range(batch_size):
self.sample_idx += 1
selected = self._random_state.randint(self.n_classes)
target[j] = selected
for i in range(self._NUM_BASE_ATTRIBUTES):
if (0.01 + self._random_state.rand()) <= self.noise_percentage:
data[j, self._numberAttribute[i]] = 1 if (
self._ORIGINAL_INSTANCES[selected, i] == 0) else 0
else:
data[j, self._numberAttribute[i]] = self._ORIGINAL_INSTANCES[selected, i]
if self.has_noise:
for i in range(self._NUM_BASE_ATTRIBUTES, self._TOTAL_ATTRIBUTES_INCLUDING_NOISE):
data[j, self._numberAttribute[i]] = self._random_state.randint(2)
self.current_sample_x = data[:, :self.n_features]
self.current_sample_y = target
return self.current_sample_x, self.current_sample_y
def get_data_info(self):
return "Led Generator with drift - {} features".format(self.n_features)
|
PypiClean
|
/lizard-ui-5.3.tar.gz/lizard-ui-5.3/lizard_ui/static/openlayers/1.12-r7/lib/OpenLayers/Format/WMSDescribeLayer/v1_1.js
|
/**
 * @requires OpenLayers/Format/WMSDescribeLayer.js
 */
/**
* Class: OpenLayers.Format.WMSDescribeLayer.v1_1
* Read SLD WMS DescribeLayer response for WMS 1.1.X
* WMS 1.1.X is tightly coupled to SLD 1.0.0
*
* Example DescribeLayer request:
* http://demo.opengeo.org/geoserver/wms?request=DescribeLayer&version=1.1.1&layers=topp:states
*
* Inherits from:
* - <OpenLayers.Format.WMSDescribeLayer>
*/
OpenLayers.Format.WMSDescribeLayer.v1_1 = OpenLayers.Class(
OpenLayers.Format.WMSDescribeLayer, {
/**
* Constructor: OpenLayers.Format.WMSDescribeLayer
* Create a new parser for WMS DescribeLayer responses.
*
* Parameters:
* options - {Object} An optional object whose properties will be set on
* this instance.
*/
initialize: function(options) {
OpenLayers.Format.WMSDescribeLayer.prototype.initialize.apply(this,
[options]);
},
/**
* APIMethod: read
* Read DescribeLayer data from a string, and return the response.
* The OGC defines 2 formats which are allowed for output,
* so we need to parse these 2 types for version 1.1.X
*
* Parameters:
* data - {String} or {DOMElement} data to read/parse.
*
* Returns:
* {Array} Array of {<LayerDescription>} objects which have:
* - {String} owsType: WFS/WCS
* - {String} owsURL: the online resource
* - {String} typeName: the name of the typename on the service
*/
read: function(data) {
if(typeof data == "string") {
data = OpenLayers.Format.XML.prototype.read.apply(this, [data]);
}
var root = data.documentElement;
var children = root.childNodes;
var describelayer = [];
var childNode, nodeName;
for(var i=0; i<children.length; ++i) {
childNode = children[i];
nodeName = childNode.nodeName;
if (nodeName == 'LayerDescription') {
var layerName = childNode.getAttribute('name');
var owsType = '';
var owsURL = '';
var typeName = '';
// check for owsType and owsURL attributes
if (childNode.getAttribute('owsType')) {
owsType = childNode.getAttribute('owsType');
owsURL = childNode.getAttribute('owsURL');
} else {
// look for wfs or wcs attribute
if (childNode.getAttribute('wfs') != '') {
owsType = 'WFS';
owsURL = childNode.getAttribute('wfs');
} else if (childNode.getAttribute('wcs') != '') {
owsType = 'WCS';
owsURL = childNode.getAttribute('wcs');
}
}
// look for Query child
var query = childNode.getElementsByTagName('Query');
if(query.length > 0) {
typeName = query[0].getAttribute('typeName');
if (!typeName) {
// because of Ionic bug
typeName = query[0].getAttribute('typename');
}
}
describelayer.push({layerName: layerName, owsType: owsType,
owsURL: owsURL, typeName: typeName});
}
}
return describelayer;
},
CLASS_NAME: "OpenLayers.Format.WMSDescribeLayer.v1_1"
});
|
PypiClean
|
/SciANN-0.7.0.1.tar.gz/SciANN-0.7.0.1/docs/templates/functionals.md
|
# Intro
A combination of neural network layers form a `Functional`.
Mathematically, a `functional` is a general mapping from an input set \\(X\\) onto some output set \\(Y\\). Once the parameters of this transformation are found, this mapping is called a `function`.
`Functional`s are needed to form `SciModels`.
A `Functional` is a class to form complex architectures (mappings) from inputs (`Variables`) to the outputs.
```python
from sciann import Variable, Functional
x = Variable('x')
y = Variable('y')
Fxy = Functional('Fxy', [x, y],
hidden_layers=[10, 20, 10],
activation='tanh')
```
`Functionals` can be plotted when a `SciModel` is formed. A minimum of one `Constraint` is needed to form the SciModel:
```python
from sciann.constraints import Data
from sciann import SciModel
model = SciModel(x, Data(Fxy),
plot_to_file='output.png')
```
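Once the `SciModel` is formed, it can be trained on sampled data. The snippet below is only a minimal sketch of that step: the sample arrays are made up here, and the exact keyword arguments of `train` may differ between SciANN versions, so treat it as an assumption rather than the canonical API.
```python
import numpy as np

# hypothetical sample data for the sketch
x_data = np.random.rand(1000)
fxy_data = np.random.rand(1000)

# fit the Data(Fxy) constraint on the sampled points (signature assumed)
history = model.train(x_data, fxy_data, epochs=200, batch_size=64)
```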
---
{{autogenerated}}
|
PypiClean
|
/channels_hellwebprice_events-1.0.0.tar.gz/channels_hellwebprice_events-1.0.0/wheel/wheelfile.py
|
from __future__ import print_function
import csv
import hashlib
import os.path
import re
import stat
import time
from collections import OrderedDict
from distutils import log as logger
from zipfile import ZIP_DEFLATED, ZipInfo, ZipFile
from wheel.cli import WheelError
from wheel.util import urlsafe_b64decode, as_unicode, native, urlsafe_b64encode, as_bytes, StringIO
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.+?))(-(?P<build>\d[^-]*))?
-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\.whl$""",
re.VERBOSE)
def get_zipinfo_datetime(timestamp=None):
# Some applications need reproducible .whl files, but they can't do this without forcing
# the timestamp of the individual ZipInfo objects. See issue #143.
timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timestamp or time.time()))
return time.gmtime(timestamp)[0:6]
class WheelFile(ZipFile):
"""A ZipFile derivative class that also reads SHA-256 hashes from
.dist-info/RECORD and checks any read files against those.
"""
_default_algorithm = hashlib.sha256
def __init__(self, file, mode='r', compression=ZIP_DEFLATED):
basename = os.path.basename(file)
self.parsed_filename = WHEEL_INFO_RE.match(basename)
if not basename.endswith('.whl') or self.parsed_filename is None:
raise WheelError("Bad wheel filename {!r}".format(basename))
ZipFile.__init__(self, file, mode, compression=compression, allowZip64=True)
self.dist_info_path = '{}.dist-info'.format(self.parsed_filename.group('namever'))
self.record_path = self.dist_info_path + '/RECORD'
self._file_hashes = OrderedDict()
self._file_sizes = {}
if mode == 'r':
# Ignore RECORD and any embedded wheel signatures
self._file_hashes[self.record_path] = None, None
self._file_hashes[self.record_path + '.jws'] = None, None
self._file_hashes[self.record_path + '.p7s'] = None, None
# Fill in the expected hashes by reading them from RECORD
try:
record = self.open(self.record_path)
except KeyError:
raise WheelError('Missing {} file'.format(self.record_path))
with record:
for line in record:
line = line.decode('utf-8')
path, hash_sum, size = line.rsplit(u',', 2)
if hash_sum:
algorithm, hash_sum = hash_sum.split(u'=')
try:
hashlib.new(algorithm)
except ValueError:
raise WheelError('Unsupported hash algorithm: {}'.format(algorithm))
if algorithm.lower() in {'md5', 'sha1'}:
raise WheelError(
'Weak hash algorithm ({}) is not permitted by PEP 427'
.format(algorithm))
self._file_hashes[path] = (
algorithm, urlsafe_b64decode(hash_sum.encode('ascii')))
def open(self, name_or_info, mode="r", pwd=None):
def _update_crc(newdata, eof=None):
if eof is None:
eof = ef._eof
update_crc_orig(newdata)
else: # Python 2
update_crc_orig(newdata, eof)
running_hash.update(newdata)
if eof and running_hash.digest() != expected_hash:
raise WheelError("Hash mismatch for file '{}'".format(native(ef_name)))
ef = ZipFile.open(self, name_or_info, mode, pwd)
ef_name = as_unicode(name_or_info.filename if isinstance(name_or_info, ZipInfo)
else name_or_info)
if mode == 'r' and not ef_name.endswith('/'):
if ef_name not in self._file_hashes:
raise WheelError("No hash found for file '{}'".format(native(ef_name)))
algorithm, expected_hash = self._file_hashes[ef_name]
if expected_hash is not None:
# Monkey patch the _update_crc method to also check for the hash from RECORD
running_hash = hashlib.new(algorithm)
update_crc_orig, ef._update_crc = ef._update_crc, _update_crc
return ef
def write_files(self, base_dir):
logger.info("creating '%s' and adding '%s' to it", self.filename, base_dir)
deferred = []
for root, dirnames, filenames in os.walk(base_dir):
# Sort the directory names so that `os.walk` will walk them in a
# defined order on the next iteration.
dirnames.sort()
for name in sorted(filenames):
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
arcname = os.path.relpath(path, base_dir).replace(os.path.sep, '/')
if arcname == self.record_path:
pass
elif root.endswith('.dist-info'):
deferred.append((path, arcname))
else:
self.write(path, arcname)
deferred.sort()
for path, arcname in deferred:
self.write(path, arcname)
def write(self, filename, arcname=None, compress_type=None):
with open(filename, 'rb') as f:
st = os.fstat(f.fileno())
data = f.read()
zinfo = ZipInfo(arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime))
zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16
zinfo.compress_type = compress_type or self.compression
self.writestr(zinfo, data, compress_type)
def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
ZipFile.writestr(self, zinfo_or_arcname, bytes, compress_type)
fname = (zinfo_or_arcname.filename if isinstance(zinfo_or_arcname, ZipInfo)
else zinfo_or_arcname)
logger.info("adding '%s'", fname)
if fname != self.record_path:
hash_ = self._default_algorithm(bytes)
self._file_hashes[fname] = hash_.name, native(urlsafe_b64encode(hash_.digest()))
self._file_sizes[fname] = len(bytes)
def close(self):
# Write RECORD
if self.fp is not None and self.mode == 'w' and self._file_hashes:
data = StringIO()
writer = csv.writer(data, delimiter=',', quotechar='"', lineterminator='\n')
writer.writerows((
(
fname,
algorithm + "=" + hash_,
self._file_sizes[fname]
)
for fname, (algorithm, hash_) in self._file_hashes.items()
))
writer.writerow((format(self.record_path), "", ""))
zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime())
zinfo.compress_type = self.compression
zinfo.external_attr = 0o664 << 16
self.writestr(zinfo, as_bytes(data.getvalue()))
ZipFile.close(self)
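# Usage sketch (not part of the original module): opening an existing wheel in read
# mode and streaming every member so the SHA-256 hashes recorded in RECORD are
# verified; the wheel file name below is hypothetical.
if __name__ == '__main__':
    with WheelFile('example-1.0-py3-none-any.whl') as wf:
        for name in wf.namelist():
            wf.read(name)  # raises WheelError on a hash mismatch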
|
PypiClean
|
/d8s_networking-0.4.2.tar.gz/d8s_networking-0.4.2/d8s_networking/networking.py
|
import json
import os
import sys
from typing import Any, Dict
import requests
def requests_basic_auth(user, password):
"""Return an instance of request's basic auth."""
from requests.auth import HTTPBasicAuth
return HTTPBasicAuth(user, password)
def _process_response(
response,
url,
*,
process_response_as_bytes: bool = False,
):
"""Handle the responses from requests."""
from d8s_json import json_read
if response.ok:
if process_response_as_bytes:
return response.content
try:
return json_read(response.text)
except json.JSONDecodeError:
return response.text
else:
message = f'{response.status_code} error from {response.request.method} {url}: {response.text}'
print(message)
return response
def get(
url,
*,
use_common_user_agent: bool = True,
process_response: bool = False,
process_response_as_bytes: bool = False,
**request_kwargs,
):
"""Make a GET request to the given URL."""
from d8s_user_agents import user_agent_common
if use_common_user_agent:
user_agent = user_agent_common()
if request_kwargs.get('headers'):
if not request_kwargs['headers'].get('User-Agent'):
request_kwargs['headers']['User-Agent'] = user_agent
# if there is already a user agent provided, use that
else:
headers = {'User-Agent': user_agent}
request_kwargs['headers'] = headers
response = requests.get(url, **request_kwargs)
if process_response or process_response_as_bytes:
result = _process_response(
response,
url,
process_response_as_bytes=process_response_as_bytes,
)
else:
result = response
return result
def head(url, *, process_response: bool = False, **kwargs):
"""Make a head request."""
response = requests.head(url, **kwargs)
if process_response:
return _process_response(response, url)
else:
return response
def _data_is_json(data: Any) -> bool:
"""."""
JSON_DATATYPES = (dict, list)
if isinstance(data, JSON_DATATYPES):
return True
return False
def post(
url,
*,
update_headers_for_datatype: bool = True,
process_response: bool = False,
process_response_as_bytes: bool = False,
**request_kwargs,
):
"""Make a POST request to the given URL with the given data."""
has_data = request_kwargs.get('data')
if update_headers_for_datatype and has_data:
data = request_kwargs['data']
if _data_is_json(data):
request_kwargs['data'] = json.dumps(data)
request_kwargs = _update_header_for_json(**request_kwargs)
response = requests.post(url, **request_kwargs)
if process_response or process_response_as_bytes:
result = _process_response(
response,
url,
process_response_as_bytes=process_response_as_bytes,
)
else:
result = response
return result
def headers_update(headers: Dict[str, str], new_header_key: str, new_header_value: Any, *, overwrite: bool = True):
    """Set the given header; an existing value is only replaced when overwrite is True."""
    if headers.get(new_header_key):
        if overwrite:
            headers[new_header_key] = new_header_value
    else:
        headers[new_header_key] = new_header_value
    return headers
def _update_header_for_json(**kwargs):
"""Given the keyword arguments for a request, check to see if there is already a header, if there is a "Content-Type" header, don't change it; if there is not a "Content-Type" header, add one."""
if kwargs.get('headers'):
kwargs['headers'] = headers_update(kwargs['headers'], 'Content-Type', 'application/json', overwrite=False)
else:
kwargs['headers'] = {'Content-Type': 'application/json'}
return kwargs
def put(
url,
*,
update_headers_for_datatype: bool = True,
process_response: bool = False,
process_response_as_bytes: bool = False,
**request_kwargs,
):
"""Make a PUT request to the given URL with the given data."""
has_data = request_kwargs.get('data')
if update_headers_for_datatype and has_data:
data = request_kwargs['data']
if _data_is_json(data):
request_kwargs['data'] = json.dumps(data)
request_kwargs = _update_header_for_json(**request_kwargs)
response = requests.put(url, **request_kwargs)
if process_response or process_response_as_bytes:
result = _process_response(
response,
url,
process_response_as_bytes=process_response_as_bytes,
)
else:
result = response
return result
def delete(
url,
*,
process_response: bool = False,
process_response_as_bytes: bool = False,
**request_kwargs,
):
"""Make a DELETE request to the given URL with the given data."""
response = requests.delete(url, **request_kwargs)
if process_response or process_response_as_bytes:
return _process_response(
response,
url,
process_response_as_bytes=process_response_as_bytes,
)
else:
return response
def url_hash(url, hash_type='sha256'):
"""Return the hash of the url."""
from d8s_hashes.hashes import _string_hash
return _string_hash(get(url, process_response=True), hash_type)
def urllib3_backoff_factor_executions(backoff_factor: float, number_of_requests: int):
"""Return the times (in seconds) of the first n requests with the given backoff_factor. See https://urllib3.readthedocs.io/en/latest/reference/index.html#urllib3.Retry under the "backoff_factor" argument."""
# the end of the range through which we iterate is number_of_requests plus one because we start the iteration at one and we want to have n items in the execution_times array
range_end = number_of_requests + 1
if range_end > 1:
# if the original request (which can be considered the zeroth request) fails, the first re-request is made immediately by urllib3
yield 0.0
for i in range(2, range_end):
yield backoff_factor * (2 ** (i - 1))
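# Usage sketch (not part of the original module): the generator above yields the
# urllib3 retry wait times; with backoff_factor=0.5 and 5 requests this prints
# [0.0, 1.0, 2.0, 4.0, 8.0].
if __name__ == '__main__':
    print(list(urllib3_backoff_factor_executions(0.5, 5)))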
|
PypiClean
|
/dysgu-1.6.0-cp37-cp37m-macosx_10_9_x86_64.whl/dysgu-1.6.0.dist-info/LICENSE.md
|
MIT License
Copyright (c) [2019] [Kez Cleal]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/python-aqbanking-0.0.8.tar.gz/python-aqbanking-0.0.8/README.md
|
AqBanking API for Python
========================
[](https://travis-ci.org/monofox/python-aqbanking) [](https://www.codefactor.io/repository/github/monofox/python-aqbanking)
This is a python wrapper for AqBanking - so of course you need the AqBanking and Gwenhywfar dependencies
installed for a successful compilation. The only supported operations at the moment are: getting the balance of an account,
getting the transactions of an account (limited by a start and end date), listing the accounts configured in
AqBanking, and checking an IBAN.
License
=======
This library is published under the GPLv3 License. See "LICENSE" for details.
Dependencies
============
The proper development packages are required for:
- AqBanking >= 5.8.1 (tested until 5.8.2)
- Python >= 3.1
- gwenhywfar >= 4.0.0
For the installation, it is necessary to have the proper development packages installed (e.g. `apt-get install libaqbanking-dev libgwenhywfar60-dev`)
Depending on your bank, you may need to update your bank account information (BPD) through aqhbci4 tool (refer to mailing list). Furthermore there might be a later release to support the TAN recording during balance / login.
Install
=======
To install this library, just execute (append --user if it should not be installed systemwide):
`python setup.py install`
Alternative, you can install the library via PyPi: `pip install python-aqbanking`
Please remember, that this library only works with Python 3.
Usage
======
To import it, just do the following:
`import aqbanking`
And then you can verify the IBAN number e.g. with:
`aqbanking.chkibn('DE19....')`
And to list all configured accounts (you can not configure it through this library at the moment), you execute this command:
`aqbanking.listacc()`
Furthermore in order to communicate with your bank, you'll need to register your application at [FinTS](https://www.hbci-zka.de/register/prod_register.htm). You'll receive a code which must be given to the module like:
`aqbanking.setRegistrationKey('some characters')`
For all other functions, you need first to create an account:
`acc = aqbanking.Account(no=157458624, bank_code=45021512)`
New is a function in order to get the information, which jobs or features are available:
`acc.availableJobs()`
Implemented is: `nationalTransfer` and `sepaTransfer`.
Furthermore, when performing a transfer you are sometimes asked to enter the password up to three times. You can build a PIN cache with the help of the `set_callbackPasswordStatus` function. It calls the Python callback with the parameters `token`, `pin` and `status`, where the status field can be 9 = reset, 1 = bad password, 2 = remove password and 0 = all went fine. A minimal sketch of such a cache is shown below.
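The callback body and the place where `set_callbackPasswordStatus` is registered (module level vs. `Account` instance) are assumptions here; check the `examples/` folder for the authoritative usage:
```python
import aqbanking

pin_cache = {}

def password_status(token, pin, status):
    # 0 = all went fine, 1 = bad password, 2 = remove password, 9 = reset
    if status == 0:
        pin_cache[token] = pin
    elif status in (1, 2):
        pin_cache.pop(token, None)
    elif status == 9:
        pin_cache.clear()

aqbanking.set_callbackPasswordStatus(password_status)
```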
You can find some examples inside of the `examples/` folder.
Known Bugs/Missing features
===========================
Smartcard/chipcard support is meanwhile integrated, but no prompt text telling the user to enter something on the reader's panel is provided.
The server certificate of the HTTPS connection is not validated at the moment, so do not use it for sensitive data, as a man-in-the-middle attack is possible without notice.
Contributing
============
If you'd like to contribute, please fork the repository and use a feature branch. Pull requests are warmly welcome. We cannot cover everything from the beginning. Your experience and expertise are necessary to make an awesome product out of it!
|
PypiClean
|
/django-ok-seo-0.9.4.tar.gz/django-ok-seo-0.9.4/seo/mixins/models.py
|
import mimetypes
from typing import Dict
from django.contrib.sites.shortcuts import get_current_site
from django.db.models import Model
from django.utils.translation import to_locale, get_language
from ..const import DEFAULT_OBJECT_TYPES, DEFAULT_TWITTER_TYPES
from ..settings import (
SEO_DEFAULT_IMAGE,
SEO_IMAGE_HEIGHT,
SEO_IMAGE_WIDTH,
SEO_OBJECT_IMAGE_FIELD,
SEO_SITE_NAME,
SEO_FB_APP_ID
)
from ..utils import get_locale
__all__ = (
'SeoTagsMixin',
)
class SeoTagsMixin:
"""
Mixin for seo tags in the <head> section
"""
SEO_IMAGE_FIELD = SEO_OBJECT_IMAGE_FIELD
def get_robots_content(self) -> str:
"""
Return robots content
"""
index = getattr(self, 'index', None)
follow = getattr(self, 'follow', None)
if index and follow:
return f'{index}, {follow}'
return ''
def get_canonical(self, request) -> str:
"""
Return canonical URL
"""
canonical = getattr(self, 'canonical', None)
if canonical and str(canonical).startswith('/'):
return request.build_absolute_uri(canonical)
return canonical
def get_meta_title(self) -> str:
"""
Return meta title
"""
return getattr(self, 'title', '')
def get_og_title(self) -> str:
"""
Return OpenGraph title
"""
return getattr(self, 'og_title', None) or self.get_meta_title()
def get_meta_description(self) -> str:
"""
Return meta description
"""
return getattr(self, 'description', '')
def get_og_description(self) -> str:
"""
Return OpenGraph description
"""
return (
getattr(self, 'og_description', None) or
self.get_meta_description()
)
def get_meta_keywords(self) -> str:
"""
Return meta keywords
"""
return getattr(self, 'keywords', '')
@staticmethod
def get_facebook_app_id() -> str:
"""
Return facebook app id
"""
return SEO_FB_APP_ID
def get_meta_image_field(self, obj: Model = None):
"""
Return image field instance to get image url
"""
return getattr(self, self.SEO_IMAGE_FIELD, None)
def get_meta_image(self, obj: Model = None) -> str:
"""
Return url of image
"""
image_field = self.get_meta_image_field(obj)
if image_field:
try:
return image_field.url
except Exception:
return SEO_DEFAULT_IMAGE
return SEO_DEFAULT_IMAGE
def get_meta_image_alt(self) -> str:
"""
Return alternative text for image
"""
return getattr(self, 'alt', self.get_meta_title())
def get_image_width(self) -> int:
"""
Return width value for image
"""
return getattr(self, 'width', SEO_IMAGE_WIDTH)
def get_image_height(self) -> int:
"""
Return height value for image
"""
return getattr(self, 'height', SEO_IMAGE_HEIGHT)
def get_opengraph_type(self) -> str:
"""
Return open graph object type
"""
return getattr(self, 'object_type', DEFAULT_OBJECT_TYPES[0][0])
def get_twitter_type(self) -> str:
"""
Return twitter card type
"""
if hasattr(self, 'twitter_type'):
return self.twitter_type
return getattr(self, 'twitter_type', DEFAULT_TWITTER_TYPES[0][0])
def get_h1_title(self) -> str:
"""
Return h1 title
"""
return getattr(self, 'h1', '')
def get_seo_text(self) -> str:
"""
Return seo text
"""
return getattr(self, 'seo_text', '')
def as_meta(
self, request, debug: bool, obj: Model = None
) -> Dict[str, str]:
"""
Return dict available to render meta tags
"""
meta = {
'robots': self.get_robots_content(),
'canonical': (
self.get_canonical(request) or
request.build_absolute_uri(request.path)
),
'title': self.get_meta_title(),
'og_title': self.get_og_title(),
'description': self.get_meta_description(),
'og_description': self.get_og_description(),
'keywords': self.get_meta_keywords(),
'facebook_app_id': self.get_facebook_app_id(),
'og_type': self.get_opengraph_type(),
'site_name': SEO_SITE_NAME or get_current_site(request),
'og_locale': get_locale(request),
'twitter_type': self.get_twitter_type(),
'request': request,
'debug': debug
}
image = self.get_meta_image(obj)
if image:
meta.update({
'image': request.build_absolute_uri(image),
'alt': self.get_meta_image_alt(),
'mime_type': mimetypes.guess_type(image)[0] or 'image/jpeg',
'image_width': self.get_image_width(),
'image_height': self.get_image_height(),
})
return meta
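# Usage sketch (not part of the original module, names are hypothetical): the mixin is
# meant to be combined with a model that exposes the seo fields; a view can then call
# ``instance.as_meta(request, settings.DEBUG)`` to obtain the context for the meta tags,
# e.g.:
#
#     class Article(SeoTagsMixin, models.Model):
#         title = models.CharField(max_length=255)
#         description = models.TextField(blank=True)
#         image = models.ImageField(upload_to='articles/')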
|
PypiClean
|
/gd-vae-pytorch-1.0.4.tar.gz/gd-vae-pytorch-1.0.4/gd_vae_pytorch/geo_map.py
|
r"""
.. image:: geo_map_py.png
Geometric maps for manifolds and related methods for computing gradients for training.
Provides the *g-projection* maps discussed in the GD-VAE paper for training with
manifold latent spaces within machine learning methods.
If you find these codes or methods helpful for your project, please cite our related work.
"""
# more information: http://atzberger.org/
import torch,torch.nn,sklearn,sklearn.neighbors;
class PointCloudMapFunc(torch.autograd.Function):
r"""
Module layer which maps an input to nearest point in a manifold having a point cloud
representation. This layer also handles computing the associated gradients
for use in backpropagation and training methods.
"""
@staticmethod
def find_nearest_manifold_pt_kdtree(X0,params):
r"""Finds the nearest point on the manifold using for efficiency a kdtree data structure
for the point cloud representation.
Parameters:
X0 (Tensor): input point to map to the manifold. Tensor of shape [num_pts,num_dim].
params (dict): the parameters for the manifold map (matches find_k_nearest_neighs_kdtree()).
Returns:
**x** *(Tensor)* -- giving the closest nearby point. Tensor of shape [num_pts,num_dim].
"""
x,I_x = PointCloudMapFunc.find_k_nearest_neighs_kdtree(X0,k=1,params=params);
params['I_x'] = I_x;
return x;
@staticmethod
def find_k_nearest_neighs_kdtree(X0,k,params):
r"""Find the nearest neighbors on the manifold using for efficiency a kdtree data structure
for the point cloud representation.
Parameters:
X0 (Tensor): input point to map to the manifold. Tensor of shape [num_pts,num_dim].
manifold_ptsX (Tensor): points of the manifold point cloud representation.
Tensor of shape [num_manifold_pts,num_dim].
kdtree_params (dict): parameters for the kdtree methods. Giving 'None'
will result in the use of default parameters.
Returns:
(tuple) containing
**X_neighs** *(Tensor)* -- giving the closest nearby points. Tensor of shape [num_pts,num_dim].
**I_neighs** *(Tensor)* -- giving the indices of the closest points. Tensor of shape [num_pts,1].
"""
KDTree = sklearn.neighbors.KDTree;
r"""Finds nearest neighbors, assumes column vectors."""
flag_build_tree = False; # for now each time
manifold_ptsX = params['manifold_ptsX'];
if 'kdtree' in params:
kdtree = params['kdtree'];
else:
kdtree = None;
if kdtree is None:
flag_build_tree = True;
if flag_build_tree:
if 'kdtree_params' in params:
kdtree_params = params['kdtree_params'];
else:
kdtree_params = {'leaf_size':10,'metric':'euclidean'};
leaf_size = kdtree_params['leaf_size'];
metric = kdtree_params['metric'];
#manifold_ptsX = params['manifold_ptsX'];
kdtree = KDTree(manifold_ptsX.detach().cpu().numpy(),leaf_size=leaf_size,metric=metric);
params['kdtree'] = kdtree;
params['kdtree_params'] = kdtree_params;
I_neighs = kdtree.query(X0.detach().cpu().numpy(),k=k,return_distance=False);
I_neighs = I_neighs.squeeze(1);
X_neighs = manifold_ptsX[I_neighs,:];
return X_neighs, I_neighs;
@staticmethod
def forward(ctx, input, params=None):
r"""
Performs the projection mapping of the input points :math:`x` to the points :math:`z` on the manifold.
Parameters:
ctx (dict): pytorch context data structure.
input (Tensor): points :math:`x` in the embedding space. Tensor of shape [num_samples,num_dim_x].
params (dict): the parameters for the mapping
Returns:
**output** *(Tensor)* -- points :math:`z` on the manifold obtained from mapping :math:`x`. Tensor of size [num_samples,num_dim_x].
**params** [members]
============================= =======================================
**Property** **Description**
----------------------------- ---------------------------------------
**u** (Tensor) coordinate parameterization for the
manifold points
**coordinate_chart** (Tensor) coordinate chart information
**device** (torch.device) for the hardware device as a specific
gpu, cpu, or other component
**kdtree_params** (dict) for the parameters for the kdtree
methods
============================= =======================================
"""
info = {'params':params};
#device = params['device'];
device = input.device;
X = input; # short-hand
# -- solve the minimization problem (here we use a simple sample-point method)
# use kd-tree to find closest point in the point-cloud representation of the manifold
find_nearest_manifold_pt = params['find_nearest_manifold_pt']; # function to get closest point on the manifold
find_nearest_manifold_pt_params = params['find_nearest_manifold_pt_params'];
# for now we map to the nearest tabulated point, could also implement interpolations
x = find_nearest_manifold_pt(X,find_nearest_manifold_pt_params);
I_x = find_nearest_manifold_pt_params['I_x'];
info.update({'x':x,'X':X,'I_x':I_x});
output = x; # the collection of closest point outputs
ctx.atz_stored_for_later = info;
return output;
@staticmethod
def backward(ctx, grad_output):
r"""
Computes the gradients of the projection map.
"""
info = ctx.atz_stored_for_later; params = info['params'];
x = info['x']; X = info['X']; I_x = info['I_x']; num_samples = x.shape[0]; num_dim_x = x.shape[1];
device = x.device;
# get surface information (could be coordinate charts specific to each x)
get_manifold_sigma_info = params['get_manifold_sigma_info']; # function to get local sigm(u) and derivatives
get_manifold_sigma_info_params = params['get_manifold_sigma_info_params']; # function to get local sigm(u) and derivatives
get_manifold_sigma_info_params['I_x'] = I_x; # updated by recent closest point algorithm call
manifold_info = get_manifold_sigma_info(x,get_manifold_sigma_info_params);
d_ui_sigma_k = manifold_info['d_ui_sigma_k']; d_ui_uj_sigma_k = manifold_info['d_ui_uj_sigma_k']; sigma_k = manifold_info['sigma_k'];
num_dim_u = d_ui_sigma_k.shape[1];
# compute the tensors (note coordinate charts can be x-dependent)
d_u_G = torch.zeros(num_samples,num_dim_u,num_dim_u,device=device);
for i in range(0,num_dim_u):
for j in range(0,num_dim_u):
d_u_G[:,i,j] = torch.sum(d_ui_sigma_k[:,i,:]*d_ui_sigma_k[:,j,:],1) - torch.sum((X[:,:] - x[:,:])*d_ui_uj_sigma_k[:,i,j,:],1);
# compute the tensors (note coordinate charts can be x-dependent)
d_X_G = torch.zeros(num_samples,num_dim_u,num_dim_x,device=device);
for i in range(0,num_dim_u):
d_X_G[:,i,:] = -d_ui_sigma_k[:,i,:];
# use Gaussian elimination to solve the inverse [\nabla_u G]^{-1},
# to obtain the final derivative
b = d_X_G; A = d_u_G;
#xx, LU = torch.solve(b,A); # dL/dX = -d_u_G^{-1}*d_X_G__d_x_L.
xx = torch.linalg.solve(A,b); # dL/dX = -d_u_G^{-1}*d_X_G__d_x_L.
du_dX = -1*xx; # assumes shape = [num_samples,num_dim_u,num_dim_x]
# -- for back-prop we have
# dL/dX = du/dX*dx/du*dL/dx = -([nabla_u G]^{-1} \nabla_X G)*\nabla_u\sigma*dL/dx
# first calculate (dG/dX)*(dL/dx)
dL_dx = grad_output; # assumes grad_output.shape = [num_samples,num_dim_x]
dL_du = torch.zeros(num_samples,num_dim_u,device=device); # @optimize
for i in range(0,num_dim_u):
dL_du[:,i] = torch.sum(d_ui_sigma_k[:,i,:]*dL_dx[:,:],1);
dL_dX = torch.zeros(num_samples,num_dim_x,device=device); # @optimize
for i in range(0,num_dim_x):
dL_dX[:,i] = torch.sum(du_dX[:,:,i]*dL_du[:,:],1);
grad_input = dL_dX;
return grad_input, None;
class ManifoldPointCloudLayer(torch.nn.Module):
r"""
This layer maps an input onto a manifold having a point cloud
representation and handles computing the associated gradients for
use in backpropagation.
"""
def __init__(self,params):
r"""
Parameters:
params (dict): collection of parameters for the mapping.
**params** [members]
============================= =======================================
**Property** **Description**
----------------------------- ---------------------------------------
**u** (Tensor) coordinate parameterization for the
manifold points
**coordinate_chart** (Tensor) coordinate chart information
**device** (torch.device) for the hardware device as a specific
gpu, cpu, or other component
**kdtree_params** (dict) for the parameters for the kdtree
methods
============================= =======================================
"""
super().__init__();
self.params = params;
def forward(self, input):
r"""
Performs the projection mapping of the input points :math:`x` to the points :math:`z` on the manifold.
Parameters:
input (Tensor): points :math:`x` in the embedding space. Tensor of shape [num_samples,num_dim_x].
Returns:
**output** *(Tensor)*: points :math:`z` on the manifold projected from :math:`x`. Tensor of size [num_samples,num_dim_x].
"""
# compute the periodic padding of the input
return PointCloudMapFunc.apply(input,self.params);
def to(self,device):
r"""
Maps the stored manifold points to the specified device.
"""
self.params['manifold_ptsX'] = self.params['manifold_ptsX'].to(device);
return self;
def extra_repr(self):
r"""
Gives a string representation for the parameters.
"""
# print information about this class
#return 'ManifoldPointCloudLayer: (no internal parameters)';
return 'ManifoldPointCloudLayer: params.keys() = ' + str(self.params.keys());
class ManifoldDirectMapLayer(torch.nn.Module):
r"""
This layer projects an input onto a manifold having a direct
representation as an expression that can be backpropagated.
"""
def __init__(self,params):
r"""
Parameters:
params (dict): the parameters of the map including
**params** [members]
========================== =======================================
**Property** **Description**
-------------------------- ---------------------------------------
**func_map** (function) function for the direct mapping
**func_map_params** (dict) parameters for the mapping function
**device** (torch.device) for the hardware device as a specific
gpu, cpu, or other component.
========================== =======================================
"""
super().__init__();
self.params = params;
def forward(self, input):
r"""
Performs the projection mapping of the input points :math:`x` to the points :math:`z` on the manifold.
Parameters:
input (Tensor): points :math:`x` in the embedding space. Tensor of shape [num_samples,num_dim_x].
Returns:
**output** *(Tensor)* -- points :math:`z` on the manifold projected from :math:`x`. Tensor of size [num_samples,num_dim_x].
"""
func_map,func_map_params = tuple(map(self.params.get,['func_map','func_map_params']));
# compute the direct mapping for the manifold
output = func_map(input,func_map_params);
return output;
def to(self,device):
r"""
Currently nothing extra to do to map to a device.
"""
return self;
def extra_repr(self):
r"""
Gives a string representation for the parameters.
"""
# print information about this class
#return 'ManifoldPointCloudLayer: (no internal parameters)';
return 'ManifoldDirectMapLayer: params = ' + str(self.params);
def map_clifford_torus(input,params):
r"""
Computes the clifford torus map as represented by a product-space of circles
in :math:`R^{2n}`.
Parameters:
input (Tensor): input points :math:`x` to map to the Clifford Torus.
params_map (dict): parameters for the clifford torus map.
Returns:
**z** *(Tensor)* -- points mapped to the manifold
**params_map** [members]
========================== =======================================
**Property** **Description**
-------------------------- ---------------------------------------
**num_circles** (int) (default is 2): for number of circles
to use for the product-space
**device** (torch.device) for the hardware device as a specific
gpu, cpu, or other component.
========================== =======================================
"""
# compute the mapping to Clifford torus
num_circles,device = tuple(map(params.get,['num_circles','device']));
if num_circles is None:
num_circles = 2;
if device is None:
#device = torch.device('cpu');
device = input.device;
X = input; # short-hand
num_dim_c = 2; num_dim_z = num_circles*num_dim_c; num_samples = X.shape[0];
x = torch.zeros(num_samples,num_dim_z,device=device);
for k in range(0,num_circles):
v = X[:,k*num_dim_c:(k + 1)*num_dim_c];
norm_v = torch.sqrt(torch.sum(torch.pow(v,2),1)).unsqueeze(1);
x[:,k*num_dim_c:(k + 1)*num_dim_c] = v/norm_v; # map to a unit circle
return x; # the collection of closest point outputs
def map_sphere(input,params):
r"""
Computes the sphere map as represented in :math:`R^{n}`.
Parameters:
input (Tensor): input points :math:`x` to map to the sphere.
params_map (dict): parameters for the sphere map.
Returns:
**z** *(Tensor)* -- points mapped to the sphere.
**params_map** [members]
========================== =======================================
**Property** **Description**
-------------------------- ---------------------------------------
**sphere_r** (double) (default is 1.0) radius of the sphere
**epsilon** (double) (default is 1e-10) used to avoid
dividing by zero
**device** (torch.device) for the hardware device as a specific
gpu, cpu, or other component
========================== =======================================
"""
# compute the mapping to sphere
sphere_r,epsilon,device = tuple(map(params.get,['sphere_r','epsilon','device']));
if sphere_r is None:
sphere_r = 1.0;
if epsilon is None:
epsilon = 1e-10;
if device is None:
#device = torch.device('cpu');
device = input.device;
X = input; # short-hand
num_dim_z = X.shape[1]; # num_dim_x
num_samples = X.shape[0];
x = torch.zeros(num_samples,num_dim_z,device=device);
norm_X = torch.sqrt(torch.sum(torch.pow(X,2),1)).unsqueeze(1);
# we use epsilon to avoid division by zero
x = X/(norm_X + epsilon); # map to a sphere of radius r
return x; # the collection of closest point outputs
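# Usage sketch (not part of the original module): mapping a batch of points in R^4
# onto the unit sphere and onto a Clifford torus (two circles) with the
# direct-map layer; empty parameter dicts fall back to the defaults above.
if __name__ == '__main__':
    pts = torch.randn(8, 4);
    sphere_layer = ManifoldDirectMapLayer({'func_map': map_sphere, 'func_map_params': {}});
    torus_layer = ManifoldDirectMapLayer({'func_map': map_clifford_torus, 'func_map_params': {}});
    print(sphere_layer(pts).shape, torus_layer(pts).shape);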
|
PypiClean
|
/spgrep_modulation-0.2.5.tar.gz/spgrep_modulation-0.2.5/docs/phonon.md
|
# Lattice vibration
## Harmonic phonon
Hamiltonian and commutation relations:
```{math}
H = \sum_{ l \kappa \mu } \frac{ p_{\mu}(l\kappa)^{2} }{ 2 M_{\kappa} }
+ \frac{1}{2} \sum_{ l \kappa \mu } \sum_{ l' \kappa' \mu' } \Phi_{ \mu \mu' }(l\kappa, l'\kappa') u_{\mu}(l\kappa) u_{\mu'}(l'\kappa')
```
Dynamical matrix [^dynamical_matrix]
```{math}
D_{\mu \mu'}(\kappa\kappa'; \mathbf{q})
= \frac{1}{ \sqrt{ M_{\kappa} M_{\kappa'} } } \sum_{l'} \Phi_{ \mu \mu' }(0\kappa, l'\kappa') e^{i \mathbf{q} \cdot ( \mathbf{r}(l'\kappa') - \mathbf{r}(0\kappa) )}
```
We denote {math}`[\mathbf{D}(\mathbf{q})]_{\kappa\mu, \kappa'\mu'} = D_{\mu \mu'}(\kappa\kappa'; \mathbf{q})`, then
```{math}
\mathbf{D}(-\mathbf{q}) &= \mathbf{D}(\mathbf{q})^{\ast} \\
\mathbf{D}(\mathbf{q})^{\dagger} &= \mathbf{D}(\mathbf{q}) \quad \mbox{(Hermite)}.
```
Here we use {math}`\Phi_{ \mu' \mu }(0\kappa', -l\kappa) = \Phi_{ \mu \mu' }(0\kappa, l'\kappa')`.
Let normalized eigenvector of {math}`\mathbf{D}(\mathbf{q})` as {math}`[\mathbf{e}(\mathbf{q}\nu)]_{\kappa\mu} = e_{\mu}(\kappa; \mathbf{q}\nu)` with
```{math}
\mathbf{D}(\mathbf{q}) \mathbf{e}(\mathbf{q}\nu)
&= \omega_{\mathbf{q}\nu}^{2} \mathbf{e}(\mathbf{q}\nu)
\quad (\nu = 1, \dots, 3N) \\
\omega_{-\mathbf{q}\nu}^{2} &= \omega_{\mathbf{q}\nu}^{2}.
```
We can choose as
```{math}
e_{\mu}(\kappa; -\mathbf{q}\nu) = e_{\mu}(\kappa; \mathbf{q}\nu)^{\ast}.
```
Later we denote {math}`q = (\mathbf{q}, \nu)` and {math}`-q = (-\mathbf{q}, \nu)`.
[^dynamical_matrix]: This is the same phase convention as [phonopy](https://phonopy.github.io/phonopy/formulation.html#dynamical-matrix).
There is the other formulation for defining dynamical matrix as
```{math}
\Psi_{\mu \mu'}(\kappa\kappa'; \mathbf{q})
= \frac{1}{ \sqrt{ M_{\kappa} M_{\kappa'} } } \sum_{l'} \Phi_{ \mu \mu' }(0\kappa, l'\kappa') e^{i \mathbf{q} \cdot \mathbf{r}(l')}.
```
## Normal coordinates
```{math}
u_{\mu}(l\kappa)
&=: \frac{1}{\sqrt{ L^{3} M_{\kappa} }} \sum_{q} Q_{q} e_{\mu}(\kappa; q) e^{i \mathbf{q} \cdot \mathbf{r}(l\kappa)} \\
p_{\mu}(l\kappa)
&=: \sqrt{\frac{M_{\kappa}}{L^{3}}} \sum_{q} P_{q} e_{\mu}(\kappa; -q) e^{-i \mathbf{q} \cdot \mathbf{r}(l\kappa)} \\
Q_{q}
&= \sum_{ l\kappa\mu } \sqrt{ \frac{M_{\kappa}}{L^{3}} } u_{\mu}(l\kappa) e_{\mu}(\kappa; -q) e^{-i \mathbf{q} \cdot \mathbf{r}(l\kappa)} \\
P_{q}
&= \sum_{ l\kappa\mu } \frac{1}{\sqrt{ L^{3}M_{\kappa} }} p_{\mu}(l\kappa) e_{\mu}(\kappa; q) e^{i \mathbf{q} \cdot \mathbf{r}(l\kappa)} \\
Q_{-q} &= Q_{q}^{\ast} \\
P_{-q} &= P_{q}^{\ast} \\
H &= \frac{1}{2} \sum_{q} \left( P_{q} P_{-q} + \omega_{q}^{2} Q_{q} Q_{-q} \right) \\
```
Creation and annihilation operators
```{math}
A_{q}
&:= \frac{1}{\sqrt{2\hbar\omega_{q}}} \left( \omega_{q} Q_{q} + i P_{-q} \right) \\
A_{q}^{\dagger}
&:= \frac{1}{\sqrt{2\hbar\omega_{q}}} \left( \omega_{q} Q_{-q} - i P_{q} \right) \\
Q_{q}
&= \sqrt{\frac{\hbar}{2\omega_{q}}} \left( A_{q} + A_{-q}^{\dagger} \right) \\
P_{q}
&= i \sqrt{\frac{\hbar\omega_{q}}{2}} \left( A_{-q}^{\dagger} - A_{q} \right) \\
\left[ A_{q}, A_{q'}^{\dagger} \right] &= \delta_{qq'} \\
\left[ A_{q}, A_{q'} \right] &= 0 \\
\left[ A_{q}^{\dagger}, A_{q'}^{\dagger} \right] &= 0 \\
H &= \sum_{q} \hbar \omega_{q} \left( A_{q}^{\dagger}A_{q} + \frac{1}{2} \right)
```
In canonical ensemble:
```{math}
\langle A_{q}^{\dagger}A_{q} \rangle_{\beta}
&= \frac{1}{ e^{\beta \hbar \omega_{q}} - 1 } \\
\langle |Q_{q}|^{2} \rangle_{\beta}
&= \frac{\hbar}{2\omega_{q}} \left( 2 \langle A_{q}^{\dagger}A_{q} \rangle_{\beta} + 1 \right)
```
## Action on displacements
We define left group action for positions {math}`\mathbf{r}(l\kappa) = \mathbf{r}(l) + \mathbf{r}(0\kappa)` by {math}`g = (\mathbf{R}_{g}, \mathbf{\tau}_{g}) \in \mathcal{G}` as
```{math}
g \mathbf{r}(l\kappa)
:= \mathbf{R}_{g} \mathbf{r}(l\kappa) + \mathbf{\tau}_{g}.
```
We denote that site {math}`\mathbf{r}(0, \kappa)` is transformed to {math}`\mathbf{r}(0, g \kappa ) + \mathbf{h}_{g}(\kappa)` by symmetry operation {math}`g`.
Then,
```{math}
g \mathbf{r}(\mathbf{l}, \kappa) &= \mathbf{R}_{g} \mathbf{r}(l) + \mathbf{r}(0, g\kappa) + \mathbf{h}_{g}(\kappa) \\
```
% \mathbf{h}_{g^{-1}}(\kappa) &= - \mathbf{p}_{g}^{-1} \mathbf{h}_{g}(g^{-1}\kappa)
We define left group action for displacement at {math}`\mathbf{r}(l\kappa)` as [^displacement_action]
```{math}
g u_{\mu}(\mathbf{r}(l\kappa))
:= \sum_{\nu} [\mathbf{R}_{g}]_{\mu\nu} u_{\nu}(g \mathbf{r}(l\kappa)).
```
[^displacement_action]: This definition actually satisfies the condition of left group action
```{math}
\left[ g \left( g' \mathbf{u}(\mathbf{r}(l\kappa)) \right) \right]_{\mu}
&= \left[ g \left\{ \sum_{\nu} R_{g', \mu'\nu} u_{\nu}( g'\mathbf{r}(l\kappa) ) \right\}_{\mu'} \right]_{\mu} \\
&= \sum_{ \mu'\nu } R_{g, \mu\mu'} R_{g', \mu'\nu} u_{\nu}( gg'\mathbf{r}(l\kappa) ) \\
&= \sum_{ \nu } R_{gg', \mu\nu} u_{\nu}( gg'\mathbf{r}(l\kappa) ) \\
&= \left[ (gg') \mathbf{u}(\mathbf{r}(l\kappa)) \right]_{\mu}.
```
Consider Fourier transformation of {math}`\mathbf{u}(\mathbf{r}(l\kappa))`
```{math}
\mathbf{u}(\kappa; \mathbf{q})
&:= \sqrt{\frac{M_{\kappa}}{L^{3}}} \sum_{l} \mathbf{u}(\mathbf{r}(l\kappa)) e^{ i \mathbf{q} \cdot \mathbf{r}(l) } \\
\mathbf{u}(\mathbf{r}(l\kappa))
&= \frac{1}{\sqrt{M_{\kappa}L^{3}}} \sum_{\mathbf{q}} \mathbf{u}(\kappa; \mathbf{q}) e^{ -i \mathbf{q} \cdot \mathbf{r}(l) } \\
g u_{\mu}(\kappa; \mathbf{q})
&= \sqrt{\frac{M_{\kappa}}{L^{3}}} \sum_{l} g u_{\mu}(\mathbf{r}(l\kappa)) e^{i \mathbf{q}\cdot \mathbf{r}(l)} \\
&= \sqrt{\frac{M_{\kappa}}{L^{3}}} \sum_{l}\sum_{\nu}
R_{g,\mu\nu} u_{\nu}\left( \mathbf{R}_{g}\mathbf{r}(l) + \mathbf{r}(0, g\kappa) + \mathbf{h}_{g}(\kappa) \right)
e^{i \mathbf{q}\cdot \mathbf{r}(l)} \\
&\quad (\mathbf{r}(l') := \mathbf{R}_{g}\mathbf{r}(l) + \mathbf{h}_{g}(\kappa) ) \\
&= \sqrt{\frac{M_{\kappa}}{L^{3}}} \sum_{l'}\sum_{\nu} R_{g,\mu\nu} u_{\nu}\left( \mathbf{r}(l', g\kappa) \right) e^{i \mathbf{q}\cdot \mathbf{R}_{g}^{-1}(\mathbf{r}(l') - \mathbf{h}_{\kappa} ) } \\
&= \sqrt{\frac{M_{\kappa}}{L^{3}}} \sum_{l'}\sum_{\nu} R_{g,\mu\nu} u_{\nu}\left( \mathbf{r}(l', g\kappa) \right) e^{i \mathbf{R}_{g}\mathbf{q}\cdot (\mathbf{r}(l') - \mathbf{h}_{\kappa} ) }
\quad (\because \mathbf{R}_{g} \in O(3)) \\
&= \sum_{\kappa'\mu'} u_{\mu'}(\kappa'; \mathbf{R}_{g} \mathbf{q} ) \Gamma_{\kappa'\mu'; \kappa\mu}^{\mathbf{q}}(g)
\quad (\because M_{g\kappa} = M_{\kappa}),
```
where
```{math}
:label: dynamical_matrix_rep
\Gamma_{\kappa'\mu'; \kappa\mu}^{\mathbf{q}}(g)
&:= \exp \left( -i \mathbf{R}_{g} \mathbf{q} \cdot \mathbf{h}_{g}(\kappa) \right) [\mathbf{R}_{g}]_{\mu'\mu} \delta_{ g\kappa, \kappa' } \\
&:= \exp \left( -2 \pi i \mathbf{R}_{g, f}^{\top} \mathbf{q}_{f} \cdot \mathbf{h}_{g, f}(\kappa) \right) [\mathbf{R}_{g}]_{\mu'\mu} \delta_{ g\kappa, \kappa' } \\
&\quad (
\mathbf{R}_{g, f} := \mathbf{A}^{-1} \mathbf{R}_{g} \mathbf{A},
\mathbf{q} =: 2\pi \mathbf{A}^{-\top} \mathbf{q}_{f},
\mathbf{v} =: \mathbf{A} \mathbf{v}_{f}
)
```
Equation {eq}`dynamical_matrix_rep` is essentially the same with Eq. (2.37) of {cite}`RevModPhys.40.1`.
We write matrix representation {math}`[\mathbf{\Gamma}^{\mathbf{q}}(g)]_{ \kappa'\mu'; \kappa\mu } := \Gamma_{\kappa'\mu'; \kappa\mu}^{\mathbf{q}}(g)`.
Then,
```{math}
\left[ \mathbf{\Gamma}^{ \mathbf{q}}(gg') \right]_{ \kappa'\mu', \kappa\mu }
&= \delta_{gg'\kappa, \kappa'} R_{gg', \mu'\mu} \exp \left( -i \mathbf{R}_{gg'}\mathbf{q} \cdot \mathbf{h}_{gg'}(\kappa) \right) \\
&= \sum_{ \kappa''\nu }
\delta_{g\kappa'', \kappa'} \delta_{g'\kappa, \kappa''}
R_{g, \mu'\nu} R_{g', \nu\mu}
\exp \left( -i \mathbf{R}_{g'}\mathbf{q} \cdot \mathbf{h}_{g'}(\kappa) \right)
\exp \left( -i \mathbf{R}_{g}\mathbf{R}_{g'}\mathbf{q} \cdot \mathbf{h}_{g}(g\kappa') \right) \\
&= \left[ \mathbf{\Gamma}^{ \mathbf{R}_{g'} \mathbf{q}}(g) \mathbf{\Gamma}^{ \mathbf{q}}(g') \right]_{ \kappa'\mu', \kappa\mu } \\
\mathbf{\Gamma}^{\mathbf{q}}(g)^{\dagger} \mathbf{\Gamma}^{ \mathbf{q}}(g)
&= \mathbf{1} \quad \mbox{(Unitary)} \\
\Gamma^{\mathbf{q}}((E, \mathbf{t}))_{ \kappa'\mu', \kappa\mu }
&= \exp \left( -i \mathbf{q} \cdot \mathbf{t} \right) \delta_{\mu'\mu} \delta_{ \kappa, \kappa' }.
```
Fourier transformation of force constants
```{math}
\Phi_{\mu\mu'}(\kappa\kappa'; \mathbf{q})
&:= \frac{1}{\sqrt{M_{\kappa}M_{\kappa'}}} \sum_{l'} \Phi_{\mu\mu'}(0\kappa; l'\kappa') e^{ i \mathbf{q} \cdot \mathbf{r}(l') } \\
\sum_{ l l' } \Phi_{ \mu \mu' }(l\kappa, l'\kappa') u_{\mu}(l\kappa) u_{\mu'}(l'\kappa')
&= \sum_{\mathbf{q}} \Phi_{\mu\mu'}(\kappa\kappa'; \mathbf{q}) u_{\mu}(\kappa; \mathbf{q}) u_{\mu'}(\kappa'; -\mathbf{q}) \\
```
The condition that potential energy is invariant under symmetry operations is rewritten as [^fourier_force_constant]
```{math}
\mathbf{\Phi}(\mathbf{R}_{g} \mathbf{q})
= \mathbf{\Gamma}^{\mathbf{q}}(g) \mathbf{\Phi}(\mathbf{q}) \mathbf{\Gamma}^{\mathbf{q}}(g)^{\dagger}.
```
[^fourier_force_constant]: The derivation is as follows:
```{math}
\sum_{ l\kappa\mu l'\kappa'\mu' } \Phi_{ \mu \mu' }(l\kappa, l'\kappa') u_{\mu}(l\kappa) u_{\mu'}(l'\kappa')
&= \sum_{ l\kappa\mu l'\kappa'\mu' } \Phi_{ \mu \mu' }(l\kappa, l'\kappa') gu_{\mu}(l\kappa) gu_{\mu'}(l'\kappa') \\
&= \dots = \sum_{ } \left[ \mathbf{\Gamma}^{\mathbf{q}}(g) \mathbf{\Phi}(\mathbf{q}) \mathbf{\Gamma}^{\mathbf{q}}(g)^{\dagger} \right]_{\kappa\mu, \kappa'\mu'} u_{\mu}(\kappa; \mathbf{R}_{g}\mathbf{q}) u_{\mu'}(\kappa'; -\mathbf{R}_{g}\mathbf{q}).
```
## Small representation of {math}`\mathcal{G}^{\mathbf{q}}`
For {math}`h, h' \in \mathcal{G}^{\mathbf{q}}`,
```{math}
\mathbf{\Gamma}^{ \mathbf{q}}(h) \mathbf{\Gamma}^{ \mathbf{q}}(h')
&= \mathbf{\Gamma}^{ \mathbf{q}}(hh') \\
\mathbf{\Phi}(\mathbf{q})
&= \mathbf{\Gamma}^{\mathbf{q}}(h) \mathbf{\Phi}(\mathbf{q}) \mathbf{\Gamma}^{\mathbf{q}}(h)^{\dagger}
\quad (\forall h \in \mathcal{G}^{\mathbf{q}}).
```
We can introduce projective representation {math}`\overline{\Gamma}^{ \mathbf{q}}`,
```{math}
\mathbf{\Gamma}^{ \mathbf{q}}(h)
&=: e^{ -i \mathbf{q} \cdot \mathbf{v}_{h} } \overline{\mathbf{\Gamma}}^{ \mathbf{q}}(h) \\
\overline{\mathbf{\Gamma}}^{ \mathbf{q}}(h) \overline{\mathbf{\Gamma}}^{ \mathbf{q}}(h')
&= e^{ -i \mathbf{q} \cdot ( \mathbf{R}_{h} \mathbf{v}_{h'} - \mathbf{v}_{h'} ) } \overline{\mathbf{\Gamma}}^{ \mathbf{q}}(hh') \\
\overline{\mathbf{\Gamma}}^{ \mathbf{q}}((E, \mathbf{t}))
&= \mathbf{1}
```
Thus, we only need to consider projective representation {math}`\overline{\mathbf{\Gamma}}^{ \mathbf{q}}` for little co-group {math}`\overline{\mathcal{G}}^{\mathbf{q}} \simeq \mathcal{G}^{\mathbf{q}} / \mathcal{T}`.
The decomposition of the projective representation
```{math}
\overline{\Gamma}^{\mathbf{q}} &= \sum_{\alpha} \sum_{\sigma} \overline{\Gamma}^{\mathbf{q}\alpha\sigma} \\
```
can be performed with [spgrep](https://github.com/spglib/spgrep), where {math}`\alpha` represent irrep and {math}`\sigma = 1,\dots, m_{\alpha}` distinguish equivalent irreps to {math}`\alpha`.
The corresponding small representation of {math}`\mathcal{G}^{\mathbf{q}}` is obtained by {math}`\mathbf{\Gamma}^{ \mathbf{q}\omega}(h) := e^{ -i \mathbf{q} \cdot \mathbf{v}_{h} } \overline{\mathbf{\Gamma}}^{ \mathbf{q}\omega }(h)`.
We call orthonormal basis vectors {math}`f_{\mu}(\kappa; \mathbf{q}\alpha\sigma\nu)` forming irrep {math}`\Gamma^{\mathbf{q}\alpha}` as *modified eigenvectors*:
```{math}
h f_{\mu}(\kappa; \mathbf{q}\alpha\sigma\nu)
&= \sum_{\nu'} f_{\mu}(\kappa; \mathbf{q}\alpha\sigma\nu') \Gamma^{\mathbf{q}\alpha}(h)_{\nu',\nu}
\quad (h \in \mathcal{G}^{\mathbf{q}}, \nu = 1, \dots, d_{\alpha}) \\
\sum_{\kappa\mu} f_{\mu}(\kappa; \mathbf{q}\alpha\sigma\nu)^{\ast} f_{\mu}(\kappa; \mathbf{q}\alpha\sigma\nu')
&= \delta_{\nu\nu'}
\quad (\nu, \nu' = 1, \dots, d_{\alpha})
```
We can subdivide eigenvectors further by decomposing {math}`\Gamma^{\mathbf{q}}` into irreps,
```{math}
\left[ F^{\mathbf{q}\alpha\sigma} \right]_{\kappa\mu, \nu}
&:= f_{\mu}(\kappa; \mathbf{q}\alpha\sigma\nu) \\
\mathbf{F}^{\mathbf{q}\alpha\sigma \dagger} \mathbf{\Gamma}^{\mathbf{q}}(h) \mathbf{F}^{\mathbf{q}\alpha\sigma}
&= \mathbf{\Gamma}^{\mathbf{q}\alpha}(h)
\quad (h \in \mathcal{G}^{\mathbf{q}}) \\
```
## Solve dynamical matrix w.r.t. modified eigenvectors
Block-diagonalize fourier transformed force constants:
```{math}
\mathbf{F}^{\mathbf{q}\alpha}
&:= \left( \mathbf{F}^{\mathbf{q}\alpha 1} \dots \mathbf{F}^{\mathbf{q}\alpha m_{\alpha}} \right)
\quad \in \mathbb{C}^{3N \times m_{\alpha}d_{\alpha}} \\
\mathbf{\Phi}(\mathbf{q}\alpha)
&:= \mathbf{F}^{\mathbf{q}\alpha \dagger} \mathbf{\Phi}(\mathbf{q}) \mathbf{F}^{\mathbf{q}\alpha}
\quad \in \mathbb{C}^{ m_{\alpha}d_{\alpha} \times m_{\alpha}d_{\alpha} }, \\
```
where {math}`\mathbf{\Phi}(\mathbf{q}\alpha)` is hermitian.
Diagonalize {math}`\mathbf{\Phi}(\mathbf{q}\alpha)`
```{math}
\sum_{\sigma' \nu'} \Phi(\mathbf{q}\alpha)_{\sigma\nu, \sigma'\nu'} c(\mathbf{q} \alpha s\lambda)_{\sigma' \nu'}
&= \omega_{\alpha s}^{2} c(\mathbf{q} \alpha s\lambda)_{\sigma \nu}
\quad (s = 1, \dots, m_{\alpha}, \lambda = 1, \dots, d_{\alpha}) \\
\sum_{\sigma\nu} c(\mathbf{q}\alpha s\lambda)_{\sigma\nu}^{\ast} c(\mathbf{q}\alpha s'\lambda')_{\sigma\nu}
&= \delta_{s s'} \delta_{\lambda \lambda'} \\
\sum_{s\lambda} c(\mathbf{q}\alpha s\lambda)_{\sigma\nu}^{\ast} c(\mathbf{q}\alpha s\lambda)_{\sigma'\nu'}
&= \delta_{\sigma \sigma'} \delta_{\nu \nu'}
\quad (\sigma = 1, \dots, m_{\alpha}, \nu = 1, \dots, d_{\alpha}) \\
```
where {math}`s` labels real eigenvalues {math}`\omega_{\alpha s}^{2}` and {math}`\lambda` labels degenerate eigenvectors.
Here, **we assume each equivalent irrep {math}`\Gamma^{\mathbf{q}\alpha \sigma}` gives a different eigenvalue**.
Also, we choose the eigenvectors {math}`\mathbf{c}(\mathbf{q} \alpha s\lambda) := \{ c(\mathbf{q}\alpha s\lambda)_{\sigma\nu} \}_{\sigma\nu}` to be mutually orthogonal even within degenerate eigenvalues.
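Numerically, this block-diagonalization and eigenproblem are straightforward. The sketch below assumes a Hermitian array `Phi_q` of shape `(3N, 3N)` for {math}`\mathbf{\Phi}(\mathbf{q})` and an array `F_alpha` of shape `(3N, m_alpha * d_alpha)` whose columns are the modified eigenvectors; both would come from the surrounding spgrep/phonopy machinery, not from this snippet.
```python
import numpy as np

def solve_irrep_block(Phi_q: np.ndarray, F_alpha: np.ndarray):
    """Project Phi(q) onto one irrep block and diagonalize it."""
    Phi_alpha = F_alpha.conj().T @ Phi_q @ F_alpha  # Hermitian block of size m*d
    omega2, c = np.linalg.eigh(Phi_alpha)           # eigenvalues omega^2 and coefficients
    eigvecs = F_alpha @ c                           # eigenvectors back in the 3N-dim space
    return omega2, eigvecs
```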
When irrep {math}`\alpha` appears more than once ({math}`m_{\alpha} > 1`), the irrep formed by the eigenvectors is no longer the same as {math}`\Gamma^{\mathbf{q}\alpha}` {cite}`RevModPhys.40.1`:
```{math}
\tilde{\mathbf{f}}(\mathbf{q}\alpha s \lambda)
&:= \sum_{\sigma\nu} \mathbf{f}(\mathbf{q}\alpha \sigma \nu) c(\mathbf{q}\alpha s\lambda)_{\sigma \nu} \\
h \tilde{\mathbf{f}}(\mathbf{q}\alpha s \lambda)
&= \sum_{\lambda'} \tilde{\mathbf{f}}(\mathbf{q}\alpha s\lambda') \tilde{\Gamma}^{\mathbf{q}\alpha s}(h)_{\lambda'\lambda},
```
where [^eigmode_rep]
```{math}
\tilde{\Gamma}^{\mathbf{q}\alpha s}(h)_{\lambda'\lambda}
:= \sum_{\sigma\nu\nu'} c(\mathbf{q}\alpha s\lambda')_{\sigma\nu'}^{\ast} \Gamma^{\mathbf{q}\alpha}(h)_{\nu'\nu} c(\mathbf{q}\alpha s\lambda)_{\sigma\nu}
\quad (h \in \mathcal{G}^{\mathbf{q}}).
```
[^eigmode_rep]: The derivation is cumbersome (to me).
Let {math}`\mathbf{C}^{\mathbf{q}\alpha s} = ( \mathbf{c}(\mathbf{q}\alpha s 1), \dots, \mathbf{c}(\mathbf{q}\alpha s d_{\alpha}) )`.
```{math}
\tilde{\mathbf{\Gamma}}^{\alpha s}(h)
&:= \mathbf{C}^{\mathbf{q}\alpha s}
\begin{pmatrix}
\mathbf{\Gamma}^{\mathbf{q}\alpha}(h) & & \\
& \ddots & \\
& & \mathbf{\Gamma}^{\mathbf{q}\alpha}(h) \\
\end{pmatrix}
\mathbf{C}^{\mathbf{q}\alpha s \dagger} \\
\tilde{\Gamma}^{\alpha s}(h)_{\lambda' \lambda}
&= \sum_{\sigma' \nu' \sigma \nu}
c(\mathbf{q}\alpha s \lambda')_{\sigma'\nu'}^{\ast} \delta_{\sigma \sigma'}
\Gamma^{\mathbf{q}\alpha}(h)_{\nu'\nu}
c(\mathbf{q}\alpha s \lambda)_{\sigma\nu} \\
&= \sum_{\sigma \nu' \nu}
c(\mathbf{q}\alpha s \lambda')_{\sigma\nu'}^{\ast}
\Gamma^{\mathbf{q}\alpha}(h)_{\nu'\nu}
c(\mathbf{q}\alpha s \lambda)_{\sigma\nu} \\
```
Now go back to the other convention of dynamical matrix:
```{math}
e_{\mu}(\kappa; \mathbf{q}\alpha s \lambda)
&:= e^{ -i\mathbf{q} \cdot \mathbf{r}(0\kappa) } \tilde{f}_{\mu}(\kappa; \mathbf{q}\alpha s \lambda) \\
D_{\mu\mu'}(\kappa\kappa'; \mathbf{q})
&= e^{ i \mathbf{q} \cdot \left( \mathbf{r}(0\kappa') - \mathbf{r}(0\kappa) \right) } \Phi_{\mu\mu'}(\kappa\kappa'; \mathbf{q}) \\
\sum_{ \kappa'\mu' } D_{\mu\mu'}(\kappa\kappa'; \mathbf{q}) e_{\mu'}(\kappa'; \mathbf{q}\alpha s\lambda)
&= \omega_{\alpha s}^{2} e_{\mu}(\kappa; \mathbf{q}\alpha s \lambda) \\
h \mathbf{e}(\mathbf{q}\alpha s \lambda)
&= \sum_{\lambda'} \mathbf{e}(\mathbf{q}\alpha s'\lambda') \tilde{\Gamma}^{\mathbf{q}\alpha s}(h)_{\lambda'\lambda}
\quad (h \in \mathcal{G}^{\mathbf{q}}).
```
## Modulation
Modulation associated with qpoint {math}`\mathbf{q}` and frequency {math}`\mathbf{\omega}_{\mathbf{q}}`:
{math}`\mathbf{q} \neq \mathbf{0}` case:
```{math}
u^{( \mathbf{q} \alpha s)}_{\mu}(l\kappa)
&= \frac{1}{\sqrt{L^{3}M_{\kappa}}} \sum_{ \lambda }
\left(
Q^{ (\mathbf{q} \alpha s) }_{\lambda} e_{\mu}(\kappa; \mathbf{q} \alpha s \lambda) e^{ i \mathbf{q} \cdot \mathbf{r}(l\kappa) }
+ \mathrm{c.c.}
\right) \\
Q^{ (\mathbf{q} \alpha s) }_{\lambda} &\in \mathbb{C} \\
```
{math}`\mathbf{q} = \mathbf{0}` case:
```{math}
u^{( \mathbf{0} \alpha s)}_{\mu}(l\kappa)
&= \frac{1}{\sqrt{L^{3}M_{\kappa}}} \sum_{ \lambda }
Q^{ (\mathbf{0} \alpha s) }_{\lambda} e_{\mu}(\kappa; \mathbf{0} \alpha s \lambda ) \\
Q^{ (\mathbf{0} \alpha s) }_{\lambda} &\in \mathbb{R} \\
```
% When we sample modulations in grid, we fix the norm of {math}`\mathbf{Q}^{ (\mathbf{q} \alpha s) }`, which is equivalent to fix the norm of the following mass-weighted displacements,
% ```{math}
% \sum_{l \kappa \mu} M_{\kappa} \left| u^{( \mathbf{q} \alpha s)}_{\mu}(l\kappa) \right|^{2}
% \propto \sum_{\lambda} \left| Q^{ (\mathbf{q} \alpha s) }_{\lambda} \right|^{2}.
% ```
From the unitary arbitrariness of {math}`\tilde{\Gamma}^{\mathbf{q}\alpha s}`, we can choose {math}`\mathrm{Im}\, Q^{ (\mathbf{q} \alpha s) }_{\lambda=1} = 0`.
Thus, sampling {math}`\mathbf{Q}^{ (\mathbf{q} \alpha s) }` reduces to sampling points on the unit sphere {math}`S^{2 d_{\alpha} - 2}`.
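The following is a minimal NumPy sketch (not part of any package documented here) of this sampling and of building the {math}`\mathbf{q} \neq \mathbf{0}` modulation above; the array shapes, the {math}`2\pi` factor in the phase (fractional coordinates assumed), and all names are assumptions for illustration:
```python
import numpy as np

def sample_amplitude(dim, rng):
    # Draw Q on the unit sphere S^{2*dim - 2} with Im Q_{lambda=1} = 0.
    x = rng.normal(size=2 * dim - 1)
    x /= np.linalg.norm(x)
    return np.concatenate([[x[0]], x[1:dim]]) + 1j * np.concatenate([[0.0], x[dim:]])

def modulation(Q, e, r_frac, masses, q_frac, L):
    # u^{(q alpha s)}_mu(l kappa) for q != 0, with the complex conjugate added.
    # Q:      (d,) complex amplitudes Q_lambda
    # e:      (num_sites, 3, d) eigenvectors e_mu(kappa; q alpha s lambda)
    # r_frac: (num_cells, num_sites, 3) fractional positions r(l kappa)
    # masses: (num_sites,) atomic masses M_kappa
    phase = np.exp(2j * np.pi * (r_frac @ q_frac))   # (num_cells, num_sites)
    u = np.einsum("d,kmd,lk->lkm", Q, e, phase)      # sum over lambda
    u = (u + u.conj()).real
    return u / np.sqrt(L**3 * masses)[None, :, None]
```
Fixing one imaginary part to zero leaves {math}`2 d_{\alpha} - 1` real parameters, which is why the sphere above is {math}`S^{2 d_{\alpha} - 2}`.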
% chain-adapted mode {cite}`Aroyo:js0048`
## References
```{bibliography}
:filter: docname in docnames
```
|
PypiClean
|
/safegate-pro-frontend-20210805.0.tar.gz/safegate-pro-frontend-20210805.0/hass_frontend/frontend_latest/chunk.dcca18d66e88fc405587.js
|
(self.webpackChunkhome_assistant_frontend=self.webpackChunkhome_assistant_frontend||[]).push([[5205],{21157:(e,t,n)=>{"use strict";n(65233);const l=n(50856).d`
/* Most common used flex styles*/
<dom-module id="iron-flex">
<template>
<style>
.layout.horizontal,
.layout.vertical {
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
}
.layout.inline {
display: -ms-inline-flexbox;
display: -webkit-inline-flex;
display: inline-flex;
}
.layout.horizontal {
-ms-flex-direction: row;
-webkit-flex-direction: row;
flex-direction: row;
}
.layout.vertical {
-ms-flex-direction: column;
-webkit-flex-direction: column;
flex-direction: column;
}
.layout.wrap {
-ms-flex-wrap: wrap;
-webkit-flex-wrap: wrap;
flex-wrap: wrap;
}
.layout.no-wrap {
-ms-flex-wrap: nowrap;
-webkit-flex-wrap: nowrap;
flex-wrap: nowrap;
}
.layout.center,
.layout.center-center {
-ms-flex-align: center;
-webkit-align-items: center;
align-items: center;
}
.layout.center-justified,
.layout.center-center {
-ms-flex-pack: center;
-webkit-justify-content: center;
justify-content: center;
}
.flex {
-ms-flex: 1 1 0.000000001px;
-webkit-flex: 1;
flex: 1;
-webkit-flex-basis: 0.000000001px;
flex-basis: 0.000000001px;
}
.flex-auto {
-ms-flex: 1 1 auto;
-webkit-flex: 1 1 auto;
flex: 1 1 auto;
}
.flex-none {
-ms-flex: none;
-webkit-flex: none;
flex: none;
}
</style>
</template>
</dom-module>
/* Basic flexbox reverse styles */
<dom-module id="iron-flex-reverse">
<template>
<style>
.layout.horizontal-reverse,
.layout.vertical-reverse {
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
}
.layout.horizontal-reverse {
-ms-flex-direction: row-reverse;
-webkit-flex-direction: row-reverse;
flex-direction: row-reverse;
}
.layout.vertical-reverse {
-ms-flex-direction: column-reverse;
-webkit-flex-direction: column-reverse;
flex-direction: column-reverse;
}
.layout.wrap-reverse {
-ms-flex-wrap: wrap-reverse;
-webkit-flex-wrap: wrap-reverse;
flex-wrap: wrap-reverse;
}
</style>
</template>
</dom-module>
/* Flexbox alignment */
<dom-module id="iron-flex-alignment">
<template>
<style>
/**
* Alignment in cross axis.
*/
.layout.start {
-ms-flex-align: start;
-webkit-align-items: flex-start;
align-items: flex-start;
}
.layout.center,
.layout.center-center {
-ms-flex-align: center;
-webkit-align-items: center;
align-items: center;
}
.layout.end {
-ms-flex-align: end;
-webkit-align-items: flex-end;
align-items: flex-end;
}
.layout.baseline {
-ms-flex-align: baseline;
-webkit-align-items: baseline;
align-items: baseline;
}
/**
* Alignment in main axis.
*/
.layout.start-justified {
-ms-flex-pack: start;
-webkit-justify-content: flex-start;
justify-content: flex-start;
}
.layout.center-justified,
.layout.center-center {
-ms-flex-pack: center;
-webkit-justify-content: center;
justify-content: center;
}
.layout.end-justified {
-ms-flex-pack: end;
-webkit-justify-content: flex-end;
justify-content: flex-end;
}
.layout.around-justified {
-ms-flex-pack: distribute;
-webkit-justify-content: space-around;
justify-content: space-around;
}
.layout.justified {
-ms-flex-pack: justify;
-webkit-justify-content: space-between;
justify-content: space-between;
}
/**
* Self alignment.
*/
.self-start {
-ms-align-self: flex-start;
-webkit-align-self: flex-start;
align-self: flex-start;
}
.self-center {
-ms-align-self: center;
-webkit-align-self: center;
align-self: center;
}
.self-end {
-ms-align-self: flex-end;
-webkit-align-self: flex-end;
align-self: flex-end;
}
.self-stretch {
-ms-align-self: stretch;
-webkit-align-self: stretch;
align-self: stretch;
}
.self-baseline {
-ms-align-self: baseline;
-webkit-align-self: baseline;
align-self: baseline;
}
/**
* multi-line alignment in main axis.
*/
.layout.start-aligned {
-ms-flex-line-pack: start; /* IE10 */
-ms-align-content: flex-start;
-webkit-align-content: flex-start;
align-content: flex-start;
}
.layout.end-aligned {
-ms-flex-line-pack: end; /* IE10 */
-ms-align-content: flex-end;
-webkit-align-content: flex-end;
align-content: flex-end;
}
.layout.center-aligned {
-ms-flex-line-pack: center; /* IE10 */
-ms-align-content: center;
-webkit-align-content: center;
align-content: center;
}
.layout.between-aligned {
-ms-flex-line-pack: justify; /* IE10 */
-ms-align-content: space-between;
-webkit-align-content: space-between;
align-content: space-between;
}
.layout.around-aligned {
-ms-flex-line-pack: distribute; /* IE10 */
-ms-align-content: space-around;
-webkit-align-content: space-around;
align-content: space-around;
}
</style>
</template>
</dom-module>
/* Non-flexbox positioning helper styles */
<dom-module id="iron-flex-factors">
<template>
<style>
.flex,
.flex-1 {
-ms-flex: 1 1 0.000000001px;
-webkit-flex: 1;
flex: 1;
-webkit-flex-basis: 0.000000001px;
flex-basis: 0.000000001px;
}
.flex-2 {
-ms-flex: 2;
-webkit-flex: 2;
flex: 2;
}
.flex-3 {
-ms-flex: 3;
-webkit-flex: 3;
flex: 3;
}
.flex-4 {
-ms-flex: 4;
-webkit-flex: 4;
flex: 4;
}
.flex-5 {
-ms-flex: 5;
-webkit-flex: 5;
flex: 5;
}
.flex-6 {
-ms-flex: 6;
-webkit-flex: 6;
flex: 6;
}
.flex-7 {
-ms-flex: 7;
-webkit-flex: 7;
flex: 7;
}
.flex-8 {
-ms-flex: 8;
-webkit-flex: 8;
flex: 8;
}
.flex-9 {
-ms-flex: 9;
-webkit-flex: 9;
flex: 9;
}
.flex-10 {
-ms-flex: 10;
-webkit-flex: 10;
flex: 10;
}
.flex-11 {
-ms-flex: 11;
-webkit-flex: 11;
flex: 11;
}
.flex-12 {
-ms-flex: 12;
-webkit-flex: 12;
flex: 12;
}
</style>
</template>
</dom-module>
<dom-module id="iron-positioning">
<template>
<style>
.block {
display: block;
}
[hidden] {
display: none !important;
}
.invisible {
visibility: hidden !important;
}
.relative {
position: relative;
}
.fit {
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 0;
}
body.fullbleed {
margin: 0;
height: 100vh;
}
.scroll {
-webkit-overflow-scrolling: touch;
overflow: auto;
}
/* fixed position */
.fixed-bottom,
.fixed-left,
.fixed-right,
.fixed-top {
position: fixed;
}
.fixed-top {
top: 0;
left: 0;
right: 0;
}
.fixed-right {
top: 0;
right: 0;
bottom: 0;
}
.fixed-bottom {
right: 0;
bottom: 0;
left: 0;
}
.fixed-left {
top: 0;
bottom: 0;
left: 0;
}
</style>
</template>
</dom-module>
`;l.setAttribute("style","display: none;"),document.head.appendChild(l.content)},68928:(e,t,n)=>{"use strict";n.d(t,{WU:()=>Y});var l=/d{1,4}|M{1,4}|YY(?:YY)?|S{1,3}|Do|ZZ|Z|([HhMsDm])\1?|[aA]|"[^"]*"|'[^']*'/g,i="[1-9]\\d?",r="\\d\\d",a="[^\\s]+",o=/\[([^]*?)\]/gm;function s(e,t){for(var n=[],l=0,i=e.length;l<i;l++)n.push(e[l].substr(0,t));return n}var f=function(e){return function(t,n){var l=n[e].map((function(e){return e.toLowerCase()})).indexOf(t.toLowerCase());return l>-1?l:null}};function u(e){for(var t=[],n=1;n<arguments.length;n++)t[n-1]=arguments[n];for(var l=0,i=t;l<i.length;l++){var r=i[l];for(var a in r)e[a]=r[a]}return e}var m=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],c=["January","February","March","April","May","June","July","August","September","October","November","December"],d=s(c,3),x={dayNamesShort:s(m,3),dayNames:m,monthNamesShort:d,monthNames:c,amPm:["am","pm"],DoFn:function(e){return e+["th","st","nd","rd"][e%10>3?0:(e-e%10!=10?1:0)*e%10]}},g=u({},x),p=function(e,t){for(void 0===t&&(t=2),e=String(e);e.length<t;)e="0"+e;return e},b={D:function(e){return String(e.getDate())},DD:function(e){return p(e.getDate())},Do:function(e,t){return t.DoFn(e.getDate())},d:function(e){return String(e.getDay())},dd:function(e){return p(e.getDay())},ddd:function(e,t){return t.dayNamesShort[e.getDay()]},dddd:function(e,t){return t.dayNames[e.getDay()]},M:function(e){return String(e.getMonth()+1)},MM:function(e){return p(e.getMonth()+1)},MMM:function(e,t){return t.monthNamesShort[e.getMonth()]},MMMM:function(e,t){return t.monthNames[e.getMonth()]},YY:function(e){return p(String(e.getFullYear()),4).substr(2)},YYYY:function(e){return p(e.getFullYear(),4)},h:function(e){return String(e.getHours()%12||12)},hh:function(e){return p(e.getHours()%12||12)},H:function(e){return String(e.getHours())},HH:function(e){return p(e.getHours())},m:function(e){return String(e.getMinutes())},mm:function(e){return p(e.getMinutes())},s:function(e){return String(e.getSeconds())},ss:function(e){return p(e.getSeconds())},S:function(e){return String(Math.round(e.getMilliseconds()/100))},SS:function(e){return p(Math.round(e.getMilliseconds()/10),2)},SSS:function(e){return p(e.getMilliseconds(),3)},a:function(e,t){return e.getHours()<12?t.amPm[0]:t.amPm[1]},A:function(e,t){return e.getHours()<12?t.amPm[0].toUpperCase():t.amPm[1].toUpperCase()},ZZ:function(e){var t=e.getTimezoneOffset();return(t>0?"-":"+")+p(100*Math.floor(Math.abs(t)/60)+Math.abs(t)%60,4)},Z:function(e){var t=e.getTimezoneOffset();return(t>0?"-":"+")+p(Math.floor(Math.abs(t)/60),2)+":"+p(Math.abs(t)%60,2)}},y=function(e){return+e-1},w=[null,i],h=[null,a],k=["isPm",a,function(e,t){var n=e.toLowerCase();return n===t.amPm[0]?0:n===t.amPm[1]?1:null}],M=["timezoneOffset","[^\\s]*?[\\+\\-]\\d\\d:?\\d\\d|[^\\s]*?Z?",function(e){var t=(e+"").match(/([+-]|\d\d)/gi);if(t){var n=60*+t[1]+parseInt(t[2],10);return"+"===t[0]?n:-n}return 0}],v=(f("monthNamesShort"),f("monthNames"),{default:"ddd MMM DD YYYY HH:mm:ss",shortDate:"M/D/YY",mediumDate:"MMM D, YYYY",longDate:"MMMM D, YYYY",fullDate:"dddd, MMMM D, YYYY",isoDate:"YYYY-MM-DD",isoDateTime:"YYYY-MM-DDTHH:mm:ssZ",shortTime:"HH:mm",mediumTime:"HH:mm:ss",longTime:"HH:mm:ss.SSS"}),Y=function(e,t,n){if(void 0===t&&(t=v.default),void 0===n&&(n={}),"number"==typeof e&&(e=new Date(e)),"[object Date]"!==Object.prototype.toString.call(e)||isNaN(e.getTime()))throw new Error("Invalid Date pass to format");var i=[];t=(t=v[t]||t).replace(o,(function(e,t){return i.push(t),"@@@"}));var 
r=u(u({},g),n);return(t=t.replace(l,(function(t){return b[t](e,r)}))).replace(/@@@/g,(function(){return i.shift()}))}}}]);
//# sourceMappingURL=chunk.dcca18d66e88fc405587.js.map
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/identity_governance/workflow.py
|
from __future__ import annotations
from datetime import datetime
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import run, task_report, user_processing_result, workflow_base, workflow_version
from .. import user
from . import workflow_base
class Workflow(workflow_base.WorkflowBase):
def __init__(self,) -> None:
"""
Instantiates a new Workflow and sets the default values.
"""
super().__init__()
self.odata_type = "#microsoft.graph.identityGovernance.workflow"
# When the workflow was deleted.Supports $filter(lt, le, gt, ge, eq, ne) and $orderby.
self._deleted_date_time: Optional[datetime] = None
# The unique identifier of the Azure AD identity that last modified the workflow object.
self._execution_scope: Optional[List[user.User]] = None
# Identifier used for individually addressing a specific workflow.Supports $filter(eq, ne) and $orderby.
self._id: Optional[str] = None
# The date time when the workflow is expected to run next based on the schedule interval, if there are any users matching the execution conditions. Supports $filter(lt,gt) and $orderBy.
self._next_schedule_run_date_time: Optional[datetime] = None
# Workflow runs.
self._runs: Optional[List[run.Run]] = None
# Represents the aggregation of task execution data for tasks within a workflow object.
self._task_reports: Optional[List[task_report.TaskReport]] = None
# Per-user workflow execution results.
self._user_processing_results: Optional[List[user_processing_result.UserProcessingResult]] = None
# The current version number of the workflow. Value is 1 when the workflow is first created.Supports $filter(lt, le, gt, ge, eq, ne) and $orderby.
self._version: Optional[int] = None
# The workflow versions that are available.
self._versions: Optional[List[workflow_version.WorkflowVersion]] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> Workflow:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: Workflow
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return Workflow()
@property
def deleted_date_time(self,) -> Optional[datetime]:
"""
Gets the deletedDateTime property value. When the workflow was deleted.Supports $filter(lt, le, gt, ge, eq, ne) and $orderby.
Returns: Optional[datetime]
"""
return self._deleted_date_time
@deleted_date_time.setter
def deleted_date_time(self,value: Optional[datetime] = None) -> None:
"""
Sets the deletedDateTime property value. When the workflow was deleted.Supports $filter(lt, le, gt, ge, eq, ne) and $orderby.
Args:
value: Value to set for the deleted_date_time property.
"""
self._deleted_date_time = value
@property
def execution_scope(self,) -> Optional[List[user.User]]:
"""
Gets the executionScope property value. The unique identifier of the Azure AD identity that last modified the workflow object.
Returns: Optional[List[user.User]]
"""
return self._execution_scope
@execution_scope.setter
def execution_scope(self,value: Optional[List[user.User]] = None) -> None:
"""
Sets the executionScope property value. The unique identifier of the Azure AD identity that last modified the workflow object.
Args:
value: Value to set for the execution_scope property.
"""
self._execution_scope = value
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from . import run, task_report, user_processing_result, workflow_base, workflow_version
from .. import user
fields: Dict[str, Callable[[Any], None]] = {
"deletedDateTime": lambda n : setattr(self, 'deleted_date_time', n.get_datetime_value()),
"executionScope": lambda n : setattr(self, 'execution_scope', n.get_collection_of_object_values(user.User)),
"id": lambda n : setattr(self, 'id', n.get_str_value()),
"nextScheduleRunDateTime": lambda n : setattr(self, 'next_schedule_run_date_time', n.get_datetime_value()),
"runs": lambda n : setattr(self, 'runs', n.get_collection_of_object_values(run.Run)),
"taskReports": lambda n : setattr(self, 'task_reports', n.get_collection_of_object_values(task_report.TaskReport)),
"userProcessingResults": lambda n : setattr(self, 'user_processing_results', n.get_collection_of_object_values(user_processing_result.UserProcessingResult)),
"version": lambda n : setattr(self, 'version', n.get_int_value()),
"versions": lambda n : setattr(self, 'versions', n.get_collection_of_object_values(workflow_version.WorkflowVersion)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
@property
def id(self,) -> Optional[str]:
"""
Gets the id property value. Identifier used for individually addressing a specific workflow.Supports $filter(eq, ne) and $orderby.
Returns: Optional[str]
"""
return self._id
@id.setter
def id(self,value: Optional[str] = None) -> None:
"""
Sets the id property value. Identifier used for individually addressing a specific workflow.Supports $filter(eq, ne) and $orderby.
Args:
value: Value to set for the id property.
"""
self._id = value
@property
def next_schedule_run_date_time(self,) -> Optional[datetime]:
"""
Gets the nextScheduleRunDateTime property value. The date time when the workflow is expected to run next based on the schedule interval, if there are any users matching the execution conditions. Supports $filter(lt,gt) and $orderBy.
Returns: Optional[datetime]
"""
return self._next_schedule_run_date_time
@next_schedule_run_date_time.setter
def next_schedule_run_date_time(self,value: Optional[datetime] = None) -> None:
"""
Sets the nextScheduleRunDateTime property value. The date time when the workflow is expected to run next based on the schedule interval, if there are any users matching the execution conditions. Supports $filter(lt,gt) and $orderBy.
Args:
value: Value to set for the next_schedule_run_date_time property.
"""
self._next_schedule_run_date_time = value
@property
def runs(self,) -> Optional[List[run.Run]]:
"""
Gets the runs property value. Workflow runs.
Returns: Optional[List[run.Run]]
"""
return self._runs
@runs.setter
def runs(self,value: Optional[List[run.Run]] = None) -> None:
"""
Sets the runs property value. Workflow runs.
Args:
value: Value to set for the runs property.
"""
self._runs = value
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
super().serialize(writer)
writer.write_datetime_value("deletedDateTime", self.deleted_date_time)
writer.write_collection_of_object_values("executionScope", self.execution_scope)
writer.write_str_value("id", self.id)
writer.write_datetime_value("nextScheduleRunDateTime", self.next_schedule_run_date_time)
writer.write_collection_of_object_values("runs", self.runs)
writer.write_collection_of_object_values("taskReports", self.task_reports)
writer.write_collection_of_object_values("userProcessingResults", self.user_processing_results)
writer.write_int_value("version", self.version)
writer.write_collection_of_object_values("versions", self.versions)
@property
def task_reports(self,) -> Optional[List[task_report.TaskReport]]:
"""
Gets the taskReports property value. Represents the aggregation of task execution data for tasks within a workflow object.
Returns: Optional[List[task_report.TaskReport]]
"""
return self._task_reports
@task_reports.setter
def task_reports(self,value: Optional[List[task_report.TaskReport]] = None) -> None:
"""
Sets the taskReports property value. Represents the aggregation of task execution data for tasks within a workflow object.
Args:
value: Value to set for the task_reports property.
"""
self._task_reports = value
@property
def user_processing_results(self,) -> Optional[List[user_processing_result.UserProcessingResult]]:
"""
Gets the userProcessingResults property value. Per-user workflow execution results.
Returns: Optional[List[user_processing_result.UserProcessingResult]]
"""
return self._user_processing_results
@user_processing_results.setter
def user_processing_results(self,value: Optional[List[user_processing_result.UserProcessingResult]] = None) -> None:
"""
Sets the userProcessingResults property value. Per-user workflow execution results.
Args:
value: Value to set for the user_processing_results property.
"""
self._user_processing_results = value
@property
def version(self,) -> Optional[int]:
"""
Gets the version property value. The current version number of the workflow. Value is 1 when the workflow is first created.Supports $filter(lt, le, gt, ge, eq, ne) and $orderby.
Returns: Optional[int]
"""
return self._version
@version.setter
def version(self,value: Optional[int] = None) -> None:
"""
Sets the version property value. The current version number of the workflow. Value is 1 when the workflow is first created.Supports $filter(lt, le, gt, ge, eq, ne) and $orderby.
Args:
value: Value to set for the version property.
"""
self._version = value
@property
def versions(self,) -> Optional[List[workflow_version.WorkflowVersion]]:
"""
Gets the versions property value. The workflow versions that are available.
Returns: Optional[List[workflow_version.WorkflowVersion]]
"""
return self._versions
@versions.setter
def versions(self,value: Optional[List[workflow_version.WorkflowVersion]] = None) -> None:
"""
Sets the versions property value. The workflow versions that are available.
Args:
value: Value to set for the versions property.
"""
self._versions = value
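# --- Illustrative sketch (not part of the generated SDK) ---------------------
# The typed properties above map to the camelCase fields listed in
# get_field_deserializers() and serialize(). Values below are hypothetical.
if __name__ == "__main__":
    wf = Workflow()
    wf.version = 1
    wf.next_schedule_run_date_time = datetime(2024, 1, 1)
    print(wf.odata_type, wf.version, wf.next_schedule_run_date_time)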
|
PypiClean
|
/rlpy3-2.0.0a0-cp36-cp36m-win_amd64.whl/rlpy/Policies/Policy.py
|
from rlpy.Tools import className, discrete_sample
import numpy as np
import logging
from abc import ABC, abstractmethod
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class Policy(ABC):
"""The Policy determines the discrete action that an
:py:class:`~rlpy.Agents.Agent.Agent` will take given its
:py:class:`~rlpy.Representations.Representation.Representation`.
The Agent learns about the :py:class:`~rlpy.Domains.Domain.Domain`
as the two interact.
At each step, the Agent passes information about its current state
to the Policy; the Policy uses this to decide what discrete action the
Agent should perform next (see :py:meth:`~rlpy.Policies.Policy.Policy.pi`) \n
The Policy class is a base class that provides the basic framework for all
policies. It provides the methods and attributes that allow child classes
to interact with the Agent and Representation within the RLPy library. \n
.. note::
All new policy implementations should inherit from Policy.
"""
DEBUG = False
def __init__(self, representation, seed=1):
"""
:param representation: the :py:class:`~rlpy.Representations.Representation.Representation`
to use in learning the value function.
"""
self.representation = representation
# An object to record the print outs in a file
self.logger = logging.getLogger("rlpy.Policies." + self.__class__.__name__)
# a new stream of random numbers for each domain
self.random_state = np.random.RandomState(seed=seed)
def init_randomization(self):
"""
Any stochastic behavior in __init__() is broken out into this function
so that if the random seed is later changed (eg, by the Experiment),
other member variables and functions are updated accordingly.
"""
pass
@abstractmethod
def pi(self, s, terminal, p_actions):
"""
*Abstract Method:*\n Select an action given a state.
:param s: The current state
:param terminal: boolean, whether or not the *s* is a terminal state.
:param p_actions: a list / array of all possible actions in *s*.
"""
raise NotImplementedError
def turnOffExploration(self):
"""
*Abstract Method:* \n Turn off exploration (e.g., epsilon=0 in epsilon-greedy)
"""
pass
# [turnOffExploration code]
# \b ABSTRACT \b METHOD: Turn exploration on. See code
# \ref Policy_turnOnExploration "Here".
# [turnOnExploration code]
def turnOnExploration(self):
"""
*Abstract Method:* \n
If :py:meth:`~rlpy.Policies.Policy.Policy.turnOffExploration` was called
previously, reverse its effects (e.g. restore epsilon to its previous,
possibly nonzero, value).
"""
pass
def printAll(self):
""" Prints all class information to console. """
print(className(self))
print("=======================================")
for property, value in vars(self).items():
print(property, ": ", value)
class DifferentiablePolicy(Policy):
def pi(self, s, terminal, p_actions):
"""Sample action from policy"""
p = self.probabilities(s, terminal)
return discrete_sample(p)
@abstractmethod
def dlogpi(self, s, a):
"""derivative of the log probabilities of the policy"""
        raise NotImplementedError
def prob(self, s, a):
"""
        probability of choosing action a given the state s
"""
v = self.probabilities(s, False)
return v[a]
@property
def theta(self):
return self.representation.weight_vec
@theta.setter
def theta(self, v):
self.representation.weight_vec = v
@abstractmethod
def probabilities(self, s, terminal):
"""
returns a vector of num_actions length containing the normalized
probabilities for taking each action given the state s
"""
        raise NotImplementedError
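# --- Illustrative sketch (not part of rlpy) ----------------------------------
# A minimal concrete policy inheriting from Policy: it ignores the learned
# representation and picks uniformly among the permitted actions. The class
# name is hypothetical.
class UniformRandomPolicy(Policy):
    def pi(self, s, terminal, p_actions):
        # Choose one of the currently permitted actions at random.
        return self.random_state.choice(p_actions)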
|
PypiClean
|
/BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/Orange/classification/majority.py
|
from hashlib import sha1
import numpy as np
from Orange.classification import Learner, Model
from Orange.statistics import distribution
from Orange.i18n_config import *
def __(key):
return i18n.t("orange." + key)
__all__ = ["MajorityLearner"]
class MajorityLearner(Learner):
"""
A majority classifier. Always returns most frequent class from the
training set, regardless of the attribute values from the test data
instance. Returns class value distribution if class probabilities
are requested. Can be used as a baseline when comparing classifiers.
In the special case of uniform class distribution within the training data,
class value is selected randomly. In order to produce consistent results on
the same dataset, this value is selected based on hash of the class vector.
"""
name = __("name.majority")
def fit_storage(self, dat):
if not dat.domain.has_discrete_class:
raise ValueError(__("value_error.expect_domain"))
dist = distribution.get_distribution(dat, dat.domain.class_var)
N = dist.sum()
if N > 0:
dist /= N
else:
dist.fill(1 / len(dist))
probs = np.array(dist)
ties = np.flatnonzero(probs == probs.max())
if len(ties) > 1:
random_idx = int(sha1(np.ascontiguousarray(dat.Y).data)
.hexdigest(), 16) % len(ties)
unif_maj = ties[random_idx]
else:
unif_maj = None
return ConstantModel(dist=dist, unif_maj=unif_maj)
class ConstantModel(Model):
"""
A classification model that returns a given class value.
"""
def __init__(self, dist, unif_maj=None):
"""
Constructs `Orange.classification.MajorityModel` that always
returns majority value of given distribution.
        If no distribution is given, or the given distribution is empty,
        constructs a model that returns equal probabilities for each class value.
        :param dist: class value distribution of the training data
        :param unif_maj: majority class for the special case of uniform
            class distribution in the training data
        :type dist: Orange.statistics.distribution.Discrete
        :return: classification model that returns the majority value
:rtype: Orange.classification.Model
"""
self.dist = np.array(dist)
self.unif_maj = unif_maj
def predict(self, X):
"""
Returns majority class for each given instance in X.
:param X: data table for which to make predictions
:type X: Orange.data.Table
:return: predicted value
:rtype: vector of majority values
"""
probs = np.tile(self.dist, (X.shape[0], 1))
if self.unif_maj is not None:
value = np.tile(self.unif_maj, (X.shape[0], ))
return value, probs
return probs
def __str__(self):
return 'ConstantModel {}'.format(self.dist)
MajorityLearner.__returns__ = ConstantModel
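# --- Illustrative sketch (not part of Orange) ---------------------------------
# Hypothetical baseline usage of MajorityLearner; the dataset name is an
# assumption for illustration.
if __name__ == "__main__":
    from Orange.data import Table
    data = Table("iris")
    model = MajorityLearner()(data)
    print(model(data[:5]))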
|
PypiClean
|
/mysql-utilities-1.4.3.tar.gz/mysql-utilities-1.4.3/mysql/utilities/common/options.py
|
import copy
import optparse
import os.path
import re
from optparse import Option as CustomOption, OptionValueError
from mysql.utilities import LICENSE_FRM, VERSION_FRM
from mysql.utilities.exception import UtilError, FormatError
from mysql.connector.conversion import MySQLConverter
from mysql.utilities.common.messages import PARSE_ERR_OBJ_NAME_FORMAT
from mysql.utilities.common.my_print_defaults import (MyDefaultsReader,
my_login_config_exists)
from mysql.utilities.common.pattern_matching import REGEXP_QUALIFIED_OBJ_NAME
from mysql.utilities.common.sql_transform import (is_quoted_with_backticks,
remove_backtick_quoting)
_PERMITTED_FORMATS = ["grid", "tab", "csv", "vertical"]
_PERMITTED_DIFFS = ["unified", "context", "differ"]
_PERMITTED_RPL_DUMP = ["master", "slave"]
class UtilitiesParser(optparse.OptionParser):
"""Special subclass of parser that allows showing of version information
when --help is used.
"""
def print_help(self, output=None):
"""Show version information before help
"""
print self.version
optparse.OptionParser.print_help(self, output)
def format_epilog(self, formatter):
return self.epilog if self.epilog is not None else ''
def prefix_check_choice(option, opt, value):
"""Check option values using case insensitive prefix compare
This method checks to see if the value specified is a prefix of one of the
choices. It converts the string provided by the user (value) to lower case
to permit case insensitive comparison of the user input. If multiple
choices are found for a prefix, an error is thrown. If the value being
compared does not match the list of choices, an error is thrown.
option[in] Option class instance
opt[in] option name
value[in] the value provided by the user
Returns string - valid option chosen
"""
# String of choices
choices = ", ".join([repr(choice) for choice in option.choices])
# Get matches for prefix given
alts = [alt for alt in option.choices if alt.startswith(value.lower())]
if len(alts) == 1: # only 1 match
return alts[0]
elif len(alts) > 1: # multiple matches
raise OptionValueError(
("option %s: there are multiple prefixes "
"matching: %r (choose from %s)") % (opt, value, choices))
# Doesn't match. Show user possible choices.
raise OptionValueError("option %s: invalid choice: %r (choose from %s)"
% (opt, value, choices))
def license_callback(self, opt, value, parser, *args, **kwargs):
"""Show license information and exit.
"""
print(LICENSE_FRM.format(program=parser.prog))
parser.exit()
class CaseInsensitiveChoicesOption(CustomOption):
"""Case insensitive choices option class
This is an extension of the Option class. It replaces the check_choice
method with the prefix_check_choice() method above to provide
shortcut aware choice selection. It also ensures the choice compare is
done with a case insensitve test.
"""
TYPE_CHECKER = copy.copy(CustomOption.TYPE_CHECKER)
TYPE_CHECKER["choice"] = prefix_check_choice
def __init__(self, *opts, **attrs):
if 'choices' in attrs:
attrs['choices'] = [attr.lower() for attr in attrs['choices']]
CustomOption.__init__(self, *opts, **attrs)
def setup_common_options(program_name, desc_str, usage_str,
append=False, server=True,
server_default="root@localhost:3306",
extended_help=None):
"""Setup option parser and options common to all MySQL Utilities.
This method creates an option parser and adds options for user
login and connection options to a MySQL database system including
user, password, host, socket, and port.
program_name[in] The program name
desc_str[in] The description of the utility
usage_str[in] A brief usage example
append[in] If True, allow --server to be specified multiple times
(default = False)
server[in] If True, add the --server option
(default = True)
server_default[in] Default value for option
(default = "root@localhost:3306")
extended_help[in] Extended help (by default: None).
Returns parser object
"""
program_name = program_name.replace(".py", "")
parser = UtilitiesParser(
version=VERSION_FRM.format(program=program_name),
description=desc_str,
usage=usage_str,
add_help_option=False,
option_class=CaseInsensitiveChoicesOption,
epilog=extended_help,
prog=program_name)
parser.add_option("--help", action="help", help="display a help message "
"and exit")
parser.add_option("--license", action='callback',
callback=license_callback,
help="display program's license and exit")
if server:
# Connection information for the first server
if append:
parser.add_option("--server", action="append", dest="server",
help="connection information for the server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>]"
"[:<socket>].")
else:
parser.add_option("--server", action="store", dest="server",
type="string", default=server_default,
help="connection information for the server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>]"
"[:<socket>].")
return parser
def add_character_set_option(parser):
"""Add the --character-set option.
parser[in] the parser instance
"""
parser.add_option("--character-set", action="store", dest="charset",
type="string", default=None,
help="sets the client character set. The default is "
"retrieved from the server variable "
"'character_set_client'.")
_SKIP_VALUES = (
"tables", "views", "triggers", "procedures",
"functions", "events", "grants", "data",
"create_db"
)
def add_skip_options(parser):
"""Add the common --skip options for database utilties.
parser[in] the parser instance
"""
parser.add_option("--skip", action="store", dest="skip_objects",
default=None, help="specify objects to skip in the "
"operation in the form of a comma-separated list (no "
"spaces). Valid values = tables, views, triggers, proc"
"edures, functions, events, grants, data, create_db")
def check_skip_options(skip_list):
"""Check skip options for validity
skip_list[in] List of items from parser option.
Returns new skip list with items converted to upper case.
"""
new_skip_list = []
if skip_list is not None:
items = skip_list.split(",")
for item in items:
obj = item.lower()
if obj in _SKIP_VALUES:
new_skip_list.append(obj)
else:
raise UtilError("The value %s is not a valid value for "
"--skip." % item)
return new_skip_list
def add_format_option(parser, help_text, default_val, sql=False,
extra_formats=None):
"""Add the format option.
parser[in] the parser instance
help_text[in] help text
default_val[in] default value
sql[in] if True, add 'sql' format
default=False
extra_formats[in] list with extra formats
Returns corrected format value
"""
formats = _PERMITTED_FORMATS
if sql:
formats.append('sql')
if extra_formats:
formats.extend(extra_formats)
parser.add_option("-f", "--format", action="store", dest="format",
default=default_val, help=help_text, type="choice",
choices=formats)
def add_format_option_with_extras(parser, help_text, default_val,
extra_formats):
"""Add the format option.
parser[in] the parser instance
help_text[in] help text
default_val[in] default value
extra_formats[in] list of additional formats to support
Returns corrected format value
"""
formats = _PERMITTED_FORMATS
formats.extend(extra_formats)
parser.add_option("-f", "--format", action="store", dest="format",
default=default_val, help=help_text, type="choice",
choices=formats)
def add_verbosity(parser, quiet=True):
"""Add the verbosity and quiet options.
parser[in] the parser instance
quiet[in] if True, include the --quiet option
(default is True)
"""
parser.add_option("-v", "--verbose", action="count", dest="verbosity",
help="control how much information is displayed. "
"e.g., -v = verbose, -vv = more verbose, -vvv = debug")
if quiet:
parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
help="turn off all messages for quiet execution.",
default=False)
def check_verbosity(options):
"""Check to see if both verbosity and quiet are being used.
"""
# Warn if quiet and verbosity are both specified
if options.quiet is not None and options.quiet and \
options.verbosity is not None and options.verbosity > 0:
print "WARNING: --verbosity is ignored when --quiet is specified."
options.verbosity = None
def add_changes_for(parser, default="server1"):
"""Add the changes_for option.
parser[in] the parser instance
"""
parser.add_option("--changes-for", action="store", dest="changes_for",
type="choice", default=default, help="specify the "
"server to show transformations to match the other "
"server. For example, to see the transformation for "
"transforming server1 to match server2, use "
"--changes-for=server1. Valid values are 'server1' or "
"'server2'. The default is 'server1'.",
choices=['server1', 'server2'])
def add_reverse(parser):
"""Add the show-reverse option.
parser[in] the parser instance
"""
parser.add_option("--show-reverse", action="store_true", dest="reverse",
default=False, help="produce a transformation report "
"containing the SQL statements to transform the object "
"definitions specified in reverse. For example if "
"--changes-for is set to server1, also generate the "
"transformation for server2. Note: the reverse changes "
"are annotated and marked as comments.")
def add_difftype(parser, allow_sql=False, default="unified"):
"""Add the difftype option.
parser[in] the parser instance
allow_sql[in] if True, allow sql as a valid option
(default is False)
default[in] the default option
(default is unified)
"""
choice_list = ['unified', 'context', 'differ']
if allow_sql:
choice_list.append('sql')
parser.add_option("-d", "--difftype", action="store", dest="difftype",
type="choice", default="unified", choices=choice_list,
help="display differences in context format in one of "
"the following formats: [%s] (default: unified)." %
'|'.join(choice_list))
def add_engines(parser):
"""Add the engine and default-storage-engine options.
parser[in] the parser instance
"""
# Add engine
parser.add_option("--new-storage-engine", action="store",
dest="new_engine", default=None, help="change all "
"tables to use this storage engine if storage engine "
"exists on the destination.")
# Add default storage engine
parser.add_option("--default-storage-engine", action="store",
dest="def_engine", default=None, help="change all "
"tables to use this storage engine if the original "
"storage engine does not exist on the destination.")
def check_engine_options(server, new_engine, def_engine,
fail=False, quiet=False):
"""Check to see if storage engines specified in options exist.
This method will check to see if the storage engine in new exists on the
server. If new_engine is None, the check is skipped. If the storage engine
does not exist and fail is True, an exception is thrown else if quiet is
False, a warning message is printed.
Similarly, def_engine will be checked and if not present and fail is True,
an exception is thrown else if quiet is False a warning is printed.
server[in] server instance to be checked
new_engine[in] new storage engine
def_engine[in] default storage engine
fail[in] If True, issue exception on failure else print warning
default = False
quiet[in] If True, suppress warning messages (not exceptions)
default = False
"""
def _find_engine(server, target, message, fail, default):
"""Find engine
"""
if target is not None:
found = server.has_storage_engine(target)
if not found and fail:
raise UtilError(message)
elif not found and not quiet:
print message
server.get_storage_engines()
message = "WARNING: %s storage engine %s is not supported on the server."
_find_engine(server, new_engine,
message % ("New", new_engine),
fail, quiet)
_find_engine(server, def_engine,
message % ("Default", def_engine),
fail, quiet)
def add_all(parser, objects):
"""Add the --all option.
parser[in] the parser instance
objects[in] name of the objects for which all includes
"""
parser.add_option("-a", "--all", action="store_true", dest="all",
default=False, help="include all %s" % objects)
def check_all(parser, options, args, objects):
"""Check to see if both all and specific arguments are used.
This method will throw an exception if there are arguments listed and
the all option has been turned on.
parser[in] the parser instance
options[in] command options
args[in] arguments list
objects[in] name of the objects for which all includes
"""
if options.all and len(args) > 0:
parser.error("You cannot use the --all option with a list of "
"%s." % objects)
def add_locking(parser):
"""Add the --locking option.
parser[in] the parser instance
"""
parser.add_option("--locking", action="store", dest="locking",
type="choice", default="snapshot",
choices=['no-locks', 'lock-all', 'snapshot'],
help="choose the lock type for the operation: no-locks "
"= do not use any table locks, lock-all = use table "
"locks but no transaction and no consistent read, "
"snaphot (default): consistent read using a single "
"transaction.")
def add_regexp(parser):
"""Add the --regexp option.
parser[in] the parser instance
"""
parser.add_option("-G", "--basic-regexp", "--regexp", dest="use_regexp",
action="store_true", default=False, help="use 'REGEXP' "
"operator to match pattern. Default is to use 'LIKE'.")
def add_rpl_user(parser, default_val="rpl:rpl"):
"""Add the --rpl-user option.
parser[in] the parser instance
default_val[in] default value for user, password
Default = rpl, rpl
"""
parser.add_option("--rpl-user", action="store", dest="rpl_user",
type="string", default=default_val,
help="the user and password for the replication "
"user requirement, in the form: <user>[:<password>]"
" or <login-path>. E.g. rpl:passwd - By default = "
"%default")
def add_rpl_mode(parser, do_both=True, add_file=True):
"""Add the --rpl and --rpl-file options.
parser[in] the parser instance
do_both[in] if True, include the "both" value for the --rpl option
Default = True
add_file[in] if True, add the --rpl-file option
Default = True
"""
rpl_mode_both = ""
rpl_mode_options = _PERMITTED_RPL_DUMP
if do_both:
rpl_mode_options.append("both")
rpl_mode_both = (", and 'both' = include 'master' and 'slave' options "
"where applicable")
parser.add_option("--rpl", "--replication", dest="rpl_mode",
action="store", help="include replication information. "
"Choices: 'master' = include the CHANGE MASTER command "
"using the source server as the master, "
"'slave' = include the CHANGE MASTER command for "
"the source server's master (only works if the source "
"server is a slave){0}.".format(rpl_mode_both),
choices=rpl_mode_options)
if add_file:
parser.add_option("--rpl-file", "--replication-file", dest="rpl_file",
action="store", help="path and file name to place "
"the replication information generated. Valid on if "
"the --rpl option is specified.")
def check_rpl_options(parser, options):
"""Check replication dump options for validity
This method ensures the optional --rpl-* options are valid only when
--rpl is specified.
parser[in] the parser instance
options[in] command options
"""
if options.rpl_mode is None:
errors = []
if parser.has_option("--comment-rpl") and options.rpl_file is not None:
errors.append("--rpl-file")
if options.rpl_user is not None:
errors.append("--rpl-user")
# It's Ok if the options do not include --comment-rpl
if parser.has_option("--comment-rpl") and options.comment_rpl:
errors.append("--comment-rpl")
if len(errors) > 1:
num_opt_str = "s"
else:
num_opt_str = ""
if len(errors) > 0:
parser.error("The %s option%s must be used with the --rpl "
"option." % (", ".join(errors), num_opt_str))
def add_discover_slaves_option(parser):
"""Add the --discover-slaves-login option.
This method adds the --discover-slaves-login option that is used to
discover the list of slaves associated to the specified login (user and
password).
parser[in] the parser instance.
"""
parser.add_option("--discover-slaves-login", action="store",
dest="discover", default=None, type="string",
help="at startup, query master for all registered "
"slaves and use the user name and password specified to "
"connect. Supply the user and password in the form "
"<user>[:<password>] or <login-path>. For example, "
"--discover-slaves-login=joe:secret will use 'joe' as "
"the user and 'secret' as the password for each "
"discovered slave.")
def add_log_option(parser):
"""Add the --log option.
This method adds the --log option that is used the specify the target file
for logging messages from the utility.
parser[in] the parser instance.
"""
parser.add_option("--log", action="store", dest="log_file", default=None,
type="string", help="specify a log file to use for "
"logging messages")
def add_master_option(parser):
"""Add the --master option.
This method adds the --master option that is used to specify the connection
string for the server with the master role.
parser[in] the parser instance.
"""
parser.add_option("--master", action="store", dest="master", default=None,
type="string", help="connection information for master "
"server in the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>]")
def add_slaves_option(parser):
"""Add the --slaves option.
This method adds the --slaves option that is used to specify a list of
slaves, more precisely their connection strings (separated by comma).
parser[in] the parser instance.
"""
parser.add_option("--slaves", action="store", dest="slaves",
type="string", default=None,
help="connection information for slave servers in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>]. "
"List multiple slaves in comma-separated list.")
def add_failover_options(parser):
"""Add the common failover options.
This adds the following options:
--candidates
--discover-slaves-login
--exec-after
--exec-before
--log
--log-age
--master
--max-position
--ping
--seconds-behind
--slaves
--timeout
--script-threshold
parser[in] the parser instance
"""
parser.add_option("--candidates", action="store", dest="candidates",
type="string", default=None,
help="connection information for candidate slave servers"
" for failover in the form: <user>[:<password>]@<host>[:"
"<port>][:<socket>] or <login-path>[:<port>][:<socket>]."
" Valid only with failover command. List multiple slaves"
" in comma-separated list.")
add_discover_slaves_option(parser)
parser.add_option("--exec-after", action="store", dest="exec_after",
default=None, type="string", help="name of script to "
"execute after failover or switchover")
parser.add_option("--exec-before", action="store", dest="exec_before",
default=None, type="string", help="name of script to "
"execute before failover or switchover")
add_log_option(parser)
parser.add_option("--log-age", action="store", dest="log_age", default=7,
type="int", help="specify maximum age of log entries in "
"days. Entries older than this will be purged on "
"startup. Default = 7 days.")
add_master_option(parser)
parser.add_option("--max-position", action="store", dest="max_position",
default=0, type="int", help="used to detect slave "
"delay. The maximum difference between the master's "
"log position and the slave's reported read position of "
"the master. A value greater than this means the slave "
"is too far behind the master. Default is 0.")
parser.add_option("--ping", action="store", dest="ping", default=None,
help="Number of ping attempts for detecting downed "
"server.")
parser.add_option("--seconds-behind", action="store", dest="max_delay",
default=0, type="int", help="used to detect slave "
"delay. The maximum number of seconds behind the master "
"permitted before slave is considered behind the "
"master. Default is 0.")
add_slaves_option(parser)
parser.add_option("--timeout", action="store", dest="timeout", default=300,
help="maximum timeout in seconds to wait for each "
"replication command to complete. For example, timeout "
"for slave waiting to catch up to master. "
"Default = 300.")
parser.add_option("--script-threshold", action="store", default=None,
dest="script_threshold",
help="Value for external scripts to trigger aborting "
"the operation if result is greater than or equal to "
"the threshold. Default = None (no threshold "
"checking).")
def check_server_lists(parser, master, slaves):
"""Check to see if master is listed in slaves list
Returns bool - True = master not in slaves, issue error if it appears
"""
if slaves:
for slave in slaves.split(',', 1):
if master == slave:
parser.error("You cannot list the master as a slave.")
return True
def obj2sql(obj):
"""Convert a Python object to an SQL object.
This function convert Python objects to SQL values using the
conversion functions in the database connector package."""
return MySQLConverter().quote(obj)
def parse_user_password(userpass_values, my_defaults_reader=None,
options=None):
""" This function parses a string with the user/password credentials.
This function parses the login string, determines the used format, i.e.
user[:password] or login-path. If the ':' (colon) is not in the login
string, the it can refer to a login-path or to a username (without a
password). In this case, first it is assumed that the specified value is a
login-path and the function attempts to retrieve the associated username
and password, in a quiet way (i.e., without raising exceptions). If it
fails to retrieve the login-path data, then the value is assumed to be a
username.
userpass_values[in] String indicating the user/password credentials. It
must be in the form: user[:password] or login-path.
my_defaults_reader[in] Instance of MyDefaultsReader to read the
information of the login-path from configuration
files. By default, the value is None.
options[in] Dictionary of options (e.g. basedir), from the used
utility. By default, it set with an empty
dictionary. Note: also supports options values
from optparse.
Returns a tuple with the username and password.
"""
if options is None:
options = {}
# Split on the ':' to determine if a login-path is used.
login_values = userpass_values.split(':')
if len(login_values) == 1:
# Format is login-path or user (without a password): First, assume it
# is a login-path and quietly try to retrieve the user and password.
# If it fails, assume a user name is being specified.
#Check if the login configuration file (.mylogin.cnf) exists
if login_values[0] and not my_login_config_exists():
return login_values[0], None
if not my_defaults_reader:
# Attempt to create the MyDefaultsReader
try:
my_defaults_reader = MyDefaultsReader(options)
except UtilError:
# Raise an UtilError when my_print_defaults tool is not found.
return login_values[0], None
elif not my_defaults_reader.tool_path:
# Try to find the my_print_defaults tool
try:
my_defaults_reader.search_my_print_defaults_tool()
except UtilError:
# Raise an UtilError when my_print_defaults tool is not found.
return login_values[0], None
# Check if the my_print_default tool is able to read a login-path from
# the mylogin configuration file
if not my_defaults_reader.check_login_path_support():
return login_values[0], None
# Read and parse the login-path data (i.e., user and password)
try:
loginpath_data = my_defaults_reader.get_group_data(login_values[0])
if loginpath_data:
user = loginpath_data.get('user', None)
passwd = loginpath_data.get('password', None)
return user, passwd
else:
return login_values[0], None
except UtilError:
# Raise an UtilError if unable to get the login-path group data
return login_values[0], None
elif len(login_values) == 2:
# Format is user:password; return a tuple with the user and password
return login_values[0], login_values[1]
else:
# Invalid user credentials format
        raise FormatError("Unable to parse the specified user credentials "
                          "(accepted formats: <user>[:<password>] or "
                          "<login-path>): %s" % userpass_values)
def add_basedir_option(parser):
""" Add the --basedir option.
"""
parser.add_option("--basedir", action="store", dest="basedir",
default=None, type="string",
help="the base directory for the server")
def check_basedir_option(parser, opt_basedir):
""" Check if the specified --basedir option is valid.
"""
if opt_basedir and not os.path.isdir(get_absolute_path(opt_basedir)):
parser.error("The specified path for --basedir option is not a "
"directory: %s" % opt_basedir)
def get_absolute_path(path):
""" Returns the absolute path.
"""
return os.path.abspath(os.path.expanduser(os.path.normpath(path)))
def db_objects_list_to_dictionary(parser, obj_list, option_desc):
"""Process database object list and convert to a dictionary.
Check the qualified name format of the given database objects and convert
the given list of object to a dictionary organized by database names and
sets of specific objects.
Note: It is assumed that the given object list is obtained from the
arguments or an option returned by the parser.
parser[in] Instance of the used option/arguments parser
obj_list[in] List of objects to process.
option_desc[in] Short description of the option for the object list (e.g.,
"the --exclude option", "the database/table arguments") to
refer appropriately in any parsing error.
returns a dictionary with the objects grouped by database (without
duplicates). None value associated to a database entry means that all
objects are to be considered.
E.g. {'db_name1': set(['table1','table2']), 'db_name2': None}.
"""
db_objs_dict = {}
obj_name_regexp = re.compile(REGEXP_QUALIFIED_OBJ_NAME)
for obj_name in obj_list:
m_obj = obj_name_regexp.match(obj_name)
if not m_obj:
parser.error(PARSE_ERR_OBJ_NAME_FORMAT.format(
obj_name=obj_name, option=option_desc
))
else:
db_name, obj_name = m_obj.groups()
# Remove backtick quotes.
db_name = remove_backtick_quoting(db_name) \
if is_quoted_with_backticks(db_name) else db_name
obj_name = remove_backtick_quoting(obj_name) \
if obj_name and is_quoted_with_backticks(obj_name) \
else obj_name
# Add database object to result dictionary.
if not obj_name:
# If only the database is specified, then add entry with
# db name and value None (to include all object) even if a
# previous specific object was already added.
if db_name in db_objs_dict:
if db_objs_dict[db_name]:
db_objs_dict[db_name] = None
else:
db_objs_dict[db_name] = None
else:
                # If a specific object is given, add it to the set
# associated to the database, except if the database entry
# is None (meaning that all objects are included).
if db_name in db_objs_dict:
if db_objs_dict[db_name]:
db_objs_dict[db_name].add(obj_name)
else:
db_objs_dict[db_name] = set([obj_name])
return db_objs_dict
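# --- Illustrative sketch (not part of mysql-utilities) ------------------------
# Hypothetical use of the helpers above; the program name and credential string
# are assumptions for illustration.
if __name__ == "__main__":
    example_parser = setup_common_options("example_util", "Example utility",
                                          "%prog --server=user:pass@host:3306")
    add_verbosity(example_parser)
    add_skip_options(example_parser)
    opts, arguments = example_parser.parse_args()
    print check_skip_options(opts.skip_objects)
    print parse_user_password("root:secret")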
|
PypiClean
|
/who_added_this_tag-0.0.1.tar.gz/who_added_this_tag-0.0.1/who_added_this_tag/statistics.py
|
import thin_osm_api_wrapper
import csv
import collections
from osm_bot_abstraction_layer.overpass_downloader import download_overpass_query
from osm_iterator import osm_iterator
import time
import datetime
def download_object_list(config, filepath):
area_query = ""
area_filter = ""
if config['area_identifier_key'] != None:
area_query = "\n area['" + config['area_identifier_key'] + "'='" + config['area_identifier_value'] + "']->.searchArea;\n"
area_filter = "(area.searchArea)"
value_part = ""
if config['value'] != None:
value_part = """'='""" + config['value']
download_query = """
[out:xml][timeout:25];""" + area_query + """
(
nwr['""" + config['key'] + value_part + """']""" + area_filter + """;
);
out skel qt;
"""
"""
note:
use
out skel qt;
instead of
out body;
>;
otherwise total object count will be inaccurate
"""
download_overpass_query(download_query, filepath)
def this_tags_are_matching_what_was_requested(tags, config):
if config['key'] in tags:
if config['value'] == None:
return True
if tags[config['key']] == config['value']:
return True
return False
def record_objects(element):
global osm_object_store
print(element.element.tag, element.element.attrib['id'])
osm_object_store.append({"type": element.get_type(), "id": element.get_id()})
def create_object_store_from_downloaded(filepath):
global osm_object_store
osm_object_store = []
osm = osm_iterator.Data(filepath)
osm.iterate_over_data(record_objects)
return osm_object_store
def sleep_before_retry(error_summary, url, params, json_data):
print("sleeping before retry due to", error_summary)
print(url)
print(params)
print(json_data)
print()
    time.sleep(100)
print()
print("retrying on", datetime.now().strftime("%H:%M:%S (%Y-%m-%d)"))
def process_case(config):
filepath = "output.osm"
download_object_list(config, filepath)
osm_object_store = create_object_store_from_downloaded(filepath)
changeset_list = ""
users = collections.Counter()
edits = {}
objects = {}
for entry in osm_object_store:
object_type = entry["type"]
object_id = entry["id"]
for history_revision in thin_osm_api_wrapper.api.history_json(object_type, object_id, user_agent='who_added_this_tag_script'):
if 'tags' in history_revision:
if this_tags_are_matching_what_was_requested(history_revision['tags'], config):
# uncomment following line to show all relevant changesets
#print("https://www.openstreetmap.org/changeset/"+ str(history_revision['changeset']))
uid = history_revision['uid']
user = history_revision['user']
object_link = "https://www.openstreetmap.org/" + object_type + '/' + object_id + "/history"
changeset_link = "https://www.openstreetmap.org/changeset/" + str(history_revision['changeset'])
users[user] = users[user] + 1
if user in config['list_all_edits_made_by_this_users'] or config['list_all_edits']:
changeset_list += changeset_link + " editing "+ object_link + "\n\n"
if users[user] == 1 or edits[user] < int(history_revision['changeset']):
edits[user] = int(history_revision['changeset'])
objects[user] = object_link
break
summary(users, edits, objects, config)
print(len(osm_object_store), "objects exist in total")
print(changeset_list)
def summary(users, edits, objects, config):
print(users)
total = 0
for entry in users.most_common():
name = entry[0]
count = entry[1]
total += count
print("https://www.openstreetmap.org/user/"+name.replace(" ", "%20"), "https://www.openstreetmap.org/changeset/"+ str(edits[name]), objects[name], count)
print(total, "objects listed with their authors")
print()
value_description = ""
if config['value'] != None:
value_description = "|" + config['value']
tag_description = "{{tag|" + config['key'] + value_description + "}}"
print(usage_table(tag_description, users, edits, objects))
def usage_table(tag_description, users, edits, objects):
returned = """{| class="wikitable"
|+ What added """ + tag_description + """?
|-
! User !! Example changeset !! History of object !! Count""" + "\n"
for entry in users.most_common():
name = entry[0]
count = entry[1]
returned += """|-
| """ + name + """ || """ + "https://www.openstreetmap.org/changeset/"+ str(edits[name]) + """ || """ + objects[name] + """ || """ + str(count) + "\n"
returned += """|}
Generated with https://codeberg.org/matkoniecz/who-added-this-tag - counts the first addition to objects that were carrying this tag as of """ + datetime.datetime.now().strftime("%Y-%m-%d") + "."
return returned
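# A minimal config sketch (hypothetical values; the keys are the ones read by the
# functions above):
#
# process_case({
#     'key': 'amenity',
#     'value': 'fountain',  # or None to match any value of the key
#     'area_identifier_key': 'name',
#     'area_identifier_value': 'Warszawa',
#     'list_all_edits': False,
#     'list_all_edits_made_by_this_users': [],
# })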
/ressources/lib/node_modules/highcharts/modules/exporting.src.js
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define(function () {
return factory;
});
} else {
factory(Highcharts);
}
}(function (Highcharts) {
(function (H) {
/**
* Exporting module
*
* (c) 2010-2017 Torstein Honsi
*
* License: www.highcharts.com/license
*/
/* eslint indent:0 */
// create shortcuts
var defaultOptions = H.defaultOptions,
doc = H.doc,
Chart = H.Chart,
addEvent = H.addEvent,
removeEvent = H.removeEvent,
fireEvent = H.fireEvent,
createElement = H.createElement,
discardElement = H.discardElement,
css = H.css,
merge = H.merge,
pick = H.pick,
each = H.each,
objectEach = H.objectEach,
extend = H.extend,
isTouchDevice = H.isTouchDevice,
win = H.win,
userAgent = win.navigator.userAgent,
SVGRenderer = H.SVGRenderer,
symbols = H.Renderer.prototype.symbols,
isMSBrowser = /Edge\/|Trident\/|MSIE /.test(userAgent),
isFirefoxBrowser = /firefox/i.test(userAgent);
// Add language
extend(defaultOptions.lang, {
/**
* Exporting module only. The text for the menu item to print the chart.
*
* @type {String}
* @default Print chart
* @since 3.0.1
* @apioption lang.printChart
*/
printChart: 'Print chart',
/**
* Exporting module only. The text for the PNG download menu item.
*
* @type {String}
* @default Download PNG image
* @since 2.0
* @apioption lang.downloadPNG
*/
downloadPNG: 'Download PNG image',
/**
* Exporting module only. The text for the JPEG download menu item.
*
* @type {String}
* @default Download JPEG image
* @since 2.0
* @apioption lang.downloadJPEG
*/
downloadJPEG: 'Download JPEG image',
/**
* Exporting module only. The text for the PDF download menu item.
*
* @type {String}
* @default Download PDF document
* @since 2.0
* @apioption lang.downloadPDF
*/
downloadPDF: 'Download PDF document',
/**
* Exporting module only. The text for the SVG download menu item.
*
* @type {String}
* @default Download SVG vector image
* @since 2.0
* @apioption lang.downloadSVG
*/
downloadSVG: 'Download SVG vector image',
/**
* Exporting module menu. The tooltip title for the context menu holding
* print and export menu items.
*
* @type {String}
* @default Chart context menu
* @since 3.0
* @apioption lang.contextButtonTitle
*/
contextButtonTitle: 'Chart context menu'
});
// Buttons and menus are collected in a separate config option set called
// 'navigation'. This can be extended later to add control buttons like zoom and
// pan right click menus.
defaultOptions.navigation = {
buttonOptions: {
theme: {},
/**
* Whether to enable buttons.
*
* @type {Boolean}
* @sample highcharts/navigation/buttonoptions-enabled/
* Exporting module loaded but buttons disabled
* @default true
* @since 2.0
* @apioption navigation.buttonOptions.enabled
*/
/**
* The pixel size of the symbol on the button.
*
* @type {Number}
* @sample highcharts/navigation/buttonoptions-height/
* Bigger buttons
* @default 14
* @since 2.0
* @apioption navigation.buttonOptions.symbolSize
*/
symbolSize: 14,
/**
* The x position of the center of the symbol inside the button.
*
* @type {Number}
* @sample highcharts/navigation/buttonoptions-height/
* Bigger buttons
* @default 12.5
* @since 2.0
* @apioption navigation.buttonOptions.symbolX
*/
symbolX: 12.5,
/**
* The y position of the center of the symbol inside the button.
*
* @type {Number}
* @sample highcharts/navigation/buttonoptions-height/
* Bigger buttons
* @default 10.5
* @since 2.0
* @apioption navigation.buttonOptions.symbolY
*/
symbolY: 10.5,
/**
* Alignment for the buttons.
*
* @validvalue ["left", "center", "right"]
* @type {String}
* @sample highcharts/navigation/buttonoptions-align/
* Center aligned
* @default right
* @since 2.0
* @apioption navigation.buttonOptions.align
*/
align: 'right',
/**
* The pixel spacing between buttons.
*
* @type {Number}
* @default 3
* @since 2.0
* @apioption navigation.buttonOptions.buttonSpacing
*/
buttonSpacing: 3,
/**
* Pixel height of the buttons.
*
* @type {Number}
* @sample highcharts/navigation/buttonoptions-height/
* Bigger buttons
* @default 22
* @since 2.0
* @apioption navigation.buttonOptions.height
*/
height: 22,
/**
* A text string to add to the individual button.
*
* @type {String}
* @sample highcharts/exporting/buttons-text/
* Full text button
* @sample highcharts/exporting/buttons-text-symbol/
* Combined symbol and text
* @default null
* @since 3.0
* @apioption navigation.buttonOptions.text
*/
/**
* The vertical offset of the button's position relative to its
* `verticalAlign`.
*
* @type {Number}
* @sample highcharts/navigation/buttonoptions-verticalalign/
* Buttons at lower right
* @default 0
* @since 2.0
* @apioption navigation.buttonOptions.y
*/
/**
* The vertical alignment of the buttons. Can be one of "top", "middle"
* or "bottom".
*
* @validvalue ["top", "middle", "bottom"]
* @type {String}
* @sample highcharts/navigation/buttonoptions-verticalalign/
* Buttons at lower right
* @default top
* @since 2.0
* @apioption navigation.buttonOptions.verticalAlign
*/
verticalAlign: 'top',
/**
* The pixel width of the button.
*
* @type {Number}
* @sample highcharts/navigation/buttonoptions-height/
* Bigger buttons
* @default 24
* @since 2.0
* @apioption navigation.buttonOptions.width
*/
width: 24
}
};
// Presentational attributes
merge(true, defaultOptions.navigation,
/**
* A collection of options for buttons and menus appearing in the exporting
* module.
* @type {Object}
* @optionparent navigation
*/
{
/**
* CSS styles for the popup menu appearing by default when the export
* icon is clicked. This menu is rendered in HTML.
*
* @type {CSSObject}
* @see In styled mode, the menu is styled with the `.highcharts-menu`
* class.
* @sample highcharts/navigation/menustyle/ Light gray menu background
* @default { "border": "1px solid #999999", "background": "#ffffff", "padding": "5px 0" }
* @since 2.0
*/
menuStyle: {
border: '1px solid #999999',
background: '#ffffff',
padding: '5px 0'
},
/**
* CSS styles for the individual items within the popup menu appearing
* by default when the export icon is clicked. The menu items are rendered
* in HTML.
*
* @type {CSSObject}
* @see In styled mode, the menu items are styled with the
* `.highcharts-menu-item` class.
* @sample {highcharts} highcharts/navigation/menuitemstyle/
* Add a grey stripe to the left
* @default { "padding": "0.5em 1em", "color": "#333333", "background": "none" }
* @since 2.0
*/
menuItemStyle: {
padding: '0.5em 1em',
background: 'none',
color: '#333333',
/**
* Defaults to `14px` on touch devices and `11px` on desktop.
* @type {String}
*/
fontSize: isTouchDevice ? '14px' : '11px',
transition: 'background 250ms, color 250ms'
},
/**
* CSS styles for the hover state of the individual items within the
* popup menu appearing by default when the export icon is clicked.
* The menu items are rendered in HTML.
*
* @type {CSSObject}
* @see In styled mode, the menu items are styled with the
* `.highcharts-menu-item` class.
* @sample highcharts/navigation/menuitemhoverstyle/ Bold text on hover
* @default { "background": "#335cad", "color": "#ffffff" }
* @since 2.0
*/
menuItemHoverStyle: {
background: '#335cad',
color: '#ffffff'
},
/**
* A collection of options for buttons appearing in the exporting module.
*
*
* In styled mode, the buttons are styled with the
* `.highcharts-contextbutton` and `.highcharts-button-symbol` classes.
*
*/
buttonOptions: {
/**
* Fill color for the symbol within the button.
*
* @type {Color}
* @sample highcharts/navigation/buttonoptions-symbolfill/
* Blue symbol stroke for one of the buttons
* @default #666666
* @since 2.0
*/
symbolFill: '#666666',
/**
* The color of the symbol's stroke or line.
*
* @type {Color}
* @sample highcharts/navigation/buttonoptions-symbolstroke/
* Blue symbol stroke
* @default #666666
* @since 2.0
*/
symbolStroke: '#666666',
/**
* The pixel stroke width of the symbol on the button.
*
* @type {Number}
* @sample highcharts/navigation/buttonoptions-height/
* Bigger buttons
* @default 1
* @since 2.0
*/
symbolStrokeWidth: 3,
/**
* A configuration object for the button theme. The object accepts
* SVG properties like `stroke-width`, `stroke` and `fill`. Tri-state
* button styles are supported by the `states.hover` and `states.select`
* objects.
*
* @type {Object}
* @sample highcharts/navigation/buttonoptions-theme/
* Theming the buttons
* @since 3.0
*/
theme: {
/**
* The default fill exists only to capture hover events.
* @type {String}
*/
fill: '#ffffff',
/**
* @type {String}
*/
stroke: 'none',
/**
* @type {Number}
* @default 5
*/
padding: 5
}
}
});
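// Usage sketch (not part of the original source): these presentational defaults
// can be overridden globally before chart creation, e.g. (hypothetical values):
//
//     Highcharts.setOptions({
//         navigation: {
//             menuItemStyle: { fontSize: '12px' },
//             menuItemHoverStyle: { background: '#000000' }
//         }
//     });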
// Add the export related options
/**
* Options for the exporting module. For an overview on the matter, see
* [the docs](https://www.highcharts.com/docs/export-module/export-module-overview).
* @type {Object}
* @optionparent exporting
*/
defaultOptions.exporting = {
/**
* Experimental setting to allow HTML inside the chart (added through
* the `useHTML` options), directly in the exported image. This allows
* you to preserve complicated HTML structures like tables or bi-directional
* text in exported charts.
*
* Disclaimer: The HTML is rendered in a `foreignObject` tag in the
* generated SVG. The official export server is based on PhantomJS,
* which supports this, but other SVG clients, like Batik, do not
* support it. This also applies to downloaded SVG that you want to
* open in a desktop client.
*
* @type {Boolean}
* @default false
* @since 4.1.8
* @apioption exporting.allowHTML
*/
/**
* Additional chart options to be merged into an exported chart. For
* example, a common use case is to add data labels to improve readability
* of the exported chart, or to add a printer-friendly color scheme.
*
* @type {Object}
* @sample {highcharts} highcharts/exporting/chartoptions-data-labels/
* Added data labels
* @sample {highstock} highcharts/exporting/chartoptions-data-labels/
* Added data labels
* @default null
* @apioption exporting.chartOptions
*/
/**
* Whether to enable the exporting module. Disabling the module will
* hide the context button, but API methods will still be available.
*
* @type {Boolean}
* @sample {highcharts} highcharts/exporting/enabled-false/
* Exporting module is loaded but disabled
* @sample {highstock} highcharts/exporting/enabled-false/
* Exporting module is loaded but disabled
* @default true
* @since 2.0
* @apioption exporting.enabled
*/
/**
* Function to call if the offline-exporting module fails to export
* a chart on the client side, and [fallbackToExportServer](
* #exporting.fallbackToExportServer) is disabled. If left undefined, an
* exception is thrown instead. Receives two parameters, the exporting
* options, and the error from the module.
*
* @type {Function}
* @see [fallbackToExportServer](#exporting.fallbackToExportServer)
* @default undefined
* @since 5.0.0
* @apioption exporting.error
*/
/**
* Whether or not to fall back to the export server if the offline-exporting
* module is unable to export the chart on the client side. This happens for
* certain browsers, and certain features (e.g.
* [allowHTML](#exporting.allowHTML)), depending on the image type exporting
* to. For very complex charts, it is possible that export can fail in
* browsers that don't support Blob objects, due to data URL length limits.
* It is recommended to define the [exporting.error](#exporting.error)
* handler if disabling fallback, in order to notify users in case export
* fails.
*
* @type {Boolean}
* @default true
* @since 4.1.8
* @apioption exporting.fallbackToExportServer
*/
/**
* The filename, without extension, to use for the exported chart.
*
* @type {String}
* @sample {highcharts} highcharts/exporting/filename/ Custom file name
* @sample {highstock} highcharts/exporting/filename/ Custom file name
* @default chart
* @since 2.0
* @apioption exporting.filename
*/
/**
* An object containing additional attributes for the POST form that
* sends the SVG to the export server. For example, a `target` can be
* set to make sure the generated image is received in another frame,
* or a custom `enctype` or `encoding` can be set.
*
* @type {Object}
* @since 3.0.8
* @apioption exporting.formAttributes
*/
/**
* Path where Highcharts will look for export module dependencies to
* load on demand if they don't already exist on `window`. Should currently
* point to location of [CanVG](https://github.com/canvg/canvg) library,
* [RGBColor.js](https://github.com/canvg/canvg), [jsPDF](https://github.
* com/yWorks/jsPDF) and [svg2pdf.js](https://github.com/yWorks/svg2pdf.
* js), required for client side export in certain browsers.
*
* @type {String}
* @default https://code.highcharts.com/{version}/lib
* @since 5.0.0
* @apioption exporting.libURL
*/
/**
* Analogous to [sourceWidth](#exporting.sourceWidth).
*
* @type {Number}
* @since 3.0
* @apioption exporting.sourceHeight
*/
/**
* The width of the original chart when exported, unless an explicit
* [chart.width](#chart.width) is set. The width exported raster image
* is then multiplied by [scale](#exporting.scale).
*
* @type {Number}
* @sample {highcharts} highcharts/exporting/sourcewidth/ Source size demo
* @sample {highstock} highcharts/exporting/sourcewidth/ Source size demo
* @sample {highmaps} maps/exporting/sourcewidth/ Source size demo
* @since 3.0
* @apioption exporting.sourceWidth
*/
/**
* The pixel width of charts exported to PNG or JPG. As of Highcharts
* 3.0, the default pixel width is a function of the [chart.width](
* #chart.width) or [exporting.sourceWidth](#exporting.sourceWidth) and the
* [exporting.scale](#exporting.scale).
*
* @type {Number}
* @sample {highcharts} highcharts/exporting/width/
* Export to 200px wide images
* @sample {highstock} highcharts/exporting/width/
* Export to 200px wide images
* @default undefined
* @since 2.0
* @apioption exporting.width
*/
/**
* Default MIME type for exporting if `chart.exportChart()` is called
* without specifying a `type` option. Possible values are `image/png`,
* `image/jpeg`, `application/pdf` and `image/svg+xml`.
*
* @validvalue ["image/png", "image/jpeg", "application/pdf", "image/svg+xml"]
* @since 2.0
*/
type: 'image/png',
/**
* The URL for the server module converting the SVG string to an image
* format. By default this points to Highcharts' free web service.
*
* @type {String}
* @default https://export.highcharts.com
* @since 2.0
*/
url: 'https://export.highcharts.com/',
/**
* When printing the chart from the menu item in the burger menu, if
* the on-screen chart exceeds this width, it is resized. After printing,
* or if printing is cancelled, it is restored. The default width makes the
* chart fit into a typical paper format. Note that this does not affect the
* chart when printing the web page as a whole.
*
* @type {Number}
* @default 780
* @since 4.2.5
*/
printMaxWidth: 780,
/**
* Defines the scale or zoom factor for the exported image compared
* to the on-screen display. While for instance a 600px wide chart
* may look good on a website, it will look bad in print. The default
* scale of 2 makes this chart export to a 1200px PNG or JPG.
*
* @see [chart.width](#chart.width),
* [exporting.sourceWidth](#exporting.sourceWidth)
* @sample {highcharts} highcharts/exporting/scale/ Scale demonstrated
* @sample {highstock} highcharts/exporting/scale/ Scale demonstrated
* @sample {highmaps} maps/exporting/scale/ Scale demonstrated
* @since 3.0
*/
scale: 2,
/**
* Options for the export related buttons, print and export. In addition
* to the default buttons listed here, custom buttons can be added.
* See [navigation.buttonOptions](#navigation.buttonOptions) for general
* options.
*
*/
buttons: {
/**
* Options for the export button.
*
* In styled mode, export button styles can be applied with the
* `.highcharts-contextbutton` class.
*
* @extends navigation.buttonOptions
*/
contextButton: {
/**
* A click handler callback to use on the button directly instead of
* the popup menu.
*
* @type {Function}
* @sample highcharts/exporting/buttons-contextbutton-onclick/
* Skip the menu and export the chart directly
* @since 2.0
* @apioption exporting.buttons.contextButton.onclick
*/
/**
* See [navigation.buttonOptions.symbolFill](
* #navigation.buttonOptions.symbolFill).
*
* @type {Color}
* @default #666666
* @since 2.0
* @apioption exporting.buttons.contextButton.symbolFill
*/
/**
* The horizontal position of the button relative to the `align`
* option.
*
* @type {Number}
* @default -10
* @since 2.0
* @apioption exporting.buttons.contextButton.x
*/
/**
* The class name of the context button.
* @type {String}
*/
className: 'highcharts-contextbutton',
/**
* The class name of the menu appearing from the button.
* @type {String}
*/
menuClassName: 'highcharts-contextmenu',
/**
* The symbol for the button. Points to a definition function in
* the `Highcharts.Renderer.symbols` collection. The default
* `exportIcon` function is part of the exporting module.
*
* @validvalue ["exportIcon", "circle", "square", "diamond", "triangle", "triangle-down", "menu"]
* @type {String}
* @sample highcharts/exporting/buttons-contextbutton-symbol/
* Use a circle for symbol
* @sample highcharts/exporting/buttons-contextbutton-symbol-custom/
* Custom shape as symbol
* @default menu
* @since 2.0
*/
symbol: 'menu',
/**
* The key to a [lang](#lang) option setting that is used for the
* button's title tooltip. When the key is `contextButtonTitle`, it
* refers to [lang.contextButtonTitle](#lang.contextButtonTitle)
* that defaults to "Chart context menu".
*
* @since 6.1.4
*/
titleKey: 'contextButtonTitle',
/**
* This option is deprecated, use
* [titleKey](#exporting.buttons.contextButton.titleKey) instead.
*
* @deprecated
* @type {string}
* @apioption exporting.buttons.contextButton._titleKey
*/
/**
* A collection of strings pointing to config options for the menu
* items. The config options are defined in the
* `menuItemDefinitions` option.
*
* By default, there is the "Print" menu item plus one menu item
* for each of the available export types.
*
* Defaults to
* <pre>
* [
* 'printChart',
* 'separator',
* 'downloadPNG',
* 'downloadJPEG',
* 'downloadPDF',
* 'downloadSVG'
* ]
* </pre>
*
* @type {Array<String>|Array<Object>}
* @sample {highcharts} highcharts/exporting/menuitemdefinitions/
* Menu item definitions
* @sample {highstock} highcharts/exporting/menuitemdefinitions/
* Menu item definitions
* @sample {highmaps} highcharts/exporting/menuitemdefinitions/
* Menu item definitions
* @since 2.0
*/
menuItems: [
'printChart',
'separator',
'downloadPNG',
'downloadJPEG',
'downloadPDF',
'downloadSVG'
]
}
},
/**
* An object consisting of definitions for the menu items in the context
* menu. Each key value pair has a `key` that is referenced in the
* [menuItems](#exporting.buttons.contextButton.menuItems) setting,
* and a `value`, which is an object with the following properties:
*
* <dl>
*
* <dt>onclick</dt>
*
* <dd>The click handler for the menu item</dd>
*
* <dt>text</dt>
*
* <dd>The text for the menu item</dd>
*
* <dt>textKey</dt>
*
* <dd>If internationalization is required, the key to a language string
* </dd>
*
* </dl>
*
* @type {Object}
* @sample {highcharts} highcharts/exporting/menuitemdefinitions/
* Menu item definitions
* @sample {highstock} highcharts/exporting/menuitemdefinitions/
* Menu item definitions
* @sample {highmaps} highcharts/exporting/menuitemdefinitions/
* Menu item definitions
* @since 5.0.13
*/
menuItemDefinitions: {
/**
* @ignore
*/
printChart: {
textKey: 'printChart',
onclick: function () {
this.print();
}
},
/**
* @ignore
*/
separator: {
separator: true
},
/**
* @ignore
*/
downloadPNG: {
textKey: 'downloadPNG',
onclick: function () {
this.exportChart();
}
},
/**
* @ignore
*/
downloadJPEG: {
textKey: 'downloadJPEG',
onclick: function () {
this.exportChart({
type: 'image/jpeg'
});
}
},
/**
* @ignore
*/
downloadPDF: {
textKey: 'downloadPDF',
onclick: function () {
this.exportChart({
type: 'application/pdf'
});
}
},
/**
* @ignore
*/
downloadSVG: {
textKey: 'downloadSVG',
onclick: function () {
this.exportChart({
type: 'image/svg+xml'
});
}
}
}
};
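// Usage sketch (not part of the original source): a custom entry can be added by
// defining it in menuItemDefinitions and referencing its key from menuItems,
// e.g. (hypothetical names and handler):
//
//     exporting: {
//         menuItemDefinitions: {
//             showLabel: {
//                 text: 'Show label',
//                 onclick: function () { this.setTitle({ text: 'Exported' }); }
//             }
//         },
//         buttons: {
//             contextButton: {
//                 menuItems: ['printChart', 'separator', 'showLabel']
//             }
//         }
//     }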
/**
* Fires after a chart is printed through the context menu item or the
* `Chart.print` method. Requires the exporting module.
*
* @type {Function}
* @context Chart
* @sample highcharts/chart/events-beforeprint-afterprint/
* Rescale the chart to print
* @since 4.1.0
* @apioption chart.events.afterPrint
*/
/**
* Fires before a chart is printed through the context menu item or
* the `Chart.print` method. Requires the exporting module.
*
* @type {Function}
* @context Chart
* @sample highcharts/chart/events-beforeprint-afterprint/
* Rescale the chart to print
* @since 4.1.0
* @apioption chart.events.beforePrint
*/
// Add the H.post utility
H.post = function (url, data, formAttributes) {
// create the form
var form = createElement('form', merge({
method: 'post',
action: url,
enctype: 'multipart/form-data'
}, formAttributes), {
display: 'none'
}, doc.body);
// add the data
objectEach(data, function (val, name) {
createElement('input', {
type: 'hidden',
name: name,
value: val
}, null, form);
});
// submit
form.submit();
// clean up
discardElement(form);
};
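// Usage sketch (not part of the original source): exportChart below calls this
// utility roughly as follows (hypothetical values):
//
//     H.post('https://export.highcharts.com/', {
//         filename: 'chart',
//         type: 'image/png',
//         width: 0,
//         scale: 2,
//         svg: svgString
//     }, formAttributes);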
extend(Chart.prototype, /** @lends Highcharts.Chart.prototype */ {
/**
* Exporting module only. A collection of fixes on the produced SVG to
* account for expando properties, browser bugs, VML problems and other.
* Returns a cleaned SVG.
*
* @private
*/
sanitizeSVG: function (svg, options) {
// Move HTML into a foreignObject
if (options && options.exporting && options.exporting.allowHTML) {
var html = svg.match(/<\/svg>(.*?$)/);
if (html && html[1]) {
html = '<foreignObject x="0" y="0" ' +
'width="' + options.chart.width + '" ' +
'height="' + options.chart.height + '">' +
'<body xmlns="http://www.w3.org/1999/xhtml">' +
html[1] +
'</body>' +
'</foreignObject>';
svg = svg.replace('</svg>', html + '</svg>');
}
}
svg = svg
.replace(/zIndex="[^"]+"/g, '')
.replace(/symbolName="[^"]+"/g, '')
.replace(/jQuery[0-9]+="[^"]+"/g, '')
.replace(/url\(("|&quot;)(\S+)("|&quot;)\)/g, 'url($2)')
.replace(/url\([^#]+#/g, 'url(#')
.replace(
/<svg /,
'<svg xmlns:xlink="http://www.w3.org/1999/xlink" '
)
.replace(/ (|NS[0-9]+\:)href=/g, ' xlink:href=') // #3567
.replace(/\n/, ' ')
// Any HTML added to the container after the SVG (#894)
.replace(/<\/svg>.*?$/, '</svg>')
// Batik doesn't support rgba fills and strokes (#3095)
.replace(
/(fill|stroke)="rgba\(([ 0-9]+,[ 0-9]+,[ 0-9]+),([ 0-9\.]+)\)"/g, // eslint-disable-line max-len
'$1="rgb($2)" $1-opacity="$3"'
)
// Replace HTML entities, issue #347
.replace(/&nbsp;/g, '\u00A0') // no-break space
.replace(/&shy;/g, '\u00AD'); // soft hyphen
// Further sanitize for oldIE
if (this.ieSanitizeSVG) {
svg = this.ieSanitizeSVG(svg);
}
return svg;
},
/**
* Return the unfiltered innerHTML of the chart container. Used as hook for
* plugins. In styled mode, it also takes care of inlining CSS style rules.
*
* @see Chart#getSVG
*
* @returns {String}
* The unfiltered SVG of the chart.
*/
getChartHTML: function () {
return this.container.innerHTML;
},
/**
* Return an SVG representation of the chart.
*
* @param chartOptions {Options}
* Additional chart options for the generated SVG representation.
* For collections like `xAxis`, `yAxis` or `series`, the additional
* options are either merged into the original item of the same
* `id`, or to the first item if a common id is not found.
* @return {String}
* The SVG representation of the rendered chart.
* @sample highcharts/members/chart-getsvg/
* View the SVG from a button
*/
getSVG: function (chartOptions) {
var chart = this,
chartCopy,
sandbox,
svg,
seriesOptions,
sourceWidth,
sourceHeight,
cssWidth,
cssHeight,
// Copy the options and add extra options
options = merge(chart.options, chartOptions);
// create a sandbox where a new chart will be generated
sandbox = createElement('div', null, {
position: 'absolute',
top: '-9999em',
width: chart.chartWidth + 'px',
height: chart.chartHeight + 'px'
}, doc.body);
// get the source size
cssWidth = chart.renderTo.style.width;
cssHeight = chart.renderTo.style.height;
sourceWidth = options.exporting.sourceWidth ||
options.chart.width ||
(/px$/.test(cssWidth) && parseInt(cssWidth, 10)) ||
600;
sourceHeight = options.exporting.sourceHeight ||
options.chart.height ||
(/px$/.test(cssHeight) && parseInt(cssHeight, 10)) ||
400;
// override some options
extend(options.chart, {
animation: false,
renderTo: sandbox,
forExport: true,
renderer: 'SVGRenderer',
width: sourceWidth,
height: sourceHeight
});
options.exporting.enabled = false; // hide buttons in print
delete options.data; // #3004
// prepare for replicating the chart
options.series = [];
each(chart.series, function (serie) {
seriesOptions = merge(serie.userOptions, { // #4912
animation: false, // turn off animation
enableMouseTracking: false,
showCheckbox: false,
visible: serie.visible
});
// Used for the navigator series that has its own option set
if (!seriesOptions.isInternal) {
options.series.push(seriesOptions);
}
});
// Assign an internal key to ensure a one-to-one mapping (#5924)
each(chart.axes, function (axis) {
if (!axis.userOptions.internalKey) { // #6444
axis.userOptions.internalKey = H.uniqueKey();
}
});
// generate the chart copy
chartCopy = new H.Chart(options, chart.callback);
// Axis options and series options (#2022, #3900, #5982)
if (chartOptions) {
each(['xAxis', 'yAxis', 'series'], function (coll) {
var collOptions = {};
if (chartOptions[coll]) {
collOptions[coll] = chartOptions[coll];
chartCopy.update(collOptions);
}
});
}
// Reflect axis extremes in the export (#5924)
each(chart.axes, function (axis) {
var axisCopy = H.find(chartCopy.axes, function (copy) {
return copy.options.internalKey ===
axis.userOptions.internalKey;
}),
extremes = axis.getExtremes(),
userMin = extremes.userMin,
userMax = extremes.userMax;
if (
axisCopy &&
(
(userMin !== undefined && userMin !== axisCopy.min) ||
(userMax !== undefined && userMax !== axisCopy.max)
)
) {
axisCopy.setExtremes(userMin, userMax, true, false);
}
});
// Get the SVG from the container's innerHTML
svg = chartCopy.getChartHTML();
fireEvent(this, 'getSVG', { chartCopy: chartCopy });
svg = chart.sanitizeSVG(svg, options);
// free up memory
options = null;
chartCopy.destroy();
discardElement(sandbox);
return svg;
},
getSVGForExport: function (options, chartOptions) {
var chartExportingOptions = this.options.exporting;
return this.getSVG(merge(
{ chart: { borderRadius: 0 } },
chartExportingOptions.chartOptions,
chartOptions,
{
exporting: {
sourceWidth: (
(options && options.sourceWidth) ||
chartExportingOptions.sourceWidth
),
sourceHeight: (
(options && options.sourceHeight) ||
chartExportingOptions.sourceHeight
)
}
}
));
},
/**
* Exporting module required. Submit an SVG version of the chart to a server
* along with some parameters for conversion.
* @param {Object} exportingOptions
* Exporting options in addition to those defined in {@link
* https://api.highcharts.com/highcharts/exporting|exporting}.
* @param {String} exportingOptions.filename
* The file name for the export without extension.
* @param {String} exportingOptions.url
* The URL for the server module to do the conversion.
* @param {Number} exportingOptions.width
* The width of the PNG or JPG image generated on the server.
* @param {String} exportingOptions.type
* The MIME type of the converted image.
* @param {Number} exportingOptions.sourceWidth
* The pixel width of the source (in-page) chart.
* @param {Number} exportingOptions.sourceHeight
* The pixel height of the source (in-page) chart.
* @param {Options} chartOptions
* Additional chart options for the exported chart. For example a
* different background color can be added here, or `dataLabels`
* for export only.
*
* @sample highcharts/members/chart-exportchart/
* Export with no options
* @sample highcharts/members/chart-exportchart-filename/
* PDF type and custom filename
* @sample highcharts/members/chart-exportchart-custom-background/
* Different chart background in export
* @sample stock/members/chart-exportchart/
* Export with Highstock
*/
exportChart: function (exportingOptions, chartOptions) {
var svg = this.getSVGForExport(exportingOptions, chartOptions);
// merge the options
exportingOptions = merge(this.options.exporting, exportingOptions);
// do the post
H.post(exportingOptions.url, {
filename: exportingOptions.filename || 'chart',
type: exportingOptions.type,
// IE8 fails to post undefined correctly, so use 0
width: exportingOptions.width || 0,
scale: exportingOptions.scale,
svg: svg
}, exportingOptions.formAttributes);
},
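// Usage sketch (not part of the original source): a typical call only overrides
// the MIME type and file name, e.g. chart.exportChart({ type: 'application/pdf',
// filename: 'my-report' }); everything else falls back to chart.options.exporting.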
/**
* Exporting module required. Clears away other elements in the page and
* prints the chart as it is displayed. By default, when the exporting
* module is enabled, a context button with a drop down menu in the upper
* right corner accesses this function.
*
* @sample highcharts/members/chart-print/
* Print from a HTML button
*/
print: function () {
var chart = this,
container = chart.container,
origDisplay = [],
origParent = container.parentNode,
body = doc.body,
childNodes = body.childNodes,
printMaxWidth = chart.options.exporting.printMaxWidth,
resetParams,
handleMaxWidth;
if (chart.isPrinting) { // block the button while in printing mode
return;
}
chart.isPrinting = true;
chart.pointer.reset(null, 0);
fireEvent(chart, 'beforePrint');
// Handle printMaxWidth
handleMaxWidth = printMaxWidth && chart.chartWidth > printMaxWidth;
if (handleMaxWidth) {
resetParams = [chart.options.chart.width, undefined, false];
chart.setSize(printMaxWidth, undefined, false);
}
// hide all body content
each(childNodes, function (node, i) {
if (node.nodeType === 1) {
origDisplay[i] = node.style.display;
node.style.display = 'none';
}
});
// pull out the chart
body.appendChild(container);
// Give the browser time to draw WebGL content, an issue that randomly
// appears (at least) in Chrome ~67 on the Mac (#8708).
setTimeout(function () {
win.focus(); // #1510
win.print();
// allow the browser to prepare before reverting
setTimeout(function () {
// put the chart back in
origParent.appendChild(container);
// restore all body content
each(childNodes, function (node, i) {
if (node.nodeType === 1) {
node.style.display = origDisplay[i];
}
});
chart.isPrinting = false;
// Reset printMaxWidth
if (handleMaxWidth) {
chart.setSize.apply(chart, resetParams);
}
fireEvent(chart, 'afterPrint');
}, 1000);
}, 1);
},
/**
* Display a popup menu for choosing the export type.
*
* @private
*
* @param {String} className An identifier for the menu
* @param {Array} items A collection with text and onclicks for the items
* @param {Number} x The x position of the opener button
* @param {Number} y The y position of the opener button
* @param {Number} width The width of the opener button
* @param {Number} height The height of the opener button
*/
contextMenu: function (className, items, x, y, width, height, button) {
var chart = this,
navOptions = chart.options.navigation,
chartWidth = chart.chartWidth,
chartHeight = chart.chartHeight,
cacheName = 'cache-' + className,
menu = chart[cacheName],
menuPadding = Math.max(width, height), // for mouse leave detection
innerMenu,
menuStyle;
// create the menu only the first time
if (!menu) {
// create a HTML element above the SVG
chart.exportContextMenu = chart[cacheName] = menu =
createElement('div', {
className: className
}, {
position: 'absolute',
zIndex: 1000,
padding: menuPadding + 'px',
pointerEvents: 'auto'
}, chart.fixedDiv || chart.container);
innerMenu = createElement(
'div',
{ className: 'highcharts-menu' },
null,
menu
);
// Presentational CSS
css(innerMenu, extend({
MozBoxShadow: '3px 3px 10px #888',
WebkitBoxShadow: '3px 3px 10px #888',
boxShadow: '3px 3px 10px #888'
}, navOptions.menuStyle));
// hide on mouse out
menu.hideMenu = function () {
css(menu, { display: 'none' });
if (button) {
button.setState(0);
}
chart.openMenu = false;
H.clearTimeout(menu.hideTimer);
};
// Hide the menu some time after mouse leave (#1357)
chart.exportEvents.push(
addEvent(menu, 'mouseleave', function () {
menu.hideTimer = setTimeout(menu.hideMenu, 500);
}),
addEvent(menu, 'mouseenter', function () {
H.clearTimeout(menu.hideTimer);
}),
// Hide it on clicking or touching outside the menu (#2258,
// #2335, #2407)
addEvent(doc, 'mouseup', function (e) {
if (!chart.pointer.inClass(e.target, className)) {
menu.hideMenu();
}
}),
addEvent(menu, 'click', function () {
if (chart.openMenu) {
menu.hideMenu();
}
})
);
// create the items
each(items, function (item) {
if (typeof item === 'string') {
item = chart.options.exporting.menuItemDefinitions[item];
}
if (H.isObject(item, true)) {
var element;
if (item.separator) {
element = createElement('hr', null, null, innerMenu);
} else {
element = createElement('div', {
className: 'highcharts-menu-item',
onclick: function (e) {
if (e) { // IE7
e.stopPropagation();
}
menu.hideMenu();
if (item.onclick) {
item.onclick.apply(chart, arguments);
}
},
innerHTML: (
item.text ||
chart.options.lang[item.textKey]
)
}, null, innerMenu);
element.onmouseover = function () {
css(this, navOptions.menuItemHoverStyle);
};
element.onmouseout = function () {
css(this, navOptions.menuItemStyle);
};
css(element, extend({
cursor: 'pointer'
}, navOptions.menuItemStyle));
}
// Keep references to menu divs to be able to destroy them
chart.exportDivElements.push(element);
}
});
// Keep references to menu and innerMenu div to be able to destroy
// them
chart.exportDivElements.push(innerMenu, menu);
chart.exportMenuWidth = menu.offsetWidth;
chart.exportMenuHeight = menu.offsetHeight;
}
menuStyle = { display: 'block' };
// if outside right, right align it
if (x + chart.exportMenuWidth > chartWidth) {
menuStyle.right = (chartWidth - x - width - menuPadding) + 'px';
} else {
menuStyle.left = (x - menuPadding) + 'px';
}
// if outside bottom, bottom align it
if (
y + height + chart.exportMenuHeight > chartHeight &&
button.alignOptions.verticalAlign !== 'top'
) {
menuStyle.bottom = (chartHeight - y - menuPadding) + 'px';
} else {
menuStyle.top = (y + height - menuPadding) + 'px';
}
css(menu, menuStyle);
chart.openMenu = true;
},
/**
* Add the export button to the chart, with options.
*
* @private
*/
addButton: function (options) {
var chart = this,
renderer = chart.renderer,
btnOptions = merge(chart.options.navigation.buttonOptions, options),
onclick = btnOptions.onclick,
menuItems = btnOptions.menuItems,
symbol,
button,
symbolSize = btnOptions.symbolSize || 12;
if (!chart.btnCount) {
chart.btnCount = 0;
}
// Keeps references to the button elements
if (!chart.exportDivElements) {
chart.exportDivElements = [];
chart.exportSVGElements = [];
}
if (btnOptions.enabled === false) {
return;
}
var attr = btnOptions.theme,
states = attr.states,
hover = states && states.hover,
select = states && states.select,
callback;
delete attr.states;
if (onclick) {
callback = function (e) {
if (e) {
e.stopPropagation();
}
onclick.call(chart, e);
};
} else if (menuItems) {
callback = function (e) {
// consistent with onclick call (#3495)
if (e) {
e.stopPropagation();
}
chart.contextMenu(
button.menuClassName,
menuItems,
button.translateX,
button.translateY,
button.width,
button.height,
button
);
button.setState(2);
};
}
if (btnOptions.text && btnOptions.symbol) {
attr.paddingLeft = pick(attr.paddingLeft, 25);
} else if (!btnOptions.text) {
extend(attr, {
width: btnOptions.width,
height: btnOptions.height,
padding: 0
});
}
button = renderer
.button(btnOptions.text, 0, 0, callback, attr, hover, select)
.addClass(options.className)
.attr({
'stroke-linecap': 'round',
title: pick(
chart.options.lang[
btnOptions._titleKey || btnOptions.titleKey
],
''
)
});
button.menuClassName = (
options.menuClassName ||
'highcharts-menu-' + chart.btnCount++
);
if (btnOptions.symbol) {
symbol = renderer.symbol(
btnOptions.symbol,
btnOptions.symbolX - (symbolSize / 2),
btnOptions.symbolY - (symbolSize / 2),
symbolSize,
symbolSize,
// If symbol is an image, scale it (#7957)
{
width: symbolSize,
height: symbolSize
}
)
.addClass('highcharts-button-symbol')
.attr({
zIndex: 1
}).add(button);
symbol.attr({
stroke: btnOptions.symbolStroke,
fill: btnOptions.symbolFill,
'stroke-width': btnOptions.symbolStrokeWidth || 1
});
}
button.add(chart.exportingGroup)
.align(extend(btnOptions, {
width: button.width,
x: pick(btnOptions.x, chart.buttonOffset) // #1654
}), true, 'spacingBox');
chart.buttonOffset += (
(button.width + btnOptions.buttonSpacing) *
(btnOptions.align === 'right' ? -1 : 1)
);
chart.exportSVGElements.push(button, symbol);
},
/**
* Destroy the export buttons.
*
* @private
*/
destroyExport: function (e) {
var chart = e ? e.target : this,
exportSVGElements = chart.exportSVGElements,
exportDivElements = chart.exportDivElements,
exportEvents = chart.exportEvents,
cacheName;
// Destroy the extra buttons added
if (exportSVGElements) {
each(exportSVGElements, function (elem, i) {
// Destroy and null the svg elements
if (elem) { // #1822
elem.onclick = elem.ontouchstart = null;
cacheName = 'cache-' + elem.menuClassName;
if (chart[cacheName]) {
delete chart[cacheName];
}
chart.exportSVGElements[i] = elem.destroy();
}
});
exportSVGElements.length = 0;
}
// Destroy the exporting group
if (chart.exportingGroup) {
chart.exportingGroup.destroy();
delete chart.exportingGroup;
}
// Destroy the divs for the menu
if (exportDivElements) {
each(exportDivElements, function (elem, i) {
// Remove the event handler
H.clearTimeout(elem.hideTimer); // #5427
removeEvent(elem, 'mouseleave');
// Remove inline events
chart.exportDivElements[i] =
elem.onmouseout =
elem.onmouseover =
elem.ontouchstart =
elem.onclick = null;
// Destroy the div by moving to garbage bin
discardElement(elem);
});
exportDivElements.length = 0;
}
if (exportEvents) {
each(exportEvents, function (unbind) {
unbind();
});
exportEvents.length = 0;
}
}
});
symbols.menu = function (x, y, width, height) {
var arr = [
'M', x, y + 2.5,
'L', x + width, y + 2.5,
'M', x, y + height / 2 + 0.5,
'L', x + width, y + height / 2 + 0.5,
'M', x, y + height - 1.5,
'L', x + width, y + height - 1.5
];
return arr;
};
// Add the buttons on chart load
Chart.prototype.renderExporting = function () {
var chart = this,
exportingOptions = chart.options.exporting,
buttons = exportingOptions.buttons,
isDirty = chart.isDirtyExporting || !chart.exportSVGElements;
chart.buttonOffset = 0;
if (chart.isDirtyExporting) {
chart.destroyExport();
}
if (isDirty && exportingOptions.enabled !== false) {
chart.exportEvents = [];
chart.exportingGroup = chart.exportingGroup ||
chart.renderer.g('exporting-group').attr({
zIndex: 3 // #4955, #8392
}).add();
objectEach(buttons, function (button) {
chart.addButton(button);
});
chart.isDirtyExporting = false;
}
// Destroy the export elements at chart destroy
addEvent(chart, 'destroy', chart.destroyExport);
};
// Add update methods to handle chart.update and chart.exporting.update and
// chart.navigation.update. These must be added to the chart instance rather
// than the Chart prototype in order to use the chart instance inside the update
// function.
addEvent(Chart, 'init', function () {
var chart = this;
function update(prop, options, redraw) {
chart.isDirtyExporting = true;
merge(true, chart.options[prop], options);
if (pick(redraw, true)) {
chart.redraw();
}
}
each(['exporting', 'navigation'], function (prop) {
chart[prop] = {
update: function (options, redraw) {
update(prop, options, redraw);
}
};
});
});
Chart.prototype.callbacks.push(function (chart) {
chart.renderExporting();
addEvent(chart, 'redraw', chart.renderExporting);
// Uncomment this to see a button directly below the chart, for quick
// testing of export
/*
var button, viewImage, viewSource;
if (!chart.renderer.forExport) {
viewImage = function () {
var div = doc.createElement('div');
div.innerHTML = chart.getSVGForExport();
chart.renderTo.parentNode.appendChild(div);
};
viewSource = function () {
var pre = doc.createElement('pre');
pre.innerHTML = chart.getSVGForExport()
.replace(/</g, '\n&lt;')
.replace(/>/g, '&gt;');
chart.renderTo.parentNode.appendChild(pre);
};
viewImage();
// View SVG Image
button = doc.createElement('button');
button.innerHTML = 'View SVG Image';
chart.renderTo.parentNode.appendChild(button);
button.onclick = viewImage;
// View SVG Source
button = doc.createElement('button');
button.innerHTML = 'View SVG Source';
chart.renderTo.parentNode.appendChild(button);
button.onclick = viewSource;
}
//*/
});
}(Highcharts));
return (function () {
}());
}));
/os-net-config-16.0.0.tar.gz/os-net-config-16.0.0/os_net_config/sriov_config.py
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# The sriov_config.py module does the SR-IOV PF configuration.
# It'll be invoked by the sriov_config systemd service for the persistence of
# the SR-IOV configuration across reboots. os-net-config:utils also invokes
# it for the initial (first-time) configuration.
# An entry point os-net-config-sriov is added for invocation of this module.
import argparse
import os
import pyudev
import queue
import re
import sys
import time
from json import loads
from os_net_config import common
from os_net_config import sriov_bind_config
from oslo_concurrency import processutils
logger = common.configure_logger()
_UDEV_RULE_FILE = '/etc/udev/rules.d/80-persistent-os-net-config.rules'
_UDEV_LEGACY_RULE_FILE = '/etc/udev/rules.d/70-os-net-config-sriov.rules'
_IFUP_LOCAL_FILE = '/sbin/ifup-local'
_RESET_SRIOV_RULES_FILE = '/etc/udev/rules.d/70-tripleo-reset-sriov.rules'
_ALLOCATE_VFS_FILE = '/etc/sysconfig/allocate_vfs'
_MLNX_DRIVER = "mlx5_core"
MLNX_UNBIND_FILE_PATH = "/sys/bus/pci/drivers/mlx5_core/unbind"
MLNX5_VDPA_KMODS = [
"vdpa",
"vhost_vdpa",
"mlx5_vdpa",
]
MAX_RETRIES = 10
PF_FUNC_RE = re.compile(r"\.(\d+)$", 0)
VF_PCI_RE = re.compile(r'/[\d]{4}\:(\d+):(\d+)\.(\d+)/net/[^\/]+$')
# In order to keep VF representor names consistent, especially after the upgrade
# process, we need a udev rule to handle that.
# The udev rule will rename the VF representor as "<sriov_pf_name>_<vf_num>"
_REP_LINK_NAME_FILE = "/etc/udev/rep-link-name.sh"
_REP_LINK_NAME_DATA = '''#!/bin/bash
# This file is autogenerated by os-net-config
set -x
PORT="$1"
echo "NUMBER=${PORT##pf*vf}"
'''
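# A sketch of what the helper script prints (derived from the shell expansion
# above): for a phys_port_name such as "pf0vf3", "${PORT##pf*vf}" strips the
# longest prefix matching "pf*vf", so the script emits "NUMBER=3" and the udev
# rule created below names the representor "<sriov_pf_name>_3".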
# Create a queue for passing the udev network events
vf_queue = queue.Queue()
# Global variable to store the link between PCI/PF
# for udev rule creation when dealing with mlnx vdpa
vf_to_pf = {}
class SRIOVNumvfsException(ValueError):
pass
def udev_event_handler(action, device):
event = {"action": action, "device": device.sys_path}
logger.info(
f"Received udev event {event['action']} for {event['device']}"
)
vf_queue.put(event)
def _norm_path(dev, suffix):
return os.path.normpath(os.path.join(dev, suffix))
def _get_pf_path(device):
pf_path = _norm_path(device, "../../physfn/net")
if not os.path.isdir(pf_path):
pf_path = _norm_path(device, "physfn/net")
if not os.path.isdir(pf_path):
pf_path = None
return pf_path
def _driver_unbind(dev):
vf_pci_path = f"/sys/bus/pci/devices/{dev}/driver"
if os.path.exists(vf_pci_path):
logger.info(f"{dev}: Unbinding driver")
with open(MLNX_UNBIND_FILE_PATH, 'w') as f:
f.write(dev)
else:
logger.info(f"{dev}: No driver to unbind")
def _wait_for_vf_creation(pf_name, numvfs):
vf_count = 0
pf_config = common.get_sriov_map(pf_name)
vdpa = False
if len(pf_config):
vdpa = pf_config[0].get('vdpa', False)
while vf_count < numvfs:
try:
# wait for 5 seconds after every udev event
event = vf_queue.get(True, 5)
vf_name = os.path.basename(event["device"])
pf_path = _get_pf_path(event["device"])
logger.debug(f"{event['device']}: Got udev event: {event}")
if pf_path:
pf_nic = os.listdir(pf_path)
# NOTE(dvd): For vDPA devices, the VF event we're interested
# in contains all the VFs. We can also use this to build a dict
# to correlate the VF pci address to the PF when creating the
# vdpa representor udev rule
#
# Data structure sample for vDPA:
# pf_path:
# /sys/devices/pci0000:00/0000:00:02.2/0000:06:01.2/physfn/net
# pf_nic: ['enp6s0f1np1_0', 'enp6s0f1np1_1', 'enp6s0f1np1']
# pf_name: enp6s0f1np1
if vf_name not in vf_to_pf and pf_name in pf_nic:
vf_to_pf[vf_name] = {
'device': event['device'],
'pf': pf_name
}
logger.info(
f"{pf_name}: VF {vf_name} created"
)
vf_count += 1
elif vf_name in vf_to_pf:
logger.debug(
f"{pf_name}: VF {vf_name} was already created"
)
elif vdpa:
logger.warning(f"{pf_name}: This PF is not in {pf_path}")
else:
logger.warning(
f"{pf_name}: Unable to parse event {event['device']}"
)
elif not vdpa:
logger.warning(f"{event['device']}: Unable to find PF")
except queue.Empty:
logger.info(f"{pf_name}: Timeout in the creation of VFs")
return
logger.info(f"{pf_name}: Required VFs are created")
def get_numvfs(ifname):
"""Getting sriov_numvfs for PF
Wrapper that will get the sriov_numvfs file for a PF.
:param ifname: interface name (ie: p1p1)
:returns: int -- the number of current VFs on ifname
:raises: SRIOVNumvfsException
"""
sriov_numvfs_path = common.get_dev_path(ifname, "sriov_numvfs")
logger.debug(f"{ifname}: Getting numvfs for interface")
try:
with open(sriov_numvfs_path, 'r') as f:
curr_numvfs = int(f.read())
except IOError as exc:
msg = f"{ifname}: Unable to read numvfs: {exc}"
raise SRIOVNumvfsException(msg)
logger.debug(f"{ifname}: Interface has {curr_numvfs} configured")
return curr_numvfs
def set_numvfs(ifname, numvfs):
"""Setting sriov_numvfs for PF
Wrapper that will set the sriov_numvfs file for a PF.
After numvfs has been set for an interface, _wait_for_vf_creation will be
called to monitor the creation.
Some restrictions:
- if current number of VF is already defined, we can't change numvfs
- if sriov_numvfs doesn't exist for an interface, we can't create it
:param ifname: interface name (ie: p1p1)
:param numvfs: an int that represents the number of VFs to be created.
:returns: int -- the number of current VFs on ifname
:raises: SRIOVNumvfsException
"""
curr_numvfs = get_numvfs(ifname)
logger.debug(f"{ifname}: Interface has {curr_numvfs} configured, setting "
f"to {numvfs}")
if not isinstance(numvfs, int):
msg = (f"{ifname}: Unable to configure pf with numvfs: {numvfs}\n"
f"numvfs must be an integer")
raise SRIOVNumvfsException(msg)
if numvfs != curr_numvfs:
if curr_numvfs != 0:
logger.warning(f"{ifname}: Numvfs already configured to "
f"{curr_numvfs}")
return curr_numvfs
sriov_numvfs_path = common.get_dev_path(ifname, "sriov_numvfs")
try:
logger.debug(f"Setting {sriov_numvfs_path} to {numvfs}")
with open(sriov_numvfs_path, "w") as f:
f.write("%d" % numvfs)
except IOError as exc:
msg = (f"{ifname} Unable to configure pf with numvfs: {numvfs}\n"
f"{exc}")
raise SRIOVNumvfsException(msg)
_wait_for_vf_creation(ifname, numvfs)
curr_numvfs = get_numvfs(ifname)
if curr_numvfs != numvfs:
msg = (f"{ifname}: Unable to configure pf with numvfs: {numvfs}\n"
"sriov_numvfs file is not set to the targeted number of "
"vfs")
raise SRIOVNumvfsException(msg)
return curr_numvfs
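# Usage sketch (hypothetical interface name): set_numvfs('p1p1', 10) writes 10 to
# the PF's sriov_numvfs sysfs file, waits for the udev events of the new VFs via
# _wait_for_vf_creation() and returns the resulting count. Calling it again with
# a different number while VFs are already configured only logs a warning and
# keeps the current value.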
def restart_ovs_and_pfs_netdevs():
sriov_map = common.get_sriov_map()
processutils.execute('/usr/bin/systemctl', 'restart', 'openvswitch')
for item in sriov_map:
if item['device_type'] == 'pf':
if_down_interface(item['name'])
if_up_interface(item['name'])
def cleanup_puppet_config():
file_contents = ""
if os.path.exists(_RESET_SRIOV_RULES_FILE):
os.remove(_RESET_SRIOV_RULES_FILE)
if os.path.exists(_ALLOCATE_VFS_FILE):
os.remove(_ALLOCATE_VFS_FILE)
if os.path.exists(_IFUP_LOCAL_FILE):
# Remove the invocation of allocate_vfs script generated by puppet
# After the removal of allocate_vfs, if the ifup-local file has just
# "#!/bin/bash" left, then remove the file as well.
with open(_IFUP_LOCAL_FILE) as oldfile:
for line in oldfile:
if "/etc/sysconfig/allocate_vfs" not in line:
file_contents = file_contents + line
if file_contents.strip() == "#!/bin/bash":
os.remove(_IFUP_LOCAL_FILE)
else:
with open(_IFUP_LOCAL_FILE, 'w') as newfile:
newfile.write(file_contents)
def udev_monitor_setup():
# Create a context for pyudev and observe udev events for network
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by('net')
observer = pyudev.MonitorObserver(monitor, udev_event_handler)
return observer
def udev_monitor_start(observer):
observer.start()
def udev_monitor_stop(observer):
observer.stop()
def is_partitioned_pf(dev_name: str) -> bool:
"""Check if any nic-partition(VF) is already used
Given a PF device, returns True if any VFs of this
device are in-use.
"""
sriov_map = common.get_sriov_map()
for config in sriov_map:
devtype = config.get('device_type', None)
if devtype == 'vf':
name = config.get('device', {}).get('name')
vf_name = config.get('name')
if dev_name == name:
logger.warning(f"{name} has VF({vf_name}) used by host")
return True
return False
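# Sketch (hypothetical values): with a sriov_map entry such as
#   {'device_type': 'vf', 'device': {'name': 'p1p1'}, 'name': 'p1p1_0'}
# is_partitioned_pf('p1p1') returns True, and configure_sriov_pf() below skips
# the legacy-mode udev rule for that PF.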
def configure_sriov_pf(execution_from_cli=False, restart_openvswitch=False):
observer = udev_monitor_setup()
udev_monitor_start(observer)
sriov_map = common.get_sriov_map()
dpdk_vfs_pcis_list = []
trigger_udev_rule = False
# Cleanup the previous config by puppet-tripleo
cleanup_puppet_config()
if any(item.get('vdpa') for item in sriov_map):
common.load_kmods(MLNX5_VDPA_KMODS)
vdpa_devices = get_vdpa_vhost_devices()
for item in sriov_map:
if item['device_type'] == 'pf':
_pf_interface_up(item)
if item.get('link_mode') == "legacy":
# Add a udev rule to configure the VF's when PF's are
# released by a guest
if not is_partitioned_pf(item['name']):
add_udev_rule_for_legacy_sriov_pf(item['name'],
item['numvfs'])
# When configuring vdpa, we need to configure switchdev before
# we create the VFs
is_mlnx = common.is_mellanox_interface(item['name'])
vdpa = item.get('vdpa')
# Configure switchdev mode when vdpa
# It has to happen before we set_numvfs
if vdpa and is_mlnx:
configure_switchdev(item['name'])
set_numvfs(item['name'], item['numvfs'])
# Configure switchdev, unbind driver and configure vdpa
if item.get('link_mode') == "switchdev" and is_mlnx:
logger.info(f"{item['name']}: Mellanox card")
vf_pcis_list = get_vf_pcis_list(item['name'])
for vf_pci in vf_pcis_list:
if not vdpa:
# For DPDK, we need to unbind the driver
_driver_unbind(vf_pci)
else:
if vf_pci not in vdpa_devices:
configure_vdpa_vhost_device(vf_pci)
else:
logger.info(
f"{item['name']}: vDPA device already created "
f"for {vf_pci}"
)
if vdpa:
common.restorecon('/dev/vhost-*')
logger.info(f"{item['name']}: Adding udev rules")
# Adding a udev rule to make vf-representors unmanaged by
# NetworkManager
add_udev_rule_to_unmanage_vf_representors_by_nm()
# Adding a udev rule to save the sriov_pf name
trigger_udev_rule = add_udev_rule_for_sriov_pf(item['name'])\
or trigger_udev_rule
trigger_udev_rule = add_udev_rule_for_vf_representors(
item['name']) or trigger_udev_rule
if not vdpa:
# This is used for the sriov_bind_config
dpdk_vfs_pcis_list += vf_pcis_list
# Configure flow steering mode, default to smfs
configure_flow_steering(item['name'],
item.get('steering_mode', 'smfs'))
# Configure switchdev mode
configure_switchdev(item['name'])
# Adding a udev rule to rename vf-representors
else:
trigger_udev_rule = add_udev_rule_for_vdpa_representors(
item['name']) or trigger_udev_rule
# Moving the sriov-PFs to switchdev mode will put the netdev
# interfaces in down state.
# In case we are running during initial deployment,
# bring the interfaces up.
# In case we are running as part of the sriov_config service
# after reboot, net config scripts, which run after
# sriov_config service will bring the interfaces up.
if execution_from_cli:
if_up_interface(item['name'])
if dpdk_vfs_pcis_list and not vdpa:
sriov_bind_pcis_map = {_MLNX_DRIVER: dpdk_vfs_pcis_list}
if not execution_from_cli:
sriov_bind_config.update_sriov_bind_pcis_map(sriov_bind_pcis_map)
else:
sriov_bind_config.configure_sriov_bind_service()
sriov_bind_config.bind_vfs(sriov_bind_pcis_map)
# Trigger udev rules if there is new rules written
if trigger_udev_rule:
trigger_udev_rules()
udev_monitor_stop(observer)
if restart_openvswitch:
restart_ovs_and_pfs_netdevs()
def _wait_for_uplink_rep_creation(pf_name):
uplink_rep_phys_switch_id_path = f"/sys/class/net/{pf_name}/phys_switch_id"
for i in range(MAX_RETRIES):
if common.get_file_data(uplink_rep_phys_switch_id_path):
logger.info(f"{pf_name} Uplink representor ready")
break
time.sleep(1)
else:
raise RuntimeError(f"{pf_name}: Timeout waiting uplink representor")
def create_rep_link_name_script():
with open(_REP_LINK_NAME_FILE, "w") as f:
f.write(_REP_LINK_NAME_DATA)
# Make the _REP_LINK_NAME_FILE executable
os.chmod(_REP_LINK_NAME_FILE, 0o755)
def add_udev_rule_for_sriov_pf(pf_name):
logger.info(f"{pf_name}: adding udev rules for sriov")
pf_pci = get_pf_pci(pf_name)
udev_data_line = 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '\
f'KERNELS=="{pf_pci}", NAME="{pf_name}"'
return add_udev_rule(udev_data_line, _UDEV_RULE_FILE)
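# Sketch of the resulting rule for a hypothetical PF "p1p1" on PCI address
# 0000:06:00.0 (filled into the template above):
#   SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", KERNELS=="0000:06:00.0", NAME="p1p1"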
def add_udev_rule_for_legacy_sriov_pf(pf_name, numvfs):
logger.info(f"{pf_name}: adding udev rules for legacy sriov: {numvfs}")
udev_line = f'KERNEL=="{pf_name}", '\
f'RUN+="/bin/os-net-config-sriov -n %k:{numvfs}"'
pattern = f'KERNEL=="{pf_name}", RUN+="/bin/os-net-config-sriov -n'
return add_udev_rule(udev_line, _UDEV_LEGACY_RULE_FILE, pattern)
def add_udev_rule_for_vf_representors(pf_name):
logger.info(f"{pf_name}: adding udev rules for vf representators")
phys_switch_id_path = common.get_dev_path(pf_name,
"_phys_switch_id")
phys_switch_id = common.get_file_data(phys_switch_id_path).strip()
pf_pci = get_pf_pci(pf_name)
pf_fun_num_match = PF_FUNC_RE.search(pf_pci)
if not pf_fun_num_match:
logger.error(f"{pf_name}: Failed to get function number "
"and so failed to create a udev rule for renaming "
"its vf-represent")
return
pf_fun_num = pf_fun_num_match.group(1)
udev_data_line = 'SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}'\
'=="%s", ATTR{phys_port_name}=="pf%svf*", '\
'IMPORT{program}="%s $attr{phys_port_name}", '\
'NAME="%s_$env{NUMBER}"' % (phys_switch_id,
pf_fun_num,
_REP_LINK_NAME_FILE,
pf_name)
create_rep_link_name_script()
return add_udev_rule(udev_data_line, _UDEV_RULE_FILE)
def add_udev_rule_for_vdpa_representors(pf_name):
logger.info(f"{pf_name}: adding udev rules for vdpa representators")
udev_lines = ""
for vf, att in vf_to_pf.items():
mac = common.interface_mac(vf)
vadd = VF_PCI_RE.search(att.get('device'))
if not vadd:
logger.error(
f"{att.get('device')}/{vf}: Failed to get pf/vf numbers "
"and so failed to create a udev rule for renaming vdpa dev"
)
continue
vdpa_rep = f"vdpa{vadd.group(1)}p{vadd.group(2)}vf{vadd.group(3)}"
logger.info(f"{vdpa_rep}: Adding udev representor rule.")
udev_lines += (
'SUBSYSTEM=="net", ACTION=="add", '
f'ATTR{{address}}=="{mac}", NAME="{vdpa_rep}"\n'
)
return add_udev_rule(udev_lines, _UDEV_RULE_FILE)
def add_udev_rule_to_unmanage_vf_representors_by_nm():
logger.info("adding udev rules to unmanage vf representators")
udev_data_line = 'SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}'\
'!="", ATTR{phys_port_name}=="pf*vf*", '\
'ENV{NM_UNMANAGED}="1"'
return add_udev_rule(udev_data_line, _UDEV_RULE_FILE)
def add_udev_rule(udev_data, udev_file, pattern=None):
logger.debug(f"adding udev rule to {udev_file}: {udev_data}")
trigger_udev_rule = False
udev_data = udev_data.strip()
if not pattern:
pattern = udev_data
if not os.path.exists(udev_file):
with open(udev_file, "w") as f:
data = "# This file is autogenerated by os-net-config\n"\
f"{udev_data}\n"
f.write(data)
else:
file_data = common.get_file_data(udev_file)
udev_lines = file_data.splitlines()
if pattern in file_data:
if udev_data in udev_lines:
return trigger_udev_rule
with open(udev_file, "w") as f:
for line in udev_lines:
if pattern in line:
f.write(udev_data + "\n")
else:
f.write(line + "\n")
else:
with open(udev_file, "a") as f:
f.write(udev_data + "\n")
reload_udev_rules()
trigger_udev_rule = True
return trigger_udev_rule
def reload_udev_rules():
try:
processutils.execute('/usr/sbin/udevadm', 'control', '--reload-rules')
logger.info("udev rules reloaded successfully")
except processutils.ProcessExecutionError as exc:
logger.error(f"Failed to reload udev rules: {exc}")
raise
def trigger_udev_rules():
try:
processutils.execute('/usr/sbin/udevadm', 'trigger', '--action=add',
'--attr-match=subsystem=net')
logger.info("udev rules triggered successfully")
except processutils.ProcessExecutionError as exc:
logger.error(f"Failed to trigger udev rules: {exc}")
raise
def configure_switchdev(pf_name):
pf_pci = get_pf_pci(pf_name)
pf_device_id = get_pf_device_id(pf_name)
if pf_device_id == "0x1013" or pf_device_id == "0x1015":
try:
processutils.execute('/usr/sbin/devlink', 'dev', 'eswitch', 'set',
f'pci/{pf_pci}', 'inline-mode', 'transport')
except processutils.ProcessExecutionError as exc:
logger.error(f"{pf_name}: Failed to set inline-mode to transport "
f"for {pf_pci}: {exc}")
raise
try:
processutils.execute('/usr/sbin/devlink', 'dev', 'eswitch', 'set',
f'pci/{pf_pci}', 'mode', 'switchdev')
except processutils.ProcessExecutionError as exc:
logger.error(f"{pf_name}: Failed to set mode to switchdev for "
f"{pf_pci}: {exc}")
raise
logger.info(f"{pf_name}: Device pci/{pf_pci} set to switchdev mode.")
# WA to make sure that the uplink_rep is ready after moving to switchdev,
# as moving to switchdev will remove the sriov_pf and create uplink
# representor, so we need to make sure that uplink representor is ready
# before proceed
_wait_for_uplink_rep_creation(pf_name)
try:
processutils.execute('/usr/sbin/ethtool', '-K', pf_name,
'hw-tc-offload', 'on')
logger.info(f"{pf_name}: Enabled \"hw-tc-offload\" for PF.")
except processutils.ProcessExecutionError as exc:
logger.error(f"{pf_name}: Failed to enable hw-tc-offload: {exc}")
raise
def get_vdpa_vhost_devices():
logger.info(f"Getting list of vdpa devices")
try:
stdout, stderr = processutils.execute('vdpa', '-j', 'dev')
except processutils.ProcessExecutionError as exc:
logger.error(f"Failed to get vdpa vhost devices: {exc}")
raise
return loads(stdout)['dev']
def configure_vdpa_vhost_device(pci):
logger.info(f"{pci}: Creating vdpa device")
try:
processutils.execute('vdpa', 'dev', 'add', 'name', pci,
'mgmtdev', f'pci/{pci}')
except processutils.ProcessExecutionError as exc:
logger.error(f"{pci}: Failed to create vdpa vhost device: {exc}")
raise
def configure_flow_steering(pf_name, steering_mode):
pf_pci = get_pf_pci(pf_name)
try:
processutils.execute('/usr/sbin/devlink', 'dev', 'param', 'set',
f'pci/{pf_pci}', 'name', 'flow_steering_mode',
'value', steering_mode, 'cmode', 'runtime')
logger.info(f"{pf_name}: Device pci/{pf_pci} is set to"
" {steering_mode} steering mode.")
except processutils.ProcessExecutionError as exc:
logger.warning(f"{pf_name}: Could not set pci/{pf_pci} to"
f" {steering_mode} steering mode: {exc}")
def run_ip_config_cmd(*cmd, **kwargs):
logger.info("Running %s" % ' '.join(cmd))
try:
processutils.execute(*cmd, delay_on_retry=True, attempts=10, **kwargs)
except processutils.ProcessExecutionError as exc:
logger.error("Failed to execute %s: %s" % (' '.join(cmd), exc))
raise
def _pf_interface_up(pf_device):
if 'promisc' in pf_device:
run_ip_config_cmd('ip', 'link', 'set', 'dev', pf_device['name'],
'promisc', pf_device['promisc'])
logger.info(f"{pf_device['name']}: Bringing up PF")
run_ip_config_cmd('ip', 'link', 'set', 'dev', pf_device['name'], 'up')
def run_ip_config_cmd_safe(raise_error, *cmd, **kwargs):
try:
run_ip_config_cmd(*cmd)
except processutils.ProcessExecutionError:
if raise_error:
raise
def get_pf_pci(pf_name):
pf_pci_path = common.get_dev_path(pf_name, "uevent")
pf_info = common.get_file_data(pf_pci_path)
pf_pci = re.search(r'PCI_SLOT_NAME=(.*)', pf_info, re.MULTILINE).group(1)
return pf_pci
def get_pf_device_id(pf_name):
pf_device_path = common.get_dev_path(pf_name, "device")
pf_device_id = common.get_file_data(pf_device_path).strip()
return pf_device_id
def get_vf_pcis_list(pf_name):
vf_pcis_list = []
pf_files = os.listdir(common.get_dev_path(pf_name, "_device"))
for pf_file in pf_files:
if pf_file.startswith("virtfn"):
vf_info = common.get_file_data(common.get_dev_path(pf_name,
f"{pf_file}/uevent"))
vf_pcis_list.append(re.search(r'PCI_SLOT_NAME=(.*)',
vf_info, re.MULTILINE).group(1))
return vf_pcis_list
def if_down_interface(device):
logger.info(f"{device}: Running /sbin/ifdown")
try:
processutils.execute('/sbin/ifdown', device)
except processutils.ProcessExecutionError:
logger.error(f"{device}: Failed to ifdown")
raise
def if_up_interface(device):
logger.info(f"{device}: Running /sbin/ifup")
try:
processutils.execute('/sbin/ifup', device)
except processutils.ProcessExecutionError:
logger.error(f"{device}: Failed to ifup")
raise
def configure_sriov_vf():
sriov_map = common.get_sriov_map()
for item in sriov_map:
raise_error = True
if item['device_type'] == 'vf':
pf_name = item['device']['name']
vfid = item['device']['vfid']
base_cmd = ('ip', 'link', 'set', 'dev', pf_name, 'vf', str(vfid))
logger.info(f"{pf_name}: Configuring settings for VF: {vfid} "
f"VF name: {item['name']}")
raise_error = True
if 'macaddr' in item:
cmd = base_cmd + ('mac', item['macaddr'])
run_ip_config_cmd(*cmd)
if 'vlan_id' in item:
vlan_cmd = base_cmd + ('vlan', str(item['vlan_id']))
if 'qos' in item:
vlan_cmd = vlan_cmd + ('qos', str(item['qos']))
run_ip_config_cmd(*vlan_cmd)
if 'max_tx_rate' in item:
cmd = base_cmd + ('max_tx_rate', str(item['max_tx_rate']))
if item['max_tx_rate'] == 0:
raise_error = False
run_ip_config_cmd_safe(raise_error, *cmd)
if 'min_tx_rate' in item:
cmd = base_cmd + ('min_tx_rate', str(item['min_tx_rate']))
if item['min_tx_rate'] == 0:
raise_error = False
run_ip_config_cmd_safe(raise_error, *cmd)
if 'spoofcheck' in item:
cmd = base_cmd + ('spoofchk', item['spoofcheck'])
run_ip_config_cmd(*cmd)
if 'state' in item:
cmd = base_cmd + ('state', item['state'])
run_ip_config_cmd(*cmd)
if 'trust' in item:
cmd = base_cmd + ('trust', item['trust'])
run_ip_config_cmd(*cmd)
if 'promisc' in item:
run_ip_config_cmd('ip', 'link', 'set', 'dev', item['name'],
'promisc', item['promisc'])
if 'driver' in item:
common.set_driverctl_override(item['pci_address'],
item['driver'])
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Configure SR-IOV PF and VF interfaces using a YAML'
' config file format.')
parser.add_argument(
'-d', '--debug',
dest="debug",
action='store_true',
help="Print debugging output.",
required=False)
parser.add_argument(
'-v', '--verbose',
dest="verbose",
action='store_true',
help="Print verbose output.",
required=False)
parser.add_argument(
'-n', '--numvfs',
dest="numvfs",
action='store',
help="Provide the numvfs for device in the format <device>:<numvfs>",
required=False)
opts = parser.parse_args(argv[1:])
return opts
def main(argv=sys.argv, main_logger=None):
opts = parse_opts(argv)
if not main_logger:
main_logger = common.configure_logger(log_file=True)
common.logger_level(main_logger, opts.verbose, opts.debug)
if opts.numvfs:
if re.match(r"^\w+:\d+$", opts.numvfs):
device_name, numvfs = opts.numvfs.split(':')
set_numvfs(device_name, int(numvfs))
else:
main_logger.error(f"Invalid arguments for --numvfs {opts.numvfs}")
return 1
else:
# Configure the PF's
configure_sriov_pf()
# Configure the VFs
configure_sriov_vf()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
PypiClean
|
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/securityinsights/get_watchlist_item.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetWatchlistItemResult',
'AwaitableGetWatchlistItemResult',
'get_watchlist_item',
'get_watchlist_item_output',
]
@pulumi.output_type
class GetWatchlistItemResult:
"""
Represents a Watchlist Item in Azure Security Insights.
"""
def __init__(__self__, created=None, created_by=None, entity_mapping=None, etag=None, id=None, is_deleted=None, items_key_value=None, name=None, system_data=None, tenant_id=None, type=None, updated=None, updated_by=None, watchlist_item_id=None, watchlist_item_type=None):
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if created_by and not isinstance(created_by, dict):
raise TypeError("Expected argument 'created_by' to be a dict")
pulumi.set(__self__, "created_by", created_by)
if entity_mapping and not isinstance(entity_mapping, dict):
raise TypeError("Expected argument 'entity_mapping' to be a dict")
pulumi.set(__self__, "entity_mapping", entity_mapping)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_deleted and not isinstance(is_deleted, bool):
raise TypeError("Expected argument 'is_deleted' to be a bool")
pulumi.set(__self__, "is_deleted", is_deleted)
if items_key_value and not isinstance(items_key_value, dict):
raise TypeError("Expected argument 'items_key_value' to be a dict")
pulumi.set(__self__, "items_key_value", items_key_value)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated and not isinstance(updated, str):
raise TypeError("Expected argument 'updated' to be a str")
pulumi.set(__self__, "updated", updated)
if updated_by and not isinstance(updated_by, dict):
raise TypeError("Expected argument 'updated_by' to be a dict")
pulumi.set(__self__, "updated_by", updated_by)
if watchlist_item_id and not isinstance(watchlist_item_id, str):
raise TypeError("Expected argument 'watchlist_item_id' to be a str")
pulumi.set(__self__, "watchlist_item_id", watchlist_item_id)
if watchlist_item_type and not isinstance(watchlist_item_type, str):
raise TypeError("Expected argument 'watchlist_item_type' to be a str")
pulumi.set(__self__, "watchlist_item_type", watchlist_item_type)
@property
@pulumi.getter
def created(self) -> Optional[str]:
"""
The time the watchlist item was created
"""
return pulumi.get(self, "created")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional['outputs.WatchlistUserInfoResponse']:
"""
Describes a user that created the watchlist item
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="entityMapping")
def entity_mapping(self) -> Optional[Any]:
"""
key-value pairs for a watchlist item entity mapping
"""
return pulumi.get(self, "entity_mapping")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isDeleted")
def is_deleted(self) -> Optional[bool]:
"""
A flag that indicates if the watchlist item is deleted or not
"""
return pulumi.get(self, "is_deleted")
@property
@pulumi.getter(name="itemsKeyValue")
def items_key_value(self) -> Any:
"""
key-value pairs for a watchlist item
"""
return pulumi.get(self, "items_key_value")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[str]:
"""
The tenantId to which the watchlist item belongs
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def updated(self) -> Optional[str]:
"""
The last time the watchlist item was updated
"""
return pulumi.get(self, "updated")
@property
@pulumi.getter(name="updatedBy")
def updated_by(self) -> Optional['outputs.WatchlistUserInfoResponse']:
"""
Describes a user that updated the watchlist item
"""
return pulumi.get(self, "updated_by")
@property
@pulumi.getter(name="watchlistItemId")
def watchlist_item_id(self) -> Optional[str]:
"""
The id (a Guid) of the watchlist item
"""
return pulumi.get(self, "watchlist_item_id")
@property
@pulumi.getter(name="watchlistItemType")
def watchlist_item_type(self) -> Optional[str]:
"""
The type of the watchlist item
"""
return pulumi.get(self, "watchlist_item_type")
class AwaitableGetWatchlistItemResult(GetWatchlistItemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWatchlistItemResult(
created=self.created,
created_by=self.created_by,
entity_mapping=self.entity_mapping,
etag=self.etag,
id=self.id,
is_deleted=self.is_deleted,
items_key_value=self.items_key_value,
name=self.name,
system_data=self.system_data,
tenant_id=self.tenant_id,
type=self.type,
updated=self.updated,
updated_by=self.updated_by,
watchlist_item_id=self.watchlist_item_id,
watchlist_item_type=self.watchlist_item_type)
def get_watchlist_item(resource_group_name: Optional[str] = None,
watchlist_alias: Optional[str] = None,
watchlist_item_id: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWatchlistItemResult:
"""
Get a watchlist item.
Azure REST API version: 2023-02-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str watchlist_alias: The watchlist alias
:param str watchlist_item_id: The watchlist item id (GUID)
:param str workspace_name: The name of the workspace.
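Example usage (a minimal sketch; the resource names below are illustrative placeholders, not taken from this module):

.. code-block:: python

    import pulumi_azure_native as azure_native

    item = azure_native.securityinsights.get_watchlist_item(
        resource_group_name="my-resource-group",
        watchlist_alias="my-watchlist",
        watchlist_item_id="00000000-0000-0000-0000-000000000000",
        workspace_name="my-workspace")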
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['watchlistAlias'] = watchlist_alias
__args__['watchlistItemId'] = watchlist_item_id
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights:getWatchlistItem', __args__, opts=opts, typ=GetWatchlistItemResult).value
return AwaitableGetWatchlistItemResult(
created=pulumi.get(__ret__, 'created'),
created_by=pulumi.get(__ret__, 'created_by'),
entity_mapping=pulumi.get(__ret__, 'entity_mapping'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
is_deleted=pulumi.get(__ret__, 'is_deleted'),
items_key_value=pulumi.get(__ret__, 'items_key_value'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
tenant_id=pulumi.get(__ret__, 'tenant_id'),
type=pulumi.get(__ret__, 'type'),
updated=pulumi.get(__ret__, 'updated'),
updated_by=pulumi.get(__ret__, 'updated_by'),
watchlist_item_id=pulumi.get(__ret__, 'watchlist_item_id'),
watchlist_item_type=pulumi.get(__ret__, 'watchlist_item_type'))
@_utilities.lift_output_func(get_watchlist_item)
def get_watchlist_item_output(resource_group_name: Optional[pulumi.Input[str]] = None,
watchlist_alias: Optional[pulumi.Input[str]] = None,
watchlist_item_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWatchlistItemResult]:
"""
Get a watchlist item.
Azure REST API version: 2023-02-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str watchlist_alias: The watchlist alias
:param str watchlist_item_id: The watchlist item id (GUID)
:param str workspace_name: The name of the workspace.
"""
...
|
PypiClean
|
/fhi-vibes-1.0.5.tar.gz/fhi-vibes-1.0.5/vibes/helpers/stresses.py
|
import numpy as np
from ase.constraints import full_3x3_to_voigt_6_stress, voigt_6_to_full_3x3_stress
from vibes.helpers import talk, warn
from .stress import has_stress
def get_stresses(atoms):
"""Obtain intensive Nx3x3 stresses"""
stress = atoms.get_stress(voigt=False)
stresses = enforce_3x3(atoms.get_stresses())
stresses = enforce_intensive(stress, stresses, atoms.get_volume())
return stresses
def enforce_3x3(stresses):
"""Make stresses into Nx3x3 regardless of input form"""
if is_voigt(stresses):
return voigt_6_to_full_3x3_stress(stresses)
else:
return stresses
def enforce_intensive(stress, stresses, volume):
"""Ensure that sum(stresses) = stress"""
summed_stresses = stresses.sum(axis=0)
if np.allclose(stress, summed_stresses):
return stresses
else:
summed_stresses /= volume
diff = np.linalg.norm(stress - summed_stresses)
if diff > 1e-7:
msg = f"Stress and stresses differ by {diff}"
warn(msg, level=1)
talk(full_3x3_to_voigt_6_stress(stress), prefix="stress")
talk(full_3x3_to_voigt_6_stress(summed_stresses), prefix="stresses")
return stresses / volume
def is_voigt(stresses):
"""True if stresses is n_atoms x 6
Also asserts that the array has the
expected shape.
"""
if stresses.shape[1] == 6:
assert len(stresses.shape) == 2
return True
else:
assert stresses.shape[1::] == (3, 3)
return False
def has_stresses(atoms):
"""Check if we can obtain stresses with get_stresses
get_stresses requires that the stress is also available, so that's checked as well.
The helper is supposed to be run on atoms with a SinglePointCalculator attached,
not with a "live" calculator.
Args:
atoms: ase.Atoms object, with SinglePointCalculator attached.
Returns:
Whether get_stresses will be able to obtain stresses from atoms.
"""
return "stresses" in atoms.calc.results and has_stress(atoms)
|
PypiClean
|
/ka-lite-static-0.17.6b2.tar.gz/ka-lite-static-0.17.6b2/kalite/distributed/static/js/distributed/perseus/ke/local-only/localeplanet/icu.ar.js
|
(function() {
var dfs = {"am_pm":["ص","م"],"day_name":["الأحد","الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت"],"day_short":["الأحد","الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت"],"era":["ق.م","م"],"era_name":["قبل الميلاد","ميلادي"],"month_name":["يناير","فبراير","مارس","أبريل","مايو","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوفمبر","ديسمبر"],"month_short":["يناير","فبراير","مارس","أبريل","مايو","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوفمبر","ديسمبر"],"order_full":"DMY","order_long":"DMY","order_medium":"DMY","order_short":"DMY"};
var nfs = {"decimal_separator":"٫","grouping_separator":"٬","minus":"-"};
var df = {SHORT_PADDED_CENTURY:function(d){if(d){return(((d.getDate()+101)+'').substring(1)+'/'+((d.getMonth()+101)+'').substring(1)+'/'+d.getFullYear());}},SHORT:function(d){if(d){return(((d.getDate()+101)+'').substring(1)+'/'+((d.getMonth()+101)+'').substring(1)+'/'+(d.getFullYear()+'').substring(2));}},SHORT_NOYEAR:function(d){if(d){return(((d.getDate()+101)+'').substring(1)+'/'+((d.getMonth()+101)+'').substring(1));}},SHORT_NODAY:function(d){if(d){return(((d.getMonth()+101)+'').substring(1)+'/'+(d.getFullYear()+'').substring(2));}},MEDIUM:function(d){if(d){return(((d.getDate()+101)+'').substring(1)+'/'+((d.getMonth()+101)+'').substring(1)+'/'+d.getFullYear());}},MEDIUM_NOYEAR:function(d){if(d){return(((d.getDate()+101)+'').substring(1)+'/'+((d.getMonth()+101)+'').substring(1));}},MEDIUM_WEEKDAY_NOYEAR:function(d){if(d){return(dfs.day_short[d.getDay()]+' '+((d.getDate()+101)+'').substring(1)+'/'+((d.getMonth()+101)+'').substring(1));}},LONG_NODAY:function(d){if(d){return(dfs.month_name[d.getMonth()]+','+' '+d.getFullYear());}},LONG:function(d){if(d){return(((d.getDate()+101)+'').substring(1)+' '+dfs.month_name[d.getMonth()]+','+' '+d.getFullYear());}},FULL:function(d){if(d){return(((d.getDate()+101)+'').substring(1)+' '+dfs.month_name[d.getMonth()]+','+' '+d.getFullYear());}}};
window.icu = window.icu || new Object();
var icu = window.icu;
icu.getCountry = function() { return "" };
icu.getCountryName = function() { return "" };
icu.getDateFormat = function(formatCode) { var retVal = {}; retVal.format = df[formatCode]; return retVal; };
icu.getDateFormats = function() { return df; };
icu.getDateFormatSymbols = function() { return dfs; };
icu.getDecimalFormat = function(places) { var retVal = {}; retVal.format = function(n) { var ns = n < 0 ? Math.abs(n).toFixed(places) : n.toFixed(places); var ns2 = ns.split('.'); s = ns2[0]; var d = ns2[1]; var rgx = /(\d+)(\d{3})/;while(rgx.test(s)){s = s.replace(rgx, '$1' + nfs["grouping_separator"] + '$2');} return (n < 0 ? nfs["minus"] : "") + s + nfs["decimal_separator"] + d;}; return retVal; };
icu.getDecimalFormatSymbols = function() { return nfs; };
icu.getIntegerFormat = function() { var retVal = {}; retVal.format = function(i) { var s = i < 0 ? Math.abs(i).toString() : i.toString(); var rgx = /(\d+)(\d{3})/;while(rgx.test(s)){s = s.replace(rgx, '$1' + nfs["grouping_separator"] + '$2');} return i < 0 ? nfs["minus"] + s : s;}; return retVal; };
icu.getLanguage = function() { return "ar" };
icu.getLanguageName = function() { return "العربية" };
icu.getLocale = function() { return "ar" };
icu.getLocaleName = function() { return "العربية" };
})();
|
PypiClean
|
/funkiio-1.0.1-py3-none-any.whl/homeassistant/components/enigma2/media_player.py
|
import logging
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.helpers.config_validation import (PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_ON,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_STOP,
SUPPORT_SELECT_SOURCE, SUPPORT_VOLUME_STEP, MEDIA_TYPE_TVSHOW)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_SSL,
STATE_OFF, STATE_ON, STATE_PLAYING, CONF_PORT)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_MEDIA_CURRENTLY_RECORDING = 'media_currently_recording'
ATTR_MEDIA_DESCRIPTION = 'media_description'
ATTR_MEDIA_END_TIME = 'media_end_time'
ATTR_MEDIA_START_TIME = 'media_start_time'
CONF_USE_CHANNEL_ICON = "use_channel_icon"
CONF_DEEP_STANDBY = "deep_standby"
CONF_MAC_ADDRESS = "mac_address"
CONF_SOURCE_BOUQUET = "source_bouquet"
DEFAULT_NAME = 'Enigma2 Media Player'
DEFAULT_PORT = 80
DEFAULT_SSL = False
DEFAULT_USE_CHANNEL_ICON = False
DEFAULT_USERNAME = 'root'
DEFAULT_PASSWORD = 'dreambox'
DEFAULT_DEEP_STANDBY = False
DEFAULT_MAC_ADDRESS = ''
DEFAULT_SOURCE_BOUQUET = ''
SUPPORTED_ENIGMA2 = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_OFF | SUPPORT_NEXT_TRACK | SUPPORT_STOP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_VOLUME_STEP | \
SUPPORT_TURN_ON | SUPPORT_PAUSE | SUPPORT_SELECT_SOURCE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_USE_CHANNEL_ICON,
default=DEFAULT_USE_CHANNEL_ICON): cv.boolean,
vol.Optional(CONF_DEEP_STANDBY, default=DEFAULT_DEEP_STANDBY): cv.boolean,
vol.Optional(CONF_MAC_ADDRESS, default=DEFAULT_MAC_ADDRESS): cv.string,
vol.Optional(CONF_SOURCE_BOUQUET,
default=DEFAULT_SOURCE_BOUQUET): cv.string,
})
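# Example configuration.yaml entry (illustrative values; only ``host`` is
# required, the remaining keys fall back to the defaults defined above):
#
# media_player:
#   - platform: enigma2
#     host: 192.168.1.20
#     name: Living room receiver
#     username: root
#     password: dreambox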
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up of an enigma2 media player."""
if discovery_info:
# Discovery gives us the streaming service port (8001)
# which is not useful as OpenWebif never runs on that port.
# So use the default port instead.
config[CONF_PORT] = DEFAULT_PORT
config[CONF_NAME] = discovery_info['hostname']
config[CONF_HOST] = discovery_info['host']
config[CONF_USERNAME] = DEFAULT_USERNAME
config[CONF_PASSWORD] = DEFAULT_PASSWORD
config[CONF_SSL] = DEFAULT_SSL
config[CONF_USE_CHANNEL_ICON] = DEFAULT_USE_CHANNEL_ICON
config[CONF_MAC_ADDRESS] = DEFAULT_MAC_ADDRESS
config[CONF_DEEP_STANDBY] = DEFAULT_DEEP_STANDBY
config[CONF_SOURCE_BOUQUET] = DEFAULT_SOURCE_BOUQUET
from openwebif.api import CreateDevice
device = \
CreateDevice(host=config[CONF_HOST],
port=config.get(CONF_PORT),
username=config.get(CONF_USERNAME),
password=config.get(CONF_PASSWORD),
is_https=config.get(CONF_SSL),
prefer_picon=config.get(CONF_USE_CHANNEL_ICON),
mac_address=config.get(CONF_MAC_ADDRESS),
turn_off_to_deep=config.get(CONF_DEEP_STANDBY),
source_bouquet=config.get(CONF_SOURCE_BOUQUET))
add_devices([Enigma2Device(config[CONF_NAME], device)], True)
class Enigma2Device(MediaPlayerDevice):
"""Representation of an Enigma2 box."""
def __init__(self, name, device):
"""Initialize the Enigma2 device."""
self._name = name
self.e2_box = device
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self.e2_box.is_recording_playback:
return STATE_PLAYING
return STATE_OFF if self.e2_box.in_standby else STATE_ON
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return SUPPORTED_ENIGMA2
def turn_off(self):
"""Turn off media player."""
self.e2_box.turn_off()
def turn_on(self):
"""Turn the media player on."""
self.e2_box.turn_on()
@property
def media_title(self):
"""Title of current playing media."""
return self.e2_box.current_service_channel_name
@property
def media_series_title(self):
"""Return the title of current episode of TV show."""
return self.e2_box.current_programme_name
@property
def media_channel(self):
"""Channel of current playing media."""
return self.e2_box.current_service_channel_name
@property
def media_content_id(self):
"""Service Ref of current playing media."""
return self.e2_box.current_service_ref
@property
def media_content_type(self):
"""Type of video currently playing."""
return MEDIA_TYPE_TVSHOW
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.e2_box.muted
@property
def media_image_url(self):
"""Picon url for the channel."""
return self.e2_box.picon_url
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.e2_box.set_volume(int(volume * 100))
def volume_up(self):
"""Volume up the media player."""
self.e2_box.set_volume(int(self.e2_box.volume * 100) + 5)
def volume_down(self):
"""Volume down media player."""
self.e2_box.set_volume(int(self.e2_box.volume * 100) - 5)
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self.e2_box.volume
def media_stop(self):
"""Send stop command."""
self.e2_box.set_stop()
def media_play(self):
"""Play media."""
self.e2_box.toggle_play_pause()
def media_pause(self):
"""Pause the media player."""
self.e2_box.toggle_play_pause()
def media_next_track(self):
"""Send next track command."""
self.e2_box.set_channel_up()
def media_previous_track(self):
"""Send next track command."""
self.e2_box.set_channel_down()
def mute_volume(self, mute):
"""Mute or unmute."""
self.e2_box.mute_volume()
@property
def source(self):
"""Return the current input source."""
return self.e2_box.current_service_channel_name
@property
def source_list(self):
"""List of available input sources."""
return self.e2_box.source_list
def select_source(self, source):
"""Select input source."""
self.e2_box.select_source(self.e2_box.sources[source])
def update(self):
"""Update state of the media_player."""
self.e2_box.update()
@property
def device_state_attributes(self):
"""Return device specific state attributes.
isRecording: Is the box currently recording.
currservice_fulldescription: Full program description.
currservice_begin: is in the format '21:00'.
currservice_end: is in the format '21:00'.
"""
attributes = {}
if not self.e2_box.in_standby:
attributes[ATTR_MEDIA_CURRENTLY_RECORDING] = \
self.e2_box.status_info['isRecording']
attributes[ATTR_MEDIA_DESCRIPTION] = \
self.e2_box.status_info['currservice_fulldescription']
attributes[ATTR_MEDIA_START_TIME] = \
self.e2_box.status_info['currservice_begin']
attributes[ATTR_MEDIA_END_TIME] = \
self.e2_box.status_info['currservice_end']
return attributes
|
PypiClean
|
/scikit-rf-0.28.0.tar.gz/scikit-rf-0.28.0/doc/source/examples/metrology/TwoPortOnePath, EnhancedResponse, and FakeFlip.ipynb
|
# TwoPortOnePath, EnhancedResponse, and FakeFlip
## Intro
This example demonstrates a macgyver-ish shortcut you can take if you are measuring a device that is **reciprocal** and **symmetric** on a switch-less three-receiver system. For more information about error correction on this type of architecture, see [Calibration With Three Receivers](Calibration With Three Receivers.ipynb).
In general, full error correction of a 2-port network on a switchless three-receiver architecture requires each DUT to measured in two orientations. However, if the DUT is known to be reciprocal ($S_{21}=S_{12}$) and symmetric ($S_{11}=S_{22}$), then measurements in both orientations produce the same response, and therefore are unnecessary.
The following worked example compares the corrected response of a WR-12 10 dB attenuator using:
1. Full Correction
2. Pseudo-Full Correction (FakeFlip)
3. Partial (EnhancedResponse)
```
from IPython.display import *
Image('three_receiver_cal/pics/macgyver.jpg', width='50%')
```
## Example
These measurements were taken on an Agilent PNAX with a set of VDI WR-12 TXRX-RX Frequency Extender heads. The measurements of the calibration standards and DUTs were downloaded from the VNA by saving touchstone files of the raw s-parameter data to disk.
In the code that follows, a TwoPortOnePath calibration is created from corresponding measured and ideal responses of the calibration standards. The measured networks are read from disk, while their corresponding ideal responses are generated using scikit-rf. More information about using scikit-rf to do offline calibrations can be found [here](../../tutorials/Calibration.ipynb).
```
import skrf as rf
%matplotlib inline
from pylab import *
rf.stylely()
from skrf.calibration import TwoPortOnePath
from skrf.media import RectangularWaveguide
from skrf import two_port_reflect as tpr
from skrf import mil
raw = rf.read_all_networks('three_receiver_cal/data/')
# pull frequency information from measurements
frequency = raw['short'].frequency
# the media object
wg = RectangularWaveguide(frequency=frequency, a=120*mil, z0_override=50)
# list of 'ideal' responses of the calibration standards
ideals = [wg.short(nports=2),
tpr(wg.delay_short( 90,'deg'), wg.match()),
wg.match(nports=2),
wg.thru()]
# corresponding measurements to the 'ideals'
measured = [raw['short'],
raw['quarter wave delay short'],
raw['load'],
raw['thru']]
# the Calibration object
cal = TwoPortOnePath(measured = measured, ideals = ideals )
```
## Correction Options
With the calibration created above, we compare the corrected response of a WR-12 10 dB attenuator using **Full**, **Pseudo-Full**, and **Partial** correction. Each correction algorithm is described below.
```
Image('three_receiver_cal/pics/symmetric DUT.jpg', width='75%')
```
### Full Correction (TwoPortOnePath)
Full correction on this type of architecture has been called *TwoPortOnePath*. In `scikit-rf`, using this correction algorithm requires the device to be measured in both orientations, **forward** and **reverse**, and both measurements to be passed to the `apply_cal()` function as a `tuple`. Neglecting connector uncertainty, this type of correction is identical to a full two-port **SOLT** calibration.
### Pseudo-full Correction (FakeFlip)
If we assume the DUT is **reciprocal** and **symmetric**, then measuring the device in both orientations will produce the same result. Therefore, the reverse orientation measurement may be replaced by a copy of the forward orientation measurement. We refer to this technique as the *Fake Flip*.
<div class="alert ">
**Warning**:
Be sure that you understand the assumptions of reciprocity and symmetry before using this macgyver technique, incorrect usage can lead to nonsense results.
</div>
### Partial Correction (EnhancedResponse)
If you pass a single measurement to the `apply_cal()` function, then the calibration will employ partial correction. This type of correction is known as `EnhancedResponse`. While the *Fake Flip* technique assumes the device is reciprocal and symmetric, the `EnhancedResponse` algorithm *implicitly* assumes that port 2 of the device is perfectly matched. The accuracy of the corrected result produced with either of these algorithms depends on the accuracy of the assumptions.
## Comparison
```
dutf = raw['attenuator (forward)']
dutr = raw['attenuator (reverse)']
# note the correction algorithm is different depending on what is passed to
# apply_cal
corrected_full = cal.apply_cal((dutf, dutr))
corrected_fakeflip = cal.apply_cal((dutf,dutf))
corrected_partial = cal.apply_cal(dutf)
f, ax = subplots(2,2, figsize=(8,8))
for m in [0,1]:
for n in [0,1]:
ax_ = ax[m,n]
ax_.set_title('$S_{%i%i}$'%(m+1,n+1))
corrected_full.plot_s_db(m,n, label='Full Correction',ax=ax_ )
corrected_fakeflip.plot_s_db(m,n, label='Pseudo-full Correction', ax=ax_)
if n==0:
corrected_partial.plot_s_db(m,n, label='Partial Correction', ax=ax_)
tight_layout()
```
|
PypiClean
|
/adversarial_robustness_toolbox-1.15.1-py3-none-any.whl/art/defences/postprocessor/reverse_sigmoid.py
|
import logging
import numpy as np
from art.defences.postprocessor.postprocessor import Postprocessor
logger = logging.getLogger(__name__)
class ReverseSigmoid(Postprocessor):
"""
Implementation of a postprocessor based on adding the Reverse Sigmoid perturbation to classifier output.
"""
params = ["beta", "gamma"]
def __init__(
self,
beta: float = 1.0,
gamma: float = 0.1,
apply_fit: bool = False,
apply_predict: bool = True,
) -> None:
"""
Create a ReverseSigmoid postprocessor.
:param beta: A positive magnitude parameter.
:param gamma: A positive dataset and model specific convergence parameter.
:param apply_fit: True if applied during fitting/training.
:param apply_predict: True if applied during predicting.
"""
super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)
self.beta = beta
self.gamma = gamma
self._check_params()
def __call__(self, preds: np.ndarray) -> np.ndarray:
"""
Perform model postprocessing and return postprocessed output.
:param preds: model output to be postprocessed.
:return: Postprocessed model output.
"""
clip_min = 1e-9
clip_max = 1.0 - clip_min
def sigmoid(var_z):
return 1.0 / (1.0 + np.exp(-var_z))
preds_clipped = np.clip(preds, clip_min, clip_max)
if preds.shape[1] > 1:
perturbation_r = self.beta * (sigmoid(-self.gamma * np.log((1.0 - preds_clipped) / preds_clipped)) - 0.5)
preds_perturbed = preds - perturbation_r
preds_perturbed = np.clip(preds_perturbed, 0.0, 1.0)
alpha = 1.0 / np.sum(preds_perturbed, axis=-1, keepdims=True)
reverse_sigmoid = alpha * preds_perturbed
else:
preds_1 = preds
preds_2 = 1.0 - preds
preds_clipped_1 = preds_clipped
preds_clipped_2 = 1.0 - preds_clipped
perturbation_r_1 = self.beta * (
sigmoid(-self.gamma * np.log((1.0 - preds_clipped_1) / preds_clipped_1)) - 0.5
)
perturbation_r_2 = self.beta * (
sigmoid(-self.gamma * np.log((1.0 - preds_clipped_2) / preds_clipped_2)) - 0.5
)
preds_perturbed_1 = preds_1 - perturbation_r_1
preds_perturbed_2 = preds_2 - perturbation_r_2
preds_perturbed_1 = np.clip(preds_perturbed_1, 0.0, 1.0)
preds_perturbed_2 = np.clip(preds_perturbed_2, 0.0, 1.0)
alpha = 1.0 / (preds_perturbed_1 + preds_perturbed_2)
reverse_sigmoid = alpha * preds_perturbed_1
return reverse_sigmoid
def _check_params(self) -> None:
if self.beta <= 0:
raise ValueError("Magnitude parameter must be positive.")
if self.gamma <= 0:
raise ValueError("Convergence parameter must be positive.")
|
PypiClean
|
/starlink-pywrapper-0.3.tar.gz/starlink-pywrapper-0.3/starlink/kappa_help/hislist.rst
|
HISLIST
=======
Purpose
~~~~~~~
Lists NDF history records
Description
~~~~~~~~~~~
This lists all the history records in an NDF. The reported information
comprises the date, time, and application name, and optionally the
history text.
Usage
~~~~~
::
hislist ndf
ADAM parameters
~~~~~~~~~~~~~~~
BRIEF = _LOGICAL (Read)
```````````````````````
This controls whether a summary or the full history information is
reported. BRIEF=TRUE requests that only the date and application name
in each history record are listed. BRIEF=FALSE causes the task to
report the history text in addition. [FALSE]
NDF = NDF (Read)
````````````````
The NDF whose history information is to be reported.
Examples
~~~~~~~~
hislist vcc953
This lists the full history information for the NDF called vcc953. The
information comprises the names of the applications and the times they
were used, and the associated history text.
hislist vcc953 brief
This gives a summary of the history information for the NDF called
vcc953. It comprises the names of the applications and the times they
were used.
Related Applications
~~~~~~~~~~~~~~~~~~~~
KAPPA: HISCOM, HISSET, NDFTRACE.
Copyright
~~~~~~~~~
Copyright (C) 1993 Science & Engineering Research Council. Copyright
(C) 1995 Central Laboratory of the Research Councils. All Rights
Reserved.
Licence
~~~~~~~
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA
|
PypiClean
|
/KnowledgeCore-2.8.10.tar.gz/KnowledgeCore-2.8.10/README-pypi.rst
|
KnowledgeCore
==============
KnowledgeCore is an RDFlib-backed minimalistic knowledge base, initially designed
for robots (in particular human-robot interaction or multi-robot interaction).
It features full `ROS <https://www.ros.org>`__ support.
It stores triples (like RDF/OWL triples), and provides an API accessible
via a simple socket protocol.
`pykb <https://github.com/severin-lemaignan/pykb>`__ provides an
idiomatic Python binding, making it easy to integrate the knowledge base into
your applications.
It integrates with the `reasonable <https://github.com/gtfierro/reasonable>`__ OWL2
RL reasoner to provide OWL2 semantics and fast knowledge materialisation.
Example
-------
This example uses the ROS API (see below), with some Pythonic syntactic sugar:
.. code:: python
import rospy
from knowledge_core.api import KB
rospy.init_node("test_knowledge_base")
kb = KB()
def on_robot_entering_antonio_property(evt):
print("A robot entered Antonio's %s: %s" (evt[0]["place"], evt[0]["robot"]))
kb += "ari rdf:type Robot"
kb += ["antonio looksAt ari", "ari isIn kitchen"]
kb.subscribe(["?robot isIn ?place", "?place belongsTo antonio", "?robot rdf:type Robot"], onRobotEnteringAntonioProperty)
kb += "kitchen belongsTo antonio"
# try as well:
# kb -= "antonio looksAt ari" to remove facts
# kb["* rdf:type Robot"] to query the knowledge base
rospy.spin()
will print:
```
A robot entered Antonio's kitchen: ari
```
Installation
------------
**KnowledgeCore only supports Python 3**
Prerequisite
~~~~~~~~~~~~
``rdflib >= 6.0.0``:
::
$ pip install rdflib
For reasoning (optional):
::
$ pip install reasonable
Installation
~~~~~~~~~~~~
From ``pypi``:
::
$ pip install knowledge_core
From source:
::
$ git clone https://github.com/severin-lemaignan/knowledge_core.git
$ cd knowledge_core
$ python setup.py install
$ knowledge_core
Documentation
-------------
You can use ``KnowledgeCore`` either as a server, accessible from multiple
applications (clients), or in *embedded* mode (which does not require
starting a server process, but is limited to a single client). Note
that the embedded mode is only available for Python applications.
In both cases, if your application is written in Python, it is highly
recommended to use `pykb <https://github.com/severin-lemaignan/pykb>`__
to interact with the knowledge base.
Server mode
~~~~~~~~~~~
To start the knowledge base as a server, simply type:
::
$ knowledge_core
(run ``knowledge_core --help`` for available options)
Then:
.. code:: python
import kb
with kb.KB() as kb:
#...
See usage examples on the
`pykb <https://github.com/severin-lemaignan/pykb>`__ page, or in the
``KnowledgeCore`` unit-tests.
Embedded mode
~~~~~~~~~~~~~
No need to start ``KnowledgeCore``. Simply use the following code to start
using the knowledge base in your code:
.. code:: python
import kb
with kb.KB(embedded=True) as kb:
#...
Interacting with KnowledgeCore from other languages
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- from C++: check
`liboro <https://github.com/severin-lemaignan/liboro>`__
- from any other language: the communication with the server relies on
a simple socket-based text protocol. Feel free to get in touch if you
need help to add support for your favourite language!
ROS usage
~~~~~~~~~
To start:
::
rosrun knowledge_core knowledge_core
Then, ``knowledge_core`` exposes two topics, ``/kb/add_facts`` and
``/kb/remove_facts``, to add/remove triples to the knowledge base. Both topics
expect a simple string with 3 tokens separated by spaces (if the object is a
literal string, use double quotes to escape it).
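For example, assuming the topics use plain ``std_msgs/String`` messages (an
assumption, not stated in this README), a fact can be added from the command
line with:

::

   rostopic pub -1 /kb/add_facts std_msgs/String "data: 'ari rdf:type Robot'"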
It also exposes the following services:
- ``/kb/revise`` to add/remove facts using a synchronous interface
- ``/kb/query`` to perform simple queries
- ``/kb/sparql`` to perform complex queries (full SPARQL end-point)
- ``/kb/events`` to subscribe to 'events' by providing a (set of) partially-bound
triples. Calling the service returns an event *id*. Then subscribe to
``/kb/events/<id>`` to be notified every time a new instance/class matches the
provided pattern
- ``/kb/manage`` to manage the knowledge base (including eg clearing all the
facts)
Features
--------
Server-Client or embedded
~~~~~~~~~~~~~~~~~~~~~~~~~
``KnowledgeCore`` can be run as a stand-alone (socket) server, or directly
embedded in Python applications.
Multi-models
~~~~~~~~~~~~
``KnowledgeCore`` is intended for dynamic environments, with possibly
several contexts/agents requiring separate knowledge models.
New models can be created at any time and each operation (like knowledge
addition/retraction/query) can operate on a specific subset of models.
Each model is also independently classified by the reasoner.
Event system
~~~~~~~~~~~~
``KnowledgeCore`` provides a mechanism to *subscribe* to some conditions
(like: an instance of a given type is added to the knowledge base, some
statement becomes true, etc.) and get notified back.
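For instance, following the subscription example above:

.. code:: python

   def on_new_robot(evt):
       print("New robot: %s" % evt[0]["robot"])

   kb.subscribe(["?robot rdf:type Robot"], on_new_robot)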
Reasoning
~~~~~~~~~
`KnowledgeCore` provides RDFS/OWL reasoning capabilities via the
`reasonable <https://github.com/gtfierro/reasonable>`__ reasoner.
See `reasonable README <https://github.com/gtfierro/reasonable#owl-2-rules>`__ for
the exact level of support of the different OWL2 RL rules.
Transient knowledge
~~~~~~~~~~~~~~~~~~~
``KnowledgeCore`` allows attaching 'lifespans' to statements: after a given
duration, they are automatically collected.
**[this functionality is currently disabled. Please open an issue if you need it
urgently]**
Ontology walking
~~~~~~~~~~~~~~~~
``KnowledgeCore`` exposes several methods to explore the different
ontological models of the knowledge base. It is compatible with the
visualization tool
`oro-view <https://github.com/severin-lemaignan/oro-view>`__.
|
PypiClean
|
/saltext.sap_pse-1.0.0.tar.gz/saltext.sap_pse-1.0.0/src/saltext/sap_pse/_modules/sap_pse.py
|
import logging
import re
import salt.utils.http
import salt.utils.platform
import yaml
# Third Party libs
# Globals
# the following control characters are not allowed in YAML
INVALID_CHARACTERS = [chr(i) for i in range(0x80, 0x9F)]
# default path to sapgenpse
DEFAULT_SAPGENPSE = "/usr/sap/hostctrl/exe/sapgenpse"
log = logging.getLogger(__name__)
__virtualname__ = "sap_pse"
def __virtual__():
"""
Only work on POSIX-like systems
"""
if salt.utils.platform.is_windows():
return False, "This module doesn't work on Windows."
return __virtualname__
def _which(executable, runas=None):
"""
Similar to ``salt.utils.path.which()``, but:
- Only works on Linux
- Allows runas
If not runas is given, the salt minion user is used
"""
ret = __salt__["cmd.run_all"](cmd=f"which {executable}", runas=runas)
if ret["retcode"]:
return None
return ret["stdout"]
def _get_sapgenpse_path(user=None):
"""
Retrieve the path to sapgenpse.
.. note::
Because this depends on the user, we cannot run this in __virtual__().
"""
# first, try the userspath
sapgenpse = _which("sapgenpse", runas=user)
if sapgenpse:
return sapgenpse
# if not available, check the default path
elif __salt__["file.file_exists"](DEFAULT_SAPGENPSE):
return DEFAULT_SAPGENPSE
else:
msg = "No executable sapgenpse could be found"
log.error(msg)
raise Exception(msg)
def _remove_invalid_characters(s_string):
"""
Remove all unprintable characters from a string for yaml loading.
"""
return "".join(ch for ch in s_string if ch not in INVALID_CHARACTERS)
def _parse_cert_output(lines):
"""
Parses the ``-vv`` certificate output of sapgenpse
"""
yaml_data = []
for line in lines:
log.trace(f"Processing line '{line.strip()}'")
if ":" in line:
# mapping
key, value = line.split(":", 1)
if value:
# quote data if required and remove hashtags from the key -> interpreted as comments
k_insert = key.replace("#", "_")
v_insert = value.strip().replace('"', '\\"')
yaml_line = f'{k_insert}: "{v_insert}"'
else:
yaml_line = line.replace("#", "_")
else:
# list item
groups = re.findall(r"( +)(.*)", line)[0]
groups_0 = groups[0]
groups_1 = groups[1].replace('"', '\\"')
yaml_line = f'{groups_0}- "{groups_1}"'
yaml_data.append(yaml_line)
data = {}
try:
unicode_string = "\n".join(yaml_data)
old_l = len(unicode_string)
unicode_string = _remove_invalid_characters(unicode_string)
if old_l != len(unicode_string):
msg = (
"Invalid unicode control characters found in certificate output. "
"Because these cannot be part of YAML, they will be stripped!"
)
log.warning(msg)
data = yaml.safe_load(unicode_string)
except Exception:
yaml_str = "\n".join(yaml_data)
log.error(f"Got an error when trying to parse the following yaml data:\n{yaml_str}")
raise
return data
# pylint: disable=invalid-name,unused-argument
def gen_pse(
pse_file,
dn,
pse_pwd=None,
algo="RSA:2048:SHA512",
runas=None,
groupas=None,
add_ca_bundle=True,
**kwargs,
):
"""
Wrapper for the function ``gen_pse`` of the CLI tool ``sapgenpse``.
Create a new PSE. This will **not** create a signing request.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path for the PSE.
dn
Distinguished name.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for the PSE. Default is no PIN.
algo
Equivalent to ``-a <algo>``, i.e. the algorithm used for the PSE, e.g. DSA, ECDSA or
RSA (default is ``RSA:2048:SHA512``).
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
add_ca_bundle
If False, will not add the OpenSSL CA bundle returned by
``salt.utils.http.get_ca_bundle()`` which is all certificate authorities that are
trusted by the operating system.
Returns True / False based on success.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.gen_pse pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLA.pse" dn="cn=ANONYMOUS"
"""
log.debug("Running function")
if not runas:
runas = __grains__["username"]
if not groupas:
groupas = __grains__["groupname"]
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f'{genpse_exec} gen_pse -p {pse_file} -a {algo} {pin} -noreq "{dn}"'
log.trace(f"Executing '{cmd}'")
env = {
"TZ": "UTC", # required for correct handling
}
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env
)
log.trace(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not create {pse_file} :\n{out}")
return False
log.debug(f"Created {pse_file}")
if add_ca_bundle:
log.debug(f"Adding CA bundle certificates to PSE {pse_file}")
success = maintain_pk_add(
pse_file=pse_file, pse_pwd=pse_pwd, runas=runas, certs=[salt.utils.http.get_ca_bundle()]
)
else:
success = True
return success
# pylint: disable=unused-argument
def import_p8(
pse_file,
pub_key,
priv_key,
pse_pwd=None,
priv_key_pwd=None,
add_certs=None,
runas=None,
groupas=None,
add_ca_bundle=True,
**kwargs,
):
"""
Wrapper for the function ``import_p8`` of the CLI tool ``sapgenpse``.
This function creates a new PSE file from a PKCS#8 format private key
(optionally protected by PKCS#5 password-based encryption) along with all
necessary X.509 certs.
You will have to supply the X.509 certificate matching the private key
plus all intermediate and root CA certificates which might be necessary
to build a certificate chain that ends with a self-signed certificate.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path for the PSE.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for the PSE. Default is no PIN.
pub_key
Equivalent to ``-c <cert(s)-file>``, i.e. an X.509 certificate containing the public key.
priv_key
Path to the X.509 certificate containing the private key.
priv_key_pwd
Equivalent to ``-z <password>``, i.e. the Password/Passphrase for decryption of
private key. Default is no password.
add_certs
Equivalent to ``-r <file2>``, i.e. additional certificate(s) for an incomplete PKCS#8
file. This list can contain up to 10 additional files for building a complete certification
path up to the RootCA (PEM, Base64 or DER binary). Default is no additional
certificates.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
add_ca_bundle
If False, will not add the OpenSSL CA bundle returned by
``salt.utils.http.get_ca_bundle()`` which is all certificate authorities that are trusted
by the operating system.
Returns True / False based on success.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.import_p8 pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse" pub_key="/etc/pki/cert.crt" priv_key="/etc/pki/cert.key"
""" # pylint: disable=line-too-long
log.debug("Running function")
if not add_certs:
add_certs = []
elif len(add_certs) > 10:
log.error("Only 10 additional files are allowed")
return False
if not runas:
runas = __grains__["username"]
if not groupas:
groupas = __grains__["groupname"]
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
if add_certs:
certs = " ".join([f"-r {x}" for x in add_certs])
else:
certs = ""
cmd = f"{genpse_exec} import_p8 -p {pse_file} {pin} -c {pub_key}"
if priv_key_pwd:
cmd += f" -z {priv_key_pwd}"
cmd += f" {certs} {priv_key}"
log.trace(f"Executing '{cmd}'")
env = {
"TZ": "UTC", # required for correct handling
}
log.debug(f"Running the following command: '{cmd}'")
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not create {pse_file} from {pub_key}/{priv_key}:\n{out}")
return False
log.debug(f"Created {pse_file} from {pub_key}/{priv_key}")
if add_ca_bundle:
log.debug(f"Adding CA bundle certificates to PSE {pse_file}")
success = maintain_pk_add(
pse_file=pse_file, pse_pwd=pse_pwd, runas=runas, certs=[salt.utils.http.get_ca_bundle()]
)
else:
success = True
return success
# pylint: disable=unused-argument
def export_p8(
pse_file, pem_file, pem_pwd, pse_pwd=None, runas=None, groupas=None, secudir=None, **kwargs
):
"""
Wrapper for the function ``export_p8`` of the CLI tool ``sapgenpse``.
Exports the key of a PSE into PKCS#8 transfer format (PEM-File) for transfer/export to
software of other vendors.
The private key and its corresponding certificate plus the forward certificate chain up to and
including the RootCA's certificate are written into a PEM file.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
pem_file
Path to the PEM file which will contain both public and private key.
pem_pwd
Equivalent to ``-z <password>``, i.e. the Password/Passphrase for the encryption
of the PEM-file.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
Returns True / False based on success.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.export_p8 pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse" pem_file="/etc/pki/pse.crt" pem_pwd=Abcd1234
""" # pylint: disable=line-too-long
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f"{genpse_exec} export_p8 -p {pse_file} {pin}"
cmd += f" -z {pem_pwd}"
cmd += f" {pem_file}"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.trace(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not create {pem_file} from {pse_file}:\n{out}")
return False
log.debug(f"Created {pem_file} from {pse_file}")
return True
# pylint: disable=unused-argument
def get_my_name(pse_file, pse_pwd=None, runas=None, groupas=None, secudir=None, **kwargs):
"""
Wrapper for the function ``get_my_name`` of the CLI tool ``sapgenpse``.
Displays the attributes/properties of the user/owner certificate in a PSE.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.get_my_name pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse"
"""
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
# Note: sapgenpse does not support machine-readable output, but the verbose output
# can be interpreted as YAML
cmd = f"{genpse_exec} get_my_name -vv -p {pse_file} {pin}"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.trace(f"Output:\n{cmd_ret}")
if "(Wrong PIN/Passphrase)" in cmd_ret.get("stderr"):
log.error(f"Cannot open {pse_file} due to wrong PIN")
return {}
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not retrieve data from pse {pse_file}:\n{out}")
return False
data = {}
out = cmd_ret.get("stdout")
if not out:
log.debug("No data over stdout, checking stderr")
out = cmd_ret.get("stderr")
lines = out.splitlines()
i = 0
while True:
if i + 1 >= len(lines):
break
log.trace(f"Processing line '{lines[i]}'")
if lines[i].startswith("--------------------------------"):
cert_name = lines[i - 1].split(":", 1)[0].strip()
log.debug(f"Processing certificate {cert_name}")
# all the lines under the element can be interpreted as yaml
i += 2 # next line is always "Certificate:" and can be skipped
yaml_data = []
while True:
if (
i >= len(lines) - 1
or not lines[i].strip()
or lines[i].startswith("--------------------------------")
):
cert_data = _parse_cert_output(yaml_data)
data[cert_name] = cert_data
i += 1
break
else:
log.trace(f"Adding line '{lines[i]}'")
yaml_data.append(lines[i])
i += 1
i += 1
log.trace(f"Returning data:\n{data}")
return data
# pylint: disable=unused-argument
def maintain_pk_add(
pse_file, certs, runas=None, groupas=None, pse_pwd=None, secudir=None, **kwargs
):
"""
Wrapper for the function ``maintain_pk`` of the CLI tool ``sapgenpse``.
Adds certificates to the PK list of a PSE.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
certs
Equivalent to ``-m <cert-file>``, i.e. add multiple certificates from <file>.
Must be a list.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.maintain_pk_add pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse" certs=["/etc/pki/trust/anchors/ca.crt"]
""" # pylint: disable=line-too-long
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
certs = " ".join([f"-m {x}" for x in certs])
cmd = f"{genpse_exec} maintain_pk -y -p {pse_file} {pin} {certs}"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not add certificates {certs} to pse {pse_file}:\n{out}")
return False
log.debug(f"Successfully added certificates {certs} to pse {pse_file}")
return True
# pylint: disable=unused-argument
def maintain_pk_delete(
pse_file, del_cert, runas=None, groupas=None, pse_pwd=None, secudir=None, **kwargs
):
"""
Wrapper for the function ``maintain_pk`` of the CLI tool ``sapgenpse``.
Delete certificates from the PKList of a PSE.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
del_cert
Equivalent to ``"-d <num>`` (delete certificate/key number <num> from PKList) or
``-d <string>`` (delete certificates/keys from PKList containing <string>)
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.maintain_pk_delete pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse" del_cert=0
"""
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f"{genpse_exec} maintain_pk -p {pse_file} {pin} -d {del_cert}"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not delete certificate {del_cert} from pse {pse_file}:\n{out}")
return False
log.debug(f"Successfully deleted certificate {del_cert} from pse {pse_file}")
return True
# pylint: disable=unused-argument
def maintain_pk_list(pse_file, runas=None, groupas=None, pse_pwd=None, secudir=None, **kwargs):
"""
Wrapper for the function ``maintain_pk`` of the CLI tool ``sapgenpse``.
List certificates from the PKList of a PSE.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.maintain_pk_list pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse"
"""
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f"{genpse_exec} maintain_pk -l -p {pse_file} {pin}"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not retrieve certificates for pse {pse_file}:\n{out}")
return False
log.debug("Parsing output")
data = []
out = cmd_ret.get("stdout")
if not out:
log.debug("No data over stdout, checking stderr")
out = cmd_ret.get("stderr")
lines = out.splitlines()
i = 0
while True:
if i + 1 >= len(lines):
break
log.trace(f"Processing line '{lines[i]}'")
# when only one certificate is maintained, "Element" is skipped
if lines[i].startswith(" Element") or (
lines[i].startswith("PKList") and lines[i + 1].startswith(" Version")
):
cert_number = re.findall(r"Element #([0-9]+):", lines[i].strip())
if not cert_number:
# if only one certificate is maintained
cert_number = 1
else:
cert_number = cert_number[0]
log.debug(f"Processing certificate {cert_number}")
# all the lines under the element can be interpreted as yaml
i += 1
yaml_data = []
while True:
if i >= len(lines) or not lines[i].strip() or lines[i].startswith(" Element"):
cert_data = _parse_cert_output(yaml_data)
cert_data["number"] = cert_number
data.append(cert_data)
break
else:
log.trace(f"Adding line '{lines[i]}'")
yaml_data.append(lines[i])
i += 1
else:
i += 1
log.trace(f"Returning data:\n{data}")
return data
# pylint: disable=unused-argument
def seclogin_add(
pse_file, pse_pwd=None, user=None, runas=None, groupas=None, secudir=None, **kwargs
):
"""
Wrapper for the function ``seclogin`` of the CLI tool ``sapgenpse``.
Creates Single Sign-On (SSO) credentials for a PSE / user.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
user
Equivalent to ``-O <username>``, i.e. create SSO-credential for OTHER user <username>.
Will be set to runas or salt minion user if None.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.seclogin_add pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse" user="sapadm"
"""
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not user:
user = runas
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to {secudir}")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f"{genpse_exec} seclogin -p {pse_file} {pin} -O {user}"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not create seclogin for pse {pse_file} / user {user}:\n{out}")
return False
log.debug(f"Created seclogin for pse {pse_file} / user {user}")
return True
# pylint: disable=unused-argument
def seclogin_contains(
pse_file, pse_pwd=None, user=None, runas=None, groupas=None, secudir=None, **kwargs
):
"""
Wrapper for the function ``seclogin`` of the CLI tool ``sapgenpse``.
Returns success and whether Single Sign-On (SSO) credentials for the user already exist.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
user
Equivalent to ``-O <username>``, i.e. create SSO-credential for OTHER user <username>.
Will be set to runas or salt minion user if None.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.seclogin_contains pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse" user="sapadm"
"""
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not user:
user = runas
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f"{genpse_exec} seclogin -p {pse_file} {pin} -l -O {user}"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
if cmd_ret.get("retcode") == 21 and "No SSO credentials available" in out:
log.debug("No credentials available")
return True, False
log.error(f"Could not list seclogin for pse {pse_file} / user {user}:\n{out}")
return False, None
out = cmd_ret.get("stdout")
if not out:
out = cmd_ret.get("stderr")
m = re.search(
r"([0-9]+|NO) readable (\(of [0-9]+ matching\) )?SSO-Credentials available( \(total [0-9]+\))?",
out,
)
if not m:
log.error(f"Could not determine list of SSO credentials for user {user}")
return False, None
if m.groups()[0] == "NO":
return True, False
return True, True
# pylint: disable=unused-argument
def seclogin_count(pse_file, runas=None, groupas=None, pse_pwd=None, secudir=None, **kwargs):
"""
Wrapper for the function ``seclogin`` of the CLI tool ``sapgenpse``.
Returns the number of SSO credentials for the given PSE, or ``False`` on error.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.seclogin_count pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse"
"""
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f"{genpse_exec} seclogin -p {pse_file} {pin} -l"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not list seclogin for pse {pse_file}:\n{out}")
return False
out = cmd_ret.get("stdout")
if not out:
out = cmd_ret.get("stderr")
m = re.findall(r"[0-9]+ \(LPS:", out)
ret = len(m)
log.debug(f"Returning:\n{ret}")
return ret
# pylint: disable=unused-argument
def seclogin_delete(pse_file, pse_pwd=None, runas=None, groupas=None, secudir=None, **kwargs):
"""
Wrapper for the function ``seclogin`` of the CLI tool ``sapgenpse``.
Removes all SSO credentials for a PSE file.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
pse_pwd
Equivalent to ``-x <pin>``, i.e. the PIN/Passphrase for PSE file. Default is no PIN.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
secudir
SECUDIR to use. If not defined, the path of the PSE file will be set as SECUDIR.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.seclogin_delete pse_file="/usr/sap/hostctrl/exe/sec/SAPSSLS.pse"
"""
log.debug("Running function")
if not runas:
runas = __salt__["file.get_user"](pse_file)
log.warning(f"No user defined to run with, using PSE file owner '{runas}'")
if not groupas:
groupas = __salt__["file.get_group"](pse_file)
log.warning(f"No group defined to run with, using PSE file owner group '{groupas}'")
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
if not secudir:
secudir = pse_file.rsplit("/", 1)[0]
log.debug(f"Setting SECUDIR to '{secudir}'")
if pse_pwd:
pin = f"-x {pse_pwd}"
else:
pin = "-x ''" # empty PIN set
cmd = f"{genpse_exec} seclogin -p {pse_file} {pin} -d"
log.trace(f"Executing '{cmd}'")
env = {"TZ": "UTC", "SECUDIR": secudir} # required for correct handling
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env, cwd=secudir
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not delete seclogin for pse {pse_file}:\n{out}")
return False
return True
# pylint: disable=unused-argument
def gen_verify_pse(pse_file=None, runas=None, groupas=None, **kwargs):
"""
Wrapper for the function ``gen_verify_pse`` of the CLI tool ``sapgenpse``.
Create a new PSE for verification without own key pair.
pse_file
Equivalent to ``-p <pse-file>``, i.e. the path of the PSE.
runas
User that will run the command, default is the user that runs the salt minion.
groupas
Group that will run the command, default is the group that runs the salt minion.
.. note::
This will utilize the OpenSSL CA bundle returned by ``salt.utils.http.get_ca_bundle()``.
CLI Example:
.. code-block:: bash
salt "*" sap_pse.gen_verify_pse pse_file="SAPVERIFY1.pse"
"""
log.debug("Running function")
ca_bundle = (
salt.utils.http.get_ca_bundle()
) # On SLES returns /var/lib/ca-certificates/ca-bundle.pem
if not pse_file:
pse_file = "SAPVERIFY1.pse"
if not runas:
runas = __grains__["username"]
if not groupas:
groupas = __grains__["groupname"]
genpse_exec = _get_sapgenpse_path(runas)
log.debug(f"Running with user {runas} and using executable {genpse_exec}")
cmd = f'{genpse_exec} gen_verify_pse -p {pse_file} -x "" -a {ca_bundle}'
log.trace(f"Executing '{cmd}'")
env = {
"TZ": "UTC", # required for correct handling
}
cmd_ret = __salt__["cmd.run_all"](
cmd, python_shell=True, runas=runas, group=groupas, timeout=30, env=env
)
log.debug(f"Output:\n{cmd_ret}")
if cmd_ret.get("retcode"):
out = cmd_ret.get("stderr").strip()
log.error(f"Could not create {pse_file} from {ca_bundle}:\n{out}")
return False
log.debug(f"Created {pse_file} from {ca_bundle}")
return True
|
PypiClean
|
/cooltools-0.5.4.tar.gz/cooltools-0.5.4/docs/cooltools.lib.rst
|
cooltools.lib package
=====================
common
--------
.. automodule:: cooltools.lib.common
:members:
:undoc-members:
:show-inheritance:
numutils
--------
.. automodule:: cooltools.lib.numutils
:members:
:undoc-members:
:show-inheritance:
peaks
-----
.. automodule:: cooltools.lib.peaks
:members:
:undoc-members:
:show-inheritance:
plotting
--------
.. automodule:: cooltools.lib.plotting
:members:
:undoc-members:
:show-inheritance:
schemas
--------
.. automodule:: cooltools.lib.schemas
:members:
:undoc-members:
:show-inheritance:
|
PypiClean
|
/mailtoplone.scrawl-0.2.zip/mailtoplone.scrawl-0.2/mailtoplone/scrawl/README.txt
|
Introduction
============
We need manager role for this test::
>>> self.setRoles(('Manager',))
Configure Folder for Blog Entries
---------------------------------
We use a standard Folder for dropping the emails in::
>>> self.portal.invokeFactory('Folder', 'blog_entries')
'blog_entries'
>>> blog_entries = self.portal.blog_entries
To enable MailDropbox functionality, we equip the
folder with an additional marker interface::
>>> from zope.interface import alsoProvides
>>> from mailtoplone.scrawl.interfaces import IScrawlMailDropBoxMarker
>>> alsoProvides(blog_entries, IScrawlMailDropBoxMarker)
mailtoplone uses a more generic interface, IMailDropBox, to adapt the
context; we use that to call drop::
>>> from mailtoplone.base.interfaces import IMailDropBox
>>> IMailDropBox(blog_entries).drop("Subject: my first blog entry")
As a result, we expect a single blog entry, with the title and id set
according to the email's subject::
>>> blog_entries.objectIds()
['my-first-blog-entry']
>>> blog_entry = blog_entries.listFolderContents()[0]
>>> blog_entry.title
u'my first blog entry'
>>> blog_entry.portal_type
'Blog Entry'
|
PypiClean
|
/pyixexplorer-3.1.3-py3-none-any.whl/ixexplorer/ixe_port.py
|
import re
from enum import Enum
from pathlib import Path
from trafficgenerator import TgnError
from ixexplorer.api.ixapi import FLAG_IGERR, FLAG_RDONLY, MacStr, TclMember, ixe_obj_meta
from ixexplorer.ixe_object import IxeObject, IxeObjectObj
from ixexplorer.ixe_statistics_view import IxeCapFileFormat, IxePortsStats, IxeStat, IxeStreamsStats
from ixexplorer.ixe_stream import IxeStream
class IxePhyMode(Enum):
copper = "portPhyModeCopper"
fiber = "portPhyModeFibber"
sgmii = "portPhyModeSgmii"
ignore = None
class IxeReceiveMode(Enum):
none = 0x0000
capture = 0x0001
packetGroup = 0x0002
tcpSessions = 0x0004
tcpRoundTrip = 0x0008
dataIntegrity = 0x0010
firstTimeStamp = 0x0020
sequenceChecking = 0x0040
bert = 0x0080
isl = 0x0100
bertChannelized = 0x0200
echo = 0x0400
dcc = 0x0800
widePacketGroup = 0x1000
prbs = 0x2000
rateMonitoring = 0x4000
perFlowErrorStats = 0x8000
class IxeTransmitMode(Enum):
packetStreams = "portTxPacketStreams"
packetFlows = "portTxPacketFlows"
advancedScheduler = "portTxModeAdvancedScheduler"
bert = "portTxModeBert"
bertChannelized = "portTxModeBertChannelized"
echo = "portTxModeEcho"
dccStreams = "portTxModeDccStreams"
dccAdvancedScheduler = "portTxModeDccAvancedScheduler"
dccFlowSpecStreams = "portTxModeDccFlowsSpeStreams"
dccFlowSpecAdvancedScheduler = "portTxModeDccFlowsSpeAdvancedScheduler"
advancedSchedulerCoarse = "portTxModeAdvancedSchedulerCoarse"
streamsCoarse = "portTxModePacketStreamsCoarse"
class IxeLinkState(Enum):
linkDown = 0
linkUp = 1
linkLoopback = 2
noTransceiver = 7
invalidAddress = 8
noGbicModule = 13
lossOfFrame = 24
lossOfSignal = 25
forcedLinkUp = 34
noXenpakModule = 40
demoMode = 42
noXFPModule = 45
inactive = 47
noX2Module = 48
ethernetOamLoopback = 54
class StreamWarningsError(TgnError):
pass
class IxePort(IxeObject, metaclass=ixe_obj_meta):
__tcl_command__ = "port"
__tcl_members__ = [
TclMember("advertise2P5FullDuplex", type=int, flags=FLAG_IGERR),
TclMember("advertise5FullDuplex", type=int, flags=FLAG_IGERR),
TclMember("advertise1000FullDuplex", type=bool),
TclMember("advertise100FullDuplex", type=bool),
TclMember("advertise100HalfDuplex", type=bool),
TclMember("advertise10FullDuplex", type=bool),
TclMember("advertise10HalfDuplex", type=bool),
TclMember("advertiseAbilities"),
TclMember("autoDetectInstrumentationMode", type=bool),
TclMember("autonegotiate", type=bool),
TclMember("dataCenterMode"),
TclMember("DestMacAddress", type=MacStr),
TclMember("directedAddress"),
TclMember("duplex"),
TclMember("enableAutoDetectInstrumentation", type=bool),
TclMember("enableDataCenterMode", type=bool),
TclMember("enableManualAutoNegotiate", type=bool),
TclMember("enablePhyPolling", type=bool),
TclMember("enableRepeatableLastRandomPattern", type=bool),
TclMember("enableSimulateCableDisconnect", type=bool),
TclMember("enableTransparentDynamicRateChange", type=bool),
TclMember("enableTxRxSyncStatsMode", type=bool),
TclMember("flowControl", type=bool),
TclMember("flowControlType", int),
TclMember("ignoreLink", type=bool),
TclMember("linkState", type=int, flags=FLAG_RDONLY),
TclMember("loopback"),
TclMember("MacAddress", type=MacStr),
TclMember("masterSlave"),
TclMember("multicastPauseAddress"),
TclMember("negotiateMasterSlave", type=bool),
TclMember("operationModeList"),
TclMember("owner"),
TclMember("packetFlowFileName"),
TclMember("pfcEnableValueList"),
TclMember("pfcEnableValueListBitMatrix"),
TclMember("pfcResponseDelayEnabled"),
TclMember("pfcResponseDelayQuanta"),
TclMember("phyMode", flags=FLAG_RDONLY),
TclMember("pmaClock", type=int),
TclMember("portMode", type=int),
TclMember("preEmphasis"),
TclMember("receiveMode", type=int),
TclMember("rxTxMode", type=int),
TclMember("speed", type=int),
TclMember("timeoutEnable"),
TclMember("transmitClockDeviation", type=bool),
TclMember("transmitClockMode", type=int),
TclMember("transmitMode"),
TclMember("txRxSyncInterval", type=int),
TclMember("type", flags=FLAG_RDONLY),
TclMember("typeName", flags=FLAG_RDONLY),
TclMember("usePacketFlowImageFile", type=bool),
TclMember("enableRsFec", type=bool),
TclMember("ieeeL1Defaults", type=int),
]
__tcl_commands__ = [
"export",
"getFeature",
"getStreamCount",
"reset",
"setFactoryDefaults",
"setModeDefaults",
"restartAutoNegotiation",
"getPortState",
"isValidFeature",
"isActiveFeature",
"isCapableFeature",
]
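# Resource group port mode codes mapped to line speeds; used by supported_speeds()
# when the resource group mode value is not itself a speed.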
mode_2_speed = {
"0": "10000",
"5": "100000",
"6": "40000",
"7": "100000",
"8": "40000",
"9": "10000",
"10": "10000",
"18": "25000",
"19": "50000",
"20": "25000",
}
def __init__(self, parent, uri):
super().__init__(parent=parent, uri=uri.replace("/", " "))
self.cap_file_name = None
def supported_speeds(self):
# TODO: fix once parent is Session (by reserve_ports) - no active_ports; only available if parent is a card (by discover)
# if self.parent.active_ports == self.parent.ports:
supported_speeds = re.findall(r"\d+", self.getFeature("ethernetLineRate"))
# Either active_ports != self.parent.ports or empty supported speeds for whatever reason...
if not supported_speeds:
for rg in self.parent.resource_groups.values():
if self.index in rg.active_ports:
speed = rg.mode if int(rg.mode) >= 1000 else self.mode_2_speed.get(rg.mode, rg.mode)
supported_speeds = [speed]
break
return supported_speeds
def reserve(self, force: bool = False) -> None:
"""Reserve port.
:param force: True - take forcefully, False - fail if port is reserved by other user.
"""
if force:
self.api.call_rc(f"ixPortTakeOwnership {self.uri} force")
else:
try:
self.api.call_rc(f"ixPortTakeOwnership {self.uri}")
except Exception:
raise TgnError(f"Failed to take ownership for port {self} current owner is {self.owner}")
def release(self, force: bool = False) -> None:
"""Release port.
:param force: True - release forcefully, False - fail if port is reserved by other user.
"""
if force:
self.api.call_rc(f"ixPortClearOwnership {self.uri} force")
else:
try:
self.api.call_rc(f"ixPortClearOwnership {self.uri}")
except Exception:
raise TgnError(f"Failed to clear ownership for port {self} current owner is {self.owner}")
def write(self) -> None:
"""Write configuration to chassis.
Raise StreamWarningsError if configuration warnings found.
"""
self.ix_command("write")
stream_warnings = self.streamRegion.generateWarningList()
warnings_list = (
self.api.call("join " + " {" + stream_warnings + "} " + " LiStSeP").split("LiStSeP")
if self.streamRegion.generateWarningList()
else []
)
for warning in warnings_list:
if warning:
raise StreamWarningsError(warning)
def clear(self, stats: bool = True, phy_mode: IxePhyMode = IxePhyMode.ignore) -> None:
self.ix_set_default()
self.setFactoryDefaults()
self.set_phy_mode(phy_mode)
self.reset()
self.write()
if stats:
self.clear_port_stats()
self.clear_all_stats()
self.del_objects_by_type("stream")
def load_config(self, config_file: Path) -> None:
"""Load configuration file from prt or str.
Configuration file type is extracted from the file suffix - prt or str.
:TODO: Investigate why port import can only import files that were exported with port export, not from File -> export.
:param config_file: full path to the configuration file.
IxTclServer must have access to the file location. Either:
The config file is on a shared folder, or
IxTclServer runs on the client machine.
"""
ext = config_file.suffix
if ext == ".prt":
self.api.call_rc(f'port import "{config_file}" {self.uri}')
elif ext == ".str":
self.reset()
self.api.call_rc(f'stream import "{config_file}" {self.uri}')
else:
raise ValueError(f"Configuration file type {ext} not supported.")
self.write()
self.discover()
def save_config(self, config_file: Path) -> None:
"""Save configuration to a prt or str file.
Configuration file type is extracted from the file suffix - prt or str.
:param config_file: full path to the configuration file.
IxTclServer must have access to the file location. Either:
The config file is on a shared folder, or
IxTclServer runs on the client machine.
"""
ext = config_file.suffix
if ext == ".prt":
self.api.call_rc(f'port export "{config_file}" {self.uri}')
elif ext == ".str":
self.api.call_rc(f'stream export "{config_file}" {self.uri}')
else:
raise ValueError(f"Configuration file type {ext} not supported.")
def wait_for_up(self, timeout: int = 16) -> None:
"""Wait until port is up and running.
:param timeout: seconds to wait.
"""
self.session.wait_for_up(timeout, [self])
def discover(self) -> None:
self.logger.info("Discover port {}".format(self.obj_name()))
for stream_id in range(1, int(self.getStreamCount()) + 1):
IxeStream(self, self.uri + "/" + str(stream_id))
def start_transmit(self, blocking: bool = False) -> None:
"""Start transmit on port.
:param blocking: True - wait for traffic end, False - return after traffic start.
"""
self.session.start_transmit(blocking, False, self)
def stop_transmit(self) -> None:
"""Stop traffic on port."""
self.session.stop_transmit(self)
def start_capture(self) -> None:
"""Start capture on port."""
self.session.start_capture(self)
def stop_capture(self, cap_file_name: str = None, cap_file_format: IxeCapFileFormat = IxeCapFileFormat.mem) -> int:
"""Stop capture on port.
:param cap_file_name: prefix for the capture file name.
Capture file will be saved as pcap file named 'prefix' + 'URI'.pcap.
:param cap_file_format: exported file format
:return: number of captured frames
"""
return self.session.stop_capture(cap_file_name, cap_file_format, self)[self]
def get_cap_file(self):
return self.session.get_cap_files(self)[self]
def get_cap_frames(self, *frame_nums):
"""Get captured frames from the port's capture buffer.
:param frame_nums: list of frame numbers to read.
:return: list of captured frames.
"""
frames = []
for frame_num in frame_nums:
if self.captureBuffer.getframe(frame_num) == "0":
frames.append(self.captureBuffer.frame)
else:
frames.append(None)
return frames
#
# Statistics.
#
def clear_port_stats(self) -> None:
"""Clear only port stats (leave stream and packet group stats).
Do not use - still working with Ixia to resolve.
"""
stat = IxeStat(self)
stat.ix_set_default()
stat.enableValidStats = True
stat.ix_set()
stat.write()
def clear_all_stats(self) -> None:
"""Clear all statistic counters (port, streams and packet groups) on list of ports."""
self.session.clear_all_stats(self)
def read_stats(self, *stats):
return IxePortsStats(self).read_stats(*stats)[str(self)]
def read_stream_stats(self, *stats):
return IxeStreamsStats(*self.get_objects_by_type("stream")).read_stats(*stats)
#
# Others...
#
def set_phy_mode(self, mode=IxePhyMode.ignore):
"""Set phy mode to copper or fiber.
:param mode: requested PHY mode.
"""
if isinstance(mode, IxePhyMode):
if mode.value:
self.api.call_rc("port setPhyMode {} {}".format(mode.value, self.uri))
else:
self.api.call_rc(
"port setPhyMode {} {}".format(
mode,
self.uri,
)
)
def set_receive_modes(self, *modes):
"""Set port receive modes (overwrite existing value).
:param modes: requested receive modes
:type modes: list[ixexplorer.ixe_port.IxeReceiveMode]
"""
self._set_receive_modes(0, *modes)
def add_receive_modes(self, *modes):
"""Add port receive modes to existing modes.
:param modes: requested receive modes
:type modes: list[ixexplorer.ixe_port.IxeReceiveMode]
"""
self._set_receive_modes(self.receiveMode, *modes)
def set_transmit_mode(self, mode):
"""Set port transmit mode.
:param mode: requested transmit mode
:type mode: ixexplorer.ixe_port.IxeTransmitMode
"""
self.api.call_rc("port setTransmitMode {} {}".format(mode, self.uri))
def set_rx_ports(self, *rx_ports):
for stream in self.get_objects_by_type("stream"):
stream.rx_ports = rx_ports
rx_ports = property(fset=set_rx_ports)
def ix_set_list(self, optList):
self.ix_get()
for opt in optList:
value = optList[opt]
self.api.call("%s config -%s %s" % (self.__tcl_command__, opt, value))
self.ix_set()
def set_wide_packet_group(self) -> None:
self.set_receive_modes(IxeReceiveMode.widePacketGroup, IxeReceiveMode.dataIntegrity)
def add_stream(self, name: str = None) -> IxeStream:
stream = IxeStream(self, f"{self.uri} {str(int(self.getStreamCount()) + 1)}")
stream.create(name)
return stream
#
# Port objects.
#
def get_autoDetectInstrumentation(self):
return self._get_object("_autoDetectInstrumentation", IxeAutoDetectInstrumentationPort)
autoDetectInstrumentation = property(get_autoDetectInstrumentation)
def get_capture(self):
return self._get_object("_capture", IxeCapture)
capture = property(get_capture)
def get_captureBuffer(self):
return self._get_object("_captureBuffer", IxeCaptureBuffer)
def set_captureBuffer(self, value):
self._captureBuffer = value
captureBuffer = property(fget=get_captureBuffer, fset=set_captureBuffer)
def get_dataIntegrity(self):
return self._get_object("_dataIntegrity", IxeDataIntegrityPort)
dataIntegrity = property(get_dataIntegrity)
def get_filter(self):
return self._get_object("_filter", IxeFilterPort)
filter = property(get_filter)
def get_filterPallette(self):
return self._get_object("_filterPallette", IxeFilterPalettePort)
filterPallette = property(get_filterPallette)
def get_packetGroup(self):
return self._get_object("_packetGroup", IxePacketGroupPort)
packetGroup = property(get_packetGroup)
def get_splitPacketGroup(self):
return self._get_object("_splitPacketGroup", IxeSplitPacketGroup)
splitPacketGroup = property(get_splitPacketGroup)
def get_streamRegion(self):
return self._get_object("_streamRegion", IxeStreamRegion)
streamRegion = property(get_streamRegion)
#
# Properties.
#
def get_streams(self):
"""
:return: dictionary {stream id: object} of all streams.
"""
return {int(s.index): s for s in self.get_objects_by_type("stream")}
streams = property(get_streams)
#
# Private methods.
#
def _set_receive_modes(self, receiveMode, *modes):
for mode in modes:
receiveMode += mode.value
self.receiveMode = receiveMode
#
# Port object classes.
#
class IxePortObj(IxeObjectObj):
def __init__(self, parent):
super().__init__(parent=parent, uri=parent.uri)
class IxeCapture(IxePortObj, metaclass=ixe_obj_meta):
__tcl_command__ = "capture"
__tcl_members__ = [
TclMember("afterTriggerFilter"),
TclMember("beforeTriggerFilter"),
TclMember("captureMode"),
TclMember("continuousFilter"),
TclMember("enableSmallPacketCapture"),
TclMember("fullAction"),
TclMember("nPackets", type=int, flags=FLAG_RDONLY),
TclMember("sliceSize"),
TclMember("triggerPosition"),
]
class IxeCaptureBuffer(IxeObject, metaclass=ixe_obj_meta):
__tcl_command__ = "captureBuffer"
__tcl_members__ = [
TclMember("frame", flags=FLAG_RDONLY),
]
__tcl_commands__ = ["export", "getframe"]
def __init__(self, parent):
super().__init__(parent=parent, uri=parent.uri)
if not self.parent.capture.nPackets:
return
self.api.call_rc("captureBuffer get {} 1 {}".format(self.uri, self.parent.capture.nPackets))
def ix_command(self, command, *args, **kwargs):
return self.api.call(("captureBuffer {} " + len(args) * " {}").format(command, *args))
def ix_get(self, member=None, force=False):
pass
class IxeFilterPalettePort(IxePortObj, metaclass=ixe_obj_meta):
__tcl_command__ = "filterPallette"
__tcl_members__ = [
TclMember("DA1"),
TclMember("DAMask1"),
TclMember("DA2"),
TclMember("DAMask2"),
TclMember("SA1"),
TclMember("SAMask1"),
TclMember("SA2"),
TclMember("SAMask2"),
TclMember("pattern1"),
TclMember("patternMask1"),
TclMember("pattern2"),
TclMember("patternMask2"),
TclMember("patternOffset1", type=int),
TclMember("patternOffset2", type=int),
]
class IxeFilterPort(IxePortObj, metaclass=ixe_obj_meta):
__tcl_command__ = "filter"
__tcl_members__ = [
TclMember(""),
TclMember("captureTriggerDA"),
TclMember("captureTriggerSA"),
TclMember("captureTriggerPattern"),
TclMember("captureTriggerError"),
TclMember("captureTriggerFrameSizeEnable"),
TclMember("captureTriggerFrameSizeFrom"),
TclMember("captureTriggerFrameSizeTo"),
TclMember("captureTriggerCircuit"),
TclMember("captureFilterDA"),
TclMember("captureFilterSA"),
TclMember("captureFilterPattern"),
TclMember("captureFilterError"),
TclMember("captureFilterFrameSizeEnable"),
TclMember("captureFilterFrameSizeFrom"),
TclMember("captureFilterFrameSizeTo"),
TclMember("captureFilterCircuit"),
TclMember("userDefinedStat1DA"),
TclMember("userDefinedStat1SA"),
TclMember("userDefinedStat1Pattern"),
TclMember("userDefinedStat1Error"),
TclMember("userDefinedStat1FrameSizeEnable"),
TclMember("userDefinedStat1FrameSizeFrom"),
TclMember("userDefinedStat1FrameSizeTo"),
TclMember("userDefinedStat1Circuit"),
TclMember("userDefinedStat2DA"),
TclMember("userDefinedStat2SA"),
TclMember("userDefinedStat2Pattern"),
TclMember("userDefinedStat2Error"),
TclMember("userDefinedStat2FrameSizeEnable"),
TclMember("userDefinedStat2FrameSizeFrom"),
TclMember("userDefinedStat2FrameSizeTo"),
TclMember("userDefinedStat2Circuit"),
TclMember("asyncTrigger1DA"),
TclMember("asyncTrigger1SA"),
TclMember("asyncTrigger1Pattern"),
TclMember("asyncTrigger1Error"),
TclMember("asyncTrigger1FrameSizeEnable"),
TclMember("asyncTrigger1FrameSizeFrom"),
TclMember("asyncTrigger1FrameSizeTo"),
TclMember("asyncTrigger1Circuit"),
TclMember("asyncTrigger2DA"),
TclMember("asyncTrigger2SA"),
TclMember("asyncTrigger2Pattern"),
TclMember("asyncTrigger2Error"),
TclMember("asyncTrigger2FrameSizeEnable"),
TclMember("asyncTrigger2FrameSizeFrom"),
TclMember("asyncTrigger2FrameSizeTo"),
TclMember("asyncTrigger2Circuit"),
TclMember("captureTriggerEnable"),
TclMember("captureFilterEnable"),
TclMember("userDefinedStat1Enable"),
TclMember("userDefinedStat2Enable"),
TclMember("asyncTrigger1Enable"),
TclMember("asyncTrigger2Enable"),
TclMember("userDefinedStat1PatternExpressionEnable"),
TclMember("userDefinedStat2PatternExpressionEnable"),
TclMember("captureTriggerPatternExpressionEnable"),
TclMember("captureFilterPatternExpressionEnable"),
TclMember("asyncTrigger1PatternExpressionEnable"),
TclMember("asyncTrigger2PatternExpressionEnable"),
TclMember("userDefinedStat1PatternExpression"),
TclMember("userDefinedStat2PatternExpression"),
TclMember("captureTriggerPatternExpression"),
TclMember("captureFilterPatternExpression"),
TclMember("asyncTrigger1PatternExpression"),
TclMember("asyncTrigger2PatternExpression"),
]
class IxeSplitPacketGroup(IxePortObj, metaclass=ixe_obj_meta):
__tcl_command__ = "splitPacketGroup"
__tcl_members__ = [
TclMember("groupIdOffset", type=int),
TclMember("groupIdOffsetBaseType"),
TclMember("groupIdWidth", type=int),
TclMember("groupIdMask"),
]
def __init__(self, parent):
super().__init__(parent)
self.ix_set_default()
class IxeStreamRegion(IxePortObj, metaclass=ixe_obj_meta):
__tcl_command__ = "streamRegion"
__tcl_commands__ = ["generateWarningList"]
#
# RX port object classes.
#
class IxePortRxObj(IxePortObj, metaclass=ixe_obj_meta):
__get_command__ = "getRx"
__set_command__ = "setRx"
class IxeAutoDetectInstrumentationPort(IxePortRxObj, metaclass=ixe_obj_meta):
__tcl_command__ = "autoDetectInstrumentation"
__tcl_members__ = [
TclMember("enableMisdirectedPacketMask", type=bool),
TclMember("enablePRBS", type=bool),
TclMember("enableSignatureMask", type=bool),
TclMember("misdirectedPacketMask"),
TclMember("signature"),
TclMember("signatureMask"),
TclMember("startOfScan", type=int),
]
class IxeDataIntegrityPort(IxePortRxObj, metaclass=ixe_obj_meta):
__tcl_command__ = "dataIntegrity"
__tcl_members__ = [
TclMember("enableTimeStamp", type=bool),
TclMember("insertSignature", type=bool),
TclMember("floatingTimestampAndDataIntegrityMode"),
TclMember("numBytesFromEndOfFrame", type=int),
TclMember("payloadLength", type=int),
TclMember("signature"),
TclMember("signatureOffset", type=int),
]
class IxePacketGroupPort(IxePortRxObj, metaclass=ixe_obj_meta):
__tcl_command__ = "packetGroup"
__tcl_members__ = [
TclMember("allocateUdf", type=bool),
TclMember("delayVariationMode"),
TclMember("enable128kBinMode", type=bool),
TclMember("enableGroupIdMask", type=bool),
TclMember("enableInsertPgid", type=bool),
TclMember("enableLastBitTimeStamp", type=bool),
TclMember("enableLatencyBins", type=bool),
TclMember("enableReArmFirstTimeStamp", type=bool),
TclMember("enableRxFilter", type=bool),
TclMember("enableSignatureMask", type=bool),
TclMember("enableTimeBins", type=bool),
TclMember("groupId", type=int),
TclMember("groupIdMask"),
TclMember("groupIdMode"),
TclMember("groupIdOffset", type=int),
TclMember("headerFilter"),
TclMember("headerFilterMask"),
TclMember("ignoreSignature", type=bool),
TclMember("insertSequenceSignature", type=bool),
TclMember("insertSignature", type=bool),
TclMember("latencyBinList"),
TclMember("latencyControl"),
TclMember("maxRxGroupId", type=int),
TclMember("measurementMode"),
TclMember("multiSwitchedPathMode"),
TclMember("numPgidPerTimeBin", type=int),
TclMember("numTimeBins", type=int),
TclMember("preambleSize", type=int),
TclMember("seqAdvTrackingLateThreshold", type=int),
TclMember("sequenceErrorThreshold", type=int),
TclMember("sequenceCheckingMode"),
TclMember("sequenceNumberOffset", type=int),
TclMember("signature"),
TclMember("signatureMask"),
TclMember("signatureOffset", type=int),
TclMember("timeBinDuration", type=int),
]
class IxePortCpu(IxePortObj, metaclass=ixe_obj_meta):
__tcl_command__ = "portCpu"
__tcl_members__ = []
__tcl_commands__ = [
"reset",
]
def reset_cpu(self) -> None:
if self.parent.isValidFeature("portFeatureLocalCPU"):
self.reset()
|
PypiClean
|
/google-datacatalog-teradata-connector-0.9.0.tar.gz/google-datacatalog-teradata-connector-0.9.0/README.md
|
# google-datacatalog-teradata-connector
Library for ingesting Teradata metadata into Google Cloud Data Catalog.
[![Python package][2]][2] [![PyPi][3]][4] [![License][5]][5] [![Issues][6]][7]
**Disclaimer: This is not an officially supported Google product.**
<!--
⚠️ DO NOT UPDATE THE TABLE OF CONTENTS MANUALLY ⚠️
run `npx markdown-toc -i README.md`.
Please stick to 80-character line wraps as much as you can.
-->
## Table of Contents
<!-- toc -->
- [1. Installation](#1-installation)
* [1.1. Mac/Linux](#11-maclinux)
* [1.2. Windows](#12-windows)
* [1.3. Install from source](#13-install-from-source)
+ [1.3.1. Get the code](#131-get-the-code)
+ [1.3.2. Create and activate a *virtualenv*](#132-create-and-activate-a-virtualenv)
+ [1.3.3. Install the library](#133-install-the-library)
- [2. Environment setup](#2-environment-setup)
* [2.1. Auth credentials](#21-auth-credentials)
+ [2.1.1. Create a service account and grant it below roles](#211-create-a-service-account-and-grant-it-below-roles)
+ [2.1.2. Download a JSON key and save it as](#212-download-a-json-key-and-save-it-as)
* [2.2. Set environment variables](#22-set-environment-variables)
- [3. Run entry point](#3-run-entry-point)
* [3.1. Run Python entry point](#31-run-python-entry-point)
* [3.2. Run Docker entry point](#32-run-docker-entry-point)
- [4 Scripts inside tools](#4-scripts-inside-tools)
* [4.1. Run clean up](#41-run-clean-up)
* [4.2. Extract CSV](#42-extract-csv)
- [5. Developer environment](#5-developer-environment)
* [5.1. Install and run Yapf formatter](#51-install-and-run-yapf-formatter)
* [5.2. Install and run Flake8 linter](#52-install-and-run-flake8-linter)
* [5.3. Run Tests](#53-run-tests)
- [6. Metrics](#6-metrics)
- [7. Troubleshooting](#7-troubleshooting)
<!-- tocstop -->
-----
## 1. Installation
Install this library in a [virtualenv][1] using pip. [virtualenv][1] is a tool to
create isolated Python environments. The basic problem it addresses is one of
dependencies and versions, and indirectly permissions.
With [virtualenv][1], it's possible to install this library without needing system
install permissions, and without clashing with the installed system
dependencies. Make sure you use Python 3.6+.
### 1.1. Mac/Linux
```bash
pip3 install virtualenv
virtualenv --python python3.6 <your-env>
source <your-env>/bin/activate
<your-env>/bin/pip install google-datacatalog-teradata-connector
```
### 1.2. Windows
```bash
pip3 install virtualenv
virtualenv --python python3.6 <your-env>
<your-env>\Scripts\activate
<your-env>\Scripts\pip.exe install google-datacatalog-teradata-connector
```
### 1.3. Install from source
#### 1.3.1. Get the code
````bash
git clone https://github.com/GoogleCloudPlatform/datacatalog-connectors-rdbms/
cd datacatalog-connectors-rdbms/google-datacatalog-teradata-connector
````
#### 1.3.2. Create and activate a *virtualenv*
```bash
pip3 install virtualenv
virtualenv --python python3.6 <your-env>
source <your-env>/bin/activate
```
#### 1.3.3. Install the library
```bash
pip install .
```
## 2. Environment setup
### 2.1. Auth credentials
#### 2.1.1. Create a service account and grant it below roles
- Data Catalog Admin
#### 2.1.2. Download a JSON key and save it as
- `<YOUR-CREDENTIALS_FILES_FOLDER>/teradata2dc-credentials.json`
> Please notice this folder and file will be required in next steps.
### 2.2. Set environment variables
Replace below values according to your environment:
```bash
export GOOGLE_APPLICATION_CREDENTIALS=data_catalog_credentials_file
export TERADATA2DC_DATACATALOG_PROJECT_ID=google_cloud_project_id
export TERADATA2DC_DATACATALOG_LOCATION_ID=google_cloud_location_id
export TERADATA2DC_TERADATA_SERVER=teradata_server
export TERADATA2DC_TERADATA_USERNAME=teradata_username
export TERADATA2DC_TERADATA_PASSWORD=teradata_password
export TERADATA2DC_RAW_METADATA_CSV=teradata_raw_csv # optional; if supplied, the Teradata server credentials are ignored
```
## 3. Run entry point
### 3.1. Run Python entry point
- Virtualenv
```bash
google-datacatalog-teradata-connector \
--datacatalog-project-id=$TERADATA2DC_DATACATALOG_PROJECT_ID \
--datacatalog-location-id=$TERADATA2DC_DATACATALOG_LOCATION_ID \
--teradata-host=$TERADATA2DC_TERADATA_SERVER \
--teradata-user=$TERADATA2DC_TERADATA_USERNAME \
--teradata-pass=$TERADATA2DC_TERADATA_PASSWORD \
--raw-metadata-csv=$TERADATA2DC_RAW_METADATA_CSV
```
### 3.2. Run Docker entry point
```bash
docker build -t teradata2datacatalog .
docker run --rm --tty -v YOUR-CREDENTIALS_FILES_FOLDER:/data teradata2datacatalog \
--datacatalog-project-id=$TERADATA2DC_DATACATALOG_PROJECT_ID \
--datacatalog-location-id=$TERADATA2DC_DATACATALOG_LOCATION_ID \
--teradata-host=$TERADATA2DC_TERADATA_SERVER \
--teradata-user=$TERADATA2DC_TERADATA_USERNAME \
--teradata-pass=$TERADATA2DC_TERADATA_PASSWORD \
--raw-metadata-csv=$TERADATA2DC_RAW_METADATA_CSV
```
## 4 Scripts inside tools
### 4.1. Run clean up
```bash
# List of projects split by comma. Can be a single value without comma
export TERADATA2DC_DATACATALOG_PROJECT_IDS=my-project-1,my-project-2
```
```bash
# Run the clean up
python tools/cleanup_datacatalog.py --datacatalog-project-ids=$TERADATA2DC_DATACATALOG_PROJECT_IDS
```
### 4.2. Extract CSV
```bash
export TERADATA2DC_DATACATALOG_PROJECT_ID=my-project
export TERADATA2DC_DATACATALOG_LOCATION_ID=us-central1
export TERADATA2DC_TERADATA_SERVER=localhost
export GOOGLE_APPLICATION_CREDENTIALS=teradata2dc-datacatalog-credentials.json
export TERADATA2DC_RAW_METADATA_CSV=teradata_extract.csv
```
```bash
python tools/teradata2datacatalog_csv_extractor.py \
--teradata-host=$TERADATA2DC_TERADATA_SERVER \
--teradata-user=$TERADATA2DC_TERADATA_USERNAME \
--teradata-pass=$TERADATA2DC_TERADATA_PASSWORD \
--raw-metadata-csv=$TERADATA2DC_RAW_METADATA_CSV
```
## 5. Developer environment
### 5.1. Install and run Yapf formatter
```bash
pip install --upgrade yapf
# Auto update files
yapf --in-place --recursive src tests
# Show diff
yapf --diff --recursive src tests
# Set up pre-commit hook
# From the root of your git project.
curl -o pre-commit.sh https://raw.githubusercontent.com/google/yapf/master/plugins/pre-commit.sh
chmod a+x pre-commit.sh
mv pre-commit.sh .git/hooks/pre-commit
```
### 5.2. Install and run Flake8 linter
```bash
pip install --upgrade flake8
flake8 src tests
```
### 5.3. Run Tests
```bash
python setup.py test
```
## 6. Metrics
[Metrics README.md](docs/README.md)
## 7. Troubleshooting
If a connector execution hits a Data Catalog quota limit, an error is raised and logged with details like the following, depending on the operation performed (READ/WRITE/SEARCH):
```
status = StatusCode.RESOURCE_EXHAUSTED
details = "Quota exceeded for quota metric 'Read requests' and limit 'Read requests per minute' of service 'datacatalog.googleapis.com' for consumer 'project_number:1111111111111'."
debug_error_string =
"{"created":"@1587396969.506556000", "description":"Error received from peer ipv4:172.217.29.42:443","file":"src/core/lib/surface/call.cc","file_line":1056,"grpc_message":"Quota exceeded for quota metric 'Read requests' and limit 'Read requests per minute' of service 'datacatalog.googleapis.com' for consumer 'project_number:1111111111111'.","grpc_status":8}"
```
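If you drive the connector or your own Data Catalog calls from a Python wrapper, one mitigation is to back off and retry when this error appears. The helper below is a minimal, illustrative sketch; `call_with_quota_backoff` is a hypothetical name and only `google.api_core.exceptions.ResourceExhausted` is assumed from the installed client libraries:
```python
import time

from google.api_core import exceptions


def call_with_quota_backoff(func, *args, max_attempts=5, base_delay=2.0, **kwargs):
    """Retry a Data Catalog call with exponential backoff on quota errors."""
    for attempt in range(1, max_attempts + 1):
        try:
            return func(*args, **kwargs)
        except exceptions.ResourceExhausted:
            if attempt == max_attempts:
                raise
            # The quota is enforced per minute, so wait before retrying: 2s, 4s, 8s, ...
            time.sleep(base_delay * 2 ** (attempt - 1))
```
Requesting a higher quota for the consumer project is another option; see the quota documentation linked below.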
For more info about Data Catalog quota, go to: [Data Catalog quota docs](https://cloud.google.com/data-catalog/docs/resources/quotas).
[1]: https://virtualenv.pypa.io/en/latest/
[2]: https://github.com/GoogleCloudPlatform/datacatalog-connectors-rdbms/workflows/Python%20package/badge.svg?branch=master
[3]: https://img.shields.io/pypi/v/google-datacatalog-teradata-connector.svg
[4]: https://pypi.org/project/google-datacatalog-teradata-connector/
[5]: https://img.shields.io/github/license/GoogleCloudPlatform/datacatalog-connectors-rdbms.svg
[6]: https://img.shields.io/github/issues/GoogleCloudPlatform/datacatalog-connectors-rdbms.svg
[7]: https://github.com/GoogleCloudPlatform/datacatalog-connectors-rdbms/issues
|
PypiClean
|