ext | sha | content |
---|---|---|
py | 7df6a851b76539265ef71e5f0f1a8eaae7cf1b78 | from bisect import insort
class Solution(object):
def secondHighest(self, s):
"""
:type s: str
:rtype: int
"""
# Runtime: 28 ms
# Memory: 13.5 MB
digits = []
for char in s:
if char.isdigit() and char not in digits:
insort(digits, char)
if len(digits) >= 2:
return int(digits[-2])
else:
return -1
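# Example usage (a quick sketch):
#   Solution().secondHighest("dfa12321afd")  # -> 2 (distinct digits: 1, 2, 3)
#   Solution().secondHighest("abc1111")      # -> -1 (only one distinct digit)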
|
py | 7df6a8cdb1ebd7f47024ebb7562c8506a2c23d3e | """
ASGI config for wiki_influences_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wiki_influences_app.settings')
application = get_asgi_application()
|
py | 7df6a9a03bec3a2d674cba504f76f74dd4631f1c | import logging
import os
import re
import tempfile
from pathlib import Path
from . import helpers
from . import splitter
from .constants import COMPRESSED_ARCHIVE_SUFFIX, ENCRYPTED_ARCHIVE_SUFFIX, \
DEFAULT_COMPRESSION_LEVEL
from .encryption import encrypt_list_of_archives
def encrypt_existing_archive(archive_path, encryption_keys, destination_dir=None, remove_unencrypted=False, force=False, threads=1):
helpers.encryption_keys_must_exist(encryption_keys)
if destination_dir:
helpers.handle_destination_directory_creation(destination_dir, force)
if archive_path.is_dir():
if helpers.get_files_with_type_in_directory(archive_path, ENCRYPTED_ARCHIVE_SUFFIX):
helpers.terminate_with_message("Encrypted archvies present. Doing nothing.")
archive_files = helpers.get_files_with_type_in_directory_or_terminate(archive_path, COMPRESSED_ARCHIVE_SUFFIX)
encrypt_list_of_archives(archive_files, encryption_keys, remove_unencrypted, destination_dir, threads=threads)
return
helpers.terminate_if_path_not_file_of_type(archive_path, COMPRESSED_ARCHIVE_SUFFIX)
logging.info("Start encryption of existing archive " + helpers.get_absolute_path_string(archive_path))
encrypt_list_of_archives([archive_path], encryption_keys, remove_unencrypted, destination_dir, threads=threads)
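# Example usage (a sketch; Path inputs assumed, and the key-list format is an
# assumption, see helpers.encryption_keys_must_exist for what it expects):
#   encrypt_existing_archive(Path("backup.tar.lz"), [Path("key.pub")], threads=4)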
def create_archive(source_path, destination_path, threads=None, encryption_keys=None, compression=DEFAULT_COMPRESSION_LEVEL, splitting=None, remove_unencrypted=False, force=False, work_dir=None):
# Argparse already checks if arguments are present, so only argument format needs to be validated
helpers.terminate_if_path_nonexistent(source_path)
if encryption_keys:
helpers.encryption_keys_must_exist(encryption_keys)
source_name = source_path.name
logging.info(f"Start creating archive for: {helpers.get_absolute_path_string(source_path)}")
if not threads:
threads = 1
if splitting:
create_split_archive(source_path, destination_path, source_name, int(splitting), threads, encryption_keys, compression, remove_unencrypted, work_dir, force)
else:
# Create destination folder if nonexistent or overwrite if --force option used
helpers.handle_destination_directory_creation(destination_path, force)
logging.info("Create and write hash list...")
create_file_listing_hash(source_path, destination_path, source_name, max_workers=threads)
logging.info(f"Create tar archive in {destination_path}...")
create_tar_archive(source_path, destination_path, source_name, work_dir=work_dir)
logging.info(f"Generating hash for tar archive {destination_path}...")
create_and_write_archive_hash(destination_path, source_name)
logging.info(f"Generating archive listing for tar archive {destination_path}...")
create_archive_listing(destination_path, source_name)
logging.info("Starting compression of tar archive...")
compress_using_lzip(destination_path, source_name, threads, compression)
create_and_write_compressed_archive_hash(destination_path, source_name)
if encryption_keys:
logging.info("Starting encryption...")
archive_list = [destination_path.joinpath(source_name + COMPRESSED_ARCHIVE_SUFFIX)]
encrypt_list_of_archives(archive_list, encryption_keys, remove_unencrypted, threads=threads)
logging.info(f"Archive created: {helpers.get_absolute_path_string(destination_path)}")
def create_split_archive(source_path, destination_path, source_name, splitting, threads, encryption_keys, compression, remove_unencrypted, work_dir=None, force=False):
logging.info("Start creation of split archive")
if not threads:
threads = 1
create_filelist_and_hashs(source_path, destination_path, splitting, threads, force)
create_tar_archives_and_listings(source_path, destination_path, work_dir, workers=threads)
compress_and_hash(destination_path, threads, compression)
if encryption_keys:
do_encryption(destination_path, encryption_keys, threads=threads)
def create_filelist_and_hashs(source_path, destination_path, split_size, threads, force=False):
helpers.handle_destination_directory_creation(destination_path, force)
if split_size:
logging.info(f"Using a split size of {split_size} bytes ({split_size/1024**3:.3f}GB).")
nr_parts = create_file_listing_hash_split_archives(source_path, destination_path,
split_size, threads)
with open(destination_path / f"{source_path.name}.parts.txt", "w") as f:
f.write(f"{nr_parts}\n")
else:
create_file_listing_hash(source_path, destination_path,
source_path.name, archive_list=None,
max_workers=threads)
def create_file_listing_hash_split_archives(source_path, destination_path, split_size, threads):
split_archives = splitter.split_directory(source_path, split_size)
source_name = source_path.name
nr_parts = 0
for index, archive in enumerate(split_archives):
logging.info(f"Generate file listings for part {index + 1}")
source_part_name = f"{source_name}.part{index + 1}"
create_file_listing_hash(source_path, destination_path,
source_part_name, archive,
max_workers=threads)
nr_parts += 1
return nr_parts
def create_file_listing_hash(source_path_root, destination_path, source_name, archive_list=None, max_workers=1):
if archive_list:
paths_to_hash_list = archive_list
else:
paths_to_hash_list = [source_path_root]
hashes = sorted(hashes_for_path_list(paths_to_hash_list, source_path_root, max_workers), key=lambda p: p[0])
hash_file_path = destination_path.joinpath(source_name + ".md5")
logging.info(f"Writing file hash list to {hash_file_path}")
with open(hash_file_path, "a") as hash_file:
for line in hashes:
file_path = line[0]
hash_prefix = ''
if '\n' in file_path or '\\' in file_path:
file_path = file_path.replace('\\', '\\\\').replace('\n', '\\n') # escaping new lines in filenames...
hash_prefix = '\\' # see https://www.gnu.org/software/coreutils/manual/html_node/md5sum-invocation.html#md5sum-invocation
file_hash = line[1]
hash_file.write(f"{hash_prefix}{file_hash} {file_path}\n")
def hashes_for_path_list(path_list, source_path_root, max_workers=1):
files = [path for path in path_list if not path.is_dir()]
for path in path_list:
if path.is_dir():
files.extend(helpers.get_files_in_folder(path))
return helpers.hash_files_and_check_symlinks(source_path_root, files, max_workers=max_workers)
def _process_part(source_path, destination_path, work_dir, source_part_name):
archive_list = [source_path.parent / f for f in helpers.read_hash_file(destination_path / f"{source_part_name}.md5").keys()]
logging.info(f"Create tar archive for {source_part_name}")
create_tar_archive(source_path, destination_path, source_part_name, archive_list, work_dir)
logging.info(f"Generating hash for tar archive {source_part_name}")
create_and_write_archive_hash(destination_path, source_part_name)
logging.info(f"Generating tar archive listing for {source_part_name}")
create_archive_listing(destination_path, source_part_name)
def create_tar_archives_and_listings(source_path, destination_path, work_dir, parts=None, workers=1):
source_name = source_path.name
if parts:
part_hashes = [destination_path / f"{source_name}.part{part}.md5" for part in parts]
else:
single_part_md5 = destination_path / f'{source_name}.md5'
if single_part_md5.exists():
part_hashes = [single_part_md5]
else:
part_hashes = helpers.list_files_matching_name(destination_path, re.compile(rf'{source_name}\.part[0-9]+\.md5'))
if not part_hashes:
helpers.terminate_with_message(f"No {source_name}.md5 or files matching {source_name}.part[0-9]*.md5 found in {destination_path}")
part_names = [os.path.splitext(p.name)[0] for p in helpers.sort_paths_with_part(part_hashes)]
logging.info(f"Creating tar archives and listings for {','.join(part_names)} using {workers} workers.")
helpers.exec_parallel(_process_part, part_names, lambda p: (source_path, destination_path, work_dir, p), workers)
def create_tar_archive(source_path, destination_path, source_name, archive_list=None, work_dir=None):
destination_file_path = destination_path.joinpath(source_name + ".tar")
source_path_parent = source_path.absolute().parent
if archive_list:
create_tar_archive_from_list(source_path, archive_list, destination_file_path, source_path_parent, work_dir)
return
# -C flag on tar necessary to get relative path in tar archive
helpers.run_shell_cmd(["tar", "--posix", "-cf", destination_file_path, "-C", source_path_parent, source_path.stem])
def create_tar_archive_from_list(source_path, archive_list, destination_file_path, source_path_parent, work_dir=None):
relative_archive_list = [path.absolute().relative_to(source_path.absolute().parent) for path in archive_list]
files_string_list = [path.as_posix() for path in relative_archive_list]
# Using TemporaryDirectory instead of NamedTemporaryFile to have full control over file creation
with tempfile.TemporaryDirectory(dir=work_dir) as temp_path_string:
tmp_file_path = Path(temp_path_string) / "paths.txt"
with open(tmp_file_path, "w") as tmp_file:
tmp_file.write("\0".join(files_string_list))
helpers.run_shell_cmd(["tar", "--posix", "-cf", destination_file_path, "-C", source_path_parent, "--null", "--files-from", tmp_file_path])
def create_archive_listing(destination_path, source_name):
listing_path = destination_path.joinpath(source_name + ".tar.lst")
tar_path = destination_path.joinpath(source_name + ".tar")
helpers.run_shell_cmd(["tar", "-tvf", tar_path], file_output=listing_path)
def compress_and_hash(destination_path, threads, compression, part=None):
if part:
parts = list(destination_path.glob(f'*part{part}.tar'))
else:
parts = list(destination_path.glob('*.tar'))
if not parts:
helpers.terminate_with_message(f"No suitable tar files found to be compressed in {destination_path}")
part_names = [os.path.splitext(p.name)[0] for p in helpers.sort_paths_with_part(parts)]
# compress sequentially
for part in part_names:
logging.info(f"Compressing {part} using {threads} threads.")
compress_using_lzip(destination_path, part, threads, compression)
# compute md5sums of archive parts in parallel
logging.info(f"Generate hash of compressed tar {','.join(part_names)} using {threads} threads.")
helpers.exec_parallel(create_and_write_compressed_archive_hash, part_names, lambda part: (destination_path, part),
min(threads, len(parts)))
def compress_using_lzip(destination_path, source_name, threads, compression):
path = destination_path.joinpath(source_name + ".tar")
additional_arguments = []
if threads:
additional_arguments.extend(["--threads", str(threads)])
helpers.run_shell_cmd(["plzip", path, f"-{compression}"] + additional_arguments)
def create_and_write_archive_hash(destination_path, source_name):
path = destination_path.joinpath(source_name + ".tar").absolute()
helpers.create_and_write_file_hash(path)
def create_and_write_compressed_archive_hash(destination_path, source_name):
path = destination_path.joinpath(source_name + ".tar.lz").absolute()
helpers.create_and_write_file_hash(path)
def do_encryption(destination_path, encryption_keys, remove_unencrypted=False, part=None, threads=1):
if part:
parts = list(destination_path.glob(f'*part{part}{COMPRESSED_ARCHIVE_SUFFIX}'))
else:
parts = list(destination_path.glob(f'*{COMPRESSED_ARCHIVE_SUFFIX}'))
if not parts:
helpers.terminate_with_message(
f"No suitable {COMPRESSED_ARCHIVE_SUFFIX} files found to be encrypted in {destination_path}")
encrypt_list_of_archives(parts, encryption_keys, remove_unencrypted, threads=threads)
|
py | 7df6aa3ee6770876c3e3fd60a6b9ffc73a1188eb | import torch
import torch.nn as nn
from torch.nn import functional as F
# from https://github.com/YijinHuang/pytorch-DR/blob/reimplement/model.py
class RMSPool(nn.Module):
def __init__(self, kernel_size, stride):
super(RMSPool, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
def forward(self, x):
x = torch.pow(x, 2)
x = F.avg_pool2d(x, kernel_size=self.kernel_size, stride=self.stride)
x = torch.sqrt(x)
return x
class Conv2dUntiedBias(nn.Module):
def __init__(self, in_channels, out_channels, height, width, kernel_size, stride=1, padding=0):
super(Conv2dUntiedBias, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size)
self.stride = (stride, stride)
self.padding = (padding, padding)
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))
self.bias = nn.Parameter(torch.Tensor(out_channels, height, width))
def forward(self, x):
output = F.conv2d(x, self.weight, None, self.stride, self.padding)
output += self.bias.unsqueeze(0).repeat(x.size(0), 1, 1, 1)
return output
class RetinoNet(nn.Module):
def __init__(self, name, args, net_size='small', input_size=112, feature_dim=512):
super(RetinoNet, self).__init__()
self.name = name
self.args = args
# Each layer needs its input width and height because untied biases are used.
sizes = self.cal_sizes(net_size, input_size)
# named layers
self.conv = nn.Sequential()
if net_size in ['small', 'medium', 'large']:
# 1-11 layers
small_conv = nn.Sequential(
self.basic_conv2d(3, 32, sizes[0], sizes[0], kernel_size=5, stride=2, padding=2),
self.basic_conv2d(32, 32, sizes[0], sizes[0], kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
self.basic_conv2d(32, 64, sizes[1], sizes[1], kernel_size=5, stride=2, padding=2),
self.basic_conv2d(64, 64, sizes[1], sizes[1], kernel_size=3, stride=1, padding=1),
self.basic_conv2d(64, 64, sizes[1], sizes[1], kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
self.basic_conv2d(64, 128, sizes[2], sizes[2], kernel_size=3, stride=1, padding=1),
self.basic_conv2d(128, 128, sizes[2], sizes[2], kernel_size=3, stride=1, padding=1),
self.basic_conv2d(128, 128, sizes[2], sizes[2], kernel_size=3, stride=1, padding=1),
)
self.conv.add_module('small_conv', small_conv)
if net_size in ['medium', 'large']:
# 12-15 layers
medium_conv = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
self.basic_conv2d(128, 256, sizes[3], sizes[3], kernel_size=3, stride=1, padding=1),
self.basic_conv2d(256, 256, sizes[3], sizes[3], kernel_size=3, stride=1, padding=1),
self.basic_conv2d(256, 256, sizes[3], sizes[3], kernel_size=3, stride=1, padding=1),
)
self.conv.add_module('medium_conv', medium_conv)
if net_size in ['large']:
# 16-18 layers
large_conv = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
self.basic_conv2d(256, 512, sizes[4], sizes[4], kernel_size=3, stride=1, padding=1),
self.basic_conv2d(512, 512, sizes[4], sizes[4], kernel_size=3, stride=1, padding=1),
)
self.conv.add_module('large_conv', large_conv)
# RMSPooling layer
self.conv.add_module('rmspool', RMSPool(3, 3))
# regression part
self.fc = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(feature_dim, 1024),
nn.MaxPool1d(kernel_size=2, stride=2),
nn.LeakyReLU(negative_slope=0.01),
nn.Dropout(p=0.5),
nn.Linear(512, 1024),
nn.MaxPool1d(kernel_size=2, stride=2),
nn.LeakyReLU(negative_slope=0.01),
nn.Linear(512, 1)
)
# initialize parameters
for m in self.modules():
if isinstance(m, Conv2dUntiedBias) or isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight, 1)
nn.init.constant_(m.bias, 0.05)
def basic_conv2d(self, in_channels, out_channels, height, width, kernel_size, stride, padding):
return nn.Sequential(
Conv2dUntiedBias(in_channels, out_channels, height, width, kernel_size, stride, padding),
nn.LeakyReLU(negative_slope=0.01)
)
def forward(self, x):
features = self.conv(x)
# reshape to satisfy the MaxPool1d input shape requirement
features = features.view(features.size(0), 1, -1)
predict = self.fc(features)
predict = torch.squeeze(predict)
return predict
# Load part of a pretrained model (as in the o_O solution, which trains with
# multi-scale images) or the full weights; parameters whose shapes do not
# match, or whose names contain an entry from `exclude`, are skipped.
def load_weights(self, pretrained_model_path, exclude=[]):
pretrained_model = torch.load(pretrained_model_path)
pretrained_dict = pretrained_model.state_dict()
if isinstance(pretrained_model, nn.DataParallel):
pretrained_dict = {key[7:]: value for key, value in pretrained_dict.items()}
model_dict = self.state_dict()
# exclude
for name in list(pretrained_dict.keys()):
# Untied biases change parameter shapes, which makes them impossible to reload directly.
if name in model_dict.keys() and pretrained_dict[name].shape != model_dict[name].shape:
pretrained_dict.pop(name)
continue
for e in exclude:
if e in name:
pretrained_dict.pop(name)
break
# load weights
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
return pretrained_dict
def layer_configs(self):
model_dict = self.state_dict()
return [(tensor, model_dict[tensor].size()) for tensor in model_dict]
def cal_sizes(self, net_size, input_size):
sizes = []
if net_size in ['small', 'medium', 'large']:
sizes.append(self._reduce_size(input_size, 5, 2, 2))
after_maxpool = self._reduce_size(sizes[-1], 3, 0, 2)
sizes.append(self._reduce_size(after_maxpool, 5, 2, 2))
after_maxpool = self._reduce_size(sizes[-1], 3, 0, 2)
sizes.append(self._reduce_size(after_maxpool, 3, 1, 1))
if net_size in ['medium', 'large']:
after_maxpool = self._reduce_size(sizes[-1], 3, 0, 2)
sizes.append(self._reduce_size(after_maxpool, 3, 1, 1))
if net_size in ['large']:
after_maxpool = self._reduce_size(sizes[-1], 3, 0, 2)
sizes.append(self._reduce_size(after_maxpool, 3, 1, 1))
return sizes
def _reduce_size(self, input_size, kernel_size, padding, stride):
return (input_size + (2 * padding) - (kernel_size - 1) - 1) // stride + 1
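# Sanity check (a sketch): with input_size=112, the first conv layer
# (kernel 5, padding 2, stride 2) gives (112 + 4 - 4 - 1) // 2 + 1 = 56,
# so sizes[0] == 56 for every net_size.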
# from https://www.kaggle.com/meenavyas/diabetic-retinopathy-detection
class SimpleRetinoNet(nn.Module):
def __init__(self, name, args, input_channels=3, input_size=128):
super(SimpleRetinoNet, self).__init__()
self.name = name
self.args = args
self.conv1 = nn.Conv2d(in_channels = input_channels, out_channels=32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(in_channels = 32, out_channels=32, kernel_size=3)
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.dropout1 = nn.Dropout(p=0.25)
self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3)
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.dropout2 = nn.Dropout(p=0.25)
self.conv5 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3)
self.maxpool3 = nn.MaxPool2d(kernel_size=2)
self.dropout3 = nn.Dropout(p=0.25)
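# 12544 = 64 channels * 14 * 14: for a 128x128 input, the unpadded convs and
# the 2x2 max-pools reduce 128 -> 126 -> 63 -> 61 -> 30 -> 28 -> 14 (the
# padded convs keep the spatial size unchanged).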
self.fc1 = nn.Linear(12544, 512)
self.dropout4 = nn.Dropout(p=0.5)
self.fc2 = nn.Linear(512, self.args.num_classes)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.dropout1(self.maxpool1(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = self.dropout2(self.maxpool2(x))
x = F.relu(self.conv5(x))
x = F.relu(self.conv6(x))
x = self.dropout3(self.maxpool3(x))
x = x.view(-1, x.shape[-3]*x.shape[-2]*x.shape[-1])
x = F.relu(self.fc1(x))
x = self.dropout4(x)
x = self.fc2(x)
out = F.log_softmax(x, dim=1)
return out
|
py | 7df6ace22c75fd10df934313ea51b74ed42d1860 | test1 = [9,1,3,4,2,6,7,5,8]
test2 = [6,5,4,1,7,3,9,8,2]
test3 = [7,9,13,1,14,16,8,15,3,6,10,5,2,11,12,4]
test4 = [15,14,1,6,9,11,4,12,16,10,7,3,13,8,5,2]
test5 = [4,1,2,9,8,7,6,3,5]
test6 = [8,6,7,2,5,4,3,9,1] |
py | 7df6ad147ead2e39a04bc6b50444831a404adf42 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from os.path import abspath
import mock
from preggy import expect
from tests.base import DetectorTestCase
from thumbor.detectors.glasses_detector import Detector as GlassesDetector
class GlassesDetectorTestCase(DetectorTestCase):
def test_detector_uses_proper_cascade(self):
cascade = './tests/fixtures/haarcascade_eye_tree_eyeglasses.xml'
ctx = mock.Mock(
config=mock.Mock(
GLASSES_DETECTOR_CASCADE_FILE=abspath(cascade),
)
)
detector = GlassesDetector(ctx, 1, [])
expect(detector).not_to_be_null()
def test_should_detect_glasses(self):
with open(abspath('./tests/fixtures/images/glasses.jpg'), 'rb') as f:
self.engine.load(f.read(), None)
self.context.config.GLASSES_DETECTOR_CASCADE_FILE = abspath(
'./thumbor/detectors/glasses_detector/haarcascade_eye_tree_eyeglasses.xml',
)
if hasattr(GlassesDetector, 'cascade'):
del GlassesDetector.cascade
GlassesDetector(self.context, 0, []).detect(lambda: None)
detection_result = self.context.request.focal_points[0]
expect(detection_result.origin).to_equal('detection')
expect(detection_result.x).to_be_numeric()
expect(detection_result.y).to_be_numeric()
expect(detection_result.width).to_be_numeric()
expect(detection_result.height).to_be_numeric()
|
py | 7df6ae2492b0ed87885eed9a8a522e64daeb64ed | #!/usr/bin/env python
"""
This script contains code used by the following jupyter notebooks:
1. master-sso.ipynb
2.
3.
"""
# ===========
# ENVIRONMENT
# ===========
import os
import sys
import pandas as pd
import numpy as np
# ===========
# PREPARATION
# ===========
def missing_values_col(df):
"""
This function returns the total missing values and
the percent missing values by column.
"""
null_count = df.isnull().sum()
null_percentage = (null_count / df.shape[0]) * 100
empty_count = pd.Series(((df == ' ') | (df == '')).sum())
empty_percentage = (empty_count / df.shape[0]) * 100
nan_count = pd.Series(((df == 'nan') | (df == 'NaN')).sum())
nan_percentage = (nan_count / df.shape[0]) * 100
return pd.DataFrame({'num_missing': null_count, 'missing_percentage': null_percentage,
'num_empty': empty_count, 'empty_percentage': empty_percentage,
'nan_count': nan_count, 'nan_percentage': nan_percentage})
def missing_values_row(df):
"""
This function returns the total missing values and
the percent missing values by row.
"""
null_count = df.isnull().sum(axis=1)
null_percentage = (null_count / df.shape[1]) * 100
return pd.DataFrame({'num_missing': null_count, 'percentage': null_percentage})
def handle_missing_threshold(df, prop_required_column = .3, prop_required_row = .9):
"""
This function removes columns and rows whose
count of missing values exceeds threshold.
"""
threshold = int(round(prop_required_column*len(df.index),0))
df.dropna(axis=1, thresh=threshold, inplace=True)
threshold = int(round(prop_required_row*len(df.columns),0))
df.dropna(axis=0, thresh=threshold, inplace=True)
return df
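# Example (a sketch): keep columns that are at least 30% non-null and rows
# that are at least 90% non-null:
#   df = handle_missing_threshold(df, prop_required_column=.3, prop_required_row=.9)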
def count_values(df):
"""
This function counts the occurrences of each value, per column, in a dataframe.
"""
for col in df.columns:
n = df[col].unique().shape[0]
col_bins = min(n, 10)
print(f"{col}:")
if df[col].dtype in ['int64', 'float64'] and n > 10:
print(df[col].value_counts(bins=col_bins, sort=False))
else:
print(df[col].value_counts())
print("\n")
def remove_columns(df, columns):
return df.drop(columns=columns)
def fill_with_zeroes(df, *cols):
"""
This function takes column names as input and
returns the dataframe with the
null values in those columns replaced by 0.
"""
for col in cols:
df[col] = df[col].fillna(0)
return df
def fill_with_median(df, *cols):
"""
This function fills the NaN values with
respective median values.
"""
for col in cols:
df[col] = df[col].fillna(df[col].median())
return df
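# Example (a sketch, using columns that appear elsewhere in this script):
#   df = fill_with_median(df, 'pipe_diameter', 'pipe_length')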
def fill_with_none(df, *cols):
"""
This function fills the NaN values with
'None' string value.
"""
for col in cols:
df[col] = df[col].fillna('None')
return df
def fill_with_unknown(df, *cols):
"""
This function fills the NaN values with
'Unknown' string value.
"""
for col in cols:
df[col] = df[col].fillna('Unknown')
return df
def lowercase_columns(df):
"""
This function lowercases the dataframe's column names.
"""
df.columns = map(str.lower, df.columns)
return df
def lowercase_column_values(df, *columns):
"""
This function returns a lowercase version of the column values.
"""
for col in columns:
df[col] = df[col].str.lower()
return df
def titlecase_column_values(df, *columns):
"""
This function returns a titlecase version of the values.
"""
for col in columns:
df[col] = df[col].str.title()
return df
def rename_columns_all(df):
"""
takes in selected dataframe and renames columns to intuitive non-capitalized titles
"""
return df.rename(index=str, columns={'inspkey':'inspection_key',
'servno':'service_number',
'reportdate':'report_date',
'spill_st_name':'spill_street_name',
'total_gal':'total_gallons',
'galsret':'gallons_returned',
'gal':'gallons_1',
'spill_start':'spill_start_1',
'spill_stop':'spill_stop_1',
'hrs':'hours_1',
'unitid':'unit_id_1',
'unitid2':'unit_id_2',
'earz_zone':'edwards_zone',
'expr1029':'expr_1029',
'pipediam':'pipe_diameter',
'pipelen':'pipe_length',
'pipetype':'pipe_type',
'instyear':'installation_year',
'dwndpth':'downstream_depth',
'upsdpth':'upstream_depth',
'rainfall_less3':'rainfall_less_3',
'spill address': 'spill_address_full',
'sewerassetexp':'sewer_asset_exp',
'prevspill_24mos':'previous_spill_24mos',
'unittype':'unit_type',
'assettype':'asset_type',
'lastclnd':'last_cleaned',
'responsetime':'response_time',
'responsedttm':'response_datetime',
'public notice':'public_notice',
'timeint':'time_int',
'hrs_2':'hours_2',
'gal_2':'gallons_2',
'hrs_3':'hours_3',
'gal_3':'gallons_3'
})
def lowercase_and_rename(df):
"""
This function changes the column names' case to lowercase
and renames the column.
"""
return rename_columns_all(lowercase_columns(df))
def ready_df1(df):
"""
This function prepares the dataframe for EDA.
"""
df = remove_columns(df, columns=[ 'sso_id',
'inspection_key',
'service_number',
'comments',
'ferguson',
'expr_1029',
'downstream_depth',
'upstream_depth',
'sewer_asset_exp',
'previous_spill_24mos',
])
df['spill_street_address'] = df['spill_address'].map(str)+ ' ' + df['spill_street_name']
df = df.drop(columns=['spill_address', 'spill_street_name'])
df['multiple_spills'] = np.where(df['spill_start_2'].isnull(), False, True)
df = df.drop(columns=['spill_start_2',
'spill_stop_2',
'hours_2',
'gallons_2',
'spill_start_3',
'spill_stop_3',
'hours_3',
'gallons_3',
'gallons_1',
'spill_address_full'
])
df = df.rename(index=str, columns={ "spill_start_1": "spill_start",
"spill_stop_1": "spill_stop",
"hours_1": "hours"})
df = lowercase_column_values( df, 'unit_type',
'asset_type',
'cause',
'actions',
'watershed',
'discharge_to',
'discharge_route',
'pipe_type',
'root_cause',
)
df = titlecase_column_values(df, 'spill_street_address')
df[['council_district',
'edwards_zone',
'num_spills_24mos',
'time_int'
]] = df[['council_district',
'edwards_zone',
'num_spills_24mos',
'time_int'
]].fillna(0.0).astype(int)
df['installation_year'] = df['installation_year'].fillna(9999).astype(int)
df[['gallons_returned',
'hours',
'pipe_diameter',
'pipe_length',
'inches_no',
'rainfall_less_3',
'response_time',
]] = df[['gallons_returned',
'hours',
'pipe_diameter',
'pipe_length',
'inches_no',
'rainfall_less_3',
'response_time'
]].fillna(0.0)
df[['actions',
'unit_id_1',
'unit_id_2',
'discharge_to',
'discharge_route',
'pipe_type',
'spill_street_address',
'unit_type',
'asset_type',
'root_cause',
'steps_to_prevent',
]] = df[[ 'actions',
'unit_id_1',
'unit_id_2',
'discharge_to',
'discharge_route',
'pipe_type',
'spill_street_address',
'unit_type',
'asset_type',
'root_cause',
'steps_to_prevent',
]].fillna('na')
df['report_date'] = pd.to_datetime(df['report_date'])
df['response_datetime'] = pd.to_datetime(df['response_datetime'])
df['last_cleaned'] = pd.to_datetime(df['last_cleaned'])
return df
# ==================================================
# MAIN
# ==================================================
def clear():
os.system("cls" if os.name == "nt" else "clear")
def main():
"""Main entry point for the script."""
pass
if __name__ == '__main__':
sys.exit(main())
__authors__ = ["Joseph Burton", "Ednalyn C. De Dios", "Sandy Graham"]
__copyright__ = "Copyright 2019, Codeup Data Science"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainers__ = "Ednalyn C. De Dios"
__email__ = "[email protected]"
__status__ = "Prototype"
|
py | 7df6ae3f2a6f08c4a686b425df1278385b440c46 | from enum import Enum
from typing import Any, Dict, List
from mypy_extensions import TypedDict
from typing_extensions import Protocol
from openslides_backend.shared.interfaces import Filter
from openslides_backend.shared.patterns import Collection, FullQualifiedId
PartialModel = Dict[str, Any]
Found = TypedDict("Found", {"exists": bool, "position": int})
Count = TypedDict("Count", {"count": int, "position": int})
Aggregate = Dict[str, Any]
class DeletedModelsBehaviour(Enum):
NO_DELETED = 1
ONLY_DELETED = 2
ALL_MODELS = 3
class GetManyRequest:
"""Encapsulates a single GetManyRequests
"""
def __init__(
self, collection: Collection, ids: List[int], mapped_fields: List[str] = None,
):
self.collection = collection
self.ids = ids
self.mapped_fields = mapped_fields
def to_dict(self) -> Dict[str, Any]:
result: Dict[str, Any] = {}
result["collection"] = str(self.collection)
if self.ids is not None:
result["ids"] = self.ids
if self.mapped_fields is not None:
result["mapped_fields"] = self.mapped_fields
return result
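# Example (a sketch; assumes Collection("user") stringifies to "user"):
#   GetManyRequest(Collection("user"), [1, 2], ["name"]).to_dict()
#   -> {"collection": "user", "ids": [1, 2], "mapped_fields": ["name"]}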
class Datastore(Protocol):
"""Datastore defines the interface to the datastore
"""
def get(
self,
fqid: FullQualifiedId,
mapped_fields: List[str] = None,
position: int = None,
get_deleted_models: int = None,
) -> PartialModel:
...
def getMany(
self,
get_many_requests: List[GetManyRequest],
mapped_fields: List[str] = None,
position: int = None,
get_deleted_models: int = None,
) -> Dict[str, Dict[int, PartialModel]]:
...
def getManyByFQIDs(
self, ids: List[FullQualifiedId]
) -> Dict[str, Dict[int, PartialModel]]:
...
def getAll(
self,
collection: Collection,
mapped_fields: List[str] = None,
get_deleted_models: int = None,
) -> List[PartialModel]:
...
def filter(
self,
collection: Collection,
filter: Filter,
meeting_id: int = None,
mapped_fields: List[str] = None,
) -> List[PartialModel]:
...
def exists(self, collection: Collection, filter: Filter) -> Found:
...
def count(self, collection: Collection, filter: Filter) -> Count:
...
def min(
self, collection: Collection, filter: Filter, field: str, type: str = None
) -> Aggregate:
...
def max(
self, collection: Collection, filter: Filter, field: str, type: str = None
) -> Aggregate:
...
|
py | 7df6aeb45775d76646e0232dc04eabf873438371 | from pyb import SPI
# test we can correctly create by id
for bus in (-1, 0, 1, 2):
try:
SPI(bus)
print("SPI", bus)
except ValueError:
print("ValueError", bus)
spi = SPI(1)
print(spi)
spi = SPI(1, SPI.CONTROLLER)
spi = SPI(1, SPI.CONTROLLER, baudrate=500000)
spi = SPI(
1, SPI.CONTROLLER, 500000, polarity=1, phase=0, bits=8, firstbit=SPI.MSB, ti=False, crc=None
)
print(str(spi)[:32], str(spi)[53:]) # don't print baudrate/prescaler
spi.init(SPI.PERIPHERAL, phase=1)
print(spi)
try:
# need to flush input before we get an error (error is what we want to test)
for i in range(10):
spi.recv(1, timeout=100)
except OSError:
print("OSError")
spi.init(SPI.CONTROLLER)
spi.send(1, timeout=100)
print(spi.recv(1, timeout=100))
print(spi.send_recv(1, timeout=100))
spi.deinit()
|
py | 7df6af4d631f3456f66e6a69ad0b5ed94eb95b0c | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: disable=no-value-for-parameter, protected-access, bad-super-call
import unittest
from argus.config import base
try:
import unittest.mock as mock
except ImportError:
import mock
class FakeOptions(base.Options):
def __init__(self):
super(FakeOptions, self).__init__(mock.sentinel)
def register(self):
pass
def list(self):
pass
class TestOptions(unittest.TestCase):
def setUp(self):
self._options = FakeOptions()
def test_group_name(self):
result = self._options.group_name
self.assertEqual(result, self._options._group_name)
def test_register(self):
result = super(FakeOptions, self._options).register()
self.assertEqual(result, None)
def test_list(self):
result = super(FakeOptions, self._options).list()
self.assertEqual(result, None)
|
py | 7df6af87c6659d8ce3ad4048b412a048ec88e793 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 09:30:10 2017
@author: nsde
"""
#%% Packages
from sys import platform as _platform
import tensorflow as tf
from tensorflow.python.framework import function
from ddtn.helper.utility import load_basis, get_dir
from ddtn.helper.tf_funcs import tf_repeat_matrix, tf_expm3x3_analytic, tf_findcellidx
#%% Load dynamic module
def load_dynamic_modules():
dir_path = get_dir(__file__)
transformer_module = tf.load_op_library(dir_path + '/./CPAB_ops.so')
transformer_op = transformer_module.calc_trans
grad_op = transformer_module.calc_grad
return transformer_op, grad_op
if _platform == "linux" or _platform == "linux2" or _platform == "darwin":
transformer_op, grad_op = load_dynamic_modules()
#%%
def _calc_trans(points, theta):
""" Tensorflow wrapper function for calculating the CPAB transformations.
The function extracts information for the current tessellation basis, and
then calls the dynamic library functions compiled from the cpp code which
do the actual computations.
Arguments:
points: `Matrix` [2, nb_points]. Grid of 2D points to transform
theta: `Matrix` [n_theta, dim]. Batch of parametrization vectors. Each
row specifies a specific transformation
Output:
newpoints: 3D-`Tensor` [n_theta, 2, nb_points]. Tensor of transformed points.
The slice newpoints[i] corresponds to the input points transformed
using the parametrization vector theta[i].
"""
with tf.name_scope('calc_trans'):
# Make sure that both inputs are in float32 format
points = tf.cast(points, tf.float32) # format [2, nb_points]
theta = tf.cast(theta, tf.float32) # format [n_theta, dim]
n_theta = tf.shape(theta)[0]
# Load file with basis
file = load_basis()
# Tessellation information
nC = tf.cast(file['nC'], tf.int32)
ncx = tf.cast(file['ncx'], tf.int32)
ncy = tf.cast(file['ncy'], tf.int32)
inc_x = tf.cast(file['inc_x'], tf.float32)
inc_y = tf.cast(file['inc_y'], tf.float32)
# Steps sizes
# NOTE: If this number is changed, then the allocation of the cell index
# need to be changed in the CPAB_ops.cc file as well
nStepSolver = tf.cast(50, dtype = tf.int32)
dT = 1.0 / tf.cast(nStepSolver , tf.float32)
# Get cpab basis
B = tf.cast(file['B'], tf.float32)
# Repeat basis for batch multiplication
B = tf_repeat_matrix(B, n_theta)
# Calculate the row-flatted affine transformations Avees
Avees = tf.matmul(B, tf.expand_dims(theta, 2))
# Reshape into (number of cells, 2, 3) tensor
As = tf.reshape(Avees, shape = (n_theta * nC, 2, 3)) # format [n_theta * nC, 2, 3]
# Multiply by the step size and do matrix exponential on each matrix
Trels = tf_expm3x3_analytic(dT*As)
Trels = tf.reshape(Trels, shape=(n_theta, nC, 2, 3))
# Call the dynamic library
with tf.name_scope('calc_trans_op'):
newpoints = transformer_op(points, Trels, nStepSolver, ncx, ncy, inc_x, inc_y)
return newpoints
#%%
def _calc_grad(op, grad): #grad: n_theta x 2 x nP
""" Tensorflow wrapper function for calculating the gradient of the CPAB
transformations. The function extracts information for the current
tessellation basis, and then calls the dynamic library functions compiled
from the cpp code which do the actual computations.
Arguments:
op: tensorflow operation class. The class holds information about the
input and output of the original operation we are trying to
differentiate
grad: 4D-`Tensor` [dim, n_theta, 2, nb_points]. Incoming gradient that
is propagated onwards by this layer. It can be viewed as the gradient
vector in each point, for all thetas and for all parameters of each
theta.
Output:
gradient: list of 2 elements. Each element corresponds to the gradient
w.r.t the input to the original function _calc_trans(points, theta).
Since we are only interested in the gradient w.r.t. theta, the first
element is None. The second is a `Matrix` [dim, n_theta] i.e. the gradient
of each element in all theta vectors.
"""
with tf.name_scope('calc_grad'):
# Grab input
points = op.inputs[0] # 2 x nP
theta = op.inputs[1] # n_theta x d
n_theta = tf.shape(theta)[0]
# Load file with basis
file = load_basis()
# Tessellation information
nC = tf.cast(file['nC'], tf.int32)
ncx = tf.cast(file['ncx'], tf.int32)
ncy = tf.cast(file['ncy'], tf.int32)
inc_x = tf.cast(file['inc_x'], tf.float32)
inc_y = tf.cast(file['inc_y'], tf.float32)
# Steps sizes
nStepSolver = tf.cast(50, dtype = tf.int32)
# Get cpab basis
B = tf.cast(file['B'], tf.float32)
Bs = tf.reshape(tf.transpose(B), (-1, nC, 2, 3))
B = tf_repeat_matrix(B, n_theta)
# Calculate the row-flatted affine transformations Avees
Avees = tf.matmul(B, tf.expand_dims(theta, 2))
# Reshape into (ntheta, number of cells, 2, 3) tensor
As = tf.reshape(Avees, shape = (n_theta, nC, 2, 3)) # n_theta x nC x 2 x 3
# Call cuda code
with tf.name_scope('calcT_batch_grad_operator'):
gradient = grad_op(points, As, Bs, nStepSolver,
ncx, ncy, inc_x, inc_y) # gradient: d x n_theta x 2 x n
# Reduce into: d x 1 vector
gradient = tf.reduce_sum(grad * gradient, axis = [2,3])
gradient = tf.transpose(gradient)
return [None, gradient]
#%%
def _calc_grad_numeric(op, grad): #grad: n_theta x 2 x nP
""" Similar to the _calc_grad(...) function above. Only difference is that
this function does a finite difference of the gradient by calling the
_calc_trans(...) again and again for small perturbations of the input
theta vector, and then comparing the results to the base evaluation.
Arguments and output is the same _calc_grad(...).
"""
points = op.inputs[0] # 2 x n
theta = op.inputs[1] # n_theta x d
# Finite difference perturbation size
h = tf.cast(0.01, tf.float32)
# Base function evaluation
f0 = _calc_trans(points, theta) # n_theta x 2 x nP
gradient = [ ]
for i in range(theta.get_shape()[1].value):
# Add a small perturbation to the i-th element of theta
temp = tf.concat([theta[:,:i], tf.expand_dims(theta[:,i]+h,1), theta[:,(i+1):]], 1)
# Calculate new function value
f1 = _calc_trans(points, temp) # n_theta x 2 x nP
# Finite difference
diff = (f1 - f0) / h # n_theta x 2 x nP
if i != 0:
# Gradient
gradient = tf.concat([gradient, tf.expand_dims(tf.reduce_sum(grad * diff, axis=[1,2]), 1)], 1)
else:
gradient = tf.expand_dims(tf.reduce_sum(grad * diff, axis=[1,2]), 1)
return [None, gradient]
#%%
@function.Defun(tf.float32, tf.float32, func_name='tf_CPAB_transformer', python_grad_func=_calc_grad)
def tf_cuda_CPAB_transformer(points, theta):
transformed_points = _calc_trans(points, theta)
return transformed_points
#%%
@function.Defun(tf.float32, tf.float32, func_name = 'tf_CPAB_transformer_numeric_grad', python_grad_func = _calc_grad_numeric)
def tf_cuda_CPAB_transformer_numeric_grad(points, theta):
""" Similar to tf_CPAB_transformer(...) where the analytic gradient is have
been replaced with a numeric finite difference gradient
"""
transformed_points = _calc_trans(points, theta)
return transformed_points
#%%
def tf_pure_CPAB_transformer(points, theta):
""" CPAB transformer in pure tensorflow.
Transform the input points by repeatedly applying the matrix-exponentials
parametrized by theta. This function should automatically be able to calculate
the gradient of the output w.r.t. theta.
Arguments:
points: `Matrix` [2, n_points]. 2D input points to transform
theta: `Matrix` [n_theta, dim]. Parametrization to use.
Output:
trans_points: 3D-`Tensor` [n_theta, 2, n_points]. The transformed points
for each parametrization in theta.
"""
with tf.name_scope('CPAB_transformer'):
# Make sure that both inputs are in float32 format
points = tf.cast(points, tf.float32) # format [2, nb_points]
theta = tf.cast(theta, tf.float32) # format [n_theta, dim]
n_theta = tf.shape(theta)[0]
n_points = tf.shape(points)[1]
# Repeat point matrix, one for each theta
newpoints = tf_repeat_matrix(points, n_theta) # [n_theta, 2, nb_points]
# Reshape into a [nb_points*n_theta, 2] matrix
newpoints = tf.reshape(tf.transpose(newpoints, perm=[0,2,1]), (-1, 2))
# Add a row of ones, creating a [nb_points*n_theta, 3] matrix
newpoints = tf.concat([newpoints, tf.ones((n_theta*n_points, 1))], axis=1)
# Expand dims for matrix multiplication later -> [nb_points*n_theta, 3, 1] tensor
newpoints = tf.expand_dims(newpoints, 2)
# Load file with basis
file = load_basis()
# Tessellation information
nC = tf.cast(file['nC'], tf.int32)
ncx = tf.cast(file['ncx'], tf.int32)
ncy = tf.cast(file['ncy'], tf.int32)
inc_x = tf.cast(file['inc_x'], tf.float32)
inc_y = tf.cast(file['inc_y'], tf.float32)
# Steps sizes
nStepSolver = 50 # Change this for more precision
dT = 1.0 / tf.cast(nStepSolver, tf.float32)
# Get cpab basis
B = tf.cast(file['B'], tf.float32)
# Repeat basis for batch multiplication
B = tf_repeat_matrix(B, n_theta)
# Calculate the row-flatted affine transformations Avees
Avees = tf.matmul(B, tf.expand_dims(theta, 2))
# Reshape into (number of cells, 2, 3) tensor
As = tf.reshape(Avees, shape = (n_theta * nC, 2, 3)) # format [n_theta * nC, 2, 3]
# Multiply by the step size and do matrix exponential on each matrix
Trels = tf_expm3x3_analytic(dT*As)
Trels = tf.concat([Trels, tf.cast(tf.reshape(tf.tile([0,0,1],
[n_theta*nC]), (n_theta*nC, 1, 3)), tf.float32)], axis=1)
# Batch index to add to correct for the batch effect
batch_idx = (4*ncx*ncy) * tf.reshape(tf.transpose(tf.ones((n_points, n_theta),
dtype=tf.int32)*tf.cast(tf.range(n_theta), tf.int32)),(-1,))
# Body function for while loop (executes the computation)
def body(i, points):
# Find cell index of each point
idx = tf_findcellidx(points, ncx, ncy, inc_x, inc_y)
# Correct for batch
corrected_idx = tf.cast(idx, tf.int32) + batch_idx
# Gather the relevant matrices
Tidx = tf.gather(Trels, corrected_idx)
# Transform points
newpoints = tf.matmul(Tidx, points)
# Shape information is lost, but tf.while_loop requires shape
# invariance so we need to manually set it (easy in this case)
newpoints.set_shape((None, 3, 1))
return i+1, newpoints
# Condition function for while loop (indicates when to stop)
def cond(i, points):
# Return iteration bound
return tf.less(i, nStepSolver)
# Run loop
trans_points = tf.while_loop(cond, body, [tf.constant(0), newpoints],
parallel_iterations=10, back_prop=True)[1]
# Reshape to batch format
trans_points = tf.transpose(tf.reshape(tf.transpose(trans_points[:,:2,0]),
(2, n_theta, n_points)), perm=[1,0,2])
return trans_points
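# Usage sketch (assumes a basis has been set up, e.g. via
# setup_CPAB_transformer as in the __main__ block below; the dim of 8 is a
# placeholder and must match the loaded basis):
#   pts = tf.random_uniform((2, 100))            # [2, n_points]
#   theta = tf.zeros((1, 8))                     # [n_theta, dim]
#   out = tf_pure_CPAB_transformer(pts, theta)   # -> [1, 2, 100]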
#%%
if __name__ == '__main__':
from ddtn.transformers.setup_CPAB_transformer import setup_CPAB_transformer
import numpy as np
import matplotlib.pyplot as plt
# Create basis
s = setup_CPAB_transformer(2, 2,
valid_outside=True,
zero_boundary=False,
override=True)
# Sample parametrization and grid
theta = 0.3*s.sample_theta_without_prior(3)
points = s.sample_grid(20)
# Convert to tf tensors
theta_tf = tf.cast(theta, tf.float32)
points_tf = tf.cast(points, tf.float32)
# Create computations
newpoints_ana_tf = tf_cuda_CPAB_transformer(points_tf, theta_tf)
newpoints_num_tf = tf_cuda_CPAB_transformer_numeric_grad(points_tf, theta_tf)
newpoints_pur_tf = tf_pure_CPAB_transformer(points_tf, theta_tf)
grad_ana_tf = tf.gradients(newpoints_ana_tf, [theta_tf])[0]
grad_num_tf = tf.gradients(newpoints_num_tf, [theta_tf])[0]
grad_pur_tf = tf.gradients(newpoints_pur_tf, [theta_tf])[0]
sess = tf.Session()
p1, p2, p3, g1, g2, g3 = sess.run([newpoints_ana_tf,
newpoints_num_tf,
newpoints_pur_tf,
grad_ana_tf,
grad_num_tf,
grad_pur_tf])
# Print gradient res
print('Analytic gradient:')
print(g1[0].round(3))
print('Numeric gradient:')
print(g2[0].round(3))
print('Pure gradient:')
print(g3[0].round(3))
print('Difference ana-num:', (np.linalg.norm(g1 - g2) / np.linalg.norm(g1)).round(3))
print('Difference ana-pur:', (np.linalg.norm(g1 - g3) / np.linalg.norm(g1)).round(3))
# Show deformation and velocity field
fig = plt.figure()
plt.plot(points[0], points[1], 'b.', label='original grid')
plt.plot(p1[0,0], p1[0,1], 'r+', label='deformed grid, cuda')
plt.plot(p3[0,0], p3[0,1], 'g.', label='deformed grid, pure')
plt.legend(bbox_to_anchor=(1.1, 1.05), fontsize=15)
plt.axis('equal')
s.visualize_vectorfield_arrow(theta[0].flatten())
plt.show()
|
py | 7df6b00205694ea98e807ccf46b30cfdc3cccb92 | from dataclasses import dataclass
from typing import Any, List, Text, Type, TypeVar, Union
from typefit import Fitter
from typefit.nodes import Node
from typefit.reporting import PrettyJson5Formatter
T = TypeVar("T")
def to_fit_node(t: Type[T], v: Any) -> Node:
f = Fitter()
node = f._as_node(v)
try:
f.fit_node(t, node)
except ValueError:
pass
return node
def test_format_flat():
node = to_fit_node(Union[bool, int], "42")
assert (
PrettyJson5Formatter().format(node)
== """// Not a bool
// Not an int
// No matching type in Union
\"42\""""
)
def test_format_mapping_wrong_key():
@dataclass
class Foo:
x: int
y: int
node = to_fit_node(Foo, {"x": 42, "y": "42"})
out = PrettyJson5Formatter().format(node)
assert (
out
== """// Wrong keys set for 'tests.issue_000014.test_format.test_format_mapping_wrong_key.<locals>.Foo'. No fit for keys: 'y'
{
"x": 42,
// Not an int
"y": "42",
}"""
)
def test_format_mapping_not_mapping():
@dataclass
class Foo:
x: int
y: int
node = to_fit_node(Foo, 42)
out = PrettyJson5Formatter().format(node)
assert (
out
== """// 'tests.issue_000014.test_format.test_format_mapping_not_mapping.<locals>.Foo' can only fit an object
42"""
)
def test_format_list_partial():
node = to_fit_node(List[int], [1, 2, 3, 4, 5, "6", 7, 8, 9, "10", 11, 12, 13])
out = PrettyJson5Formatter().format(node)
assert (
out
== """// Not all list items fit
[
// Not an int
"6",
]"""
)
def test_list_of_foo():
@dataclass
class Foo:
x: int
y: Text
@dataclass
class Results:
count: int
results: List[Foo]
node = to_fit_node(
Results,
{
"count": 3,
"results": [
{"x": 42, "y": "foo"},
{"x": 1337, "y": 1337},
{"x": 421, "y": True},
],
},
)
out = PrettyJson5Formatter().format(node)
assert (
out
== """// Wrong keys set for 'tests.issue_000014.test_format.test_list_of_foo.<locals>.Results'. No fit for keys: 'results'
{
"count": 3,
// Not all list items fit
"results": [
// Wrong keys set for 'tests.issue_000014.test_format.test_list_of_foo.<locals>.Foo'. No fit for keys: 'y'
{
"x": 1337,
// Not a string
"y": 1337,
},
],
}"""
)
|
py | 7df6b03be9c10cfef6a76c2bb3aff59cf7b21fd8 | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import iomodel
|
py | 7df6b13c65cd953d081ea4e0537c9c1c8e0a478d | from enum import Enum
import googleapiclient.discovery
import logging
from google.cloud import datastore, runtimeconfig, storage
from google.cloud import logging as g_logging
runtimeconfig_client = runtimeconfig.Client()
myconfig = runtimeconfig_client.config('cybergym')
project = myconfig.get_variable('project').value.decode("utf-8")
region = myconfig.get_variable('region').value.decode("utf-8")
zone = myconfig.get_variable('zone').value.decode("utf-8")
dns_suffix = myconfig.get_variable('dns_suffix').value.decode("utf-8")
script_repository = myconfig.get_variable('script_repository').value.decode("utf-8")
api_key = myconfig.get_variable('api_key').value.decode("utf-8")
custom_dnszone = myconfig.get_variable('dnszone')
student_instructions_url = myconfig.get_variable('student_instructions_url')
student_instructions_url = student_instructions_url.value.decode("utf-8") if student_instructions_url else \
'https://storage.googleapis.com/student_workout_instructions_ualr-cybersecurity/'
teacher_instructions_url = myconfig.get_variable('teacher_instructions_url')
teacher_instructions_url = teacher_instructions_url.value.decode("utf-8") if teacher_instructions_url else \
'https://storage.googleapis.com/teacher_workout_instructions_ualr-cybersecurity/'
if custom_dnszone is not None:
dnszone = custom_dnszone.value.decode("utf-8")
else:
dnszone = 'cybergym-public'
main_app_url = myconfig.get_variable('main_app_url').value.decode("utf-8")
guac_db_password = myconfig.get_variable('guac_password')
guac_db_password = guac_db_password.value.decode("utf-8")
ds_client = datastore.Client()
compute = googleapiclient.discovery.build('compute', 'v1')
storage_client = storage.Client()
log_client = g_logging.Client()
workout_token = 'RG987S1GVNKYRYHYA'
auth_config = {
'api_key': api_key,
'auth_domain': str(project + ".firebaseapp.com"),
'project_id': project
}
# Use this for debugging. Uncomment the above endpoint for final environment.
post_endpoint = 'http://localhost:8080/complete'
logger = logging.getLogger()
class workout_globals():
MAX_RUN_HOURS = 10
yaml_bucket = project + '_cloudbuild'
yaml_folder = 'yaml-build-files/'
windows_startup_script_env = 'setx /m WORKOUTID {env_workoutid}\n' \
'setx /m URL ' + main_app_url + '\n' \
'setx /m DNS_SUFFIX ' + dns_suffix + '\n'
windows_startup_script_task = 'setx /m WORKOUTKEY{q_number} {env_workoutkey}\n' \
'call gsutil cp ' + script_repository + '{script} .\n' \
'schtasks /Create /SC MINUTE /TN {script_name} /RU System /TR {script_command}'
linux_startup_script_env = '#! /bin/bash\n' \
'cat >> /etc/environment << EOF\n' \
'WORKOUTID={env_workoutid}\n' \
'URL=' + main_app_url + '\n' \
'DNS_SUFFIX=' + dns_suffix + '\n'
linux_startup_script_task = 'cat >> /etc/environment << EOF\n' \
'WORKOUTKEY{q_number}={env_workoutkey}\n' \
'EOF\n' \
'gsutil cp ' + script_repository + '{script} {local_storage}\n' \
'(crontab -l 2>/dev/null; echo "* * * * * {script_command}") | crontab -'
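# Template usage (a sketch, with hypothetical values; the actual assembly
# happens in the build code elsewhere in this project):
#   workout_globals.linux_startup_script_task.format(
#       q_number=1, env_workoutkey='abc123', script='check.py',
#       local_storage='/tmp/', script_command='python /tmp/check.py')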
max_workout_len = 100
max_num_workouts = 200
ps_build_workout_topic = 'build-workouts'
ps_build_arena_topic = 'build_arena'
student_instruction_folder = "student_workout_instructions_" + project
teacher_instruction_folder = "teacher_workout_instructions_" + project
@staticmethod
def extended_wait(project, zone, operation_id):
max_wait = 3
i = 0
complete = False
while not complete and i <= max_wait:
try:
compute.zoneOperations().wait(project=project, zone=zone, operation=operation_id).execute()
complete = True
except Exception:
i += 1
if not complete:
logger.error("Timeout for operation %s" % operation_id)
return False
else:
return True
@staticmethod
def refresh_api():
compute = googleapiclient.discovery.build('compute', 'v1')
return compute
class BUILD_STATES:
START = 'START'
BUILDING_ASSESSMENT = 'BUILDING_ASSESSMENT'
BUILDING_NETWORKS = 'BUILDING_NETWORKS'
COMPLETED_NETWORKS = 'COMPLETED_NETWORKS'
BUILDING_SERVERS = 'BUILDING_SERVERS'
COMPLETED_SERVERS = 'COMPLETED_SERVERS'
BUILDING_ROUTES = 'BUILDING_ROUTES'
COMPLETED_ROUTES = 'COMPLETED_ROUTES'
BUILDING_FIREWALL = 'BUILDING_FIREWALL'
COMPLETED_FIREWALL = 'COMPLETED_FIREWALL'
BUILDING_STUDENT_ENTRY = 'BUILDING_STUDENT_ENTRY'
COMPLETED_STUDENT_ENTRY = 'COMPLETED_STUDENT_ENTRY'
RUNNING = 'RUNNING'
STOPPING = 'STOPPING'
STARTING = 'STARTING'
READY = 'READY'
EXPIRED = 'EXPIRED'
MISFIT = 'MISFIT'
BROKEN = 'BROKEN'
BUILDING_ARENA_STUDENT_NETWORKS = 'BUILDING_ARENA_STUDENT_NETWORKS'
COMPLETED_ARENA_STUDENT_NETWORKS = 'COMPLETED_ARENA_STUDENT_NETWORKS'
BUILDING_ARENA_STUDENT_SERVERS = 'BUILDING_ARENA_STUDENT_SERVERS'
COMPLETED_ARENA_STUDENT_SERVERS = 'COMPLETED_ARENA_STUDENT_SERVERS'
BUILDING_ARENA_NETWORKS = 'BUILDING_ARENA_NETWORKS'
COMPLETED_ARENA_NETWORKS = 'COMPLETED_ARENA_NETWORKS'
BUILDING_ARENA_SERVERS = 'BUILDING_ARENA_SERVERS'
COMPLETED_ARENA_SERVERS = 'COMPLETED_ARENA_SERVERS'
GUACAMOLE_SERVER_LOAD_TIMEOUT = 'GUACAMOLE_SERVER_LOAD_TIMEOUT'
DELETING_SERVERS = 'DELETING_SERVERS'
COMPLETED_DELETING_SERVERS = 'COMPLETED_DELETING_SERVERS'
DELETED = 'DELETED'
class LogIDs:
MAIN_APP = 'cyberarena-app'
USER_AUTHORIZATION = 'cyberarena-login'
class LOG_LEVELS:
"""
GCP Logging API Severity Levels
"""
DEBUG = 100
INFO = 200
NOTICE = 300
WARNING = 400
ERROR = 500
CRITICAL = 600
ALERT = 700
EMERGENCY = 800
def cloud_log(logging_id, message, severity):
"""
Global function to log messages to cloud project.
@param logging_id: The facility to log under
@param message: Logging message
@param severity: LOG_LEVELS
@type severity: Integer
@return: None
"""
g_logger = log_client.logger(logging_id)
g_logger.log_struct(
{
"message": message
}, severity=severity
)
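# Example (a sketch): cloud_log(LogIDs.MAIN_APP, "Workout built", LOG_LEVELS.INFO)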
class AdminInfoEntity:
KIND = "cybergym-admin-info"
class Entities:
ADMINS = "admins"
AUTHORIZED_USERS = "authorized_users"
PENDING_USERS = "pending_users"
CHILD_PROJECTS = "child_projects"
class AdminActions:
CREATE_DNS_FORWARDING_FOR_UNIT = {
"function_name": "create_dns_forwarding_for_unit",
"params": ["ip_address", "network"]
}
CREATE_NEW_SERVER_IN_UNIT = {
"function_name": "create_new_server_in_unit",
"params": ["build_id", "build_server_spec"]
}
CREATE_PRODUCTION_IMAGE = {
"function_name": "create_production_image",
"params": ["server_name"]
}
DELETE_FULL_UNIT = {
"function_name": "delete_full_unit",
"params": ["unit_id"]
}
EXTEND_EXPIRATION_DAYS_UNIT = {
"function_name":"extend_expiration_days_unit",
"params": ["unit_id", "expiration"]
}
FIX_SERVER_IN_UNIT = {
"function_name": "fix_server_in_unit",
"params": ["build_id", "server_name", "type", "parameters"]
}
NUKE_REBUILD_SERVER = {
"function_name": "nuke_rebuild_server",
"params": ["server_name"]
}
NUKE_REBUILD_UNIT = {
"function_name": "nuke_rebuild_unit",
"params": ["unit_id"]
}
QUERY_WORKOUTS = {
"function_name": "query_workouts",
"params": ["query_type"]
}
ADD_CHILD_PROJECT = {
"function_name": "add_child_project",
"params": ["child_project"]
}
class BuildTypes:
COMPUTE = "compute"
ARENA = "arena"
CONTAINER = "container"
Types = [COMPUTE, ARENA, CONTAINER]
class ComputeBuildTypes:
MACHINE_IMAGE = 'machine-image'
class BuildConstants:
class BuildType(str, Enum):
ARENA = "arena"
FIXED_ARENA = "fixed_arena"
WORKOUT = "workout"
class Protocols(str, Enum):
RDP = "rdp"
VNC = "vnc"
class Firewalls(str, Enum):
FORTINET = "fortinet"
VYOS = "vyos"
class TransportProtocols(str, Enum):
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
|
py | 7df6b15e25161b80872c160d570db8c34b5232c9 | import RPi.GPIO as GPIO
import random
import motorSelfTest, time, serial, pynmea2, math, os, sys
random.seed(time.time())
GPIO.setwarnings(False)
mode = GPIO.getmode()
LATITUDE_CONST = 0.0000063265 # Latitude distance for 1 meter
LONGITUDE_CONST = 0.000010488 # Longitude Distance for 1 meter
motor_1_Forward = 10
motor_2_Forward = 11
motor_3_Forward = 21
motor_4_Forward = 22
motor_1_Backward = 12
motor_2_Backward = 13
motor_3_Backward = 23
motor_4_Backward = 24
EnableM1 = 8
EnableM2 = 15
EnableM3 = 19
EnableM4 = 26
GPIO.setmode(GPIO.BOARD)
GPIO.setup(motor_1_Forward, GPIO.OUT)
GPIO.setup(motor_2_Forward, GPIO.OUT)
GPIO.setup(motor_3_Forward, GPIO.OUT)
GPIO.setup(motor_4_Forward, GPIO.OUT)
GPIO.setup(motor_1_Backward, GPIO.OUT)
GPIO.setup(motor_2_Backward, GPIO.OUT)
GPIO.setup(motor_3_Backward, GPIO.OUT)
GPIO.setup(motor_4_Backward, GPIO.OUT)
GPIO.setup(EnableM1, GPIO.OUT)
GPIO.setup(EnableM2, GPIO.OUT)
GPIO.setup(EnableM3, GPIO.OUT)
GPIO.setup(EnableM4, GPIO.OUT)
def TurnAngles():
turning = open("/home/pi/Desktop/angles.txt","r")
a = []
data = turning.readline()
while data != "":
a.append(float(data))
data = turning.readline()
print(a)
return a
def motorTesting():
motorSelfTest.Forward(100,75)
motorSelfTest.Stop()
motorSelfTest.Backward(100,75)
motorSelfTest.Stop()
motorSelfTest.RightTurn()
motorSelfTest.Stop()
motorSelfTest.LeftTurn()
motorSelfTest.Stop()
def GPSdata():
ser = serial.Serial("/dev/ttyUSB0", 4800, timeout=1)
if not(ser.isOpen()):
ser.open()
start_time = time.time()
while(time.time() - start_time < 1):
data = ser.readline()
x = data.split(',')
if x[0] == "$GPGGA":
location = pynmea2.parse(data)
ser.close()
return location.latitude, location.longitude
latit, longi = GPSdata()
ORIGIN_LATITUDE = latit # Global constants for the origin
ORIGIN_LONGITUDE = longi # Will not change and comparison using turret function
def TurretRotation(lat, lon):
x = lat - ORIGIN_LATITUDE
y = lon - ORIGIN_LONGITUDE
angle = math.degrees(math.atan2(y,x)) + 270
txtfile = open("/home/pi/Desktop/build/Platform/angles.txt","w")
txtfile.write(str(angle))
txtfile.close()
# write to text file for platform rotation
def distanceFormula(moveLatitude, moveLongitude, latitude, longitude):
    # Convert each degree offset to meters before squaring; multiplying
    # squared degrees by a degrees-per-meter constant is dimensionally wrong
    latDis = ((moveLatitude - latitude) / LATITUDE_CONST)**2
    longDis = ((longitude - moveLongitude) / LONGITUDE_CONST)**2
    distance = math.sqrt(latDis + longDis)
    return distance #returns distance in meters
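# Example: offsets of 0.0000063265 deg lat and 0.000010488 deg lon each equal
# 1 m with these constants, so distanceFormula returns sqrt(1 + 1) ~= 1.41 m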
def stripString(string):
    tempData = string.split(',')  # was strip(','), which left the string whole
    latitude = float(tempData[0])
    longitude = float(tempData[1])
    return latitude, longitude
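# Example: stripString("39.7392,-104.9903") returns (39.7392, -104.9903)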
# Sets initial offset for the IMU so the current heading reads as 0 (north)
def CalibrateIMU(readValue):
    #os.system("python /home/pi/Desktop/build/imu.py")
    #readValue = open("/home/pi/Desktop/build/imu.txt","r")
    #read_ser = readValue.readline()
    #while read_ser == None or read_ser == ' ':
    flushIMU(readValue)
    read_ser = readValue.readline()
    Offset = 0.0 - float(read_ser)
    print("Offset " + str(Offset))
    return Offset
def IMU(Offset,readValue):
# os.system("python /home/pi/Desktop/build/imu.py")
# readValue = open("/home/pi/Desktop/build/imu.txt","r")
read_ser = readValue.readline()
read_ser = float(read_ser) + float(Offset)
if read_ser < 0.0:
read_ser = read_ser + 360.0
elif read_ser > 360.0:
read_ser = read_ser - 360.0
return read_ser
def Turning(flag, Offset, a, readValue):
calcAngle = a
if calcAngle < 0.0:
calcAngle = calcAngle + 360.0
elif calcAngle > 360.0:
calcAngle = calcAngle - 360.0
print("Turning now:")
print("Target Angle" + str(calcAngle))
if flag == 1:
angle = IMU(Offset,readValue)
motorSelfTest.RightTurn()
while angle < calcAngle:
print("Current angle " + str(angle))
#Loops until angle is achieved
angle = IMU(Offset,readValue)
motorSelfTest.Stop()
elif flag == 2:
angle = IMU(Offset,readValue)
motorSelfTest.LeftTurn()
while angle > calcAngle:
print("Current angle " + str(angle))
#Loops until angle is achieved
angle = IMU(Offset,readValue)
motorSelfTest.Stop()
motorSelfTest.Stop()
#Parameter: Flag
def returnTurn(flag, Offset, readValue):
    angle = IMU(Offset, readValue)
    # Keep turning until the heading is back within 10 degrees of north; the
    # original used `or` (always true) and never re-read the IMU in the loop
    if flag == 1:
        motorSelfTest.LeftTurn()
        while (angle > 10) and (angle < 350):
            angle = IMU(Offset, readValue)
    elif flag == 2:
        motorSelfTest.RightTurn()
        while (angle > 10) and (angle < 350):
            angle = IMU(Offset, readValue)
    motorSelfTest.Stop()
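# examples.txt holds one value per line; 0-based lines 1, 2, 3 and 6 carry
# upLong, lowLong, leftLat and rightLat respectively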
def calculateBounds():
    fp = open("/home/pi/Desktop/GUI/examples.txt","r")
    for i, line in enumerate(fp):
        if i == 6:
            rightLat = float(line)
            print(rightLat)
        if i == 1:
            upLong = float(line)
            print(upLong)
        if i == 2:
            lowLong = float(line)
            print(lowLong)
        if i == 3:
            leftLat = float(line)
            print(leftLat)
    fp.close()
    return leftLat, rightLat, upLong, lowLong
def checkBounds(lat,lon, leftLat, rightLat, upLong, lowLong):
if lat < leftLat or lat > rightLat:
return 1
if lon > upLong or lon < lowLong:
return 1
return 0
def flushIMU(readValue):
    #Flushes Serial Ports
    i = 0
    while i < 3:
        test = readValue.readline()
        i = i + 1
        print(test)
    return
#===== Main =====
LEFT_LAT, RIGHT_LAT, UP_LONG, LOW_LONG = calculateBounds()
#GUI CODE
os.system('python /home/pi/Desktop/GUI/gui.py')
os.system('python /home/pi/Desktop/RandNum.py')
os.system('python /home/pi/Desktop/angles.py')
readValue = serial.Serial('/dev/ttyACM0', baudrate=9600)
#Flushes Serial Ports
i = 0
while i < 15:
    test = readValue.readline()
    i = i + 1
    print(test)
print("Flushed IMU")
motorTesting()
#inputs for Forward() and Backward() are (timeSleep,dutyCycle,freq)
f = open('/home/pi/Desktop/angles.txt','r')
g = open('/home/pi/Desktop/Coordinates.txt', 'r')
points = g.readline()
latitude, longitude = stripString(points)
flag = 0
anglesArray = []
i = 0
#===== Loop =====
while True:
#== Reads in locations ==
message = f.readline()
points = g.readline()
if message == '' or points == '':
break
currentLine = float(message)
desLat, desLong = stripString(points)
if currentLine > 0.0:
flag = 1
elif currentLine < 0.0:
flag = 2
print("flag: " + str(flag))
Offset = CalibrateIMU(readValue)
#==== Angles =====
anglesArray = TurnAngles()
#==== Turning ====
print(anglesArray[i])
Turning(flag, Offset, anglesArray[i], readValue)
#==== Forward Movement ====
lat, lon = GPSdata()
distance = distanceFormula(lat, lon, desLat, desLong)
#Motor Activates for 10 Seconds
motorSelfTest.Forward(100,100)
while (distance >= 0.05):
lat, lon = GPSdata()
boundary = checkBounds(lat, lon, LEFT_LAT, RIGHT_LAT, UP_LONG, LOW_LONG)
if boundary == 1:
print("Out of Bounds")
# sys.exit()
if lat > desLat and flag==1:
motorSelfTest.Stop()
Turning(flag, Offset, 90, readValue)
lat, lon = GPSdata()
distance = distanceFormula(lat,lon,desLat,desLong)
motorSelfTest.Forward(random.randint(25,100),100)
        elif lat < desLat and flag==2:
motorSelfTest.Stop()
Turning(flag, Offset, 270, readValue)
lat, lon = GPSdata()
distance = distanceFormula(lat,lon,desLat,desLong)
motorSelfTest.Forward(random.randint(25,100),100)
distance = distanceFormula(lat, lon, desLat, desLong)
        print(distance)
motorSelfTest.Stop()
if flag == 1:
motorSelfTest.LeftTurn()
elif flag == 2:
motorSelfTest.RightTurn()
#===== Turn to North =====
    returnTurn(flag, Offset, readValue)
#===== Turret Code =====
# Fix turret calculation
TurretRotation(lat, lon)
os.system('python /home/pi/Desktop/build/Platform/platformCCW.py')
# Arm Up works fine, no need to touch it
os.system('python /home/pi/Desktop/build/Platform/armUp.py')
start_time = time.time()
while time.time() - start_time <= 4:
os.system('python /home/pi/Desktop/build/Platform/vibration.py')
vibration = open("/home/pi/Desktop/build/Platform/vibration.txt","r")
data = vibration.readline()
data = int(data)
if data == 1:
print "Hit"
break
# Arm Down works fine, no need to touch it
os.system('python /home/pi/Desktop/build/Platform/armDown.py')
os.system('python /home/pi/Desktop/build/Platform/platformCW.py')
#===== Turning ======
flag = 0
Offset = 0
i = i + 1
f.close()
g.close()
|
py | 7df6b1c9544ed423dc2df92f82fb7c4799145724 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras SavedModel serialization.
TODO (kathywu): Move to layer_serialization.py. Some model-specific logic should
go to model_serialization.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import functools
import weakref
from tensorflow.python.eager import def_function
from keras import backend as K
from keras.engine import base_layer_utils
from keras.engine import input_spec
from keras.mixed_precision import autocast_variable
from keras.saving import saving_utils
from keras.saving.saved_model import constants
from keras.saving.saved_model import load as keras_load
from keras.saving.saved_model import serialized_attributes
from keras.saving.saved_model import utils
from keras.utils import tf_inspect
from keras.utils import tf_utils
from keras.utils import version_utils
from keras.utils.generic_utils import LazyLoader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# pylint:disable=g-inconsistent-quotes
base_layer = LazyLoader(
"base_layer", globals(),
"keras.engine.base_layer")
metrics = LazyLoader("metrics", globals(),
"keras.metrics")
input_layer = LazyLoader(
"input_layer", globals(),
"keras.engine.input_layer")
training_lib = LazyLoader(
"training_lib", globals(),
"keras.engine.training")
sequential_lib = LazyLoader(
"sequential_lib", globals(),
"keras.engine.sequential")
# pylint:enable=g-inconsistent-quotes
def should_skip_serialization(layer):
"""Skip serializing extra objects and functions if layer inputs aren't set."""
saved_model_input_spec_set = (isinstance(layer, training_lib.Model) and
layer._saved_model_inputs_spec is not None) # pylint: disable=protected-access
if not layer.built and not saved_model_input_spec_set:
logging.warning('Skipping full serialization of Keras layer {}, because '
'it is not built.'.format(layer))
return True
return False
def wrap_layer_objects(layer, serialization_cache):
"""Returns extra trackable objects to attach to the serialized layer.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all checkpointable objects from a
SerializedAttributes object. See LayerAttributes and ModelAttributes for
entire list of objects
"""
# Wrap all regularization losses as tf.functions.
# First, generate list of all regularization losses in this layer and
# sublayers.
all_losses = layer._callable_losses[:] # pylint: disable=protected-access
for child_layer in utils.list_all_layers(layer):
all_losses.extend(child_layer._callable_losses) # pylint: disable=protected-access
# Next, wrap all loss functions as tf.functions. Use the serialization cache
# to store already-wrapped functions.
keras_loss_cache = serialization_cache.setdefault('keras_losses', {})
wrapped_loss_functions = []
for loss_fn in all_losses:
if loss_fn in keras_loss_cache:
wrapped_loss_functions.append(keras_loss_cache[loss_fn])
else:
wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))
keras_loss_cache[loss_fn] = wrapped_loss
wrapped_loss_functions.append(wrapped_loss)
wrapped_layer_losses = [keras_loss_cache[fn]
for fn in layer._callable_losses[:]] # pylint: disable=protected-access
layer_metrics = data_structures.wrap_or_unwrap(
{m.name: m for m in layer._metrics}) # pylint: disable=protected-access
return dict(
variables=data_structures.wrap_or_unwrap(layer.variables),
trainable_variables=data_structures.wrap_or_unwrap(
layer.trainable_variables),
non_trainable_variables=data_structures.wrap_or_unwrap(
layer.non_trainable_variables),
layers=data_structures.wrap_or_unwrap(utils.list_all_layers(layer)),
metrics=data_structures.wrap_or_unwrap(layer.metrics),
regularization_losses=data_structures.wrap_or_unwrap(
wrapped_loss_functions),
layer_regularization_losses=data_structures.wrap_or_unwrap(
wrapped_layer_losses),
layer_metrics=layer_metrics)
# pylint: disable=protected-access
def wrap_layer_functions(layer, serialization_cache):
"""Returns dict of wrapped layer call function and losses in tf.functions.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all keras tf.functions to serialize. See
LayerAttributes and ModelAttributes for the list of all attributes.
"""
# Since Sequential models may be modified in place using model.add() or
# model.pop(), don't use saved functions.
if (isinstance(layer, keras_load.RevivedLayer) and
not isinstance(layer, sequential_lib.Sequential)):
return {fn_name: getattr(layer.keras_api, fn_name, None)
for fn_name in serialized_attributes.LayerAttributes.all_functions}
# Reset the losses of the layer and its children. The call function in each
# child layer is replaced with tf.functions.
original_fns = _replace_child_layer_functions(layer, serialization_cache)
original_losses = _reset_layer_losses(layer)
# Wrap all the layer call and activity regularizer functions.
# Use LayerCallCollection to ensure that all layer call functions (__call__,
# call with losses) are traced with the same inputs.
call_collection = LayerCallCollection(layer)
call_fn_with_losses = call_collection.add_function(
_wrap_call_and_conditional_losses(layer),
'{}_layer_call_and_return_conditional_losses'.format(layer.name))
call_fn = call_collection.add_function(
_extract_outputs_from_fn(layer, call_fn_with_losses),
'{}_layer_call_fn'.format(layer.name))
fns = {'call_and_return_conditional_losses': call_fn_with_losses,
'__call__': call_fn}
if layer._activity_regularizer is not None: # pylint: disable=protected-access
fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)
fns['call_and_return_all_conditional_losses'] = (
call_collection.add_function(
_append_activity_regularizer_loss(layer,
call_fn_with_losses,
fns['activity_regularizer_fn']),
'{}_layer_call_and_return_all_conditional_losses'.format(layer.name)
))
else:
fns['activity_regularizer_fn'] = None
fns['call_and_return_all_conditional_losses'] = call_fn_with_losses
# Manually trigger traces before restoring the overwritten functions. The
# functions are traced within the layer call context to ensure that layer
# functions (e.g. add_loss) behave as though running in graph mode.
with base_layer_utils.call_context().enter(
layer, inputs=None, build_graph=True, training=None, saving=True):
for fn in fns.values():
if fn is not None and fn.input_signature is not None:
fn.get_concrete_function()
# Restore overwritten functions and losses
_restore_child_layer_functions(original_fns)
_restore_layer_losses(original_losses)
return fns
def default_save_signature(layer):
original_losses = _reset_layer_losses(layer)
fn = saving_utils.trace_model_call(layer)
fn.get_concrete_function()
_restore_layer_losses(original_losses)
return fn
def _replace_child_layer_functions(layer, serialization_cache):
"""Replaces functions in the children layers with wrapped tf.functions.
This step allows functions from parent layers to reference the wrapped
functions from their children layers instead of retracing the ops.
This function also resets all losses stored in the layer. These are stored in
the returned dictionary. Use `_restore_child_layer_functions` to restore
the original attributes.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
Dictionary mapping layer objects -> original functions and losses:
{ Child layer 1: {
'losses': Original losses,
'call': Original call function
'_activity_regularizer': Original activity regularizer},
Child layer 2: ...
}
"""
# pylint: disable=protected-access
original_fns = {}
def replace_layer_functions(child_layer, serialized_fns):
"""Replaces layer call and activity regularizer with wrapped functions."""
original_fns[child_layer] = {
'call': child_layer.call,
'_activity_regularizer': child_layer._activity_regularizer
}
with trackable.no_automatic_dependency_tracking_scope(child_layer):
try:
child_layer._activity_regularizer = serialized_fns.get(
'activity_regularizer_fn')
except AttributeError:
# Some layers have an unsettable activity regularizer.
pass
child_layer.call = utils.use_wrapped_call(
child_layer,
serialized_fns['call_and_return_conditional_losses'],
default_training_value=False)
def replace_metric_functions(child_layer, serialized_fns):
"""Replaces metric functions with wrapped functions."""
original_fns[child_layer] = {
'__call__': child_layer.__call__,
'result': child_layer.result,
'update_state': child_layer.update_state
}
with trackable.no_automatic_dependency_tracking_scope(child_layer):
child_layer.__call__ = serialized_fns['__call__']
child_layer.result = serialized_fns['result']
child_layer.update_state = serialized_fns['update_state']
for child_layer in utils.list_all_layers(layer):
if isinstance(child_layer, input_layer.InputLayer):
continue
if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:
serialized_functions = (
child_layer._trackable_saved_model_saver._get_serialized_attributes(
serialization_cache).functions)
else:
serialized_functions = (
serialization_cache[constants.KERAS_CACHE_KEY][child_layer].functions)
if not serialized_functions:
# This indicates either:
# - circular dependency, which means the current layer's functions
# should be wrapped first.
# - Child layer's inputs are not defined, so its functions have not been
# wrapped. In this case, no replacement is necessary so move on to the
# next child.
continue
if isinstance(child_layer, metrics.Metric):
replace_metric_functions(child_layer, serialized_functions)
else:
replace_layer_functions(child_layer, serialized_functions)
return original_fns
# pylint: enable=protected-access
def _restore_child_layer_functions(original_fns):
"""Restores attributes replaced with `_replace_child_layer_functions`."""
for child_layer, fns in original_fns.items():
with trackable.no_automatic_dependency_tracking_scope(child_layer):
for fn_name, fn in fns.items():
try:
setattr(child_layer, fn_name, fn) # pylint: disable=protected-access
except AttributeError:
pass # In the case of _activity_regularizer, setting the attribute
# may be disallowed.
# pylint: disable=protected-access
def _reset_layer_losses(parent_layer):
"""Resets losses of layer and its sublayers, and returns original losses."""
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {'losses': layer._losses[:],
'eager_losses': layer._eager_losses[:]}
with trackable.no_automatic_dependency_tracking_scope(layer):
layer._losses = []
layer._eager_losses = []
return losses_dict
def _restore_layer_losses(losses_dict):
for layer in losses_dict:
with trackable.no_automatic_dependency_tracking_scope(layer):
layer._losses = losses_dict[layer]['losses']
layer._eager_losses = losses_dict[layer]['eager_losses']
# pylint: enable=protected-access
class LayerCallCollection(object):
"""Groups wrapped layer call functions.
This is used to ensure that all layer call functions are traced with the same
  inputs:
- call
- call_and_return_conditional_losses
- call_and_return_all_conditional_losses
"""
def __init__(self, layer):
self.layer = layer
self.layer_call_method = _get_layer_call_method(layer)
self._expects_training_arg = utils.layer_uses_training_bool(layer)
self._training_arg_index = utils.get_training_arg_index(
self.layer_call_method)
# If the layer call function has kwargs, then the traced function cannot
# have an input signature.
arg_spec = tf_inspect.getfullargspec(self.layer_call_method)
self._has_kwargs = bool(self._expects_training_arg or
arg_spec.defaults or
arg_spec.kwonlyargs or
arg_spec.varkw)
self._input_signature = self._generate_input_signature(layer)
self._functions = weakref.WeakValueDictionary()
# Bool indicating whether this object is currently tracing the layer call
# functions.
self.tracing = False
# Get the input argument name from the args.
args = arg_spec.args
if tf_inspect.ismethod(self.layer_call_method):
args = args[1:]
self._input_arg_name = args[0] if args else 'inputs'
def _generate_input_signature(self, layer):
"""Inspects layer object and returns the inferred input signature.
Args:
layer: Layer object.
Returns:
List of possibly nested TensorSpecs of the layer call function inputs.
The list does not contain the `training` argument.
"""
if (isinstance(layer.call, def_function.Function) and
layer.call.input_signature is not None):
return layer.call.input_signature
elif isinstance(layer, training_lib.Model):
return saving_utils.model_input_signature(layer)
elif (layer.input_spec is not None and
layer._use_input_spec_as_call_signature): # pylint: disable=protected-access
def to_tensor_spec_or_none(x):
spec = input_spec.to_tensor_spec(x, layer._compute_dtype) # pylint: disable=protected-access
# If the shape is too general (e.g. multiple dimensions are allowed),
# return None so that separate functions can be generated for each
# inferred input signature.
# TODO(b/134962016): currently partial signatures are not supported.
if spec.shape == tf.TensorShape(None):
return None
return spec
input_signature = [tf.nest.map_structure(
to_tensor_spec_or_none, layer.input_spec)]
return input_signature
else:
return None
def add_trace(self, *args, **kwargs):
"""Traces all functions with the same args and kwargs.
Args:
*args: Positional args passed to the original function.
**kwargs: Keyword args passed to the original function.
"""
args = list(args)
kwargs = kwargs.copy()
self.tracing = True
for fn in self._functions.values():
# TODO(kathywu): Replace arguments with broader shapes defined in the
# input signature.
if self._expects_training_arg:
def trace_with_training(value, fn=fn):
utils.set_training_arg(value, self._training_arg_index, args, kwargs)
with K.deprecated_internal_learning_phase_scope(value):
fn.get_concrete_function(*args, **kwargs)
trace_with_training(True)
trace_with_training(False)
else:
fn.get_concrete_function(*args, **kwargs)
self.tracing = False
@property
def fn_input_signature(self):
"""Returns input signature for the wrapped layer call function."""
if self._has_kwargs:
# Input signatures may only describe tensor arguments and kwargs are not
# supported.
return None
if None in tf.nest.flatten(self._input_signature):
      # TODO(b/134962016): Input signatures cannot yet be partially defined.
return None
return self._input_signature
def training_arg_was_passed(self, args, kwargs):
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
return (utils.get_training_arg(self._training_arg_index, args, kwargs)
is not None)
else:
return self.layer._call_arg_was_passed( # pylint: disable=protected-access
'training', args, kwargs, inputs_in_args=True)
def get_training_arg_value(self, args, kwargs):
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
return utils.get_training_arg(self._training_arg_index, args, kwargs)
else:
return self.layer._get_call_arg_value( # pylint: disable=protected-access
'training', args, kwargs, inputs_in_args=True)
def get_input_arg_value(self, args, kwargs):
return self.layer._get_call_arg_value( # pylint: disable=protected-access
self._input_arg_name, args, kwargs, inputs_in_args=True)
def _maybe_wrap_with_training_arg(self, call_fn):
"""Wraps call function with added training argument if necessary."""
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
# Add training arg to wrapper function.
arg_spec = tf_inspect.getfullargspec(call_fn)
args = arg_spec.args + ['training']
defaults = list(arg_spec.defaults or [])
defaults.append(False)
new_arg_spec = tf_inspect.FullArgSpec(
args=args,
varargs=arg_spec.varargs,
varkw=arg_spec.varkw,
defaults=defaults,
kwonlyargs=arg_spec.kwonlyargs,
kwonlydefaults=arg_spec.kwonlydefaults,
annotations=arg_spec.annotations)
# Set new training arg index
self._training_arg_index = len(args) - 1
if tf_inspect.ismethod(call_fn):
self._training_arg_index -= 1
def wrap_with_training_arg(*args, **kwargs):
# Remove the training value, since the original call_fn does not expect
# a training arg. Instead, the training value will be propagated using
# the call context created in LayerCall.
args = list(args)
kwargs = kwargs.copy()
utils.remove_training_arg(self._training_arg_index, args, kwargs)
return call_fn(*args, **kwargs)
return tf.__internal__.decorator.make_decorator(
target=call_fn,
decorator_func=wrap_with_training_arg,
decorator_argspec=new_arg_spec)
return call_fn
def add_function(self, call_fn, name):
"""Adds a layer call function to the collection."""
self._functions[name] = fn = LayerCall(
self, self._maybe_wrap_with_training_arg(call_fn), name,
input_signature=self.fn_input_signature)
if (None not in tf.nest.flatten(self._input_signature) and
self._has_kwargs):
# Manually add traces for layers that have keyword arguments and have
# a fully defined input signature.
self.add_trace(*self._input_signature)
return fn
def _filtered_inputs(inputs):
return list(filter(tf_utils.is_tensor_or_variable, tf.nest.flatten(inputs)))
def layer_call_wrapper(call_collection, method):
"""Ensures layer losses are kept the same, and runs method in call context."""
def wrapper(*args, **kwargs):
"""Calls method within call context."""
layer = call_collection.layer
training = None
inputs = _filtered_inputs([args, kwargs])
# pylint: disable=protected-access
if (args or kwargs) and call_collection.training_arg_was_passed(
args, kwargs):
training = call_collection.get_training_arg_value(args, kwargs)
# pylint: enable=protected-access
original_losses = _reset_layer_losses(layer)
with base_layer_utils.call_context().enter(
layer, inputs=inputs, build_graph=False, training=training,
saving=True):
with autocast_variable.enable_auto_cast_variables(
layer._compute_dtype_object): # pylint: disable=protected-access
ret = method(*args, **kwargs)
_restore_layer_losses(original_losses)
return ret
return tf.__internal__.decorator.make_decorator(target=method, decorator_func=wrapper)
class LayerCall(def_function.Function):
"""Function that triggers traces of other functions in the same collection."""
def __init__(self, call_collection, python_function, *args, **kwargs):
self.call_collection = call_collection
self.original_call = call_collection.layer_call_method
python_function = layer_call_wrapper(call_collection, python_function)
super(LayerCall, self).__init__(python_function, *args, **kwargs)
def __call__(self, *args, **kwargs):
if not self.call_collection.tracing:
self.call_collection.add_trace(*args, **kwargs)
return super(LayerCall, self).__call__(*args, **kwargs)
def get_concrete_function(self, *args, **kwargs):
if not self.call_collection.tracing:
self.call_collection.add_trace(*args, **kwargs)
return super(LayerCall, self).get_concrete_function(*args, **kwargs)
def _wrap_call_and_conditional_losses(layer):
"""Wraps call function that returns a tuple of (outputs, losses).
The losses returned are conditional on the inputs passed to the call function.
  Unconditional losses (e.g. weight regularization) are wrapped separately.
Args:
layer: a Keras layer object
Returns:
python call function that returns outputs and conditional losses -- excludes
activity regularizer
"""
# Create function that generates both outputs and losses
layer_call = _get_layer_call_method(layer)
def call_and_return_conditional_losses(*args, **kwargs):
"""Returns layer (call_output, conditional losses) tuple."""
call_output = layer_call(*args, **kwargs)
if version_utils.is_v1_layer_or_model(layer):
conditional_losses = layer.get_losses_for(
_filtered_inputs([args, kwargs]))
else:
conditional_losses = [
l for l in layer.losses if not hasattr(l, '_unconditional_loss')
]
return call_output, conditional_losses
return _create_call_fn_decorator(layer, call_and_return_conditional_losses)
def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):
"""Returns a function that returns only call function outputs."""
if isinstance(layer, keras_load.RevivedLayer):
return layer.keras_api.__call__ # pylint: disable=protected-access
def call(inputs, *args, **kwargs):
return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]
return _create_call_fn_decorator(layer, call)
def _append_activity_regularizer_loss(
layer, call_fn_with_losses, activity_regularizer_fn):
"""Appends activity regularizer loss to losses returned by the wrapped fn."""
def fn(inputs, *args, **kwargs):
outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)
losses.append(activity_regularizer_fn(outputs))
return outputs, losses
return _create_call_fn_decorator(layer, fn)
def _create_call_fn_decorator(layer, wrapped_call):
call_fn = _get_layer_call_method(layer)
fn, arg_spec = utils.maybe_add_training_arg(
call_fn, wrapped_call, layer._expects_training_arg, # pylint: disable=protected-access
default_training_value=False)
return tf.__internal__.decorator.make_decorator(
target=call_fn,
decorator_func=fn,
decorator_argspec=arg_spec)
def _wrap_unconditional_loss(loss_fn, index):
"""Wraps callable/unconditional loss, returning a serializable function."""
# Extract original loss function from partial function
fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn
if isinstance(fn, def_function.Function):
return fn
else:
return def_function.Function(
fn, 'loss_fn_{}'.format(index), input_signature=[])
def _wrap_activity_regularizer(layer):
"""Wraps the activity regularizer."""
# pylint: disable=protected-access
if isinstance(layer._activity_regularizer, def_function.Function):
return layer._activity_regularizer
return def_function.Function(
layer._activity_regularizer,
'{}_activity_regularizer'.format(layer.name),
input_signature=[
tf.TensorSpec(None, layer._compute_dtype or K.floatx())
])
# pylint: enable=protected-access
def _get_layer_call_method(layer):
if isinstance(layer.call, (def_function.Function)):
return layer.call.python_function
return layer.call
|
py | 7df6b4defc63ab53bcbd37c106ebdab8617bb120 | #!/usr/bin/env python
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
>> Thread Host Controller Interface
>> Device : OpenThread_BR THCI
>> Class : OpenThread_BR
"""
import logging
import re
import sys
import time
import ipaddress
import serial
from IThci import IThci
from THCI.OpenThread import OpenThreadTHCI, watched, API
RPI_FULL_PROMPT = 'pi@raspberrypi:~$ '
RPI_USERNAME_PROMPT = 'raspberrypi login: '
RPI_PASSWORD_PROMPT = 'Password: '
"""regex: used to split lines"""
LINESEPX = re.compile(r'\r\n|\n')
LOGX = re.compile(r'.*Under-voltage detected!')
"""regex: used to filter logging"""
assert LOGX.match('[57522.618196] Under-voltage detected! (0x00050005)')
OTBR_AGENT_SYSLOG_PATTERN = re.compile(r'raspberrypi otbr-agent\[\d+\]: (.*)')
assert OTBR_AGENT_SYSLOG_PATTERN.search(
'Jun 23 05:21:22 raspberrypi otbr-agent[323]: =========[[THCI] direction=send | type=JOIN_FIN.req | len=039]==========]'
).group(1) == '=========[[THCI] direction=send | type=JOIN_FIN.req | len=039]==========]'
logging.getLogger('paramiko').setLevel(logging.WARNING)
class SSHHandle(object):
def __init__(self, ip, port, username, password):
self.ip = ip
self.port = int(port)
self.username = username
self.password = password
self.__handle = None
self.__connect()
def __connect(self):
import paramiko
self.close()
self.__handle = paramiko.SSHClient()
self.__handle.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.__handle.connect(self.ip, port=self.port, username=self.username, password=self.password)
def close(self):
if self.__handle is not None:
self.__handle.close()
self.__handle = None
def bash(self, cmd, timeout):
from paramiko import SSHException
retry = 3
for i in range(retry):
try:
stdin, stdout, stderr = self.__handle.exec_command(cmd, timeout=timeout)
sys.stderr.write(stderr.read())
output = [r.encode('utf8').rstrip('\r\n') for r in stdout.readlines()]
return output
except Exception:
if i < retry - 1:
print('SSH connection is lost, try reconnect after 1 second.')
time.sleep(1)
self.__connect()
else:
raise
def log(self, fmt, *args):
try:
msg = fmt % args
print('%s - %s - %s' % (self.port, time.strftime('%b %d %H:%M:%S'), msg))
except Exception:
pass
class SerialHandle:
def __init__(self, port, baudrate):
self.port = port
self.__handle = serial.Serial(port, baudrate, timeout=0)
self.__lines = ['']
assert len(self.__lines) >= 1, self.__lines
self.log("inputing username ...")
self.__bashWriteLine('pi')
deadline = time.time() + 20
loginOk = False
while time.time() < deadline:
time.sleep(1)
lastLine = None
while True:
line = self.__bashReadLine(timeout=1)
if not line:
break
lastLine = line
if lastLine == RPI_FULL_PROMPT:
self.log("prompt found, login success!")
loginOk = True
break
            if lastLine == RPI_PASSWORD_PROMPT:
                self.log("inputting password ...")
                self.__bashWriteLine('raspberry')
            elif lastLine == RPI_USERNAME_PROMPT:
                self.log("inputting username ...")
                self.__bashWriteLine('pi')
            elif not lastLine:
                self.log("inputting username ...")
                self.__bashWriteLine('pi')
if not loginOk:
raise Exception('login fail')
self.bash('stty cols 256')
def log(self, fmt, *args):
try:
msg = fmt % args
print('%s - %s - %s' % (self.port, time.strftime('%b %d %H:%M:%S'), msg))
except Exception:
pass
def close(self):
self.__handle.close()
def bash(self, cmd, timeout=10):
"""
Execute the command in bash.
"""
self.__bashClearLines()
self.__bashWriteLine(cmd)
self.__bashExpect(cmd, timeout=timeout, endswith=True)
response = []
deadline = time.time() + timeout
while time.time() < deadline:
line = self.__bashReadLine()
if line is None:
time.sleep(0.01)
continue
if line == RPI_FULL_PROMPT:
# return response lines without prompt
return response
response.append(line)
self.__bashWrite('\x03')
raise Exception('%s: failed to find end of response' % self.port)
def __bashExpect(self, expected, timeout=20, endswith=False):
self.log('Expecting [%r]' % (expected))
deadline = time.time() + timeout
while time.time() < deadline:
line = self.__bashReadLine()
if line is None:
time.sleep(0.01)
continue
print('[%s] Got line [%r]' % (self.port, line))
if endswith:
matched = line.endswith(expected)
else:
matched = line == expected
if matched:
print('[%s] Expected [%r]' % (self.port, expected))
return
# failed to find the expected string
# send Ctrl+C to terminal
self.__bashWrite('\x03')
raise Exception('failed to find expected string[%s]' % expected)
def __bashRead(self, timeout=1):
deadline = time.time() + timeout
data = ''
while True:
piece = self.__handle.read()
data = data + piece.decode('utf8')
if piece:
continue
if data or time.time() >= deadline:
break
if data:
self.log('>>> %r', data)
return data
def __bashReadLine(self, timeout=1):
line = self.__bashGetNextLine()
if line is not None:
return line
assert len(self.__lines) == 1, self.__lines
tail = self.__lines.pop()
try:
tail += self.__bashRead(timeout=timeout)
tail = tail.replace(RPI_FULL_PROMPT, RPI_FULL_PROMPT + '\r\n')
tail = tail.replace(RPI_USERNAME_PROMPT, RPI_USERNAME_PROMPT + '\r\n')
tail = tail.replace(RPI_PASSWORD_PROMPT, RPI_PASSWORD_PROMPT + '\r\n')
finally:
self.__lines += [l.rstrip('\r') for l in LINESEPX.split(tail)]
assert len(self.__lines) >= 1, self.__lines
return self.__bashGetNextLine()
def __bashGetNextLine(self):
assert len(self.__lines) >= 1, self.__lines
while len(self.__lines) > 1:
line = self.__lines.pop(0)
assert len(self.__lines) >= 1, self.__lines
if LOGX.match(line):
logging.info('LOG: %s', line)
continue
else:
return line
assert len(self.__lines) >= 1, self.__lines
return None
def __bashWrite(self, data):
self.__handle.write(data)
self.log("<<< %r", data)
def __bashClearLines(self):
assert len(self.__lines) >= 1, self.__lines
while self.__bashReadLine(timeout=0) is not None:
pass
assert len(self.__lines) >= 1, self.__lines
def __bashWriteLine(self, line):
self.__bashWrite(line + '\n')
class OpenThread_BR(OpenThreadTHCI, IThci):
DEFAULT_COMMAND_TIMEOUT = 20
IsBorderRouter = True
def _connect(self):
self.log("logging in to Raspberry Pi ...")
self.__cli_output_lines = []
self.__syslog_skip_lines = None
self.__syslog_last_read_ts = 0
if self.connectType == 'ip':
self.__handle = SSHHandle(self.telnetIp, self.telnetPort, self.telnetUsername, self.telnetPassword)
else:
self.__handle = SerialHandle(self.port, 115200)
self.__afterConnect()
def _disconnect(self):
if self.__handle:
self.__handle.close()
self.__handle = None
def _deviceBeforeReset(self):
if self.isPowerDown:
self.log('Powering up the device')
self.powerUp()
if self.IsHost:
self.__stopRadvdService()
self.bash('sudo ip -6 addr del 910b::1 dev eth0 || true')
self.bash('sudo ip -6 addr del fd00:7d03:7d03:7d03::1 dev eth0 || true')
self.stopListeningToAddrAll()
def _deviceAfterReset(self):
self.__dumpSyslog()
self.__truncateSyslog()
if not self.IsHost:
self.bash('sudo systemctl restart otbr-agent')
time.sleep(2)
def _beforeRegisterMulticast(self, sAddr='ff04::1234:777a:1', timeout=300):
"""subscribe to the given ipv6 address (sAddr) in interface and send MLR.req OTA
Args:
sAddr : str : Multicast address to be subscribed and notified OTA.
"""
if self.externalCommissioner is not None:
self.externalCommissioner.MLR([sAddr], timeout)
return True
cmd = 'sudo nohup ~/repo/openthread/tests/scripts/thread-cert/mcast6.py wpan0 %s' % sAddr
cmd = cmd + ' > /dev/null 2>&1 &'
self.bash(cmd)
@API
def setupHost(self, setDua=False):
self.IsHost = True
if not setDua:
cmd = 'sudo ip -6 addr add 910b::1 dev eth0'
else:
cmd = 'sudo ip -6 addr add fd00:7d03:7d03:7d03::1 dev eth0'
self.bash(cmd)
self.__startRadvdService()
def _deviceEscapeEscapable(self, string):
"""Escape CLI escapable characters in the given string.
Args:
string (str): UTF-8 input string.
Returns:
[str]: The modified string with escaped characters.
"""
return '"' + string + '"'
@watched
def bash(self, cmd, timeout=DEFAULT_COMMAND_TIMEOUT):
return self.__handle.bash(cmd, timeout=timeout)
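    # Same as bash() but without the @watched logging wrapper; used for
    # high-frequency polling such as tailing syslog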
def bash_unwatched(self, cmd, timeout=DEFAULT_COMMAND_TIMEOUT):
return self.__handle.bash(cmd, timeout=timeout)
# Override send_udp
@API
def send_udp(self, interface, dst, port, payload):
if interface == 0: # Thread Interface
super(OpenThread_BR, self).send_udp(interface, dst, port, payload)
return
if interface == 1:
ifname = 'eth0'
else:
raise AssertionError('Invalid interface set to send UDP: {} '
'Available interface options: 0 - Thread; 1 - Ethernet'.format(interface))
cmd = 'sudo /home/pi/reference-device/send_udp.py %s %s %s %s' % (ifname, dst, port, payload)
print(cmd)
self.bash(cmd)
@API
def mldv2_query(self):
ifname = 'eth0'
dst = 'ff02::1'
cmd = 'sudo /home/pi/reference-device/send_mld_query.py %s %s' % (ifname, dst)
print(cmd)
self.bash(cmd)
@API
def ip_neighbors_flush(self):
print('%s call clear_cache' % self.port)
# clear neigh cache on linux
cmd1 = 'sudo ip -6 neigh flush nud all nud failed nud noarp dev eth0'
cmd2 = 'sudo ip -6 neigh list nud all dev eth0 ' \
'| cut -d " " -f1 ' \
'| sudo xargs -I{} ip -6 neigh delete {} dev eth0'
cmd = '%s ; %s' % (cmd1, cmd2)
self.bash(cmd)
@API
def ip_neighbors_add(self, addr, lladdr, nud='noarp'):
print('%s ip_neighbors_add' % self.port)
cmd1 = 'sudo ip -6 neigh delete %s dev eth0' % addr
cmd2 = 'sudo ip -6 neigh add %s dev eth0 lladdr %s nud %s' % (addr, lladdr, nud)
cmd = '%s ; %s' % (cmd1, cmd2)
self.bash(cmd)
@API
def get_eth_ll(self):
print('%s get_eth_ll' % self.port)
cmd = "ip -6 addr list dev eth0 | grep 'inet6 fe80' | awk '{print $2}'"
ret = self.bash(cmd)[0].split('/')[0]
return ret
@API
def ping(self, strDestination, ilength=0, hop_limit=5, timeout=5):
""" send ICMPv6 echo request with a given length to a unicast destination
address
Args:
strDestination: the unicast destination address of ICMPv6 echo request
ilength: the size of ICMPv6 echo request payload
hop_limit: the hop limit
timeout: time before ping() stops
"""
if hop_limit is None:
hop_limit = 5
if self.IsHost or self.IsBackboneRouter:
ifName = 'eth0'
else:
ifName = 'wpan0'
cmd = 'ping -6 -I %s %s -c 1 -s %d -W %d -t %d' % (
ifName,
strDestination,
int(ilength),
int(timeout),
int(hop_limit),
)
self.bash(cmd)
time.sleep(timeout)
def multicast_Ping(self, destination, length=20):
"""send ICMPv6 echo request with a given length to a multicast destination
address
Args:
destination: the multicast destination address of ICMPv6 echo request
length: the size of ICMPv6 echo request payload
"""
print('%s call multicast_Ping' % self.port)
print('destination: %s' % destination)
hop_limit = 5
if self.IsHost or self.IsBackboneRouter:
ifName = 'eth0'
else:
ifName = 'wpan0'
        cmd = 'ping -6 -I %s %s -c 1 -s %d -t %d' % (ifName, destination, int(length), hop_limit)
self.bash(cmd)
@API
def getGUA(self, filterByPrefix=None, eth=False):
"""get expected global unicast IPv6 address of Thread device
        note: filterByPrefix is a lowercase string, e.g.
        '2001' or '2001:0db8:0001:0000'.
Args:
filterByPrefix: a given expected global IPv6 prefix to be matched
Returns:
a global IPv6 address
"""
# get global addrs set if multiple
if eth:
return self.__getEthGUA(filterByPrefix=filterByPrefix)
else:
return super(OpenThread_BR, self).getGUA(filterByPrefix=filterByPrefix)
def __getEthGUA(self, filterByPrefix=None):
globalAddrs = []
cmd = 'ip -6 addr list dev eth0 | grep inet6'
output = self.bash(cmd)
for line in output:
# example: inet6 2401:fa00:41:23:274a:1329:3ab9:d953/64 scope global dynamic noprefixroute
line = line.strip().split()
if len(line) < 4 or line[2] != 'scope':
continue
if line[3] != 'global':
continue
addr = line[1].split('/')[0]
addr = str(ipaddress.IPv6Address(addr.decode()).exploded)
globalAddrs.append(addr)
if not filterByPrefix:
return globalAddrs[0]
else:
if filterByPrefix[-2:] != '::':
filterByPrefix = '%s::' % filterByPrefix
prefix = ipaddress.IPv6Network((filterByPrefix + '/64').decode())
for fullIp in globalAddrs:
address = ipaddress.IPv6Address(fullIp.decode())
if address in prefix:
return fullIp
def _cliReadLine(self):
# read commissioning log if it's commissioning
if not self.__cli_output_lines:
self.__readSyslogToCli()
if self.__cli_output_lines:
return self.__cli_output_lines.pop(0)
return None
@watched
def _deviceGetEtherMac(self):
        # Harness wants it as a string, because a Wireshark filter for eth
        # cannot be applied in hex.
return self.bash('ip addr list dev eth0 | grep ether')[0].strip().split()[1]
@watched
def _onCommissionStart(self):
assert self.__syslog_skip_lines is None
self.__syslog_skip_lines = int(self.bash('wc -l /var/log/syslog')[0].split()[0])
self.__syslog_last_read_ts = 0
@watched
def _onCommissionStop(self):
assert self.__syslog_skip_lines is not None
self.__syslog_skip_lines = None
def _deviceBeforeThreadStart(self):
self.bash('sudo sysctl net.ipv6.conf.eth0.accept_ra=2')
@watched
def __startRadvdService(self):
assert self.IsHost, "radvd service runs on Host only"
self.bash("""sudo sh -c "cat >/etc/radvd.conf <<EOF
interface eth0
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 30;
AdvDefaultPreference low;
prefix 910b::/64
{
AdvOnLink on;
AdvAutonomous on;
AdvRouterAddr on;
};
prefix fd00:7d03:7d03:7d03::/64
{
AdvOnLink on;
AdvAutonomous off;
AdvRouterAddr off;
};
};
EOF"
""")
self.bash('sudo service radvd restart')
self.bash('sudo service radvd status')
@watched
def __stopRadvdService(self):
assert self.IsHost, "radvd service runs on Host only"
self.bash('sudo service radvd stop')
def __readSyslogToCli(self):
if self.__syslog_skip_lines is None:
return 0
# read syslog once per second
if time.time() < self.__syslog_last_read_ts + 1:
return 0
self.__syslog_last_read_ts = time.time()
lines = self.bash_unwatched('tail +%d /var/log/syslog' % self.__syslog_skip_lines)
for line in lines:
m = OTBR_AGENT_SYSLOG_PATTERN.search(line)
if not m:
continue
self.__cli_output_lines.append(m.group(1))
self.__syslog_skip_lines += len(lines)
return len(lines)
def _cliWriteLine(self, line):
cmd = 'sudo ot-ctl -- %s' % line
output = self.bash(cmd)
# fake the line echo back
self.__cli_output_lines.append(line)
for line in output:
self.__cli_output_lines.append(line)
def __afterConnect(self):
self.__truncateSyslog()
self.__checkServiceStatus()
def __checkServiceStatus(self):
self.bash('sudo service radvd stop')
self.bash('sudo systemctl restart otbr-agent')
def __restartAgentService(self):
self.bash('sudo systemctl restart otbr-agent')
def __truncateSyslog(self):
self.bash('sudo truncate -s 0 /var/log/syslog')
def __dumpSyslog(self):
output = self.bash_unwatched('sudo grep "otbr-agent" /var/log/syslog')
for line in output:
self.log('%s', line)
@API
def mdns_query(self, dst='ff02::fb', service='_meshcop._udp.local', addrs_blacklist=[]):
print('mdns_query %s %s %s' % (dst, service, addrs_blacklist))
# For BBR-TC-03 or DH test cases just send a query
if dst == 'ff02::fb' and not addrs_blacklist:
self.bash('dig -p 5353 @%s %s ptr' % (dst, service))
return
# For MATN-TC-17 and MATN-TC-18 use Zeroconf to get the BBR address and border agent port
from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf, DNSAddress, DNSService, DNSText
def on_service_state_change(zeroconf, service_type, name, state_change):
if state_change is ServiceStateChange.Added:
zeroconf.get_service_info(service_type, name)
class BorderAgent(object):
alias = None
server_name = None
link_local_addr = None
port = None
thread_status = None
def __init__(self, alias):
self.alias = alias
def __repr__(self):
return '%s # [%s]:%s TIS=%s' % (self.alias, self.link_local_addr, self.port, self.thread_status)
def parse_cache(cache):
border_agents = []
# Find all border routers
for ptr in cache['_meshcop._udp.local.']:
border_agents.append(BorderAgent(ptr.alias))
# Find server name, port and Thread Interface status for each border router
for ba in border_agents:
for record in cache[ba.alias.lower()]:
if isinstance(record, DNSService):
ba.server_name = record.server
ba.port = record.port
elif isinstance(record, DNSText):
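                        # The raw TXT data encodes key=value pairs; the 4
                        # bytes after 'sb=' are the state bitmap, and bits 3-4
                        # of its last byte carry the Thread interface status
                        # (2 means active, per the check further below)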
text = bytearray(record.text)
sb = text.split(b'sb=')[1][0:4]
ba.thread_status = (sb[3] & 0x18) >> 3
# Find link local address for each border router
for ba in border_agents:
for record in cache[ba.server_name]:
if isinstance(record, DNSAddress):
addr = ipaddress.ip_address(record.address)
if isinstance(addr, ipaddress.IPv6Address) and addr.is_link_local:
ba.link_local_addr = str(addr)
break
return border_agents
# Browse border agents
zeroconf = Zeroconf()
ServiceBrowser(zeroconf, "_meshcop._udp.local.", handlers=[on_service_state_change])
time.sleep(2)
cache = zeroconf.cache.cache
zeroconf.close()
# Find an active border agent not in the blacklist
border_agents = parse_cache(cache)
for ba in border_agents:
if ba.thread_status == 2 and ba.link_local_addr not in addrs_blacklist:
return ('%s%%eth0' % ba.link_local_addr, ba.port)
raise Exception('No active Border Agents found')
# Override powerDown
@API
def powerDown(self):
self.log('Powering down BBR')
self.bash('sudo systemctl stop otbr-agent')
super(OpenThread_BR, self).powerDown()
# Override powerUp
@API
def powerUp(self):
self.log('Powering up BBR')
self.bash('sudo systemctl start otbr-agent')
super(OpenThread_BR, self).powerUp()
# Override forceSetSlaac
@API
def forceSetSlaac(self, slaacAddress):
print('forceSetSlaac %s' % slaacAddress)
self.bash('sudo ip -6 addr add %s/64 dev wpan0' % slaacAddress)
# Override stopListeningToAddr
@API
def stopListeningToAddr(self, sAddr):
"""
        Unsubscribe from a given IPv6 address which was subscribed earlier with `registerMulticast`.
Args:
sAddr : str : Multicast address to be unsubscribed. Use an empty string to unsubscribe
all the active multicast addresses.
"""
cmd = 'sudo pkill -f mcast6.*%s' % sAddr
self.bash(cmd)
def stopListeningToAddrAll(self):
return self.stopListeningToAddr('')
@API
def deregisterMulticast(self, sAddr):
"""
Unsubscribe to a given IPv6 address.
Only used by External Commissioner.
Args:
sAddr : str : Multicast address to be unsubscribed.
"""
self.externalCommissioner.MLR([sAddr], 0)
return True
|
py | 7df6b527463fc22448d0e2d7a1f54ba9e975f501 | # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
from recipe_engine.post_process import StepSuccess, StepFailure, StepException
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
DEPS = [
'context',
'step',
]
def RunSteps(api):
# Nest all steps below this.
with api.step.nest('complicated thing'):
with api.step.nest('first part'):
api.step('wait a bit', ['sleep', '1'])
# Outer nested step's status gets the worst child's status by default.
with api.step.nest('inherit status'):
with api.step.nest('inner step') as inner_step_presentation:
inner_step_presentation.step_text += 'Hey!'
inner_step_presentation.status = api.step.EXCEPTION
# But, you could also pick the last status.
with api.step.nest('last status', status='last'):
with api.step.nest('failpants') as failpants_presentation:
failpants_presentation.status = api.step.EXCEPTION
api.step('everything OK', ['echo', 'hi'])
# DEPRECATED; DO NOT USE
with api.step.nest('extra_nonsense') as fake_step_data:
    fake_step_data.presentation.step_text = (
        'Just use the yielded object as StepPresentation directly, do not '
        'use the `.presentation` accessor.'
    )
# Exceptions bubbling out take precedence.
try:
with api.step.nest('exception status'):
api.step('I am fine', ['echo', 'the chillest'])
raise Exception('Whoa! Bang!')
except Exception:
pass
try:
with api.step.nest('failure status'):
api.step('I fail', ['echo', 'fail'])
except api.step.StepFailure:
pass
try:
with api.step.nest('timeout status'):
api.step(
'I fail', ['echo', 'fail'], timeout=datetime.timedelta(seconds=1))
except api.step.StepFailure as ex:
assert ex.had_timeout
# Duplicate nesting names with unique child steps
for i in range(3):
with api.step.nest('Do Iteration'):
api.step('Iterate %d' % i, ['echo', 'lerpy'])
api.step('simple thing', ['sleep', '1'])
# Note that "|" is a reserved character:
try:
api.step('cool|step', ['echo', 'hi'])
assert False # pragma: no cover
except ValueError:
pass
# OK to have a nest parent without any children
with api.step.nest('lonely parent'):
pass
def GenTests(api):
yield (
api.test('basic')
+ api.post_process(StepException, 'inherit status')
+ api.post_process(StepSuccess, 'last status')
+ api.post_check(lambda check, steps: check(
'StepPresentation' in steps['extra_nonsense'].step_text
))
+ api.post_process(StepException, 'exception status')
# TODO(iannucci): switch to build.proto so these can actually be
# differentiated: annotator protocol only has a subset of the possible
# statuses.
+ api.step_data('failure status.I fail', retcode=1)
+ api.post_process(StepFailure, 'failure status')
# TODO(iannucci): switch to build.proto so these can actually be
# differentiated: annotator protocol only has a subset of the possible
# statuses.
+ api.step_data('timeout status.I fail', times_out_after=20)
+ api.post_process(StepFailure, 'timeout status')
)
|
py | 7df6b66f8cc2cb0fce7ba941356147635e6e4ab9 | #Command Context generator
import sys
FILLER_CHAR = "x"
COMMAND_SET = ["G","S","R"] #Maximum of 8 values
SIZE_OF_COMMAND_SET = 1
for x in range (0, len(COMMAND_SET)):
SIZE_OF_COMMAND_SET = SIZE_OF_COMMAND_SET * 2
for x in range (0, SIZE_OF_COMMAND_SET):
sys.stdout.write("#define ")
for y in xrange(0,len(COMMAND_SET)):
if((x & (1 << y)) >= 1):
sys.stdout.write(COMMAND_SET[y])
else:
sys.stdout.write(FILLER_CHAR)
sys.stdout.write(' ')
print "0x{:02x}".format(x)
print"\r\n"
COMMAND_SET = ["G","S","R","C","M","S","C"] #Maximum of 8 values
SIZE_OF_COMMAND_SET = 1
for x in range (0, len(COMMAND_SET)):
SIZE_OF_COMMAND_SET = SIZE_OF_COMMAND_SET * 2
for x in range (0, SIZE_OF_COMMAND_SET):
sys.stdout.write("#define ")
for y in xrange(0,len(COMMAND_SET)):
if((x & (1 << y)) >= 1):
sys.stdout.write(COMMAND_SET[y])
else:
sys.stdout.write(FILLER_CHAR)
sys.stdout.write(' ')
print "0x{:02x}".format(x)
print"\r\n"
COMMAND_SET = ["E","L","P"] #Maximum of 8 values
SIZE_OF_COMMAND_SET = 1
for x in range (0, len(COMMAND_SET)):
SIZE_OF_COMMAND_SET = SIZE_OF_COMMAND_SET * 2
for x in range (0, SIZE_OF_COMMAND_SET):
sys.stdout.write("#define ")
for y in xrange(0,len(COMMAND_SET)):
if((x & (1 << y)) >= 1):
sys.stdout.write(COMMAND_SET[y])
else:
sys.stdout.write(FILLER_CHAR)
sys.stdout.write(' ')
print "0x{:02x}".format(x)
|
py | 7df6b6af84beddb0e51a172fa5d8134882818306 | from discord.ext import commands
import discord
extensions = ('core', 'tags')
def callable_prefix(bot, msg):
    # Direct messages have no guild, so fall back to mention-only prefixes
    config = bot.db.cache.get(msg.guild.id) if msg.guild else None
    extras = config['prefixes'] if config else []
    return commands.when_mentioned_or(*extras)(bot, msg)
class NineTwo(commands.AutoShardedBot):
def __init__(self, db, *args, **kwargs):
self.db = db
intents = discord.Intents.all()
super().__init__(callable_prefix, intents=intents, *args, **kwargs)
for ext in extensions:
self.load_extension(f'extensions.{ext}')
async def exit(self):
await self.db.close()
await self.close()
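# Example startup (assuming a `db` object exposing .cache and .close(), and a
# bot token `TOKEN`; both names are placeholders here):
#   bot = NineTwo(db)
#   bot.run(TOKEN)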
|
py | 7df6b71d2c7ed3d28ec3022e28c31051dc5a74b6 | import os
from pathlib import Path
import pyrefinebio
import statistics
import pandas as pd
import numpy as np
import seaborn as sns; sns.set_theme(color_codes=True)
import IPython
df1 = pd.read_csv('a/file/path/only/I/have/SRP070849.tsv', sep='\t')
mdf = pd.read_csv('a/file/path/only/I/have/SRP070849_metadata.tsv', sep='\t')
# Index by gene so downstream operations see only the sample columns
# (the bare `df1["Gene"]` lookup here was a no-op)
df1 = df1.set_index('Gene')
df1['calc'] = df1.var(axis=1, skipna=True)  # per-gene variance
filter_num = df1.calc.quantile(0.90)
# Keep the top 10% most variable genes
df2 = df1[df1.calc > float(filter_num)]
df2 = df2.drop(columns=['calc'])  # was `df_by_var`, which is undefined
# groups = mdf.pop('refinebio_title')
# df2
# type(df2)
refinebio_title = mdf.pop('refinebio_title')
keys = dict(zip(refinebio_title.unique(), "rbg"))  # map each group to a color
keys_df = pd.DataFrame(refinebio_title.map(keys))  # was `exp_group`, which is undefined
keys_df = keys_df.set_index(df2.columns)  # was `color_key_df`, which is undefined
heatmap = sns.clustermap(df2, cmap="mako", col_colors=keys_df, dendrogram_ratio=(0, .2), cbar_pos=(-.1, .2, .03, .5))
heatmap.savefig('heatmap.png')
|
py | 7df6b7a1ec8ad70b243d1ea0af210aff8bf68566 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2021-03-30 10:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bills', '0015_auto_20200204_1218'),
]
operations = [
migrations.AlterField(
model_name='billcontact',
name='country',
            field=models.CharField(choices=[('AF', 'Afghanistan'), ('AX', 'Åland Islands'), ('AL', 'Albania'), ('DZ', 'Algeria'), ('AS', 'American Samoa'), ('AD', 'Andorra'), ('AO', 'Angola'), ('AI', 'Anguilla'), ('AQ', 'Antarctica'), ('AG', 'Antigua and Barbuda'), ('AR', 'Argentina'), ('AM', 'Armenia'), ('AW', 'Aruba'), ('AU', 'Australia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BS', 'Bahamas'), ('BH', 'Bahrain'), ('BD', 'Bangladesh'), ('BB', 'Barbados'), ('BY', 'Belarus'), ('BE', 'Belgium'), ('BZ', 'Belize'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BT', 'Bhutan'), ('BO', 'Bolivia (Plurinational State of)'), ('BQ', 'Bonaire, Sint Eustatius and Saba'), ('BA', 'Bosnia and Herzegovina'), ('BW', 'Botswana'), ('BV', 'Bouvet Island'), ('BR', 'Brazil'), ('IO', 'British Indian Ocean Territory'), ('BN', 'Brunei Darussalam'), ('BG', 'Bulgaria'), ('BF', 'Burkina Faso'), ('BI', 'Burundi'), ('CV', 'Cabo Verde'), ('KH', 'Cambodia'), ('CM', 'Cameroon'), ('CA', 'Canada'), ('KY', 'Cayman Islands'), ('CF', 'Central African Republic'), ('TD', 'Chad'), ('CL', 'Chile'), ('CN', 'China'), ('CX', 'Christmas Island'), ('CC', 'Cocos (Keeling) Islands'), ('CO', 'Colombia'), ('KM', 'Comoros'), ('CG', 'Congo'), ('CD', 'Congo (the Democratic Republic of the)'), ('CK', 'Cook Islands'), ('CR', 'Costa Rica'), ('CI', "Côte d'Ivoire"), ('HR', 'Croatia'), ('CU', 'Cuba'), ('CW', 'Curaçao'), ('CY', 'Cyprus'), ('CZ', 'Czechia'), ('DK', 'Denmark'), ('DJ', 'Djibouti'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('EC', 'Ecuador'), ('EG', 'Egypt'), ('SV', 'El Salvador'), ('GQ', 'Equatorial Guinea'), ('ER', 'Eritrea'), ('EE', 'Estonia'), ('SZ', 'Eswatini'), ('ET', 'Ethiopia'), ('FK', 'Falkland Islands (Malvinas)'), ('FO', 'Faroe Islands'), ('FJ', 'Fiji'), ('FI', 'Finland'), ('FR', 'France'), ('GF', 'French Guiana'), ('PF', 'French Polynesia'), ('TF', 'French Southern Territories'), ('GA', 'Gabon'), ('GM', 'Gambia'), ('GE', 'Georgia'), ('DE', 'Germany'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GR', 'Greece'), ('GL', 'Greenland'), ('GD', 'Grenada'), ('GP', 'Guadeloupe'), ('GU', 'Guam'), ('GT', 'Guatemala'), ('GG', 'Guernsey'), ('GN', 'Guinea'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HT', 'Haiti'), ('HM', 'Heard Island and McDonald Islands'), ('VA', 'Holy See'), ('HN', 'Honduras'), ('HK', 'Hong Kong'), ('HU', 'Hungary'), ('IS', 'Iceland'), ('IN', 'India'), ('ID', 'Indonesia'), ('IR', 'Iran (Islamic Republic of)'), ('IQ', 'Iraq'), ('IE', 'Ireland'), ('IM', 'Isle of Man'), ('IL', 'Israel'), ('IT', 'Italy'), ('JM', 'Jamaica'), ('JP', 'Japan'), ('JE', 'Jersey'), ('JO', 'Jordan'), ('KZ', 'Kazakhstan'), ('KE', 'Kenya'), ('KI', 'Kiribati'), ('KP', "Korea (the Democratic People's Republic of)"), ('KR', 'Korea (the Republic of)'), ('KW', 'Kuwait'), ('KG', 'Kyrgyzstan'), ('LA', "Lao People's Democratic Republic"), ('LV', 'Latvia'), ('LB', 'Lebanon'), ('LS', 'Lesotho'), ('LR', 'Liberia'), ('LY', 'Libya'), ('LI', 'Liechtenstein'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('MO', 'Macao'), ('MG', 'Madagascar'), ('MW', 'Malawi'), ('MY', 'Malaysia'), ('MV', 'Maldives'), ('ML', 'Mali'), ('MT', 'Malta'), ('MH', 'Marshall Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MU', 'Mauritius'), ('YT', 'Mayotte'), ('MX', 'Mexico'), ('FM', 'Micronesia (Federated States of)'), ('MD', 'Moldova (the Republic of)'), ('MC', 'Monaco'), ('MN', 'Mongolia'), ('ME', 'Montenegro'), ('MS', 'Montserrat'), ('MA', 'Morocco'), ('MZ', 'Mozambique'), ('MM', 'Myanmar'), ('NA', 'Namibia'), ('NR', 'Nauru'), ('NP', 'Nepal'), ('NL', 'Netherlands'), ('NC', 'New Caledonia'), ('NZ', 'New Zealand'), ('NI', 'Nicaragua'), ('NE', 'Niger'), ('NG', 'Nigeria'), ('NU', 'Niue'), ('NF', 'Norfolk Island'), ('MK', 'North Macedonia'), ('MP', 'Northern Mariana Islands'), ('NO', 'Norway'), ('OM', 'Oman'), ('PK', 'Pakistan'), ('PW', 'Palau'), ('PS', 'Palestine, State of'), ('PA', 'Panama'), ('PG', 'Papua New Guinea'), ('PY', 'Paraguay'), ('PE', 'Peru'), ('PH', 'Philippines'), ('PN', 'Pitcairn'), ('PL', 'Poland'), ('PT', 'Portugal'), ('PR', 'Puerto Rico'), ('QA', 'Qatar'), ('RE', 'Réunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('BL', 'Saint Barthélemy'), ('SH', 'Saint Helena, Ascension and Tristan da Cunha'), ('KN', 'Saint Kitts and Nevis'), ('LC', 'Saint Lucia'), ('MF', 'Saint Martin (French part)'), ('PM', 'Saint Pierre and Miquelon'), ('VC', 'Saint Vincent and the Grenadines'), ('WS', 'Samoa'), ('SM', 'San Marino'), ('ST', 'Sao Tome and Principe'), ('SA', 'Saudi Arabia'), ('SN', 'Senegal'), ('RS', 'Serbia'), ('SC', 'Seychelles'), ('SL', 'Sierra Leone'), ('SG', 'Singapore'), ('SX', 'Sint Maarten (Dutch part)'), ('SK', 'Slovakia'), ('SI', 'Slovenia'), ('SB', 'Solomon Islands'), ('SO', 'Somalia'), ('ZA', 'South Africa'), ('GS', 'South Georgia and the South Sandwich Islands'), ('SS', 'South Sudan'), ('ES', 'Spain'), ('LK', 'Sri Lanka'), ('SD', 'Sudan'), ('SR', 'Suriname'), ('SJ', 'Svalbard and Jan Mayen'), ('SE', 'Sweden'), ('CH', 'Switzerland'), ('SY', 'Syrian Arab Republic'), ('TW', 'Taiwan (Province of China)'), ('TJ', 'Tajikistan'), ('TZ', 'Tanzania, the United Republic of'), ('TH', 'Thailand'), ('TL', 'Timor-Leste'), ('TG', 'Togo'), ('TK', 'Tokelau'), ('TO', 'Tonga'), ('TT', 'Trinidad and Tobago'), ('TN', 'Tunisia'), ('TR', 'Turkey'), ('TM', 'Turkmenistan'), ('TC', 'Turks and Caicos Islands'), ('TV', 'Tuvalu'), ('UG', 'Uganda'), ('UA', 'Ukraine'), ('AE', 'United Arab Emirates'), ('GB', 'United Kingdom of Great Britain and Northern Ireland'), ('UM', 'United States Minor Outlying Islands'), ('US', 'United States of America'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VU', 'Vanuatu'), ('VE', 'Venezuela (Bolivarian Republic of)'), ('VN', 'Viet Nam'), ('VG', 'Virgin Islands (British)'), ('VI', 'Virgin Islands (U.S.)'), ('WF', 'Wallis and Futuna'), ('EH', 'Western Sahara'), ('YE', 'Yemen'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')], default='ES', max_length=20, verbose_name='country'),
),
]
|
py | 7df6b8eff82864ea6f75a861a2786fa9eaa1ef66 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Limracoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for limracoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
LIMRACOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a limracoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, rpchost, timewait, limracoind, limracoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = limracoind
self.coverage_dir = coverage_dir
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(limracoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
PRIV_KEYS = [
# address , privkey
('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any limracoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time limracoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by limracoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("limracoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the limracoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'limracoind exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. limracoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to limracoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr=''):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=LIMRACOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
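# A minimal usage sketch for assert_debug_log (hypothetical test code; the
# expected message is illustrative, not taken from a real test):
#
#   with node.assert_debug_log(expected_msgs=["Loaded best chain"]):
#       node.getblockcount()  # any action expected to emit the message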
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to limracoind
expected_msg: regex that stderr should match when limracoind fails
Will throw if limracoind starts without an error.
Will throw if an expected_msg is provided and it does not match limracoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('limracoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "limracoind should have exited with an error"
else:
assert_msg = "limracoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes limracoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to limracoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.limracoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with limracoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run limracoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same limracoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running limracoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
|
py | 7df6b9b209f2e9913c07dbabc32ac17ecf64136d | #!/usr/bin/env python
"""
This is a setup script for imPROSE -- Integrated Methods for Prediction of Super-Enhancers
This code is free software; you can redistribute it and/or modify it under the terms of the
BSD License (see the file LICENSE.md included with the distribution).
@version: 1.0
@author: Aziz Khan
@email: [email protected]
"""
import os
from setuptools import setup, find_packages  # setuptools' setup() supports install_requires
#from improse import __version__
#VERSION = __import__("improse").__version__
VERSION = '1.2'
CLASSIFIERS = [
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Bio-Informatics',
]
install_requires = [
'scipy',
'numpy',
'scikit-learn',
'pandas',
'argparse'
]
setup(
name="improse",
description="Integrated Methods for Prediction of Super-Enhancers",
version=VERSION,
author="Aziz Khan",
#Keywords= "bioinformatics,genomics",
author_email="[email protected]",
url="https://github.com/asntech/improse",
package_dir={'improse': 'improse'},
packages=['improse'],
scripts=['improse/improse',
],
package_data={'improse': ['improse/data/*.csv']},
include_package_data=True,
install_requires = install_requires,
classifiers=CLASSIFIERS,
)
|
py | 7df6ba08c1502a6c280366cc3454593e9c971605 | VERSION = (1, 3, 0)
def get_version():
return '.'.join(map(str, VERSION))
|
py | 7df6ba2376e91a48393ddbbb82d58d29481c30b5 | import logging
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:  # Python 2
    from collections import Mapping
import salt.utils.files
import salt.utils.yaml
log = logging.getLogger(__name__)
DEFAULT_POD_NETWORK = '10.233.0.0/16'
DEFAULT_SERVICE_NETWORK = '10.96.0.0/12'
def _load_config(path):
log.debug('Loading MetalK8s configuration from %s', path)
config = None
try:
with salt.utils.files.fopen(path, 'rb') as fd:
config = salt.utils.yaml.safe_load(fd) or {}
except Exception as exc:
return __utils__['pillar_utils.errors_to_dict']([
"Failed to load {}: {}".format(path, exc)
])
if not config:
error_tplt = (
'Invalid BootstrapConfiguration at {}'
)
return __utils__['pillar_utils.errors_to_dict'](
[error_tplt.format(path)]
)
expected = {
'apiVersion': 'metalk8s.scality.com/v1alpha2',
'kind': 'BootstrapConfiguration'
}
errors = (
__utils__['pillar_utils.assert_equals'](config, expected) +
__utils__['pillar_utils.assert_keys'](config, ['archives'])
)
if errors:
return __utils__['pillar_utils.errors_to_dict'](errors)
return config
def _load_networks(config_data):
errors = __utils__['pillar_utils.assert_keys'](config_data, ['networks'])
if errors:
return __utils__['pillar_utils.errors_to_dict'](errors)
networks_data = config_data['networks']
if not isinstance(networks_data, Mapping):
return __utils__['pillar_utils.errors_to_dict']([
"Invalid network format in config file, mapping expected got {}"
.format(networks_data)
])
errors = __utils__['pillar_utils.assert_keys'](
networks_data,
['controlPlane', 'workloadPlane']
)
if errors:
return __utils__['pillar_utils.errors_to_dict'](errors)
return {
'control_plane': networks_data['controlPlane'],
'workload_plane': networks_data['workloadPlane'],
'pod': networks_data.get('pods', DEFAULT_POD_NETWORK),
'service': networks_data.get('services', DEFAULT_SERVICE_NETWORK),
}
def _load_ca(config_data):
errors = __utils__['pillar_utils.assert_keys'](config_data, ['ca'])
if errors:
return __utils__['pillar_utils.errors_to_dict'](errors)
ca_data = config_data['ca']
if not isinstance(ca_data, Mapping):
return __utils__['pillar_utils.errors_to_dict']([
"Invalid ca format in config file, mapping expected got {}"
.format(ca_data)
])
errors = __utils__['pillar_utils.assert_keys'](ca_data, ['minion'])
if errors:
return __utils__['pillar_utils.errors_to_dict'](errors)
return {
'minion': ca_data['minion'],
}
def _load_iso_path(config_data):
"""Load iso path from BootstrapConfiguration
"""
res = config_data['archives']
if isinstance(res, str):
res = [res]
if not isinstance(res, list):
return __utils__['pillar_utils.errors_to_dict']([
"Invalid archives format in config file, list or string expected "
"got {1}."
.format(res)
])
return res
def ext_pillar(minion_id, pillar, bootstrap_config):
config = _load_config(bootstrap_config)
if config.get('_errors'):
metal_data = __utils__['pillar_utils.errors_to_dict'](
config['_errors']
)
result = {
'metalk8s': metal_data,
}
for key in ['metalk8s']:
__utils__['pillar_utils.promote_errors'](result, key)
return result
else:
metal_data = {
'archives': _load_iso_path(config),
'ca': _load_ca(config),
}
result = {
'networks': _load_networks(config),
'metalk8s': metal_data,
'proxies': config.get('proxies', {})
}
if not isinstance(metal_data['archives'], list):
# Special case for archives in pillar
__utils__['pillar_utils.promote_errors'](metal_data, 'archives')
for key in ['ca',]:
__utils__['pillar_utils.promote_errors'](metal_data, key)
for key in ['networks', 'metalk8s']:
__utils__['pillar_utils.promote_errors'](result, key)
return result
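# For reference, a minimal BootstrapConfiguration accepted by this module
# (hypothetical values; 'archives' may be a single path or a list, and the
# 'pods'/'services' networks fall back to the DEFAULT_*_NETWORK constants):
#
#   apiVersion: metalk8s.scality.com/v1alpha2
#   kind: BootstrapConfiguration
#   networks:
#     controlPlane: 172.21.254.0/28
#     workloadPlane: 10.100.0.0/16
#   ca:
#     minion: bootstrap
#   archives:
#     - /srv/scality/metalk8s.iso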
|
py | 7df6ba9f1f244891b2f7fed0f6927110d4c718f1 | total = 1 # sets initial total, since its multiplication it cannot start at 0
userNum = int(input('Enter a number: ')) # takes the users input of a number
if userNum > 0: # if the users number is positive,
y = list(range(1, userNum+1)) # set the value of y to a positive list
elif userNum < 0: # but if the users number is negative
y = list(range(-1, userNum-1, -1)) # set the value of y to a negative list
else: # but if the users number is zero,
y = [] # set y to an empty list so the loop is skipped and the total stays 1 (avoids a NameError)
for x in y: # set x to the next number in the list y each time through the loop
total = total * x # multiply the total by the number from the list (x)
print(total) # print the total
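# Example runs: entering 4 prints 24 (1*2*3*4); entering -3 prints -6
# ((-1)*(-2)*(-3)); entering 0 prints 1 (the empty product).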
|
py | 7df6baad2b44b15f05474a78df9c822e2235938d | # Databricks notebook source
#!/usr/bin/env python3
# -------------------------------------------------------------------------
# Copyright (c) 2021 NHS England and NHS Improvement. All rights reserved.
# Licensed under the MIT License. See license.txt in the project root for
# license information.
# -------------------------------------------------------------------------
"""
FILE: dbrks_pomi_repeat_prescription_month_count.py
DESCRIPTION:
Databricks notebook with processing code for the NHSX Analytics unit metric: No. of repeat prescription transactions (M054)
USAGE:
...
CONTRIBUTORS: Craig Shenton, Mattia Ficarelli
CONTACT: [email protected]
CREATED: 01 Sept 2021
VERSION: 0.0.1
"""
# COMMAND ----------
# Install libs
# -------------------------------------------------------------------------
%pip install geojson==2.5.* tabulate requests pandas pathlib azure-storage-file-datalake beautifulsoup4 numpy urllib3 lxml regex pyarrow==5.0.*
# COMMAND ----------
# Imports
# -------------------------------------------------------------------------
# Python:
import os
import io
import tempfile
from datetime import datetime
import json
# 3rd party:
import pandas as pd
from pathlib import Path
from azure.storage.filedatalake import DataLakeServiceClient
# Connect to Azure datalake
# -------------------------------------------------------------------------
# !env from databricks secrets
CONNECTION_STRING = dbutils.secrets.get(scope="datalakefs", key="CONNECTION_STRING")
# COMMAND ----------
# MAGIC %run /Repos/dev/au-azure-databricks/functions/dbrks_helper_functions
# COMMAND ----------
#Download JSON config from Azure datalake
file_path_config = "/config/pipelines/nhsx-au-analytics/"
file_name_config = "config_pomi_dbrks.json"
file_system_config = "nhsxdatalakesagen2fsprod"
config_JSON = datalake_download(CONNECTION_STRING, file_system_config, file_path_config, file_name_config)
config_JSON = json.loads(io.BytesIO(config_JSON).read())
# COMMAND ----------
#Get parameters from JSON config
source_path = config_JSON['pipeline']['project']['source_path']
source_file = config_JSON['pipeline']['project']['source_file']
file_system = config_JSON['pipeline']['adl_file_system']
sink_path = config_JSON['pipeline']['project']['databricks'][6]['sink_path']
sink_file = config_JSON['pipeline']['project']['databricks'][6]['sink_file']
# COMMAND ----------
#Processing
latestFolder = datalake_latestFolder(CONNECTION_STRING, file_system, source_path)
file = datalake_download(CONNECTION_STRING, file_system, source_path+latestFolder, source_file)
df = pd.read_parquet(io.BytesIO(file), engine="pyarrow")
df = df[df["Field"] == "Pat_Presc_Use"]
df["Report_Period_End"] = df["Report_Period_End"].astype("datetime64[ns]")
df1 = df.sort_values("Report_Period_End")
df2 = df1.reset_index(drop = True)
df2 = df2.drop(columns={"Field",
"Region_Code",
"CCG_Code",
"Subregion_Code",
"System_Supplier",
"Effective_Snapshot_Date",
"DataSourceFileForThisSnapshot_Version",
"Report_Period_Length",
"Unique_ID",
"AuditKey"})
df2.rename(columns={
"Value": "Number of repeat prescription transactions",
"Report_Period_End": "Date",
"Practice_Code": "Practice code"}, inplace=True)
df2.index.name = "Unique ID"
df_processed = df2.copy()
# COMMAND ----------
#Upload processed data to datalake
file_contents = io.StringIO()
df_processed.to_csv(file_contents)
datalake_upload(file_contents, CONNECTION_STRING, file_system, sink_path+latestFolder, sink_file)
|
py | 7df6bd504092aa1be4575a9cc2f2244897ce310e | import base64
import jwt
from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from .provider import EspooADFSProvider, HelsinkiADFSProvider
x509_backend = default_backend()
class ADFSOAuth2Adapter(OAuth2Adapter):
@classmethod
def get_login_view(cls):
return OAuth2LoginView.adapter_view(cls)
@classmethod
def get_callback_view(cls):
return OAuth2CallbackView.adapter_view(cls)
def complete_login(self, request, app, token, **kwargs):
cert_der = base64.b64decode(self.cert)
x509_cert = x509.load_der_x509_certificate(cert_der, backend=x509_backend)
jwt_token = jwt.decode(token.token, key=x509_cert.public_key(),
leeway=10, options={'verify_aud': False})
data = self.clean_attributes(jwt_token)
return self.get_provider().sociallogin_from_response(request, data)
class HelsinkiADFSOAuth2Adapter(ADFSOAuth2Adapter):
provider_id = HelsinkiADFSProvider.id
realm = 'helsinki'
access_token_url = 'https://fs.hel.fi/adfs/oauth2/token'
authorize_url = 'https://fs.hel.fi/adfs/oauth2/authorize'
profile_url = 'https://api.hel.fi/sso/user/'
cert = (
'MIIDMDCCAhigAwIBAgIBATANBgkqhkiG9w0BAQsFADAjMSEwHwYDVQQDExhBR'
'EZTIFNpZ25pbmcgLSBmcy5oZWwuZmkwHhcNMTYwNDAzMjIxMTAwWhcNMjEwND'
'AzMjIxMTAwWjAjMSEwHwYDVQQDExhBREZTIFNpZ25pbmcgLSBmcy5oZWwuZmk'
'wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrCo9kuzljk4F8R12A'
'eIYMARztxkMojcrN1KN3KQeoxcCPaFOTMYHWk8ww1N+m0PJoLl1Eray+cMsoH'
'rdd3iVxmApcQBxD02SnGsEn/3D/sTHcoi9WzqwM8ESbtm0jGIvfWrpJtMO/g7'
'ELW0dXBcWq4LRvBtyTt3jiehIO0HohS8xfQ4+vURFpjvfD0kjPemsMJ7QB8Eo'
'+JscSMTF2CNFO9vct1IJiQJUfRbVWk8I/JFA65ZuXrCjY//LSNLzLRZ+Iw1Bl'
'iSj4jbmOtG8mcb7Fql7dvvz91AMksguO4+9xATukZK7MBLb3DtT2FzYt9oUBR'
'wSsMXiNXh8AitTLUMgpAgMBAAGjbzBtMAwGA1UdEwEB/wQCMAAwHQYDVR0OBB'
'YEFBDL4FpHu+kQEI7MIpSjSACaA9ajMAsGA1UdDwQEAwIFIDARBglghkgBhvh'
'CAQEEBAMCBkAwHgYJYIZIAYb4QgENBBEWD3hjYSBjZXJ0aWZpY2F0ZTANBgkq'
'hkiG9w0BAQsFAAOCAQEAISn44oOdtfdMHh0Z4nezAuDHtKqTd6iV3MY7MwTFm'
'iUFQhJADO2ezpoW3Xj64wWeg3eVXyC7iHk/SV5OVmmo4uU/1YJHiBc5jEUZ5E'
'dvaZQaDH5iaJlK6aiCTznqwu7XJS7LbLeLrVqj3H3IYsV6BiGlT4Z1rXYX+nD'
'fi46TJCKqxE0zTArQQROocfKS+7JM+JU5dLMNOOC+6tCUOP3GEjuE3PMetpbH'
'+k6Wu6d3LzhpU2QICWJnFpj1yJTAb94pWRUKNoBhpxQlWvNzRgFgJesIfkZ4C'
'qqhmHqnV/BO+7MMv/g+WXRD09fo/YIXozpWzmO9LBzEvFe7Itz6C1R4Ng==')
def clean_attributes(self, attrs_in):
attr_map = {
'primarysid': 'primary_sid',
'company': 'department_name',
'email': 'email',
'winaccountname': 'username',
'group': 'ad_groups',
'unique_name': 'last_first_name',
'given_name': 'first_name',
'family_name': 'last_name',
}
# Convert attribute names to lowercase
attrs_in = {k.lower(): v for k, v in attrs_in.items()}
attrs = {}
for in_name, out_name in attr_map.items():
val = attrs_in.get(in_name, None)
if val is not None:
if out_name in ('department_name', 'email', 'username'):
val = val.lower()
attrs[out_name] = val
if 'last_first_name' in attrs:
names = attrs['last_first_name'].split(' ')
if 'first_name' not in attrs:
attrs['first_name'] = [names[0]]
if 'last_name' not in attrs:
attrs['last_name'] = [' '.join(names[1:])]
del attrs['last_first_name']
return attrs
class EspooADFSOAuth2Adapter(ADFSOAuth2Adapter):
provider_id = EspooADFSProvider.id
realm = 'espoo'
access_token_url = 'https://fs.espoo.fi/adfs/oauth2/token'
authorize_url = 'https://fs.espoo.fi/adfs/oauth2/authorize'
profile_url = 'https://api.hel.fi/sso/user/'
cert = (
'MIIG1zCCBL+gAwIBAgITGgAAfQoAbggMFZQDYAAAAAB9CjANBgkqhkiG9w0BAQsF'
'ADBaMRQwEgYKCZImiZPyLGQBGRYEY2l0eTESMBAGCgmSJomT8ixkARkWAmFkMRUw'
'EwYKCZImiZPyLGQBGRYFZXNwb28xFzAVBgNVBAMTDkVzcG9vIEggU3ViIENBMB4X'
'DTE3MTEyMjEzMDIxMVoXDTIyMTEyMjEzMTIxMVowKDEmMCQGA1UEAxMdQURGUyBT'
'aWduIC0gZnMuZXNwb28uZmkgU0hBLTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw'
'ggEKAoIBAQCpNY8Z85B2zlNTJlVRjenLKGNRVOc0+Q/Ll+mA4W0+epMtWl5ljZQU'
'kWVBOm3vxT2Z5BcEDuv8eygl2R5eqVAExxAfxKbFuC2QrRTvl4frkdi0juVOY/Vs'
'AZVm6TxMvX4eletZT8iGdb6Al40EriFtdPrTX5NhoTG6YwcQtFa7UHstjsxDktb+'
'ZXphpPoFB65kSi948ThVPdo6UwIhLKioSw/zVUyfziRstce55CvqKdPbrhXZYRx4'
'dQY1gKScfbD1XMi+wVMwhp5Abn4D9BNbesMNsZqYHdzyANwMLqszJ6ASRuWoW4xp'
'/sjs/cs16HDOYyTHy09ppaCUx3wD7tqfAgMBAAGjggLGMIICwjA+BgkrBgEEAYI3'
'FQcEMTAvBicrBgEEAYI3FQiE3KFUgeH0QIS5mziD5egZh7aYPoEbhtfpHYSAlToC'
'AWQCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDgYDVR0PAQH/BAQDAgWgMBsGCSsG'
'AQQBgjcVCgQOMAwwCgYIKwYBBQUHAwEwHQYDVR0OBBYEFA3f0BbRJG1stycIZ+gZ'
'djezdJ3mMB8GA1UdIwQYMBaAFKnS5DPbd9hr720Fh3H1s8Djw+GXMIH+BgNVHR8E'
'gfYwgfMwgfCgge2ggeqGLGh0dHA6Ly9wa2kuZXNwb28uZmkvRXNwb28lMjBIJTIw'
'U3ViJTIwQ0EuY3JshoG5bGRhcDovLy9DTj1Fc3BvbyUyMEglMjBTdWIlMjBDQSxD'
'Tj1zLWgtY2EtMDMsQ049Q0RQLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENO'
'PVNlcnZpY2VzLENOPUNvbmZpZ3VyYXRpb24sREM9YWQsREM9Y2l0eT9jZXJ0aWZp'
'Y2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/b2JqZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0'
'aW9uUG9pbnQwgfwGCCsGAQUFBwEBBIHvMIHsMDgGCCsGAQUFBzAChixodHRwOi8v'
'cGtpLmVzcG9vLmZpL0VzcG9vJTIwSCUyMFN1YiUyMENBLmNydDCBrwYIKwYBBQUH'
'MAKGgaJsZGFwOi8vL0NOPUVzcG9vJTIwSCUyMFN1YiUyMENBLENOPUFJQSxDTj1Q'
'dWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxDTj1Db25maWd1cmF0'
'aW9uLERDPWFkLERDPWNpdHk/Y0FDZXJ0aWZpY2F0ZT9iYXNlP29iamVjdENsYXNz'
'PWNlcnRpZmljYXRpb25BdXRob3JpdHkwDQYJKoZIhvcNAQELBQADggIBAIGhXVtM'
'rRq2dNz66P1eO+NzZoV7g5RrN/tcOsBvplj4QjhIeyG9I22eESZNHrege0qZDHng'
'tkvYaKsIcrU0JAyK+2++D+1mLEVPsr0yo8GRnS3ROGRdm5tH52dt/esaGXmBCPoW'
'B4c4r8QeDXn7zcVvh0Z0FbIskAVEA9MoWdo7+uTMb/I+K6h97A9ysg9ry2bwAv/B'
'UletFRVJtMRHqDHd9QeS/G1EmkOP/PstDK5REN9TMo/EUpXYV1mNJF7k0TRtpXu1'
'pd14EaD2xI993Tf4Vzmeht34RjuKMGS3Rwn6DV4OoTr/49RlO6HARnkLrDz7hAT8'
'+CVM2iTOuDoswyP6Slbt/vZh9KJB+0g4f/GZCrcsq44DfpxEPAyomIAmSi0TPsjQ'
'mvQDQQXieY9b6ojxleHMGMD27GpTszXkmtS01Imwy2X7yeZyPEJuPyr0xW2tC6t9'
'ilyfuetzFr9cNawj2z0JvObVQ8X68Bq0MTBiMdtA/IWgzukGlFhCrLG+KCn/Idqz'
'dtXrlETkTPhKlm84Pr3MbEueS0MuIwGf6TGUt7arWJe6zDMf1/ZfBQV1kOjFOH6S'
'DNQhLHEL0mYumZUawi+EaNQOtTE8SN1tbKicI09WR0jdvNs7lvePrB/K1q19hz5m'
'U+rbNk9+8Jgpzd5ielj37oqQOJazbSxNt+xF'
)
def clean_attributes(self, attrs_in):
attr_map = {
'primarysid': 'primary_sid',
'given_name': 'first_name',
'family_name': 'last_name',
'email': 'email',
}
attrs = {}
for in_name, out_name in attr_map.items():
val = attrs_in.get(in_name, None)
if val is not None:
if out_name in ('department_name', 'email', 'username'):
val = val.lower()
attrs[out_name] = val
return attrs
|
py | 7df6bd8bb66aa9cb116474b435f312ac6423e5a3 | # @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
import sys
import hashlib
import json
import datetime
import copy
import pprint
from FindGeneralScriptSupport import *
from GeneralScriptSupport import *
# Validate a date YYYY-MM-DD string and return a date object for the
# 'datetime' module.
#
def validateAndConvertYYYYMMDD(dateText):
try:
return datetime.datetime.strptime(dateText, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format for '"+dateText+"', should be YYYY-MM-DD")
# Get a file name string from a general text string.
#
# This replaces non-alphanumeric chars with '_'.
#
def getFileNameStrFromText(inputStr):
fileNameStr = ""
for char in inputStr:
if char.isalnum():
fileNameStr += char
else:
fileNameStr += "_"
return fileNameStr
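# Example: getFileNameStrFromText("Builds on 2018-10-28") returns
# "Builds_on_2018_10_28".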
# Check if the key/value pairs for two dicts are the same and if return an
# error message explaining how they are different.
#
# Returns tuple (hasSameKeyValuePairs, errMsg). If
# hasSameKeyValuePairs==True, then errMsg==None. Otherwise, if
# hasSameKeyValuePairs==False, then errMsg gives a string that explains how
# they are different.
#
# This improves on a simple check dict_1 == dict_2 in that shows exactly why
# the dicts are different for a single key/value pair.
#
def checkDictsAreSame(dict_1, dict_1_name, dict_2, dict_2_name):
# Assume all passing unless we find a difference
hasSameKeyValuePairs = True
errMsg = None
# Start with the fast internal Python check
if dict_1 == dict_2:
return (True, None)
# Check if they have the same number of keys
if hasSameKeyValuePairs and (len(dict_1.keys()) != len(dict_2.keys())):
hasSameKeyValuePairs = False
errMsg = "len("+dict_1_name+".keys())="+str(len(dict_1.keys()))+\
" != len("+dict_2_name+".keys())="+str(len(dict_2.keys()))
# Check that they have the same key/value pairs
if hasSameKeyValuePairs:
for key_1 in dict_1.keys():
if not key_1 in dict_2.keys():
hasSameKeyValuePairs = False
errMsg = dict_1_name+"['"+key_1+"'] does not exist in "+dict_2_name
break
keyVal_1 = dict_1[key_1]
keyVal_2 = dict_2[key_1]
if keyVal_1 != keyVal_2:
hasSameKeyValuePairs = False
errMsg = dict_1_name+"['"+key_1+"'] = '"+str(keyVal_1)+"' != "+\
dict_2_name+"['"+key_1+"'] = '"+str(keyVal_2)+"'"
break
#end for
#end if
# Return the final result
return (hasSameKeyValuePairs, errMsg)
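# Example: checkDictsAreSame({'a':1}, "d1", {'a':2}, "d2") returns
# (False, "d1['a'] = '1' != d2['a'] = '2'").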
# Compress a long file name to avoid open() error
#
# If the full file name must be shorted and if prefix!="", then it is added to
# the beginning of the shortened filename. Also, if ext!="", then "."+ext is
# added to the end of the shortened filename. Otherwise, if inputFileName is
# not too long, then it is returned without modification (i.e. 'prefix' and
# 'ext' are ignored). NOTE: If 'prefix' and 'ext' are too long, then the
# returned shortened filename may also be too long.
#
# This function should return a shorter unique file name that is platform
# independent.
#
def getCompressedFileNameIfTooLong(inputFileName, prefix="", ext=""):
maxFileNameLength = 255 # ToDo: Figure out for this system?
if len(inputFileName) > maxFileNameLength:
hashObject = hashlib.sha1(inputFileName.encode('utf-8'))  # hashlib requires bytes
hashStr = hashObject.hexdigest()
newFileName = prefix+hashStr
if ext: newFileName += "." + ext
return newFileName
return inputFileName
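# Example (hypothetical): for a 300-char input name,
#   getCompressedFileNameIfTooLong(longName, prefix="cache_", ext="json")
# returns "cache_<40-hex-char-sha1>.json", while names within the limit are
# returned unchanged.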
# Filter an input list and return a list with elements where
# matchFunctor(inputList[i])==True.
#
def getFilteredList(inputList, matchFunctor):
filteredList = []
for ele in inputList:
if matchFunctor(ele): filteredList.append(ele)
return filteredList
# Filter an input list and return two lists (matchList, nomatchList) where the
# first list has elements where matchFunctor(inputList[i])==True and the
# second list has elements where matchFunctor(inputList[i])==False.
#
def splitListOnMatch(inputList, matchFunctor):
#print("\nsplitListOnMatch(): matchFunctor = "+str(matchFunctor))
matchList = []
nomatchList = []
for ele in inputList:
if matchFunctor(ele): matchList.append(ele)
else: nomatchList.append(ele)
return (matchList, nomatchList)
# DECORATOR match functor class that negates the match of a stored functor.
#
class NotMatchFunctor(object):
# Construct with another functor to negate
def __init__(self, matchFunctor):
self.__matchFunctor = matchFunctor
# Convert to string rep for debugging/etc.
def __str__(self):
myStr = "NotMatchFunctor{"+str(self.__matchFunctor)+"}"
return myStr
# Negate the matchFunctor
def __call__(self, item):
return (self.__matchFunctor(item) == False)
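# Example: with matchFunctor = lambda x: x % 2 == 0,
#   splitListOnMatch([1, 2, 3, 4], matchFunctor) returns ([2, 4], [1, 3])
# and getFilteredList([1, 2, 3, 4], NotMatchFunctor(matchFunctor)) returns
# [1, 3].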
# Apply a functor to transform every element in a list
#
# The object transformFunctor is applied as:
#
# list_inout[i] = transformFunctor(list_inout[i])
#
# If the elements are small value-type objects, then the assignment is needed.
# However, if the list elements are handled with reference semantics like a
# list [] or a dict {} then really the object is being modified in place and
# the assignment is not needed, but it is cheap and harmless in that case.
#
# This returns the input list transformed but the return object can be ignored
# because it modifies the input list object's elements in place.
#
def foreachTransform(list_inout, transformFunctor):
for i in range(len(list_inout)):  # range works under both Python 2 and 3
list_inout[i] = transformFunctor(list_inout[i])
return list_inout
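# Example: foreachTransform([1, 2, 3], lambda x: x * 10) modifies the list in
# place to [10, 20, 30] and also returns it.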
# Remove elements from a list given a list of indexes
#
# This modifies the original list in place but also returns it. Therefore, if
# you want to keep the original list, you better create a copy of the base
# list object before passing it in.
#
def removeElementsFromListGivenIndexes(list_inout, indexesToRemoveList_in):
indexesToRemoveList = copy.copy(indexesToRemoveList_in)
indexesToRemoveList.sort()
numRemoved = 0
for index in indexesToRemoveList:
del list_inout[index-numRemoved]
numRemoved += 1
return list_inout
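# Example: removeElementsFromListGivenIndexes(['a', 'b', 'c', 'd'], [1, 3])
# removes 'b' and 'd' and returns ['a', 'c'].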
#
# Class CsvFileStructure
#
class CsvFileStructure(object):
def __init__(self, headersList, rowsList):
self.headersList = headersList
self.rowsList = rowsList
#
# Write a CsvFileStructure data to a string
#
def writeCsvFileStructureToStr(csvFileStruct):
csvFileStr = ", ".join(csvFileStruct.headersList)+"\n"
for rowFieldsList in csvFileStruct.rowsList:
csvFileStr += ", ".join(rowFieldsList)+"\n"
return csvFileStr
#
# CDash Specific stuff
#
def cdashColorPassed(): return 'green'
def cdashColorFailed(): return 'red'
def cdashColorNotRun(): return 'orange'
def cdashColorMissing(): return 'gray'
# ToDo: Make the above return different colors for a color-blind palette
# Given a CDash query URL PHP page that returns JSON data, return the JSON
# data converged to a Python data-structure.
#
# The returned Python object will be a simple nested set of Python dicts and
# lists.
#
# NOTE: This function can't really be unit tested because it actually gets
# data from CDash. Therefore, the code below is structured so that we can
# avoid calling it in any automated tests.
#
def extractCDashApiQueryData(cdashApiQueryUrl):
#print sys.version_info
if sys.version_info < (2,7,9):
raise Exception("Error: Must be using Python 2.7.9 or newer")
# NOTE: If we use Python 2.6.6. then the urllib2 function crashes!
response = urlopen(cdashApiQueryUrl)
return json.load(response)
# Read a CSV file into a list of dictionaries for each row where the rows of
# the output list are dicts with the column names as keys.
#
# For example, for the CSV file:
#
# col_0, col_1, col_2
# val_00, val_01, val_02
# val_10, val_11, val_12
#
# the returned list of dicts will be:
#
# [
# { 'col_0':'val_00', 'col_1':'val_01', 'col_2':'val_02' },
# { 'col_0':'val_10', 'col_1':'val_11', 'col_2':'val_12' },
# ]
#
# and the expected list of column headers would be:
#
# expectedColumnHeadersList = [ 'col_0', 'col_1', 'col_2' ]
#
# But the expectedColumnHeadersList argument is optional.
#
def readCsvFileIntoListOfDicts(csvFileName, expectedColumnHeadersList=None):
listOfDicts = []
with open(csvFileName, 'r') as csvFile:
# Get the list of column headers
columnHeadersLineStr = csvFile.readline().strip()
columnHeadersRawStrList = columnHeadersLineStr.split(',')
columnHeadersList = []
for headerRawStr in columnHeadersRawStrList:
columnHeadersList.append(headerRawStr.strip())
if expectedColumnHeadersList:
if len(columnHeadersList) != len(expectedColumnHeadersList):
raise Exception(
"Error, for CSV file '"+csvFileName+"' the"+\
" column headers '"+str(columnHeadersList)+"' has"+\
" "+str(len(columnHeadersList))+" items but the expected"+\
" set of column headers '"+str(expectedColumnHeadersList)+"'"+\
" has "+str(len(expectedColumnHeadersList))+" items!")
for i in range(len(columnHeadersList)):
if columnHeadersList[i] != expectedColumnHeadersList[i]:
raise Exception(
"Error, column header "+str(i)+" '"+columnHeadersList[i]+"' does"+\
" not match expected column header '"+expectedColumnHeadersList[i]+"'!")
# Read the rows of the CSV file into dicts
dataRow = 0
line = csvFile.readline().strip()
while line:
#print("\ndataRow = "+str(dataRow))
lineList = line.split(',')
#print(lineList)
# Assert that the row has the right number of entries
if len(lineList) != len(columnHeadersList):
raise Exception(
"Error, data row "+str(dataRow)+" '"+line+"' has"+\
" "+str(len(lineList))+" entries which does not macth"+\
" the number of column headers "+str(len(columnHeadersList))+"!")
# Read the row entries into a new dict
rowDict = {}
for j in range(len(columnHeadersList)):
rowDict.update( { columnHeadersList[j] : lineList[j].strip() } )
#print(rowDict)
listOfDicts.append(rowDict)
# Update for next row
line = csvFile.readline().strip()
dataRow += 1
# Return the constructed object
return listOfDicts
# Get list of expected builds from CSV file
def getExpectedBuildsListfromCsvFile(expectedBuildsFileName):
return readCsvFileIntoListOfDicts(expectedBuildsFileName,
['group', 'site', 'buildname'])
# Headers for basic CSV file
g_testsWithIssueTrackersCsvFileHeaders = \
('site', 'buildName', 'testname', 'issue_tracker_url', 'issue_tracker')
# Get list of tests from CSV file
def getTestsWtihIssueTrackersListFromCsvFile(testsWithIssueTrackersFile):
return readCsvFileIntoListOfDicts(testsWithIssueTrackersFile,
g_testsWithIssueTrackersCsvFileHeaders)
# Write list of tests from a Tests LOD to a CSV file structure meant to match
# tests with issue trackers CSV file.
#
def writeTestsLODToCsvFileStructure(testsLOD):
csvFileHeadersList = copy.deepcopy(g_testsWithIssueTrackersCsvFileHeaders)
csvFileRowsList = []
for testDict in testsLOD:
csvFileRow = (
testDict['site'],
testDict['buildName'],
testDict['testname'],
"", # issue_tracker_url
"", # issue_tracker
)
csvFileRowsList.append(csvFileRow)
return CsvFileStructure(csvFileHeadersList, csvFileRowsList)
# Write list of tests from a Tests LOD to a CSV file meant to match tests with
# issue trackers CSV file.
#
def writeTestsLODToCsvFile(testsLOD, csvFileName):
csvFileStruct = writeTestsLODToCsvFileStructure(testsLOD)
with open(csvFileName, 'w') as csvFile:
csvFile.write(writeCsvFileStructureToStr(csvFileStruct))
# Print print a nested Python data-structure to a file
#
# ToDo: Reimplement this to create better looking indentation that involves
# less right-drift at the expense of more vertical space.
#
def pprintPythonDataToFile(pythonData, filePath):
pp = pprint.PrettyPrinter(stream=open(filePath,'w'), indent=2)
pp.pprint(pythonData)
# Get data off CDash and cache it or read from previously cached data.
#
# If useCachedCDashData == True, then the file cdashQueryDataCacheFile must
# exist and will be used to get the data instead of calling CDash
#
# If alwaysUseCacheFileIfExists==True and the file cdashQueryDataCacheFile
# already exists, then the file cdashQueryDataCacheFile will be used to get
# the data instead of calling CDash.
#
# Otherwise, CDash will be called at cdashQueryUrl to get the data and then
# the data will be written to the file cdashQueryDataCacheFile if
# cdashQueryDataCacheFile != None.
#
# This function can be used to get data off of CDash using any page on CDash
# including cdash/api/v1/index.php, cdash/api/v1/queryTests.php and anything
# other PHP page that returns a JSON data structure (which is all of the
# cdash/api/v1/XXX.php pages).
#
def getAndCacheCDashQueryDataOrReadFromCache(
cdashQueryUrl,
cdashQueryDataCacheFile, # File name
useCachedCDashData, # If 'True', then cdasyQueryDataCacheFile must be non-null
alwaysUseCacheFileIfExists = False,
verbose = False,
extractCDashApiQueryData_in=extractCDashApiQueryData,
):
if (
alwaysUseCacheFileIfExists \
and cdashQueryDataCacheFile \
and os.path.exists(cdashQueryDataCacheFile) \
):
if verbose:
print(" Since the file exists, using cached data from file:\n"+\
" "+cdashQueryDataCacheFile )
cdashQueryData=eval(open(cdashQueryDataCacheFile, 'r').read())
elif useCachedCDashData:
if verbose:
print(" Using cached data from file:\n "+cdashQueryUrl )
cdashQueryData=eval(open(cdashQueryDataCacheFile, 'r').read())
else:
if verbose:
print(" Downloading CDash data from:\n "+cdashQueryUrl )
cdashQueryData = extractCDashApiQueryData_in(cdashQueryUrl)
if cdashQueryDataCacheFile:
if verbose:
print(" Caching data downloaded from CDash to file:\n "+\
cdashQueryDataCacheFile)
pprintPythonDataToFile(cdashQueryData, cdashQueryDataCacheFile)
return cdashQueryData
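# A minimal usage sketch (hypothetical URL and cache file name):
#
#   cdashData = getAndCacheCDashQueryDataOrReadFromCache(
#     "https://my.cdash.site/api/v1/index.php?project=MyProj&date=2018-10-28",
#     "cdash_index_cache.json",
#     useCachedCDashData=False,
#     verbose=True)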
# Construct full cdash/api/v1/index.php query URL to pull data down given the
# pieces
def getCDashIndexQueryUrl(cdashUrl, projectName, date, filterFields):
if date: dateArg = "&date="+date
else: dateArg = ""
return cdashUrl+"/api/v1/index.php?project="+projectName+dateArg \
+ "&"+filterFields
# Construct full cdash/index.php browser URL given the pieces
def getCDashIndexBrowserUrl(cdashUrl, projectName, date, filterFields):
if date: dateArg = "&date="+date
else: dateArg = ""
return cdashUrl+"/index.php?project="+projectName+dateArg \
+ "&"+filterFields
# Construct full cdash/api/v1/queryTests.php query URL given the pieces
def getCDashQueryTestsQueryUrl(cdashUrl, projectName, date, filterFields):
if date: dateArg = "&date="+date
else: dateArg = ""
return cdashUrl+"/api/v1/queryTests.php?project="+projectName+dateArg+"&"+filterFields
# Construct full cdash/queryTests.php browser URL given the pieces
def getCDashQueryTestsBrowserUrl(cdashUrl, projectName, date, filterFields):
if date: dateArg = "&date="+date
else: dateArg = ""
return cdashUrl+"/queryTests.php?project="+projectName+dateArg+"&"+filterFields
# Copy a key/value pair from one dict to another if it eixsts
def copyKeyDictIfExists(sourceDict_in, keyName_in, dict_inout):
value = sourceDict_in.get(keyName_in, None)
if value:
dict_inout.update( { keyName_in : value } )
# Extend the set of fields for a CDash index.phpb build dict.
#
# buildDict_in [in]: The build dict gotten from cdash/index.php. This will be
# modified in place.
#
# Returns the modified build dict.
#
# Change this to get all of the fields and add the 'group' field as well.
#
def extendCDashIndexBuildDict(buildDict_in, groupName):
buildDict = buildDict_in
buildDict[u'group'] = groupName
return buildDict
# Given the full Python JSON data-structure returned from the page
# cdash/api/v1/index.php query from extractCDashApiQueryData(), return a
# flattened-out data-structure that is easier to manipulate.
#
# This function takes in the JSON data-structure (as a nested set of Python
# dicts and listed) directly returned from a query gotten from the page
# cdash/api/v1/index.php with some filters.
#
# The input full CDash index.php JSON data-structure has the following
# structure and fields of interest:
#
# fullCDashIndexBuildsJson =
# {
# 'all_buildgroups': [ {'id':1,'name:"Nightly"}, ...],
# 'buildgroups': [
# {
# 'name':"???", # group name, e.g. Nightly
# 'builds":[
# {
# 'site':"???"
# 'buildname':"???",
# 'update': {'errors':???, ...},
# 'configure':{'error': ???, ...},
# 'compilation':{'error': ???, ...},
# 'test': {'fail':???, 'notrun':???, 'pass':???, ...},
# ...
# },
# ...
# ]
# },
# ...
# ...
# ]
# },
# ...
# }
#
# This function gets the data from *all* of the collapsed builds and returns
# the flatten-out list of dicts for each build with the 'group' field added in
# as:
#
# [
# {
# 'group':"???",
# 'site':"???",
# 'buildname':"???",
# 'update': {'errors':???, ...},
# 'configure':{'error': ???, ...},
# 'compilation':{'error': ???, ...},
# 'test': {'fail':???, 'notrun':???, 'pass':???, ...},
# ...
# },
# ...
# ]
#
# This collects *all* of the builds from all of the build groups provided by
# that data-structure, not just the 'Nightly' build group. Therefore, if you
# want to only consider one set of build groups, you need to add that to the
# CDash query URL (e.g. group='Nightly').
#
def flattenCDashIndexBuildsToListOfDicts(fullCDashIndexBuildsJson):
summaryCDashIndexBuilds = []
for buildgroup in fullCDashIndexBuildsJson["buildgroups"]:
groupName = buildgroup["name"]
for build in buildgroup["builds"]:
summaryBuild = extendCDashIndexBuildDict(build, groupName)
summaryCDashIndexBuilds.append(summaryBuild)
return summaryCDashIndexBuilds
# Given the full JSON data-structure returned from the page
# cdash/api/v1/queryTests.php query from extractCDashApiQueryData(), return a
# flattened-out data-structure that is easier to manipulate.
#
# This function takes in the JSON data-structure (as a nested set of Python
# dicts and listed) directly returned from a query gotten from the page
# cdash/api/v1/queryTests.php with some filters.
#
# The input full CDash queryTests.php JSON data-structure has the following
# structure and fields of interest:
#
# fullCDashQueryTestsJson =
# {
# 'version':???,
# 'feed_enabled':???,
# ...
# 'builds': [
# {
# 'buildName': 'Trilinos-atdm-mutrino-intel-opt-openmp-HSW',
# 'buildSummaryLink': 'buildSummary.php?buildid=4109735',
# 'buildstarttime': '2018-10-29T05:54:03 UTC',
# 'details': 'Completed (Failed)\n',
# 'nprocs': 4,
# 'prettyProcTime': '40s 400ms',
# 'prettyTime': '10s 100ms',
# 'procTime': 40.4,
# 'site': 'mutrino',
# 'siteLink': 'viewSite.php?siteid=223',
# 'status': 'Failed',
# 'statusclass': 'error',
# 'testDetailsLink': 'testDetails.php?test=57925465&build=4109735',
# 'testname': 'Anasazi_Epetra_BKS_norestart_test_MPI_4',
# 'time': 10.1
# },
# ...
# ],
# ...
# }
#
# This function gets the data from *all* of the tests and returns the
# flatten-out list of dicts with some additional fields for each test of the
# form:
#
# [
# {
# 'buildName': 'Trilinos-atdm-mutrino-intel-opt-openmp-HSW',
# 'buildSummaryLink': 'buildSummary.php?buildid=4109735',
# 'buildstarttime': '2018-10-29T05:54:03 UTC',
# 'details': 'Completed (Failed)\n',
# 'nprocs': 4,
# 'prettyProcTime': '40s 400ms',
# 'prettyTime': '10s 100ms',
# 'procTime': 40.4,
# 'site': 'mutrino',
# 'siteLink': 'viewSite.php?siteid=223',
# 'status': 'Failed',
# 'statusclass': 'error',
# 'testDetailsLink': 'testDetails.php?test=57925465&build=4109735',
# 'testname': 'Anasazi_Epetra_BKS_norestart_test_MPI_4',
# 'time': 10.1,
# },
# ...
# ]
#
# NOTE: This does a shallow copy so any modifications to the returned list and
# dicts will modify the original data-structure fullCDashQueryTestsJson. If
# that is a problem, then make sure and do a deep copy before passing in
# fullCDashQueryTestsJson.
#
# This collects *all* of the tests from all of the "build" list provided by
# the CDash JSON data-structure. Therefore, if you want to only consider one
# set of build groups, you need to add that to the CDash query URL
# (e.g. buildName='<build-name>').
#
def flattenCDashQueryTestsToListOfDicts(fullCDashQueryTestsJson):
testsListOfDicts = []
for testDict in fullCDashQueryTestsJson['builds']:
testsListOfDicts.append(testDict)
return testsListOfDicts
# Create a lookup dict for a list of dicts
#
# listOfDicts [in/out]: List of dict objects that have keys that one will want
# to lookup the dict based on their values. May have 100% duplicate elements
# removed from the list.
#
# listOfKeys [in]: List of the names of keys in these dicts that are used to
# build a search dict data-structure which is returned from this function.
#
# removeExactDuplicateElements [in]: If True, then dict elements that are 100%
# duplicates and have the exact same key/value pairs will be removed from
# listOfDicts. (default False)
#
# checkDictsAreSame_in [in]: Allows specialization of the check for exact dict
# matches and reporting the differences. The default value is the function
# checkDictsAreSame(). Any Python object that has the __call__() operator
# function defined that takes those same arguments and returns the same
# outputs as the function checkDictsAreSame() can be passed in.
#
# If listOfDicts has any elements that are 100% complete duplicates with the
# same exact key/value pairs, then the later elements will be removed from the
# list. But if just the key/value pairs listed in listOfKeys are duplicated
# but one or more of the other key/value pairs is different, then an
# exception is thrown.
#
# NOTE: This is an implementation function that is used in the class
# SearchableListOfDicts. Please use that class instead of this raw function.
#
def createLookupDictForListOfDicts(listOfDicts, listOfKeys,
removeExactDuplicateElements=False,
checkDictsAreSame_in=checkDictsAreSame,
):
# Build the lookup dict data-structure. Also, optionally mark any 100%
# duplicate elements if asked to remove 100% duplicate elements.
lookupDict = {} ; idx = 0 ; numRemoved = 0 ; duplicateIndexesToRemoveList = []
for dictEle in listOfDicts:
# Create the structure of recursive dicts for the keys in order
currentLookupDictRef = lookupDict
lastLookupDictRef = None
lastKeyValue = None
for key in listOfKeys:
keyValue = dictEle[key]
lastLookupDictRef = currentLookupDictRef
lastKeyValue = keyValue
nextLookupDictRef = currentLookupDictRef.setdefault(keyValue, {})
currentLookupDictRef = nextLookupDictRef
addEle = True
# Check to see if this dict has already been added
if currentLookupDictRef:
lookedUpDict = currentLookupDictRef.get('dict', None)
lookedUpIdx = currentLookupDictRef.get('idx', None)
(hasSameKeyValuePairs, dictDiffErrorMsg) = checkDictsAreSame_in(
dictEle, "listOfDicts["+str(idx)+"]",
lookedUpDict, "listOfDicts["+str(lookedUpIdx)+"]" )
if hasSameKeyValuePairs and removeExactDuplicateElements:
# This is a 100% duplicate element to one previously added.
# Therefore, mark this duplicate element to be removed from the
# original list.
duplicateIndexesToRemoveList.append(idx)
addEle = False
else:
raise Exception(
"Error, The element\n\n"+\
" listOfDicts["+str(idx)+"] =\n\n"+\
" "+sorted_dict_str(dictEle)+"\n\n"+\
" has duplicate values for the list of keys\n\n"+\
" "+str(listOfKeys)+"\n\n"+\
" with the element already added\n\n"+\
" listOfDicts["+str(lookedUpIdx)+"] =\n\n"+\
" "+sorted_dict_str(lookedUpDict)+"\n\n"+\
" and differs by at least the key/value pair\n\n"+\
" "+str(dictDiffErrorMsg))
# Need to go back and reset the dict on the last dict in the
# data-structure so that modifications to the dicts that are looked up
# will modify the original list.
if addEle:
currentLookupDictRef.update({'dict':dictEle, 'idx':idx-numRemoved})
else:
numRemoved += 1
idx += 1
  # Remove 100% duplicate elements marked above
removeElementsFromListGivenIndexes(listOfDicts, duplicateIndexesToRemoveList)
return lookupDict
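# For example (hypothetical data), given listOfKeys=['site', 'buildname'],
# this function builds a nested lookup data-structure like:
#
#   listOfDicts = [
#     { 'site':'s1', 'buildname':'b1', 'status':'Passed' },
#     { 'site':'s1', 'buildname':'b2', 'status':'Failed' },
#     ]
#   lookupDict = createLookupDictForListOfDicts(listOfDicts, ['site', 'buildname'])
#   # lookupDict == {
#   #   's1': {
#   #     'b1': { 'dict':listOfDicts[0], 'idx':0 },
#   #     'b2': { 'dict':listOfDicts[1], 'idx':1 },
#   #     },
#   #   }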
# Lookup a dict (and optionally also its index location) in a list of dicts
# given a lookup dict returned from createLookupDictForListOfDicts() where the
# key/value pairs match
#
# lookupDict [in]: A dict created by createLookupDictForListOfDicts() given
# the same listOfKeys used in that function.
#
# listOfKeys [in]: List of keys that was used to create lookupDict.
#
# listOfValues [in]: A list of values for the given list of keys in
# listOfKeys.
#
# alsoReturnIdx [in]: If True, then the index of the located dict in the
# original listOfDicts will be returned as well. (default False)
#
# If the matching dict is found, then it will be returned as:
#
# matchingDict = lookupDictGivenLookupDict(...)
#
# If alsoReturnIdx==True, then also the index will be returned as:
#
# (matchingDict, idx) = lookupDictGivenLookupDict(...)
#
# If the matching dict is not found, then None will be returned or the tuple
# (None, None) if alsoReturnIdx==True.
#
# NOTE: This is an implementation function that is used in the class
# SearchableListOfDicts. Please use that class instead of this raw function.
#
def lookupDictGivenLookupDict(lookupDict, listOfKeys, listOfValues,
alsoReturnIdx=False,
):
#print("\nlookupDict = "+str(lookupDict))
#print("\nlistOfKeys = "+str(listOfKeys))
#print("\ndictToFind = "+str(dictToFind))
if len(listOfKeys) != len(listOfValues):
raise Exception("Error, len(listOfKeys)="+str(len(listOfKeys))+\
" != len(listOfValues)="+str(len(listOfValues))+" where"+\
" listOfKeys="+str(listOfKeys)+\
" and listOfValues="+str(listOfValues)+"!")
currentSubLookupDict = lookupDict
idx = 0
for idx in xrange(len(listOfValues)):
key = listOfKeys[idx]
#print("\nkey = '"+key+"'")
keyValueToFind = listOfValues[idx]
#print("keyValueToFind = '"+str(keyValueToFind)+"'")
#print("currentSubLookupDict = "+str(currentSubLookupDict))
keyValueLookedUp = currentSubLookupDict.get(keyValueToFind, None)
#print("keyValueLookedUp = "+str(keyValueLookedUp))
if not keyValueLookedUp:
if alsoReturnIdx: return (None, None)
return None
currentSubLookupDict = keyValueLookedUp
if keyValueLookedUp:
if alsoReturnIdx:
return (keyValueLookedUp.get('dict'), keyValueLookedUp.get('idx'))
return keyValueLookedUp.get('dict')
return None
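# Continuing the hypothetical example above for
# createLookupDictForListOfDicts(), a lookup would be done as:
#
#   matchingDict = lookupDictGivenLookupDict(lookupDict, ['site', 'buildname'],
#     ['s1', 'b2'])
#   # matchingDict == { 'site':'s1', 'buildname':'b2', 'status':'Failed' }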
# Class that encapsulates a list of dicts and an efficient lookup of a dict
# given a list key/value pairs to match.
#
# Once created, this object acts like a list of dicts in most cases but also
# contains functions to search for specific dicts given a set of key/value
# pairs.
#
# Any modifications to the dicts looked up with this object will edit the
# dicts in the underlying list of dicts. This therefore makes this class act
# as a type of multi-key lookup dict using the member function
# lookupDictGivenKeyValuesList(['keyval0', 'keyval1', ...]). This provides a
# handy way to access and edit the underlying dicts that require
# multi-key/value pairs to find them.
#
# NOTE: The key values for the list of keys given in listOfKeys must be
# unique! If they are not, then an exception will be thrown.
#
class SearchableListOfDicts(object):
# Constructor
#
  # listOfDicts [stored, may be modified]: List of dicts that a search
# data-structure will be created for.
#
# listOfKeys [stored, will not be modified]: List of the names of keys in
# the dicts of listOfDicts that a search data-structure will be created for
  #   and defines the set of key/value pairs used to look up dicts in
# listOfDicts.
#
# removeExactDuplicateElements [in]: If True, then exact duplicate dicts
# based on the key/value pairs in listOfKeys will be removed from
# listOfDicts (which is modified in place, not a copy). (default False)
#
# keyMapList [in]: Optional list of key names in the input
  #   keyValueDictToFind to pull out and use to match the key/value pairs in
  #   the listOfKeys. This allows a mapping from the input dict key names to
  #   the output dict key names. (default None)
#
# checkDictsAreSame_in [in]: Allows specialization of the check for exact
# dict matches and reporting the differences. The default value is the
# function checkDictsAreSame(). Any Python object that has the __call__()
# operator function defined that takes those same arguments and returns the
# same outputs as the function checkDictsAreSame() can be passed in.
#
def __init__(self, listOfDicts, listOfKeys,
removeExactDuplicateElements=False, keyMapList=None,
checkDictsAreSame_in=checkDictsAreSame,
):
if keyMapList:
if len(listOfKeys) != len(keyMapList):
raise Exception("Error, listOfKeys="+str(listOfKeys)+\
" keyMapList="+str(listOfKeys)+" have different lenghts!" )
self.__listOfDicts = listOfDicts
self.__listOfKeys = listOfKeys
self.__keyMapList = keyMapList
self.__checkDictsAreSame = checkDictsAreSame_in
self.__lookupDict = createLookupDictForListOfDicts(
self.__listOfDicts, self.__listOfKeys,
removeExactDuplicateElements=removeExactDuplicateElements,
checkDictsAreSame_in=checkDictsAreSame_in)
# Convert to string rep
def __str__(self):
myStr = "SearchableListOfDicts{listOfDicts="+str(self.__listOfDicts)+\
", listOfKeys="+str(self.__listOfKeys)+", lookupDict="+str(self.__lookupDict)+"}"
return myStr
# Return listOfDicts passed into Constructor
def getListOfDicts(self):
return self.__listOfDicts
# Return listOfKeys passed to Constructor
def getListOfKeys(self):
return self.__listOfKeys
# Return keyMapList passed to Constructor
def getKeyMapList(self):
return self.__keyMapList
# Lookup a dict given a dict with same key/value pairs for keys listed in
# listOfKeys.
def lookupDictGivenKeyValueDict(self, keyValueDictToFind, alsoReturnIdx=False):
if self.__keyMapList:
keyListToUse = self.__keyMapList
else:
keyListToUse = self.__listOfKeys
keyValuesListToFind = []
for idx in xrange(len(keyListToUse)):
keyValuesListToFind.append(keyValueDictToFind.get(keyListToUse[idx]))
lookupRtn = self.lookupDictGivenKeyValuesList(keyValuesListToFind, alsoReturnIdx)
return lookupRtn
# Lookup a dict given a flat list of values for the keys
#
  # Must be in the same order as self.getListOfKeys().
#
def lookupDictGivenKeyValuesList(self, keyValuesListToFind, alsoReturnIdx=False):
lookupRtn = lookupDictGivenLookupDict(self.__lookupDict, self.__listOfKeys,
keyValuesListToFind, alsoReturnIdx)
return lookupRtn
# Functions to allow this to act like a list
def __len__(self):
return len(self.__listOfDicts)
def __getitem__(self, index_in):
return self.__listOfDicts[index_in]
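# Example usage (hypothetical data) showing that edits to a looked-up dict
# modify the underlying list element:
#
#   buildsSLOD = SearchableListOfDicts(
#     [ { 'site':'s1', 'buildname':'b1', 'status':'Passed' } ],
#     ['site', 'buildname'] )
#   buildDict = buildsSLOD.lookupDictGivenKeyValuesList(['s1', 'b1'])
#   buildDict['status'] = 'Failed'
#   # buildsSLOD.getListOfDicts()[0]['status'] == 'Failed'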
# Create a SearchableListOfDicts object for a list of builds dicts that allows
# lookups of builds given the keys "group" => "site" => "buildname" :
# build_dict.
def createSearchableListOfBuilds(buildsListOfDicts):
return SearchableListOfDicts(buildsListOfDicts, ['group', 'site', 'buildname'])
# Create a SearchableListOfDicts object for a list of tests with issue
# trackers that allows lookups of tests given the keys "site" => "buildName"
# => "testname" : test_dict.
def createSearchableListOfTests( testsListOfDicts,
removeExactDuplicateElements=False,
checkDictsAreSame_in=checkDictsAreSame,
):
return SearchableListOfDicts(testsListOfDicts, ['site', 'buildName', 'testname'],
removeExactDuplicateElements=removeExactDuplicateElements,
checkDictsAreSame_in=checkDictsAreSame_in )
# Create a SearchableListOfDicts object for a list of build dicts that allows
# lookups matching the 'site' and 'buildname' fields but takes test dicts
# that have the fields 'site' and 'buildName' as the search input.
def createTestToBuildSearchableListOfDicts(buildsLOD,
removeExactDuplicateElements=False,
):
return SearchableListOfDicts( buildsLOD, ('site', 'buildname'),
removeExactDuplicateElements=removeExactDuplicateElements,
keyMapList=('site', 'buildName') )
# NOTE: The extra keyMapList is needed because CDash used the key name
  # 'buildname' for the build name returned from the cdash/index.php page
# while it gave the build name the key name 'buildName' for the data
# returned from cdash/queryTests.php.
# Match functor that returns True if the input dict has key/value pairs that
# match one of the dicts in the input SearchableListOfDicts.
class MatchDictKeysValuesFunctor(object):
# Construct with a SearchableListOfDicts object
def __init__(self, searchableListOfDict):
self.__searchableListOfDict = searchableListOfDict
# Convert to string rep for debugging/etc.
def __str__(self):
myStr = "MatchDictKeysValuesFunctor{"+str(self.__searchableListOfDict)+"}"
return myStr
# Return 'true' if the key/value pairs in dict_in match the key/value pairs
# in one of the dicts in the searchableListOfDict object.
def __call__(self, dict_in):
matchingDict = self.__searchableListOfDict.lookupDictGivenKeyValueDict(dict_in)
if matchingDict:
return True
return False
# Transform functor that adds issue tracker info and URL to an existing test
# dict.
#
# This functor looks up the test based on 'site', 'buildName', and 'testname'
# keys to find the entry in the list of known issues with issue trackers and
# then it copies the issue tracker fields to the input/output test dict.
class AddIssueTrackerInfoToTestDictFunctor(object):
# Construct with a SearchableListOfDicts object that has issue tracker info.
# This object testsWithIssueTrackersSLOD must have been constructed using
# the function createSearchableListOfTests() so it will allow lookups based
# on the 'site', 'buildName', and 'testname' keys.
def __init__(self, testsWithIssueTrackersSLOD, addEmptyOnNoMatch=True):
self.__testsWithIssueTrackersSLOD = testsWithIssueTrackersSLOD
self.__addEmptyOnNoMatch = addEmptyOnNoMatch
# Lookup the issue tracker info and add it as new key/value pairs to
# testDict_inout.
def __call__(self, testDict_inout):
# Look up the entry for the test tracker info based on the 'site',
# 'buildName', and 'testname' key/value pairs in testDict_inout.
matchingDict = \
self.__testsWithIssueTrackersSLOD.lookupDictGivenKeyValueDict(testDict_inout)
if matchingDict:
issue_tracker = matchingDict['issue_tracker']
issue_tracker_url = matchingDict['issue_tracker_url']
else:
if self.__addEmptyOnNoMatch:
issue_tracker = ""
issue_tracker_url = ""
else:
raise Exception(
"Error, testDict_inout="+str(testDict_inout)+\
" does not have an assigned issue tracker!")
testDict_inout[u'issue_tracker'] = issue_tracker
testDict_inout[u'issue_tracker_url'] = issue_tracker_url
return testDict_inout
# Assert that the list of tests with issue trackers matches the expected
# builds.
#
# testsWithIssueTrackersLOD [in]: List of dicts of tests with issue trackers.
# Here, only the fields 'site', 'buildName', and 'testname' are significant.
#
# testToExpectedBuildsSLOD [in]: SearchableListOfDicts of expected builds
# that matches test dicts on the fields 'site' and 'buildName' (see
# createTestToBuildSearchableListOfDicts()). Here, the key/value pairs 'site'
# and 'buildname' must be unique. The 'group' field is ignored (because
# cdash/queryTests.php does not give the 'group' of each test).
#
# This returns a tuple (matches, errMsg). If all of the tests match, then
# 'matches' will be True and errMsg=="". If one or more of the tests don't
# match then 'matches' will be False and 'errMsg' will give a message about
# which tests are missing.
#
def testsWithIssueTrackersMatchExpectedBuilds( testsWithIssueTrackersLOD,
testToExpectedBuildsSLOD,
):
# Gather up all of the tests that don't match one of the expected builds
nonmatchingTestsWithIssueTrackersLOD = []
for testDict in testsWithIssueTrackersLOD:
expectedBuildDict = testToExpectedBuildsSLOD.lookupDictGivenKeyValueDict(testDict)
if not expectedBuildDict:
nonmatchingTestsWithIssueTrackersLOD.append(
{'site':testDict['site'], 'buildName':testDict['buildName'],
'testname':testDict['testname']} )
# If all tests matched, return True
if len(nonmatchingTestsWithIssueTrackersLOD) == 0:
return (True, "")
# One or more tests did not match so build an error message and return False
errMsg = \
"Error: The following tests with issue trackers did not match 'site' and"+\
" 'buildName' in one of the expected builds:\n"
for testDict in nonmatchingTestsWithIssueTrackersLOD:
errMsg += \
" {'site'='"+testDict['site']+"'"+\
", 'buildName'="+testDict['buildName']+"'"+\
", 'testname'="+testDict['testname']+"'}\n"
return (False, errMsg)
# Extract just the date from the testDict['buildstartdate'] field
def dateFromBuildStartTime(buildStartTime):
return buildStartTime.split('T')[0]
# Sort list of test history dicts and get statistics
#
# Inputs:
#
# testHistoryLOD [in]: List of test dicts for the same test. Neither this
#   list nor its elements are modified in this call. (The base list object is
#   shallow copied before it is sorted.)
#
# currentTestDate [in]: The current testing day (as a string YYYY-MM-DD).
#   This is needed to define a frame of reference for interpreting if the test
# is currently 'Passed', 'Failed', 'Not Run', or is 'Missing' (i.e. does not
#   have any test results for the current testing date).
#
# daysOfHistory [in]: Number of days of history that were requested.
#
# Note that len(testHistoryLOD) may be less than daysOfHistory which is
# allowed and handled in this function. Any missing days in that range
# contribute to testHistoryStats['missing_last_x_days'].
#
# Returns:
#
# (sortedTestHistoryLOD, testHistoryStats, testStatus)
#
# where:
#
# sortedTestHistoryLOD: The sorted list of test dicts with most recent dict
# at the top. (New list object with references to the same test dict
# elements.)
#
# testHistoryStats: Dict that gives statistics for the test with fields:
# - 'pass_last_x_days': Number of times test 'Passed'
#   - 'nopass_last_x_days': Number of times the test did not pass
#   - 'missing_last_x_days': Number of days there was no test data
#   - 'consec_pass_days': Number of consecutive days the test passed
#   - 'consec_nopass_days': Number of consecutive days the test did not pass
#   - 'consec_missing_days': Number of consecutive days the test is missing
# - 'previous_nopass_date': Before current date, the previous nopass date
#
# testStatus: The status of the test for the current testing day with values:
#   - 'Passed': Most recent test 'Passed' and had date matching currentTestDate
#   - 'Failed': Most recent test 'Failed' and had date matching currentTestDate
#   - 'Not Run': Most recent test 'Not Run' and had date matching currentTestDate
#   - 'Missing': Most recent test has a date before currentTestDate
#
def sortTestHistoryGetStatistics(testHistoryLOD, currentTestDate, daysOfHistory):
def incr(testDict, key): testDict[key] = testDict[key] + 1
def decr(testDict, key): testDict[key] = testDict[key] - 1
# Initialize outputs assuming no history (i.e. missing)
sortedTestHistoryLOD = []
testHistoryStats = {
'pass_last_x_days': 0,
'nopass_last_x_days': 0,
'missing_last_x_days': daysOfHistory,
'consec_pass_days': 0,
'consec_nopass_days': 0,
'consec_missing_days': 0,
'previous_nopass_date': 'None'
}
testStatus = "Missing"
# Return if there is no test history
if len(testHistoryLOD) == 0:
testHistoryStats['consec_missing_days'] = daysOfHistory
return (sortedTestHistoryLOD, testHistoryStats, testStatus)
# Sort the test history by the buildstarttime (most current date at top)
sortedTestHistoryLOD = copy.copy(testHistoryLOD)
sortedTestHistoryLOD.sort(reverse=True, key=DictSortFunctor(['buildstarttime']))
# Top (most recent) test history data
topTestDict = sortedTestHistoryLOD[0]
# testStatus (for this test based on history)
topTestBuildStartDate = dateFromBuildStartTime(topTestDict['buildstarttime'])
if topTestBuildStartDate == currentTestDate:
testStatus = topTestDict['status']
else:
testStatus = "Missing"
# testHistoryStats
# Set up for counting num of consecutive pass, nopass, or missing
if testStatus == "Missing":
# The test is missing so see how many consecutive days that it is missing
currentTestDateObj = validateAndConvertYYYYMMDD(currentTestDate)
topTestDateObj = validateAndConvertYYYYMMDD(topTestBuildStartDate)
testHistoryStats['consec_missing_days'] = (currentTestDateObj - topTestDateObj).days
# There are no initial consecutive passing or nopassing days
initialTestStatusHasChanged = True
else:
# Count number of consecutive days that test is either passing or
    # nopassing
initialTestStatusHasChanged = False
if testStatus == 'Passed': previousTestStatusPassed = True
else: previousTestStatusPassed = False
previousNopassDate = None
# Loop over test history and update quantities
for pastTestDict in sortedTestHistoryLOD:
pastTestStatus = pastTestDict['status']
pastTestDate = dateFromBuildStartTime(pastTestDict['buildstarttime'])
# Count the initial consecutive streaks
if (
(pastTestStatus=='Passed') == previousTestStatusPassed \
and not initialTestStatusHasChanged \
):
# The initial consecutive streak continues!
if pastTestStatus == 'Passed':
incr(testHistoryStats, 'consec_pass_days')
else:
incr(testHistoryStats, 'consec_nopass_days')
else:
# The initial consecutive streak has been broken
initialTestStatusHasChanged = True
# Count total pass/nopass/missing tests
decr(testHistoryStats, 'missing_last_x_days')
if pastTestStatus == 'Passed':
incr(testHistoryStats, 'pass_last_x_days')
else:
incr(testHistoryStats, 'nopass_last_x_days')
# Find most recent previous nopass test date
if (
previousNopassDate == None \
and pastTestDate != currentTestDate \
and pastTestStatus != 'Passed' \
):
previousNopassDate = pastTestDate
testHistoryStats['previous_nopass_date'] = previousNopassDate
# Return the computed stuff
return (sortedTestHistoryLOD, testHistoryStats, testStatus)
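# For example (hypothetical data), with currentTestDate='2018-10-28' and
# daysOfHistory=3:
#
#   testHistoryLOD = [
#     { 'buildstarttime':'2018-10-28T06:10:45 UTC', 'status':'Failed' },
#     { 'buildstarttime':'2018-10-27T06:10:45 UTC', 'status':'Passed' },
#     ]
#   (sortedLOD, stats, status) = sortTestHistoryGetStatistics(
#     testHistoryLOD, '2018-10-28', 3)
#   # status == 'Failed'
#   # stats == { 'pass_last_x_days':1, 'nopass_last_x_days':1,
#   #   'missing_last_x_days':1, 'consec_pass_days':0, 'consec_nopass_days':1,
#   #   'consec_missing_days':0, 'previous_nopass_date':'None' }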
# Extract testid and buildid from 'testDetailsLink' CDash test dict
# field.
def extractTestIdAndBuildIdFromTestDetailsLink(testDetailsLink):
testDetailsLinkList = testDetailsLink.split('?')
phpArgsList = testDetailsLinkList[1].split('&')
testidArgList = phpArgsList[0].split("=")
buildidArgList = phpArgsList[1].split("=")
return (testidArgList[1], buildidArgList[1])
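# For example (hypothetical link):
#
#   extractTestIdAndBuildIdFromTestDetailsLink(
#     "testDetails.php?test=1234&build=5678")
#   # returns ('1234', '5678')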
# Check if two test dicts returned from CDash are the same, accounting for
# possible CDash defects allowing duplicate tests except for different test
# IDs and small changes in 'time' (strange defects in CDash).
#
# Has the same calling conventions and return value as the function
# checkDictsAreSame().
#
# Returns tuple (hasSameKeyValuePairs, errMsg). If
# hasSameKeyValuePairs==True, then errMsg==None. Otherwise, if
# hasSameKeyValuePairs==False, then errMsg gives a string that explains how
# they are different.
#
# This improves on a simple check dict_1 == dict_2 in that shows exactly why
# the dicts are different for a single key/value pair.
#
def checkCDashTestDictsAreSame(testDict_1, testDict_1_name,
testDict_2, testDict_2_name,
):
# Check the easy case where they are exactly the same
if testDict_1 == testDict_2:
return (True, None)
# Check to see if 'testDetailsLink' is there in both and then check contents
sameBuildIdDifferentTestIds = False
if (
('testDetailsLink' in testDict_1.keys()) \
and \
('testDetailsLink' in testDict_2.keys()) \
):
    (testid_1, buildid_1) = \
      extractTestIdAndBuildIdFromTestDetailsLink(testDict_1['testDetailsLink'])
    (testid_2, buildid_2) = \
      extractTestIdAndBuildIdFromTestDetailsLink(testDict_2['testDetailsLink'])
    if (buildid_1 == buildid_2) and (testid_1 != testid_2):
# This is the special case that we are writing this function for!
sameBuildIdDifferentTestIds = True
# Set up copy to allow dropping out fields for comparison
testDict_1_copy = copy.deepcopy(testDict_1)
testDict_2_copy = copy.deepcopy(testDict_2)
# If buildIds are the same but the testIds are different, then check the
# rest of the key/value pairs to determine if they are the same:
if sameBuildIdDifferentTestIds:
testDict_1_copy.pop('testDetailsLink', None)
testDict_2_copy.pop('testDetailsLink', None)
  # If the test 'time' is different by a little bit, then declare them to be
# the same and remove 'time' field from comparison.
if testDict_1['time'] != testDict_2['time']:
time_1 = testDict_1['time']
time_2 = testDict_2['time']
rel_err = abs(time_1 - time_2) / ( (time_1 + time_2 + 1e-5)/2.0 )
rel_err_max = 1.0 # ToDo: Make this adjustable?
print("rel_err = "+str(rel_err))
print("rel_err_max = "+str(rel_err_max))
if rel_err <= rel_err_max:
testDict_1_copy.pop('time', None)
testDict_2_copy.pop('time', None)
# ToDo: Provide a better error message that prints the diff!
# Compare what ever fields are left that may be different and just use the
# standard comparison that will give a good error message for differences.
return checkDictsAreSame(testDict_1_copy, testDict_1_name,
testDict_2_copy, testDict_2_name )
# Get the test history CDash cache file.
#
# Note: this takes care of things like having '/' in the test name
#
def getTestHistoryCacheFileName(date, site, buildName, testname, daysOfHistory):
testHistoryFileName = \
date+"-"+site+"-"+buildName+"-"+testname+"-HIST-"+str(daysOfHistory)+".json"
return testHistoryFileName.replace('/', '_')
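# For example (hypothetical arguments):
#
#   getTestHistoryCacheFileName('2018-10-28', 'site1', 'build1',
#     'subdir/test1', 30)
#   # returns '2018-10-28-site1-build1-subdir_test1-HIST-30.json'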
# Transform functor that computes and add detailed test history to an existing
# test dict so that it can be printed in the table
# createCDashTestHtmlTableStr().
#
# ToDo: Document the fields set by this functor
#
class AddTestHistoryToTestDictFunctor(object):
# Constructor which takes additional data needed to get the test history and
# other stuff.
#
  # By default, this will always read the data from the cache file if that file
# already exists.
#
def __init__(self, cdashUrl, projectName, date, daysOfHistory,
testCacheDir, useCachedCDashData=True, alwaysUseCacheFileIfExists=True,
verbose=False, printDetails=False,
extractCDashApiQueryData_in=extractCDashApiQueryData, # For unit testing
):
self.__cdashUrl = cdashUrl
self.__projectName = projectName
self.__date = date
self.__daysOfHistory = daysOfHistory
self.__testCacheDir = testCacheDir
self.__useCachedCDashData = useCachedCDashData
self.__alwaysUseCacheFileIfExists = alwaysUseCacheFileIfExists
self.__verbose = verbose
self.__printDetails = printDetails
self.__extractCDashApiQueryData_in = extractCDashApiQueryData_in
# Get test history off CDash and add test history info and URL to info we
# find out from that test history
#
def __call__(self, testDict):
#pp = pprint.PrettyPrinter(indent=2)
#print("\ntestDict:\n")
#pp.pprint(testDict)
# Get short names for data inside of this functor
cdashUrl = self.__cdashUrl
projectName = self.__projectName
testDayDate = validateAndConvertYYYYMMDD(self.__date)
daysOfHistory = self.__daysOfHistory
    # Get basic info about the test from the testDict
site = testDict["site"]
buildName = testDict["buildName"]
testname = testDict["testname"]
# Determine if this test has data from CDash or if it does not
if testDict.get('buildstarttime', None):
testAlreadyHasCDashData = True
else:
testAlreadyHasCDashData = False
# Date range for test history
dayAfterCurrentTestDay = \
(testDayDate+datetime.timedelta(days=1)).isoformat()
daysBeforeCurrentTestDay = \
(testDayDate+datetime.timedelta(days=-1*daysOfHistory+1)).isoformat()
# Define queryTests.php query filters for test history
testHistoryQueryFilters = \
"filtercombine=and&filtercombine=&filtercount=5&showfilters=1&filtercombine=and"+\
"&field1=buildname&compare1=61&value1="+buildName+\
"&field2=testname&compare2=61&value2="+testname+\
"&field3=site&compare3=61&value3="+site+\
"&field4=buildstarttime&compare4=84&value4="+dayAfterCurrentTestDay+\
"&field5=buildstarttime&compare5=83&value5="+daysBeforeCurrentTestDay
# URL used to get the history of the test in JSON form
testHistoryQueryUrl = \
getCDashQueryTestsQueryUrl(cdashUrl, projectName, None, testHistoryQueryFilters)
    # URL to embed in email to show the history of the test to humans
testHistoryBrowserUrl = \
getCDashQueryTestsBrowserUrl(cdashUrl, projectName, None, testHistoryQueryFilters)
    # URL to the build summary on the index.php page
buildHistoryEmailUrl = getCDashIndexBrowserUrl(
cdashUrl, projectName, None,
"filtercombine=and&filtercombine=&filtercount=4&showfilters=1&filtercombine=and"+\
"&field1=buildname&compare1=61&value1="+buildName+\
"&field2=site&compare2=61&value2="+site+\
"&field3=buildstarttime&compare3=84&value3="+dayAfterCurrentTestDay+\
"&field4=buildstarttime&compare4=83&value4="+daysBeforeCurrentTestDay )
    # ToDo: Replace this with the URL to just this one build on the index.php
# page. To do that, get the build stamp from the list of builds on CDash
# and then create a URL link for this one build given 'site', 'buildName',
# and 'buildStamp'. (NOTE: We can't use 'buildstarttime' without
# replacing ':' with '%' or the URL will not work with CDash.)
    # Set the name of the cache file so we can check if it exists and
    # write it out otherwise
testHistoryCacheFileFullName = \
getTestHistoryCacheFileName(self.__date,site,buildName,testname,daysOfHistory)
# Possibly compress the file name if it is too long
testHistoryCacheFilePath = \
self.__testCacheDir+"/"+\
getCompressedFileNameIfTooLong(testHistoryCacheFileFullName,self.__date+"-","json")
if self.__verbose:
gettingTestHistoryMsg = \
"Getting "+str(daysOfHistory)+" days of history for "+testname+\
" in the build "+buildName+" on "+site
if os.path.exists(testHistoryCacheFilePath):
gettingTestHistoryMsg += " from cache file"
else:
gettingTestHistoryMsg += " from CDash"
print(gettingTestHistoryMsg)
# Get the test history off of CDash (or from reading the cache file)
testHistoryLOD = downloadTestsOffCDashQueryTestsAndFlatten(
testHistoryQueryUrl, testHistoryCacheFilePath,
useCachedCDashData=self.__useCachedCDashData,
alwaysUseCacheFileIfExists=self.__alwaysUseCacheFileIfExists,
verbose=self.__printDetails,
extractCDashApiQueryData_in=self.__extractCDashApiQueryData_in
)
# Sort and get test history stats and update core testDict fields
(testHistoryLOD, testHistoryStats, testStatus) = sortTestHistoryGetStatistics(
testHistoryLOD, self.__date, daysOfHistory)
# Assert and update the status
#print("\ntestStatus = "+str(testStatus))
#print("\ntestHistoryLOD[0] = "+str(testHistoryLOD[0]))
if testStatus == "Missing":
testDict['status'] = "Missing"
testDict['status_color'] = cdashColorMissing()
testDict['details'] = "Missing"
elif testStatus == "Passed":
testDict.update(testHistoryLOD[0])
testDict['status_color'] = cdashColorPassed()
else:
# If we get here, there should be at least one test dict in
      # testHistoryLOD and this should be a Failed or Not Run test.
# testHistoryLOD[0] should be an exact duplicate of testDict. The below
# check confirms that to make sure that CDash is giving us consistent
# data.
if testDict.get('status', None) != testStatus:
raise Exception(
"Error, test testDict['status'] = '"+str(testDict.get('status',None))+"'"+\
" != "+\
"top test history testStatus = '"+testStatus+"'"+\
" where:\n\n"+\
" testDict = "+sorted_dict_str(testDict)+"\n\n"+\
" top test history dict = "+sorted_dict_str(testHistoryLOD[0])+"\n\n" )
if testDict.get('buildstarttime', None) != testHistoryLOD[0]['buildstarttime']:
raise Exception(
"Error, testDict['buildstarttime'] = '"+\
str(testDict.get('buildstarttime',None))+"'"+\
" != "+\
"top test history 'buildstarttime' = "+\
"'"+testHistoryLOD[0]['buildstarttime']+"'"+\
" where:\n\n"+\
" testDict = "+sorted_dict_str(testDict)+"\n\n"+\
" top test history dict = "+sorted_dict_str(testHistoryLOD[0])+"\n\n" )
if testStatus == "Failed":
testDict['status_color'] = cdashColorFailed()
elif testStatus == "Not Run":
testDict['status_color'] = cdashColorNotRun()
# ToDo: Lookup the matching build info so that we can get the buildstamp
# in order to build a good link to the build on CDash?
# Get the link to the test details if it exists
testDetailsLink = testDict.get('testDetailsLink', None)
if testDetailsLink:
fullTestDetailsLink = cdashUrl+"/"+testDetailsLink
else:
fullTestDetailsLink = None
# Assign all of the new test dict fields that need to be added
testDict["site_url"] = ""
testDict['buildName_url'] = buildHistoryEmailUrl # ToDo: Change to one build
if fullTestDetailsLink:
testDict['testname_url'] = fullTestDetailsLink
testDict['status_url'] = fullTestDetailsLink
testDict['test_history_num_days'] = daysOfHistory
testDict['test_history_query_url'] = testHistoryQueryUrl
testDict['test_history_browser_url'] = testHistoryBrowserUrl
testDict['test_history_list'] = testHistoryLOD
testDict.update(testHistoryStats)
testDict['pass_last_x_days_color'] = cdashColorPassed()
testDict['pass_last_x_days_url'] = testHistoryBrowserUrl
testDict['nopass_last_x_days_color'] = cdashColorFailed()
testDict['nopass_last_x_days_url'] = testHistoryBrowserUrl
testDict['missing_last_x_days_color'] = cdashColorMissing()
testDict['missing_last_x_days_url'] = testHistoryBrowserUrl
testDict['consec_pass_days_color'] = cdashColorPassed()
testDict['consec_pass_days_url'] = testHistoryBrowserUrl
testDict['consec_nopass_days_color'] = cdashColorFailed()
testDict['consec_nopass_days_url'] = testHistoryBrowserUrl
testDict['consec_missing_days_color'] = cdashColorMissing()
testDict['consec_missing_days_url'] = testHistoryBrowserUrl
if testDict.get('status', None) == None:
print("\ntestStatus = "+testStatus)
print("\ntestDict:")
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(testDict)
raise Exception("Error, testDict['status']==None for testDict="+str(testDict))
# Return the updated test dict with the new fields
return testDict
# Gather up a list of the missing builds.
#
# Inputs:
#
# buildsSearchableListOfDicts [in]: SearchableListOfDicts of build summary
#   dicts gotten off CDash
#
# expectedBuildsList [in]: List of expected build dicts with fields 'group',
# 'site', and 'buildname'.
#
# Returns an array of dicts of missing expected builds with list elements:
#
# {'group':"???", 'site':"???", 'buildname':"???", 'status':"???", ...}
#
# where the '...' will be the rest of the fields for builds that exist on CDash
# but don't have full results.
#
# The field 'status' will be given either:
#
# "Build not found on CDash"
#
# or
#
# "Build exists but no test results"
#
# ToDo: Change name of 'status' to 'build_missing_status' and add other
# 'build_missing_status' values like:
#
# "Build exists but no build results"
# "Build exists but no configure results"
#
def getMissingExpectedBuildsList(buildsSearchableListOfDicts, expectedBuildsList):
missingExpectedBuildsList = []
for expectedBuildDict in expectedBuildsList:
#print("\nexpectedBuildDict = "+str(expectedBuildDict))
buildSummaryDict = \
buildsSearchableListOfDicts.lookupDictGivenKeyValueDict(expectedBuildDict)
#print("buildSummaryDict = "+str(buildSummaryDict))
if not buildSummaryDict:
# Expected build not found!
missingExpectedBuildDict = copy.deepcopy(expectedBuildDict)
missingExpectedBuildDict.update({'status':"Build not found on CDash"})
#print("missingExpectedBuildDict = "+str(missingExpectedBuildDict))
missingExpectedBuildsList.append(missingExpectedBuildDict)
elif not buildSummaryDict.get('test', None):
# Build exists but it is missing tests!
missingExpectedBuildDict = copy.deepcopy(expectedBuildDict)
missingExpectedBuildDict.update({'status':"Build exists but no test results"})
#print("missingExpectedBuildDict = "+str(missingExpectedBuildDict))
missingExpectedBuildsList.append(missingExpectedBuildDict)
else:
# This build exists and it has test results so don't add it
      pass
# Return the list of missing expected builds and status
return missingExpectedBuildsList
# Download set of builds from CDash builds and return flattened list of dicts
#
# The cdash/api/v1/index.php query selecting the set of builds is provided by
# cdashIndexBuildsQueryUrl.
#
# If cdashIndexBuildsQueryCacheFile != None, then the raw JSON data-structure
# downloaded from CDash will be written to the file
# cdashIndexBuildsQueryCacheFile or read from that file if
# useCachedCDashData==True.
#
# If alwaysUseCacheFileIfExists==True, then if the file
# cdashIndexBuildsQueryCacheFile already exists, it will always be read to get
# data instead of communicating with CDash even if useCachedCDashData==False.
#
# The list of builds pulled off of CDash is flattened and extracted using the
# function flattenCDashIndexBuildsToListOfDicts().
#
# NOTE: The optional argument extractCDashApiQueryData_in is used in unit
# testing to avoid calling CDash.
#
def downloadBuildsOffCDashAndFlatten(
cdashIndexBuildsQueryUrl,
fullCDashIndexBuildsJsonCacheFile=None,
useCachedCDashData=False,
alwaysUseCacheFileIfExists = False,
verbose=True,
extractCDashApiQueryData_in=extractCDashApiQueryData,
):
# Get the query data
fullCDashIndexBuildsJson = getAndCacheCDashQueryDataOrReadFromCache(
cdashIndexBuildsQueryUrl, fullCDashIndexBuildsJsonCacheFile, useCachedCDashData,
alwaysUseCacheFileIfExists, verbose=verbose,
extractCDashApiQueryData_in=extractCDashApiQueryData_in )
# Get trimmed down set of builds
buildsListOfDicts = \
flattenCDashIndexBuildsToListOfDicts(fullCDashIndexBuildsJson)
return buildsListOfDicts
# Download set of tests from cdash/api/v1/ctest/queryTests.php and return
# flattened list of dicts
#
# cdashQueryTestsUrl [in]: String URL for cdash/api/v1/ctest/queryTests.php
# with filters.
#
# If verbose==True, then the CDash query URL will be printed to STDOUT.
# Otherwise, this function is silent and will not return any output to STDOUT.
#
# If fullCDashQueryTestsJsonCacheFile != None, then the raw JSON
# data-structure will be written to that file.
#
# If useCachedCDashData==True, then data will not be pulled off of CDash and
# instead the list of tests will be read from the file
# fullCDashQueryTestsJsonCacheFile which must already exist from a prior call
# to this function (mostly for debugging and unit testing purposes).
#
# If alwaysUseCacheFileIfExists==True, then if the file
# cdashIndexBuildsQueryCacheFile already exists, it will always be read to get
# data instead of communicating with CDash even if useCachedCDashData==False.
#
# The list of tests pulled off CDash is flattened and returned by the
# function flattenCDashQueryTestsToListOfDicts().
#
# NOTE: The optional argument extractCDashApiQueryData_in is used in unit
# testing to avoid calling CDash.
#
def downloadTestsOffCDashQueryTestsAndFlatten(
cdashQueryTestsUrl,
fullCDashQueryTestsJsonCacheFile=None,
useCachedCDashData=False,
alwaysUseCacheFileIfExists = False,
verbose=True,
extractCDashApiQueryData_in=extractCDashApiQueryData,
):
# Get the query data
fullCDashQueryTestsJson = getAndCacheCDashQueryDataOrReadFromCache(
cdashQueryTestsUrl, fullCDashQueryTestsJsonCacheFile, useCachedCDashData,
alwaysUseCacheFileIfExists, verbose=verbose,
extractCDashApiQueryData_in=extractCDashApiQueryData_in )
  # Get flattened set of tests
testsListOfDicts = \
flattenCDashQueryTestsToListOfDicts(fullCDashQueryTestsJson)
return testsListOfDicts
# Returns True if a build has configure failures
def buildHasConfigureFailures(buildDict):
configureDict = buildDict.get('configure', None)
if configureDict and configureDict['error'] > 0:
return True
return False
# Returns True if a build has compilation/build failures
def buildHasBuildFailures(buildDict):
compilationDict = buildDict.get('compilation', None)
if compilationDict and compilationDict['error'] > 0:
return True
return False
# Returns True if a test has 'status' 'Passed'
def isTestPassed(testDict):
return (testDict.get('status', None) == 'Passed')
# Returns True if a test has 'status' 'Failed'
def isTestFailed(testDict):
return (testDict.get('status', None) == 'Failed')
# Returns True if a test has 'status' 'Not Run'
def isTestNotRun(testDict):
return (testDict.get('status', None) == 'Not Run')
# Functor class to sort a row of dicts by multiple columns of string data.
class DictSortFunctor(object):
def __init__(self, sortKeyList):
self.sortKeyList = sortKeyList
def __call__(self, dict_in):
sortKeyStr=""
for key in self.sortKeyList:
keyData = dict_in.get(key)
if sortKeyStr:
sortKeyStr += "-"+str(keyData)
else:
sortKeyStr = keyData
return sortKeyStr
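# For example (hypothetical data), sorting tests by site then build name:
#
#   testsLOD.sort(key=DictSortFunctor(['site', 'buildName']))
#
# gives each dict a sort key string like 'site1-build1'.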
# Sort and limit a list of dicts
#
# Arguments:
#
# listOfDicts [in]: List of dicts that will be sorted according to keys.
#
# sortKeyList [in]: List of dict keys that define the sort order for the data
# in the list. The default is None which means that no sort is performed.
#
# limitRowsToDisplay [in]: The max number of rows to display. The default is
# None which will result in no limit to the number of rows displayed. The top
# limitRowsToDisplay items will be displayed after the list is sorted.
#
def sortAndLimitListOfDicts(listOfDicts, sortKeyList = None,
limitRowsToDisplay = None\
):
# Sort the list
if sortKeyList:
listOfDictsOrdered = copy.copy(listOfDicts) # Shallow copy
listOfDictsOrdered.sort(key=DictSortFunctor(sortKeyList))
else:
listOfDictsOrdered = listOfDicts # No sort being done
# Limit rows
if limitRowsToDisplay == None:
listOfDictsLimited = listOfDictsOrdered
else:
listOfDictsLimited = listOfDictsOrdered[0:limitRowsToDisplay]
# Return the final sorted limited list
return listOfDictsLimited
# Class to store dict key and table header
class TableColumnData(object):
# Class data
validColAlignList=["left","right","center","justify","char"]
# Constructor
def __init__(self, colHeader, dictKey, colAlign="left"):
self.colHeader = colHeader
self.dictKey = dictKey
if not colAlign in self.validColAlignList:
      raise Exception(
        "Error, colAlign="+colAlign+" not valid. Please choose from"+\
        " the list ['" + "', '".join(self.validColAlignList) + "']!" )
self.colAlign = colAlign
#
# HTML stuff
#
# Color HTML text in a supported color
def colorHtmlText(htmlText, color_in):
if color_in == None or color_in == "":
return htmlText
elif color_in == "red":
None # Okay!
elif color_in == "green":
None # Okay!
elif color_in == "gray":
None # Okay!
elif color_in == "orange":
None # Okay!
else:
raise Exception("Error, color='"+color_in+"' is invalid."+\
" Only 'red', 'green', 'gray' and 'orange' are supported!")
return("<font color=\""+color_in+"\">"+htmlText+"</font>")
# Add soft word breaks for '_' chars and at other places to allow word wrap
def addHtmlSoftWordBreaks(text_in):
  text_out = text_in.replace('_', '_&shy;')
return text_out
# Create an html table string from a list of dicts and column headers.
#
# Arguments:
#
# tableTitle [in]: String for the name of the table included at the top of the
# table.
#
# colDataList [in]: List of TableColumnData objects where
# colDataList[j].dictKey gives the name of the key for that column of data,
# colDataList[j].colHeader is the text name for the column header and
#   colDataList[j].colAlign gives the HTML alignment. The columns in the
#   table will be listed in the order given in this list.
#
# rowDataList [in]: List of dicts that provide the data for the table. The
#   dict in each row must have the keys specified by colDataList[j].dictKey.
#   In addition, if rowDataList[i].get(colDataList[j].dictKey+"_url") != None,
#   then the table entry will be rendered as the HTML link
#   <a href="<entry_url>"><entry></a>.
#
# htmlStyle [in]: The HTML style data (between <style></style>). If None is
#   passed in then a default style is provided internally. NOTE: The default
#   table style uses CSS formatting for borders but also sets the <table>
#   'border' property since some email clients like Gmail ignore the CSS style
#   sections.
#
# htmlTableStyle [in]: The style for the HTML table used in <table
# style=htmlTableStyle>. If set to None, then a default style is used. To
# not set a style, pass in the empty string "" (not None).
#
# This will also put in soft word breaks for chars like '_' to allow for
# compressing the produced tables.
#
def createHtmlTableStr(tableTitle, colDataList, rowDataList,
htmlStyle=None, htmlTableStyle=None \
):
# style options for the table
defaultHtmlStyle=\
"table, th, td {\n"+\
" padding: 5px;\n"+\
" border: 1px solid black;\n"+\
" border-collapse: collapse;\n"+\
"}\n"+\
"tr:nth-child(even) {background-color: #eee;}\n"+\
"tr:nth-child(odd) {background-color: #fff;}\n"
if htmlStyle != None: htmlStyleUsed = htmlStyle
else: htmlStyleUsed = defaultHtmlStyle
htmlStr="<style>"+htmlStyleUsed+"</style>\n"
# Table title and <table style=...>
htmlStr+="<h3>"+tableTitle+"</h3>\n"
if htmlTableStyle != None: htmlTableStyleUsed = htmlTableStyle
  else: htmlTableStyleUsed = "style=\"width:100%\" border=\"1\""
htmlStr+="<table "+htmlTableStyleUsed+">\n\n"
# Column headings:
htmlStr+="<tr>\n"
for colData in colDataList:
htmlStr+="<th>"+colData.colHeader+"</th>\n"
htmlStr+="</tr>\n\n"
# Rows for the table
row_i = 0
for rowData in rowDataList:
htmlStr+="<tr>\n"
col_j = 0
for colData in colDataList:
dictKey = colData.dictKey
# Get the raw entry for this column
entry = rowData.get(dictKey, None)
if entry == None:
raise Exception(
"Error, column "+str(col_j)+" dict key='"+colData.dictKey+"'"+\
" row "+str(row_i)+" entry is 'None' which is not allowed!\n\n"+\
"Row dict = "+str(rowData))
# Add soft word breaks to allow line breaks for table compression
entry = addHtmlSoftWordBreaks(str(entry).strip())
# Add color if defined for this field
entryColor = rowData.get(dictKey+"_color", None)
if entryColor:
entry = colorHtmlText(entry, entryColor)
# See if the _url key also exists
entry_url = rowData.get(dictKey+"_url", None)
# Set the text for this row/column entry with or without the hyperlink
if entry_url:
entryStr = "<a href=\""+entry_url+"\">"+str(entry)+"</a>"
else:
entryStr = entry
# Set the row entry in the HTML table
htmlStr+=\
"<td align=\""+colData.colAlign+"\">"+entryStr+"</td>\n"
col_j += 1
htmlStr+="</tr>\n\n"
row_i += 1
# End of table
htmlStr+="</table>\n\n" # Use two newlines makes for good formatting!
return(htmlStr)
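# Example usage (hypothetical data and URL):
#
#   colDataList = [
#     TableColumnData("Site", 'site'),
#     TableColumnData("Test Name", 'testname'),
#     ]
#   rowDataList = [
#     { 'site':'s1', 'testname':'t1',
#       'testname_url':'https://some.cdash.site/testDetails.php?test=1' },
#     ]
#   htmlStr = createHtmlTableStr("Failing Tests", colDataList, rowDataList)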
# Get string for table title for CDash data to display
#
# Arguments:
#
# dataTitle [in]: Name of the data category.
#
# dataCountAcronym [in]: Acronym for the type of data being displayed
# (e.g. 'twoi' for "Tests With Out issue trackers"). This is printed in the
#   table title in the form dataCountAcronym=len(rowDataList).
#
# numItems [in]: The number of items of data
#
def getCDashDataSummaryHtmlTableTitleStr(dataTitle, dataCountAcronym, numItems,
limitRowsToDisplay=None,
):
tableTitle = dataTitle
if limitRowsToDisplay:
tableTitle += " (limited to "+str(limitRowsToDisplay)+")"
tableTitle += ": "+dataCountAcronym+"="+str(numItems)
return tableTitle
# Create an html table string for CDash summary data.
#
# Arguments:
#
# dataTitle [in]: Name of the data that will be included in the table title.
#
# dataCountAcronym [in]: Acronym for the type of data being displayed
# (e.g. 'twoi' for "Tests With Out issue trackers"). This is printed in the
#   table title in the form dataCountAcronym=len(rowDataList).
#
# colDataList [in]: List of TableColumnData objects where
# colDataList[j].dictKey gives the name of the key for that column of data,
# colDataList[j].colHeader is the text name for the column header and
#   colDataList[j].colAlign gives the HTML alignment. The columns in the
#   table will be listed in the order given in this list.
#
# rowDataList [in]: List of dicts that provide the data for the table. The
# dict in each row must have the keys specified by colData[j].dictKey.
#
# sortKeyList [in]: List of dict keys that define the sort order for the data
# in the list. The default is None which means that no sort is performed.
#
# limitRowsToDisplay [in]: The max number of rows to display. The default is
# None which will result in no limit to the number of rows displayed. The top
# limitRowsToDisplay items will be displayed after the list is sorted.
#
# htmlStyle [in]: The HTML style data (between <style></style>). If None is
#   passed in then a default style is provided internally (see
#   createHtmlTableStr()).
#
# htmlTableStyle [in]: The style for the HTML table used in <table
# style=htmlTableStyle>. The default is None in which case a default is
#   picked by createHtmlTableStr().
#
# NOTE: If len(rowDataList) == 0, then the empty string "" is returned.
#
def createCDashDataSummaryHtmlTableStr( dataTitle, dataCountAcronym,
colDataList, rowDataList, sortKeyList=None, limitRowsToDisplay=None,
htmlStyle=None, htmlTableStyle=None,
):
# If no rows, don't create a table
if len(rowDataList) == 0:
return ""
# Sort the list and limit the list
rowDataListDisplayed = sortAndLimitListOfDicts(
rowDataList, sortKeyList, limitRowsToDisplay)
# Table title
tableTitle = getCDashDataSummaryHtmlTableTitleStr(
dataTitle, dataCountAcronym, len(rowDataList), limitRowsToDisplay )
# Create and return the table
return createHtmlTableStr( tableTitle,
colDataList, rowDataListDisplayed, htmlStyle, htmlTableStyle )
# Create a tests HTML table string
#
# testSetType [in]: Type of the test set: 'nopass', 'pass', or 'missing'.
#   This selects which "Consecutive ... Days" column is included.
#
# testTypeDescr [in]: Description of the test type being tabulated
# (e.g. "Failing tests without issue trackers")
#
# testTypeCountAcronym [in]: Acronym for the test type being tabulated
# (e.g. "twoif")
#
# testTypeCountNum [in]: Number of total items for the test type, before
# limiting (e.g. 25)
#
# testsLOD [in]: List of dicts of the test data typically first
# downloaded from CDash. Each dict in this list must also have been operated
# on by the functors AddIssueTrackerInfoToTestDictFunctor and
# AddTestHistoryToTestDictFunctor in order to have all of the data needed to
# print in this table.
#
# daysOfHistory [in]: Number of days of test history being displayed. This is
# needed for one of the table column headers. (ToDo: Remove this and get this
# from the data).
#
# limitRowsToDisplay [in]: Limit of the number of rows to display. If the
#   listing is limited, then this argument is needed in order to print
#   "(limited to ???)" in the table title. Should be 'None' if this listing is
#   not limited. (default None)
#
# htmlStyle [in]: HTML style for the entire table (see createHtmlTableStr())
# (default None)
#
# htmlTableStyle [in]: Style inside of <table ... > (see createHtmlTableStr())
# (default None)
#
def createCDashTestHtmlTableStr(
testSetType,
testTypeDescr, testTypeCountAcronym, testTypeCountNum, testsLOD,
daysOfHistory, limitRowsToDisplay=None, testSetColor="",
htmlStyle=None, htmlTableStyle=None,
):
# Return empty string if no tests
if len(testsLOD) == 0:
return ""
# Table title
tableTitle = colorHtmlText(
getCDashDataSummaryHtmlTableTitleStr(
testTypeDescr, testTypeCountAcronym, testTypeCountNum, limitRowsToDisplay ),
testSetColor )
# Consecutive nopass/pass/missing column
tcd = TableColumnData
  if testSetType == 'nopass':
    consecCol = tcd("Consec&shy;utive Non-pass Days", 'consec_nopass_days', 'right')
  elif testSetType == 'pass':
    consecCol = tcd("Consec&shy;utive Pass Days", 'consec_pass_days', 'right')
  elif testSetType == 'missing':
    consecCol = tcd("Consec&shy;utive Missing Days", 'consec_missing_days', 'right')
else:
raise Exception("Error, invalid testSetType="+str(testSetType))
# Create column headers
testsColDataList = [
tcd("Site", "site"),
tcd("Build Name", "buildName"),
tcd("Test Name", "testname"),
tcd("Status", "status"),
tcd("Details", "details"),
consecCol,
tcd("Non-pass Last "+str(daysOfHistory)+" Days", 'nopass_last_x_days', "right"),
tcd("Pass Last "+str(daysOfHistory)+" Days", 'pass_last_x_days', "right"),
tcd("Issue Tracker", "issue_tracker", "right"),
]
# Return the HTML table
return createHtmlTableStr( tableTitle,
testsColDataList, testsLOD,
htmlStyle=htmlStyle, htmlTableStyle=htmlTableStyle )
#
# Create an HTML MIME Email
#
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
# Create MIME formatted email object (but don't send it)
#
def createHtmlMimeEmail(fromAddress, toAddress, subject, textBody, htmlBody):
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['From'] = fromAddress
msg['To'] = toAddress
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(textBody, 'plain')
part2 = MIMEText(htmlBody, 'html')
# Attach parts into message container. According to RFC 2046, the last part
# of a multipart message, in this case the HTML message, is best and
# preferred.
msg.attach(part1)
msg.attach(part2)
return msg
# Send a MIME formatted email
#
def sendMineEmail(mimeEmail):
# Send the message via local SMTP server.
s = smtplib.SMTP('localhost')
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(mimeEmail['From'], mimeEmail['To'], mimeEmail.as_string())
s.quit()
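# Example usage (hypothetical addresses; htmlStr is an HTML body such as one
# produced by createHtmlTableStr()):
#
#   msg = createHtmlMimeEmail("bot@some.site", "dev@some.site",
#     "Test results", "See HTML version", htmlStr)
#   sendMineEmail(msg)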
|
py | 7df6bda4ac94a88e0785a02d105d53a3798e5a3d | import numpy as np
import tensorflow as tf
import tflearn
GAMMA = 0.99
ENTROPY_WEIGHT = 0.1
ENTROPY_EPS = 1e-6
EPS = 1e-6
MAX_BR_LEVELS = 10
MASK_DIM = 6
class ActorNetwork(object):
"""
Input to the network is the state, output is the distribution
of all actions.
"""
def __init__(self, sess, state_dim, action_dim, learning_rate):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.lr_rate = learning_rate
assert self.a_dim == MAX_BR_LEVELS
# Placeholder for masking invalid actions
self.mask = tf.compat.v1.placeholder(tf.bool, self.a_dim)
# Create the actor network
self.inputs, self.out = self.create_actor_network()
# Get all network parameters
self.network_params = \
tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='actor')
# Set all network parameters
self.input_network_params = []
for param in self.network_params:
self.input_network_params.append(
tf.compat.v1.placeholder(tf.float32, shape=param.get_shape()))
self.set_network_params_op = []
for idx, param in enumerate(self.input_network_params):
self.set_network_params_op.append(self.network_params[idx].assign(param))
# Selected action, 0-1 vector
        # the shape of acts is not determined (only upper-bounded by a_dim)
self.acts = tf.compat.v1.placeholder(tf.float32, [None, None])
# This gradient will be provided by the critic network
self.act_grad_weights = tf.compat.v1.placeholder(tf.float32, [None, 1])
# Compute the objective (log action_vector and entropy)
self.obj = tf.compat.v1.reduce_sum(tf.compat.v1.multiply(
tf.compat.v1.log(tf.reduce_sum(tf.compat.v1.multiply(self.out, self.acts),
axis=1, keepdims=True)),
-self.act_grad_weights)) \
+ ENTROPY_WEIGHT * tf.reduce_sum(tf.multiply(self.out,
tf.compat.v1.log(self.out + ENTROPY_EPS)))
# Combine the gradients here
self.actor_gradients = tf.gradients(self.obj, self.network_params)
# Optimization Op
self.optimize = tf.compat.v1.train.RMSPropOptimizer(self.lr_rate).\
apply_gradients(zip(self.actor_gradients, self.network_params))
def create_actor_network(self):
with tf.compat.v1.variable_scope('actor'):
inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')
split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')
split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')
reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])
split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')
split_4 = tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')
split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')
flatten_0 = tflearn.flatten(split_3)
flatten_1 = tflearn.flatten(split_4)
flatten_2 = tflearn.flatten(split_5)
merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')
dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')
# for multiple video, mask out the invalid actions
linear_out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='linear')
linear_out = tf.transpose(linear_out) # [None, a_dim] -> [a_dim, None]
mask_out = tf.boolean_mask(linear_out, self.mask) # [a_dim, None] -> [masked, None]
mask_out = tf.transpose(mask_out) # [masked, None] -> [None, masked]
softmax_out = tf.nn.softmax(mask_out)
return inputs, softmax_out
def train(self, inputs, acts, act_grad_weights):
# there can be only one kind of mask in a training epoch
for i in range(inputs.shape[0]):
assert np.all(inputs[0, MASK_DIM, -MAX_BR_LEVELS:] == \
inputs[i, MASK_DIM, -MAX_BR_LEVELS:])
# action dimension matches with mask length
assert acts.shape[1] == np.sum(inputs[0:1, MASK_DIM, -MAX_BR_LEVELS:])
self.sess.run(self.optimize, feed_dict={
self.inputs: inputs,
self.mask: inputs[0, MASK_DIM, -MAX_BR_LEVELS:],
self.acts: acts,
self.act_grad_weights: act_grad_weights
})
def predict(self, inputs):
for i in range(inputs.shape[0]):
assert np.all(inputs[0, MASK_DIM, -MAX_BR_LEVELS:] == \
inputs[i, MASK_DIM, -MAX_BR_LEVELS:])
return self.sess.run(self.out, feed_dict={
self.inputs: inputs,
self.mask: inputs[0, MASK_DIM, -MAX_BR_LEVELS:]
})
def get_gradients(self, inputs, acts, act_grad_weights):
for i in range(inputs.shape[0]):
assert np.all(inputs[0, MASK_DIM, -MAX_BR_LEVELS:] == \
inputs[i, MASK_DIM, -MAX_BR_LEVELS:])
return self.sess.run(self.actor_gradients, feed_dict={
self.inputs: inputs,
self.mask: inputs[0, MASK_DIM, -MAX_BR_LEVELS:],
self.acts: acts,
self.act_grad_weights: act_grad_weights
})
def apply_gradients(self, actor_gradients):
return self.sess.run(self.optimize, feed_dict={
i: d for i, d in zip(self.actor_gradients, actor_gradients)
})
def get_network_params(self):
return self.sess.run(self.network_params)
def set_network_params(self, input_network_params):
self.sess.run(self.set_network_params_op, feed_dict={
i: d for i, d in zip(self.input_network_params, input_network_params)
})
class CriticNetwork(object):
"""
Input to the network is the state and action, output is V(s).
On policy: the action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, state_dim, learning_rate):
self.sess = sess
self.s_dim = state_dim
self.lr_rate = learning_rate
# Create the critic network
self.inputs, self.out = self.create_critic_network()
# Get all network parameters
self.network_params = \
tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
# Set all network parameters
self.input_network_params = []
for param in self.network_params:
self.input_network_params.append(
tf.compat.v1.placeholder(tf.float32, shape=param.get_shape()))
self.set_network_params_op = []
for idx, param in enumerate(self.input_network_params):
self.set_network_params_op.append(self.network_params[idx].assign(param))
# Network target V(s)
self.td_target = tf.compat.v1.placeholder(tf.float32, [None, 1])
# Temporal Difference, will also be weights for actor_gradients
        self.td = tf.subtract(self.td_target, self.out)
# Mean square error
self.loss = tflearn.mean_square(self.td_target, self.out)
# Compute critic gradient
self.critic_gradients = tf.gradients(self.loss, self.network_params)
# Optimization Op
self.optimize = tf.compat.v1.train.RMSPropOptimizer(self.lr_rate).\
apply_gradients(zip(self.critic_gradients, self.network_params))
def create_critic_network(self):
with tf.compat.v1.variable_scope('critic'):
inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')
split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')
split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')
reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])
split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')
split_4 = tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')
split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')
flatten_0 = tflearn.flatten(split_3)
flatten_1 = tflearn.flatten(split_4)
flatten_2 = tflearn.flatten(split_5)
merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')
dense_net_0 = tflearn.fully_connected(merge_net, 100, activation='relu')
out = tflearn.fully_connected(dense_net_0, 1, activation='linear')
return inputs, out
def train(self, inputs, td_target):
return self.sess.run([self.loss, self.optimize], feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def predict(self, inputs):
return self.sess.run(self.out, feed_dict={
self.inputs: inputs
})
def get_td(self, inputs, td_target):
return self.sess.run(self.td, feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def get_gradients(self, inputs, td_target):
return self.sess.run(self.critic_gradients, feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def apply_gradients(self, critic_gradients):
return self.sess.run(self.optimize, feed_dict={
i: d for i, d in zip(self.critic_gradients, critic_gradients)
})
def get_network_params(self):
return self.sess.run(self.network_params)
def set_network_params(self, input_network_params):
self.sess.run(self.set_network_params_op, feed_dict={
i: d for i, d in zip(self.input_network_params, input_network_params)
})
def compute_gradients(s_batch, a_batch, r_batch, terminal, actor, critic):
"""
    Batch of s, a, r from samples in a sequence.
    The format is np.array([batch_size, s/a/r_dim]).
    terminal is True when the sequence ends at a terminal state.
"""
assert s_batch.shape[0] == a_batch.shape[0]
assert s_batch.shape[0] == r_batch.shape[0]
ba_size = s_batch.shape[0]
v_batch = critic.predict(s_batch)
R_batch = np.zeros(r_batch.shape)
if terminal:
R_batch[-1, 0] = 0 # terminal state
else:
R_batch[-1, 0] = v_batch[-1, 0] # boot strap from last state
for t in reversed(range(ba_size - 1)):
R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]
td_batch = R_batch - v_batch
actor_gradients = actor.get_gradients(s_batch, a_batch, td_batch)
critic_gradients = critic.get_gradients(s_batch, R_batch)
return actor_gradients, critic_gradients, td_batch
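# Worked example of the return recursion above (illustrative numbers, not
# taken from the original file): with GAMMA = 0.99, r_batch = [1, 1, 1] and
# a non-terminal bootstrap value v_batch[-1, 0] = 10.0:
#   R[2] = 10.0                  # bootstrap replaces the final reward
#   R[1] = 1 + 0.99 * 10.0 = 10.9
#   R[0] = 1 + 0.99 * 10.9 = 11.791
# td_batch = R_batch - v_batch is the advantage signal passed to the actor.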
def discount(x, gamma):
"""
Given vector x, computes a vector y such that
    y[i] = x[i] + gamma * x[i+1] + gamma^2 * x[i+2] + ...
"""
    assert x.ndim >= 1  # validate the input before indexing into it
    out = np.zeros(len(x))
    out[-1] = x[-1]
    for i in reversed(range(len(x) - 1)):
        out[i] = x[i] + gamma * out[i + 1]
    # More efficient version:
    # scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
    return out
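# Example (illustrative): discount(np.array([1.0, 1.0, 1.0]), 0.5) returns
# array([1.75, 1.5, 1.0]), since 1 + 0.5 * (1 + 0.5 * 1) = 1.75.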
def compute_entropy(x):
"""
Given vector x, computes the entropy
H(x) = - sum( p * log(p))
"""
H = 0.0
for i in range(len(x)):
if 0 < x[i] < 1:
H -= x[i] * np.log(x[i])
return H
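# Example (illustrative): compute_entropy([0.5, 0.5]) returns
# -2 * 0.5 * log(0.5) = log(2) ~= 0.693, the entropy of a fair coin.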
def build_summaries():
td_loss = tf.compat.v1.Variable(0.)
tf.compat.v1.summary.scalar("TD_loss", td_loss)
eps_total_reward = tf.compat.v1.Variable(0.)
tf.compat.v1.summary.scalar("Eps_total_reward", eps_total_reward)
avg_entropy = tf.compat.v1.Variable(0.)
tf.compat.v1.summary.scalar("Avg_entropy", avg_entropy)
summary_vars = [td_loss, eps_total_reward, avg_entropy]
summary_ops = tf.compat.v1.summary.merge_all()
return summary_ops, summary_vars
|
py | 7df6bdab3642c5d6f05d1b1cd9b861da1ba720f1 | #!/usr/bin/python3
import argparse
import sys
import os
from colorama import Fore
from atcodertools.tools.tester import USER_FACING_JUDGE_TYPE_LIST, DEFAULT_EPS
from atcodertools.tools.utils import with_color
from atcodertools.client.atcoder import AtCoderClient, LoginError
from atcodertools.tools import tester
from atcodertools.common.logging import logger
from atcodertools.tools.models.metadata import Metadata
def main(prog, args, credential_supplier=None, use_local_session_cache=True) -> bool:
parser = argparse.ArgumentParser(
prog=prog,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--exec", '-e',
help="File path to the execution target. [Default] Automatically detected exec file",
default=None)
parser.add_argument("--dir", '-d',
help="Target directory to test. [Default] Current directory",
default=".")
parser.add_argument("--timeout", '-t',
help="Timeout for each test cases (sec) [Default] 1",
type=int,
default=1)
parser.add_argument("--code", '-c',
help="Path to the source code to submit [Default] Code path written in metadata.json",
type=str,
default=None)
parser.add_argument("--force", "-f",
action="store_true",
help="Submit the code regardless of the local test result [Default] False",
default=False)
parser.add_argument("--save-no-session-cache",
action="store_true",
help="Save no session cache to avoid security risk",
default=False)
parser.add_argument("--unlock-safety", "-u",
action="store_true",
help="By default, this script only submits the first code per problem. However, you can remove"
" the safety by this option in order to submit codes twice or more.",
default=False)
parser.add_argument('--judge-type', '-j',
help='error type'
' must be one of [{}]'.format(
', '.join(USER_FACING_JUDGE_TYPE_LIST)),
type=str,
default=None)
parser.add_argument('--error-value', '-v',
help='error value for decimal number judge:'
' [Default] ' + str(DEFAULT_EPS),
type=float,
default=None)
args = parser.parse_args(args)
metadata_file = os.path.join(args.dir, "metadata.json")
try:
metadata = Metadata.load_from(metadata_file)
except IOError:
logger.error(
"{0} is not found! You need {0} to use this submission functionality.".format(metadata_file))
return False
try:
client = AtCoderClient()
client.login(save_session_cache=not args.save_no_session_cache,
credential_supplier=credential_supplier,
use_local_session_cache=use_local_session_cache,
)
except LoginError:
logger.error("Login failed. Try again.")
return False
tester_args = []
if args.exec:
tester_args += ["-e", args.exec]
if args.dir:
tester_args += ["-d", args.dir]
if args.timeout:
tester_args += ["-t", str(args.timeout)]
if args.judge_type is not None:
tester_args += ["-j", str(args.judge_type)]
if args.error_value is not None:
tester_args += ["-v", str(args.error_value)]
if args.force or tester.main("", tester_args):
submissions = client.download_submission_list(metadata.problem.contest)
if not args.unlock_safety:
for submission in submissions:
if submission.problem_id == metadata.problem.problem_id:
logger.error(with_color("Cancel submitting because you already sent some code to the problem. Please "
"specify -u to send the code. {}".format(
metadata.problem.contest.get_submissions_url(submission)), Fore.LIGHTRED_EX))
return False
code_path = args.code or os.path.join(args.dir, metadata.code_filename)
for encoding in ['utf8', 'utf-8_sig', 'cp932']:
try:
with open(code_path, 'r', encoding=encoding) as f:
source = f.read()
break
except UnicodeDecodeError:
logger.warning("code wasn't recognized as {}".format(encoding))
logger.info(
"Submitting {} as {}".format(code_path, metadata.lang.name))
submission = client.submit_source_code(
metadata.problem.contest, metadata.problem, metadata.lang, source)
        logger.info("{} {}".format(
            with_color("Done!", Fore.LIGHTGREEN_EX),
            metadata.problem.contest.get_submissions_url(submission)))
        return True
    return False
if __name__ == "__main__":
main(sys.argv[0], sys.argv[1:])
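# Example (illustrative): main() can also be driven programmatically with
# the flags defined above, e.g.
#   main('submit', ['--dir', './A', '--timeout', '2'])
# which runs the local tests in ./A and submits on success.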
|
py | 7df6bf2631136d108764e0b64fb7cfe0fbfe8143 | class User:
def __init__(self, data):
self._id = data['id']
self._email = data['email']
self._name = data['name']
def get_all_data(self):
return {
'id': self._id,
'email': self._email,
'name': self._name
}
def get_id(self):
return self._id
def get_email(self):
return self._email
def get_name_first(self):
return self._name['first']
def get_name_last(self):
return self._name['last']
def get_name_full(self):
return self._name['first'] + ' ' + self._name['last']
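if __name__ == '__main__':
    # Minimal usage sketch (hypothetical payload, matching the keys this
    # class reads; not part of the original module).
    user = User({'id': 1, 'email': '[email protected]',
                 'name': {'first': 'Ada', 'last': 'Lovelace'}})
    print(user.get_name_full())  # -> Ada Lovelace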
|
py | 7df6c01d3fae499f0c5487be5e21a2c8b46a299f | import imix.utils.distributed_info as dist_info
import torch
import os
import random
from datetime import datetime
from imix.utils.third_party_libs import PathManager
from imix.utils.logger import setup_logger
from imix.utils.collect_running_env import collect_env_info
import argparse
import json
from imix.utils.config import set_imix_work_dir, seed_all_rng
import pprint
import sys
random.seed(datetime.now().timestamp())  # use a float seed; datetime objects are rejected by newer Python
def default_argument_parser(epilog=None):
if epilog is None:
epilog = f"""
iMIX framework running example:
1.Run on single machine:
a. Training on a multiple GPUs
${sys.argv[0]} --gpus 8 --config-file cfg.py --load-from /path/weight.pth
${sys.argv[0]} --gpus 8 --config-file cfg.py --resume-from /path/weight.pth
b. Training on a single GPU
${sys.argv[0]} --gpus 1 --config-file cfg.py --load-from /path/weight.pth
or
${sys.argv[0]} --config-file cfg.py --load-from /path/weight.pth
c. testing on a single GPU or multiple GPUS
${sys.argv[0]} --gpus 1 --config-file cfg.py --load-from /path/weight.pth --eval-only
${sys.argv[0]} --gpus 4 --config-file cfg.py --load-from /path/weight.pth --eval-only
2.Run on multiple machines:
(machine0)$ {sys.argv[
0]} --gpus 8 --node-rank 0 --machines 2 --master-addr 'tcp://127.0.0.1' --master-port 8889 [--other-flags]
(machine1)$ {sys.argv[
0]} --gpus 8 --node-rank 1 --machines 2 --master-addr 'tcp://127.0.0.1' --master-port 8889 [--other-flags]
""" # noqa
parser = argparse.ArgumentParser(epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--config-file', metavar='FILE', help='train config file path')
parser.add_argument('--resume-from', default=None, help='resume from the checkpoint file')
parser.add_argument('--load-from', default=None, help='load from the checkpoint file')
parser.add_argument('--eval-only', action='store_true', help='just run evaluation')
parser.add_argument('--build-submit', action='store_true', help='generate submission results')
parser.add_argument('--gpus', type=int, default=1, help='the number of gpus on each machine ')
parser.add_argument('--machines', type=int, default=1, help='the total number of machine to use')
parser.add_argument('--node-rank', type=int, default=0, help='the rank of current node(unique per machine)')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--master-port',
default=2**14 + hash(random.randint(0, 2**14)),
type=int,
help='it is the free port of mast node(rank 0) and is used for communication in distributed training')
parser.add_argument(
'--master-addr', default='tcp://127.0.0.1', type=str, help='the IP address of mast node(rank 0)')
return parser
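# Example (illustrative): parsing a single-GPU training command line with
# the parser built above:
#   args = default_argument_parser().parse_args(
#       ['--config-file', 'cfg.py', '--gpus', '1'])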
def default_setup(args, cfg):
output_dir = cfg.work_dir
set_imix_work_dir(output_dir)
if output_dir and dist_info.is_main_process():
PathManager.mkdirs(output_dir)
rank = dist_info.get_rank()
logger = setup_logger(output_dir, distributed_rank=rank, name='imix')
logger.info('Current environment information : \n{}'.format(collect_env_info()))
logger.info('Command line args: \n {}'.format(args))
if hasattr(args, 'config_file') and args.config_file != '':
logger.info('{} file content:\n{}'.format(args.config_file, PathManager.open(args.config_file, 'r').read()))
logger.info('full config file content: ')
pprint.pprint({k: v for k, v in cfg.items()})
if dist_info.is_main_process() and output_dir:
cfg_path = os.path.join(output_dir, 'config.json')
with open(cfg_path, 'w') as f:
f.write(json.dumps({k: v for k, v in cfg.items()}, indent=4, separators=(',', ':')))
logger.info('full config file saved to {}'.format(cfg_path))
seed = getattr(cfg, 'seed', None)
seed_all_rng(seed=None if seed is None else seed + rank)
if not (hasattr(cfg, 'eval_only') and getattr(cfg, 'eval_only', False)):
torch.backends.cudnn.benchmark = getattr(cfg, 'CUDNN_BENCHMARK', False)
|
py | 7df6c2358171a0a650e762883869944e9d652b51 | """pypi package setup."""
from __future__ import print_function
import codecs
from os import path
from setuptools import setup, find_packages
try:
import ROOT # pylint: disable=W0611
except ImportError:
print("ROOT is required by this library.")
DEPS = ['numpy', 'PyYAML>=4', 'future', 'pylint']  # '>4.*' is not a valid PEP 440 specifier
HERE = path.abspath(path.dirname(__file__))
with codecs.open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setup(
name='hepdata_lib',
version='0.7.0',
description='Library for getting your data into HEPData',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/HEPData/hepdata_lib',
author='Andreas Albert, Clemens Lange',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='HEPData physics OpenData',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
zip_safe=False,
install_requires=DEPS,
setup_requires=['pytest-runner', 'pytest-cov'],
tests_require=['pytest', 'papermill', 'six'],
project_urls={
'Documentation': 'https://hepdata-lib.readthedocs.io',
'Bug Reports': 'https://github.com/HEPData/hepdata_lib/issues',
'Source': 'https://github.com/HEPData/hepdata_lib',
}, )
|
py | 7df6c4675d509658fd487972474efe1348b48d1e | import hashlib
import textwrap
try:
import sqlite3
except ImportError:
pass
from collections import defaultdict
from pycoin.encoding.hash import hash160
class Keychain(object):
def __init__(self, sqlite3_db=None):
self._db = sqlite3_db or sqlite3.connect(":memory:")
self._db.text_factory = type(b'')
self._init_tables()
self.clear_secrets()
def commit(self):
self._db.commit()
def _exec_sql(self, sql, *args):
c = self._db.cursor()
c.execute(textwrap.dedent(sql), args)
return c
def _exec_sql_list(self, SQL):
for sql in SQL:
self._exec_sql(sql)
def _init_table_hash160(self):
self._exec_sql_list([
"create table if not exists HASH160 (hash160 blob primary key, path text, fingerprint blob)",
])
def _init_table_p2s(self):
self._exec_sql_list([
"create table if not exists P2S (hash160 blob primary key, hash256 blob, script blob)",
"create index if not exists P2S_H256 on P2S (hash256)",
])
def _init_tables(self):
self._init_table_hash160()
self._init_table_p2s()
self.commit()
def add_keys_path(self, keys, path):
total = 0
for key in keys:
fingerprint = key.fingerprint()
hash160 = key.subkey_for_path(path).hash160()
self._exec_sql("insert or ignore into HASH160 values (?, ?, ?)", hash160, path, fingerprint)
total += 1
return total
def add_key_paths(self, key, path_iterator=[""]):
fingerprint = key.fingerprint()
total = 0
for path in path_iterator:
hash160 = key.subkey_for_path(path).hash160()
self._exec_sql("insert or ignore into HASH160 values (?, ?, ?)", hash160, path, fingerprint)
total += 1
return total
def path_for_hash160(self, hash160):
SQL = "select fingerprint, path from HASH160 where hash160 = ?"
c = self._exec_sql(SQL, hash160)
r = c.fetchone()
if r is not None:
return r[0], r[1].decode("utf8")
def add_p2s_script(self, script):
h160 = hash160(script)
h256 = hashlib.sha256(script).digest()
self._exec_sql("insert or ignore into P2S values (?, ?, ?)", h160, h256, script)
def add_p2s_scripts(self, scripts):
for script in scripts:
self.add_p2s_script(script)
self.commit()
def p2s_for_hash(self, hash160or256):
SQL = "select script from P2S where hash160 = ? or hash256 = ?"
c = self._exec_sql(SQL, hash160or256, hash160or256)
r = c.fetchone()
if r is not None:
return r[0]
def _add_key_to_cache(self, key):
secret_exponent = key.secret_exponent()
public_pair = key.public_pair()
for is_compressed in (True, False):
hash160 = key.hash160(is_compressed=is_compressed)
self._secret_exponent_cache[hash160] = (secret_exponent, public_pair, is_compressed, key._generator)
def get(self, hash160, default=None):
v = self.p2s_for_hash(hash160)
if v:
return v
if hash160 not in self._secret_exponent_cache:
v = self.path_for_hash160(hash160)
if v:
fingerprint, path = v
for key in self._secrets.get(fingerprint, []):
subkey = key.subkey_for_path(path)
self._add_key_to_cache(subkey)
return self._secret_exponent_cache.get(hash160, default)
def add_secret(self, private_key):
self._secrets[private_key.fingerprint()].add(private_key)
self._add_key_to_cache(private_key)
def add_secrets(self, private_keys):
for key in private_keys:
self.add_secret(key)
def has_secrets(self):
return len(self._secrets) + len(self._secret_exponent_cache) > 0
def clear_secrets(self):
self._secrets = defaultdict(set)
self._secret_exponent_cache = {}
def interested_hashes(self):
SQL = "select hash160 from HASH160"
c = self._exec_sql(SQL)
for r in c:
yield r[0]
SQL = "select hash160, hash256 from P2S"
c = self._exec_sql(SQL)
for r in c:
yield r[0]
yield r[1]
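# Hypothetical usage sketch (assumes a BIP32-style key object exposing
# fingerprint(), subkey_for_path() and hash160(), as the methods above expect):
#   keychain = Keychain()
#   keychain.add_key_paths(master_key, ("0/%d" % i for i in range(20)))
#   keychain.commit()
#   keychain.path_for_hash160(h160)  # -> (fingerprint, path) or None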
|
py | 7df6c4caee33713c9b6fa7318cd59b04cafabf1f | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def spark_streaming_to_pubsublite(
project_number: int, location: str, topic_id: str
) -> None:
# [START pubsublite_spark_streaming_to_pubsublite]
from pyspark.sql import SparkSession
from pyspark.sql.functions import array, create_map, col, lit, when
from pyspark.sql.types import BinaryType, StringType
import uuid
# TODO(developer):
# project_number = 11223344556677
# location = "us-central1-a"
# topic_id = "your-topic-id"
spark = SparkSession.builder.appName("write-app").getOrCreate()
# Create a RateStreamSource that generates consecutive numbers with timestamps:
# |-- timestamp: timestamp (nullable = true)
# |-- value: long (nullable = true)
sdf = spark.readStream.format("rate").option("rowsPerSecond", 1).load()
# Transform the dataframe to match the required data fields and data types:
# https://github.com/googleapis/java-pubsublite-spark#data-schema
sdf = (
sdf.withColumn("key", lit("example").cast(BinaryType()))
.withColumn("data", col("value").cast(StringType()).cast(BinaryType()))
.withColumnRenamed("timestamp", "event_timestamp")
# Populate the attributes field. For example, an even value will
# have {"key1", [b"even"]}.
.withColumn(
"attributes",
create_map(
lit("key1"),
array(when(col("value") % 2 == 0, b"even").otherwise(b"odd")),
),
)
.drop("value")
)
# After the transformation, the schema of the dataframe should look like:
# |-- key: binary (nullable = false)
# |-- data: binary (nullable = true)
# |-- event_timestamp: timestamp (nullable = true)
# |-- attributes: map (nullable = false)
# | |-- key: string
# | |-- value: array (valueContainsNull = false)
# | | |-- element: binary (containsNull = false)
sdf.printSchema()
query = (
sdf.writeStream.format("pubsublite")
.option(
"pubsublite.topic",
f"projects/{project_number}/locations/{location}/topics/{topic_id}",
)
# Required. Use a unique checkpoint location for each job.
.option("checkpointLocation", "/tmp/app" + uuid.uuid4().hex)
.outputMode("append")
.trigger(processingTime="1 second")
.start()
)
# Wait 60 seconds to terminate the query.
query.awaitTermination(60)
query.stop()
# [END pubsublite_spark_streaming_to_pubsublite]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--project_number", help="Google Cloud Project Number")
parser.add_argument("--location", help="Your Cloud location, e.g. us-central1-a")
parser.add_argument("--topic_id", help="Your Pub/Sub Lite topic ID")
args = parser.parse_args()
spark_streaming_to_pubsublite(args.project_number, args.location, args.topic_id)
|
py | 7df6c4de64c20b76e72b57fc6677d4664b387c55 | formatters = {
'RED': '\033[91m',
'GREEN': '\033[92m',
'END': '\033[0m',
}
def boxwrap(text, linenumbers=True):
'''Wraps text in a box'''
response = ''
    if isinstance(text, bytes):  # normalise to str so formatting behaves the same on Python 2 and 3
        text = text.decode('utf-8')
split = text.splitlines()
if len(split) == 0:
return response
total = max([len(x) for x in split])
if linenumbers:
total += 7
else:
total += 3
response = '+' + ('-' * total) + '+' + '\n'
for index, line in enumerate(text.splitlines()):
if linenumbers:
newline = '| {index}. {line}'.format(index=index + 1, line=line)
else:
            newline = '| {line}'.format(line=line)
if len(newline) < total:
newline += ' ' * (total - len(newline) + 1)
newline += '|'
response += newline + '\n'
response += '+' + ('-' * total) + '+'
return response
def writecolour(text, colour='RED'):
response = '{' + colour + '}' + text + '{END}'
response = response.format(**formatters)
return response
def whalesay(text):
response = boxwrap(text=text, linenumbers=False)
response += '''
\
\ ==
\ ===
/""""""""""""""""\___/ ===
{ /
\______ o __/
\ \ __/
\____\______/'''
return response
def piesay(text):
response = boxwrap(text=text, linenumbers=False)
response += '''
(
)
__..---..__
,-=' / | \ `=-.
:--..___________..--;
\.,_____________,./ '''
return response
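if __name__ == '__main__':
    # Demo (illustrative, not part of the original module):
    # boxwrap('hi', linenumbers=False) renders
    #   +-----+
    #   | hi  |
    #   +-----+
    print(boxwrap('hi', linenumbers=False))
    print(whalesay('hello'))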
|
py | 7df6c53cea1c7ae02092466141f7baaeecce0c42 | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
"""
Shard constructor that treats shard objects,
named functions and lists of strings (code) equivalently
Subclass to create more specific connectors, e.g.
branching, looping, methods, function calls, etc.
"""
def namegen(name = 'shard'):
"""
Generates names for anonymous shards
"""
i = 0
while True:
yield name+str(i)
i += 1
def iscode(c):
"""
Tests if argument type could be lines of code,
i.e. list of strings
"""
if type(c) == type([]):
if c:
return type(c[0]) == type('')
else:
return True
else: return False
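# Examples (illustrative): iscode(["x = 1"]) -> True, iscode([]) -> True,
# iscode("x = 1") -> False.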
def isfunction(f):
"""
Tests if argument is a function
"""
return callable(f)
class DependencyError(Exception): pass
class ArgumentError(Exception): pass
indentation = " "
nl = "\n"
class shard(object):
"""
Initialisation creates shards from lines of code, existing functions,
or a combination of these and existing shard objects.
As the shard base class, classmethods for handling and checking
shard dependencies are also given
Arguments:
name = name of new shard, default None. If no name is specified
a default name will be generated (except where shard is
created from a single function, where the function's name
will be used)
annotate = whether to add annotations for imported code into
new shard's generated code, default True
function = if shard is being made from a single function, it can be
entered here. Used mainly internally to initialise function
objects passed into shards. If present, any following
arguments are ignored. Default is None
code = as function, but if initialisation is for single code block
shards = the shards that will compose the body of the new shard,
in the order in which they will be added. Arguments here
can be any combination of existing shard objects, function
objects, and lists of code lines (e.g. as imported by
getshard); these will be initialised as necessary
indent = level of indentation to add to imported code, default 0
Returns:
shard object containing the name and code of the new shard
"""
# generator to name anonymous shards
namer = namegen()
# dependency checking attributes
requiredMethods = set()
requiredIShards = set()
# classmethods
@classmethod
def addReqMethods(self, *methods):
"""
Adds methods to the list of requirements for this class.
A DependencyError will be raised if these are not filled
when the object is constructed
Arguments:
methods = string names of additional required methods
"""
self.requiredMethods = self.requiredMethods | set(methods)
@classmethod
def addReqIShards(self, *ishards):
"""
Adds inline shards to the list of requirements for this class.
A DependencyError will be raised if these are not filled
when the object is constructed
Arguments:
ishards = string names of additional required shards
"""
self.requiredIShards = self.requiredIShards | set(ishards)
@classmethod
def remReqMethods(self, *methods):
"""
Removes methods from list of requirements. Methods
given but not in requirements list will be ignored
Arguments:
methods = string names of unrequired methods. If empty,
all methods will be removed
"""
if not methods:
self.requiredMethods = set()
else:
self.requiredMethods = self.requiredMethods - set(methods)
@classmethod
def remReqIShards(self, *ishards):
"""
Removes inline shards from list of requirements. Shards
given but not in requirements list will be ignored
Arguments:
ishards = string names of unrequired shards. If empty,
all shards will be removed
"""
if not ishards:
self.requiredIShards = set()
else:
self.requiredIShards = self.requiredIShards - set(ishards)
# default initialisation parameters
initargs = {}
initargs['name'] = None
initargs['annotate'] = True
initargs['function'] = None
initargs['code'] = None
initargs['shards'] = []
initargs['indent'] = 0
# instance methods
def __init__(self, name = None, annotate = True, function = None,
code = None, shards = [], indent = 0):
super(shard, self).__init__()
self.indent = indent
if function:
self.name = function.func_name
self.code = self.addindent(self.getshard(function), indent)
self.shards = [function]
elif code:
if name:
self.name = name
else:
self.name = self.namer.next()
self.code = self.addindent(code, indent)
self.shards = [code]
else:
if name:
self.name = name
else:
self.name = self.namer.next()
self.code = []
self.shards = self.makeShards(shards)
for s in self.shards:
if annotate:
self.code += self.addindent(s.annotate(), indent)
else:
self.code += self.addindent(s.code, indent)
def makeShards(self, things = None):
"""
Converts functions or lines of code to shard objects
Arguments:
things = any mix of shard objects, functions or lines
of code to convert, in a sequence. Default
is None, in which case self.shards is used
Returns:
list of inline shards equivalent to arguments
"""
        if things is None:
things = self.shards
shards = []
for t in things:
if isfunction(t):
shards.append(shard(function = t))
            elif iscode(t):
                nm = self.name + '.' + self.namer.next()
                shards.append(shard(name = nm, code = t))
else:
shards.append(t)
return shards
def checkDependencies(self, mshards, ishards):
"""
Checks that given methods and inline shards satisfy
the dependencies listed by the class; raises a
DependencyError if they are not
Arguments:
mshards = sequence of method shard objects
ishards = list of, or dict whose keys are, the names
of the supplied inline shards
"""
error = ""
methods = set([s.name for s in mshards])
if isinstance(ishards, dict):
inlines = set(ishards.keys())
else:
inlines = set(ishards)
if not self.requiredMethods <= methods:
error += "need methods "+ str(self.requiredMethods - methods)
if not self.requiredIShards <= inlines:
error += "need ishards "+ str(self.requiredIShards - inlines)
if not error == "":
raise DependencyError, error
return True
def getshard(self, function):
"""
Gets shard code for generation
Arguments:
function = shard function to get
Returns:
list of lines of code of function
"""
# get code, throwaway def line
lines = inspect.getsource(function).splitlines(True)[1:]
# remove any whitespace lines
lines = [line for line in lines if not line.isspace()]
# remove docstrings
doctag = r'"""'
while True:
if lines[0].count(doctag) % 2 == 1:
lines.pop(0) # remove line with opening doctag
while lines[0].count(doctag) % 2 == 0:
lines.pop(0) # remove lines till tag match
lines.pop(0) # remove matching tag
if lines[0].count(doctag) == 0:
break # no docstring, start of code
else: # docstring tags closed, continue till code line found
lines.pop(0)
return [c[len(lines[0]) - len(lines[0].lstrip()):] for c in lines] # remove leading indentation
def annotate(self, delimchar = '-'):
"""
Marks out start and end of shard code with comments
Arguments:
delimchar = single character string containing character to be used
in marking out shard limit across the page
Returns:
list of lines of code surrounded by delimiter comments as specified
"""
start = r"# START SHARD: " + self.name + " "
start = self.addindent([start], self.indent)[0]
start = start.ljust(80, delimchar) + nl
end = r"# END SHARD: " + self.name + " "
end = self.addindent([end], self.indent)[0]
end = end.ljust(80, delimchar) + nl
return [start] + self.code + [end] + [nl]
def addindent(self, lines = None, level = 1):
"""
Indents code with spaces
Arguments:
level = number of levels to be indented, defaults to 1
Returns:
object's code attribute prefixed by specified amount of whitespace
"""
        if lines is None:
lines = self.code
if level < 0: # remove indentation
level = -level
return [ line[len(indentation*level):] for line in lines ]
elif level == 0:
return lines
elif level > 0: # add indentation
return [indentation*level + line for line in lines]
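    # Example (illustrative): addindent(["x = 1\n"], 2) returns the line
    # prefixed by indentation * 2, while a negative level strips indentation.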
def writeFile(self, filename = None):
"""
Writes code from this shard into a file.
Arguments:
filename = filename to write to. No checking of name clashes
is performed, defaults to shard object name with
a .py extension
Returns:
file containing shard code
"""
if not filename:
filename = self.name + '.py'
file = open(filename,"w")
file.writelines(self.code)
file.close()
return file
class docShard(shard):
"""
As shard constructor, but additionally sets a self.docstring
attribute to be a list of the lines of the docstring, indented one
level further than given indentation
Additional argument:
docstring = formatted string of comments, default is empty
"""
# default initialisation parameters
initargs = {}
initargs['name'] = None
initargs['annotate'] = True
initargs['docstring'] = ''
initargs['shards'] = []
def __init__(self, name = None, annotate = True, docstring = '', shards = []):
super(docShard, self).__init__(name = name, annotate = annotate, shards = shards)
if docstring:
self.docstring = self.makedoc(docstring)
else:
self.docstring = []
def makedoc(self, doc, indent = 0):
"""
Creates docstring
Arguments:
doc = formatted string for docstring
Returns:
list of strings containing lines of docstring
"""
tag = "\"\"\"" + nl
docstr = tag + doc + nl + tag
return self.addindent(docstr.splitlines(True), indent)
|
py | 7df6c5be0ada963089033ad1cfa396b875a4481a | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_swigfaiss', [dirname(__file__)])
except ImportError:
import _swigfaiss
return _swigfaiss
if fp is not None:
try:
_mod = imp.load_module('_swigfaiss', fp, pathname, description)
finally:
fp.close()
return _mod
_swigfaiss = swig_import_helper()
del swig_import_helper
else:
import _swigfaiss
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class FloatVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, FloatVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, FloatVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_FloatVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.FloatVector_push_back(self, *args)
def clear(self): return _swigfaiss.FloatVector_clear(self)
def data(self): return _swigfaiss.FloatVector_data(self)
def size(self): return _swigfaiss.FloatVector_size(self)
def at(self, *args): return _swigfaiss.FloatVector_at(self, *args)
def resize(self, *args): return _swigfaiss.FloatVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_FloatVector
__del__ = lambda self : None;
FloatVector_swigregister = _swigfaiss.FloatVector_swigregister
FloatVector_swigregister(FloatVector)
class DoubleVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_DoubleVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.DoubleVector_push_back(self, *args)
def clear(self): return _swigfaiss.DoubleVector_clear(self)
def data(self): return _swigfaiss.DoubleVector_data(self)
def size(self): return _swigfaiss.DoubleVector_size(self)
def at(self, *args): return _swigfaiss.DoubleVector_at(self, *args)
def resize(self, *args): return _swigfaiss.DoubleVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_DoubleVector
__del__ = lambda self : None;
DoubleVector_swigregister = _swigfaiss.DoubleVector_swigregister
DoubleVector_swigregister(DoubleVector)
class ByteVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ByteVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ByteVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_ByteVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.ByteVector_push_back(self, *args)
def clear(self): return _swigfaiss.ByteVector_clear(self)
def data(self): return _swigfaiss.ByteVector_data(self)
def size(self): return _swigfaiss.ByteVector_size(self)
def at(self, *args): return _swigfaiss.ByteVector_at(self, *args)
def resize(self, *args): return _swigfaiss.ByteVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_ByteVector
__del__ = lambda self : None;
ByteVector_swigregister = _swigfaiss.ByteVector_swigregister
ByteVector_swigregister(ByteVector)
class Uint64Vector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Uint64Vector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Uint64Vector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_Uint64Vector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.Uint64Vector_push_back(self, *args)
def clear(self): return _swigfaiss.Uint64Vector_clear(self)
def data(self): return _swigfaiss.Uint64Vector_data(self)
def size(self): return _swigfaiss.Uint64Vector_size(self)
def at(self, *args): return _swigfaiss.Uint64Vector_at(self, *args)
def resize(self, *args): return _swigfaiss.Uint64Vector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_Uint64Vector
__del__ = lambda self : None;
Uint64Vector_swigregister = _swigfaiss.Uint64Vector_swigregister
Uint64Vector_swigregister(Uint64Vector)
class LongVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, LongVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, LongVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_LongVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.LongVector_push_back(self, *args)
def clear(self): return _swigfaiss.LongVector_clear(self)
def data(self): return _swigfaiss.LongVector_data(self)
def size(self): return _swigfaiss.LongVector_size(self)
def at(self, *args): return _swigfaiss.LongVector_at(self, *args)
def resize(self, *args): return _swigfaiss.LongVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_LongVector
__del__ = lambda self : None;
LongVector_swigregister = _swigfaiss.LongVector_swigregister
LongVector_swigregister(LongVector)
class IntVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_IntVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.IntVector_push_back(self, *args)
def clear(self): return _swigfaiss.IntVector_clear(self)
def data(self): return _swigfaiss.IntVector_data(self)
def size(self): return _swigfaiss.IntVector_size(self)
def at(self, *args): return _swigfaiss.IntVector_at(self, *args)
def resize(self, *args): return _swigfaiss.IntVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_IntVector
__del__ = lambda self : None;
IntVector_swigregister = _swigfaiss.IntVector_swigregister
IntVector_swigregister(IntVector)
class VectorTransformVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, VectorTransformVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, VectorTransformVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_VectorTransformVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.VectorTransformVector_push_back(self, *args)
def clear(self): return _swigfaiss.VectorTransformVector_clear(self)
def data(self): return _swigfaiss.VectorTransformVector_data(self)
def size(self): return _swigfaiss.VectorTransformVector_size(self)
def at(self, *args): return _swigfaiss.VectorTransformVector_at(self, *args)
def resize(self, *args): return _swigfaiss.VectorTransformVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_VectorTransformVector
__del__ = lambda self : None;
VectorTransformVector_swigregister = _swigfaiss.VectorTransformVector_swigregister
VectorTransformVector_swigregister(VectorTransformVector)
class OperatingPointVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, OperatingPointVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, OperatingPointVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_OperatingPointVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.OperatingPointVector_push_back(self, *args)
def clear(self): return _swigfaiss.OperatingPointVector_clear(self)
def data(self): return _swigfaiss.OperatingPointVector_data(self)
def size(self): return _swigfaiss.OperatingPointVector_size(self)
def at(self, *args): return _swigfaiss.OperatingPointVector_at(self, *args)
def resize(self, *args): return _swigfaiss.OperatingPointVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_OperatingPointVector
__del__ = lambda self : None;
OperatingPointVector_swigregister = _swigfaiss.OperatingPointVector_swigregister
OperatingPointVector_swigregister(OperatingPointVector)
class FloatVectorVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, FloatVectorVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, FloatVectorVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_FloatVectorVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.FloatVectorVector_push_back(self, *args)
def clear(self): return _swigfaiss.FloatVectorVector_clear(self)
def data(self): return _swigfaiss.FloatVectorVector_data(self)
def size(self): return _swigfaiss.FloatVectorVector_size(self)
def at(self, *args): return _swigfaiss.FloatVectorVector_at(self, *args)
def resize(self, *args): return _swigfaiss.FloatVectorVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_FloatVectorVector
__del__ = lambda self : None;
FloatVectorVector_swigregister = _swigfaiss.FloatVectorVector_swigregister
FloatVectorVector_swigregister(FloatVectorVector)
class ByteVectorVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ByteVectorVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ByteVectorVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_ByteVectorVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.ByteVectorVector_push_back(self, *args)
def clear(self): return _swigfaiss.ByteVectorVector_clear(self)
def data(self): return _swigfaiss.ByteVectorVector_data(self)
def size(self): return _swigfaiss.ByteVectorVector_size(self)
def at(self, *args): return _swigfaiss.ByteVectorVector_at(self, *args)
def resize(self, *args): return _swigfaiss.ByteVectorVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_ByteVectorVector
__del__ = lambda self : None;
ByteVectorVector_swigregister = _swigfaiss.ByteVectorVector_swigregister
ByteVectorVector_swigregister(ByteVectorVector)
class LongVectorVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, LongVectorVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, LongVectorVector, name)
__repr__ = _swig_repr
def __init__(self):
this = _swigfaiss.new_LongVectorVector()
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _swigfaiss.LongVectorVector_push_back(self, *args)
def clear(self): return _swigfaiss.LongVectorVector_clear(self)
def data(self): return _swigfaiss.LongVectorVector_data(self)
def size(self): return _swigfaiss.LongVectorVector_size(self)
def at(self, *args): return _swigfaiss.LongVectorVector_at(self, *args)
def resize(self, *args): return _swigfaiss.LongVectorVector_resize(self, *args)
__swig_destroy__ = _swigfaiss.delete_LongVectorVector
__del__ = lambda self : None;
LongVectorVector_swigregister = _swigfaiss.LongVectorVector_swigregister
LongVectorVector_swigregister(LongVectorVector)
def popcount64(*args):
return _swigfaiss.popcount64(*args)
popcount64 = _swigfaiss.popcount64
def hammings(*args):
return _swigfaiss.hammings(*args)
hammings = _swigfaiss.hammings
def bitvec_print(*args):
return _swigfaiss.bitvec_print(*args)
bitvec_print = _swigfaiss.bitvec_print
def fvecs2bitvecs(*args):
return _swigfaiss.fvecs2bitvecs(*args)
fvecs2bitvecs = _swigfaiss.fvecs2bitvecs
def fvec2bitvec(*args):
return _swigfaiss.fvec2bitvec(*args)
fvec2bitvec = _swigfaiss.fvec2bitvec
def hammings_knn(*args):
return _swigfaiss.hammings_knn(*args)
hammings_knn = _swigfaiss.hammings_knn
def hammings_knn_core(*args):
return _swigfaiss.hammings_knn_core(*args)
hammings_knn_core = _swigfaiss.hammings_knn_core
def hamming_count_thres(*args):
return _swigfaiss.hamming_count_thres(*args)
hamming_count_thres = _swigfaiss.hamming_count_thres
def match_hamming_thres(*args):
return _swigfaiss.match_hamming_thres(*args)
match_hamming_thres = _swigfaiss.match_hamming_thres
def crosshamming_count_thres(*args):
return _swigfaiss.crosshamming_count_thres(*args)
crosshamming_count_thres = _swigfaiss.crosshamming_count_thres
class HammingComputer4(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputer4, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputer4, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.HammingComputer4_a0_set
__swig_getmethods__["a0"] = _swigfaiss.HammingComputer4_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.HammingComputer4_a0_get, _swigfaiss.HammingComputer4_a0_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputer4(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputer4_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputer4
__del__ = lambda self : None;
HammingComputer4_swigregister = _swigfaiss.HammingComputer4_swigregister
HammingComputer4_swigregister(HammingComputer4)
class HammingComputer8(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputer8, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputer8, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.HammingComputer8_a0_set
__swig_getmethods__["a0"] = _swigfaiss.HammingComputer8_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.HammingComputer8_a0_get, _swigfaiss.HammingComputer8_a0_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputer8(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputer8_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputer8
__del__ = lambda self : None;
HammingComputer8_swigregister = _swigfaiss.HammingComputer8_swigregister
HammingComputer8_swigregister(HammingComputer8)
class HammingComputer16(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputer16, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputer16, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.HammingComputer16_a0_set
__swig_getmethods__["a0"] = _swigfaiss.HammingComputer16_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.HammingComputer16_a0_get, _swigfaiss.HammingComputer16_a0_set)
__swig_setmethods__["a1"] = _swigfaiss.HammingComputer16_a1_set
__swig_getmethods__["a1"] = _swigfaiss.HammingComputer16_a1_get
if _newclass:a1 = _swig_property(_swigfaiss.HammingComputer16_a1_get, _swigfaiss.HammingComputer16_a1_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputer16(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputer16_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputer16
__del__ = lambda self : None;
HammingComputer16_swigregister = _swigfaiss.HammingComputer16_swigregister
HammingComputer16_swigregister(HammingComputer16)
class HammingComputer20(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputer20, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputer20, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.HammingComputer20_a0_set
__swig_getmethods__["a0"] = _swigfaiss.HammingComputer20_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.HammingComputer20_a0_get, _swigfaiss.HammingComputer20_a0_set)
__swig_setmethods__["a1"] = _swigfaiss.HammingComputer20_a1_set
__swig_getmethods__["a1"] = _swigfaiss.HammingComputer20_a1_get
if _newclass:a1 = _swig_property(_swigfaiss.HammingComputer20_a1_get, _swigfaiss.HammingComputer20_a1_set)
__swig_setmethods__["a2"] = _swigfaiss.HammingComputer20_a2_set
__swig_getmethods__["a2"] = _swigfaiss.HammingComputer20_a2_get
if _newclass:a2 = _swig_property(_swigfaiss.HammingComputer20_a2_get, _swigfaiss.HammingComputer20_a2_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputer20(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputer20_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputer20
__del__ = lambda self : None;
HammingComputer20_swigregister = _swigfaiss.HammingComputer20_swigregister
HammingComputer20_swigregister(HammingComputer20)
class HammingComputer32(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputer32, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputer32, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.HammingComputer32_a0_set
__swig_getmethods__["a0"] = _swigfaiss.HammingComputer32_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.HammingComputer32_a0_get, _swigfaiss.HammingComputer32_a0_set)
__swig_setmethods__["a1"] = _swigfaiss.HammingComputer32_a1_set
__swig_getmethods__["a1"] = _swigfaiss.HammingComputer32_a1_get
if _newclass:a1 = _swig_property(_swigfaiss.HammingComputer32_a1_get, _swigfaiss.HammingComputer32_a1_set)
__swig_setmethods__["a2"] = _swigfaiss.HammingComputer32_a2_set
__swig_getmethods__["a2"] = _swigfaiss.HammingComputer32_a2_get
if _newclass:a2 = _swig_property(_swigfaiss.HammingComputer32_a2_get, _swigfaiss.HammingComputer32_a2_set)
__swig_setmethods__["a3"] = _swigfaiss.HammingComputer32_a3_set
__swig_getmethods__["a3"] = _swigfaiss.HammingComputer32_a3_get
if _newclass:a3 = _swig_property(_swigfaiss.HammingComputer32_a3_get, _swigfaiss.HammingComputer32_a3_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputer32(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputer32_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputer32
__del__ = lambda self : None;
HammingComputer32_swigregister = _swigfaiss.HammingComputer32_swigregister
HammingComputer32_swigregister(HammingComputer32)
class HammingComputer64(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputer64, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputer64, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.HammingComputer64_a0_set
__swig_getmethods__["a0"] = _swigfaiss.HammingComputer64_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.HammingComputer64_a0_get, _swigfaiss.HammingComputer64_a0_set)
__swig_setmethods__["a1"] = _swigfaiss.HammingComputer64_a1_set
__swig_getmethods__["a1"] = _swigfaiss.HammingComputer64_a1_get
if _newclass:a1 = _swig_property(_swigfaiss.HammingComputer64_a1_get, _swigfaiss.HammingComputer64_a1_set)
__swig_setmethods__["a2"] = _swigfaiss.HammingComputer64_a2_set
__swig_getmethods__["a2"] = _swigfaiss.HammingComputer64_a2_get
if _newclass:a2 = _swig_property(_swigfaiss.HammingComputer64_a2_get, _swigfaiss.HammingComputer64_a2_set)
__swig_setmethods__["a3"] = _swigfaiss.HammingComputer64_a3_set
__swig_getmethods__["a3"] = _swigfaiss.HammingComputer64_a3_get
if _newclass:a3 = _swig_property(_swigfaiss.HammingComputer64_a3_get, _swigfaiss.HammingComputer64_a3_set)
__swig_setmethods__["a4"] = _swigfaiss.HammingComputer64_a4_set
__swig_getmethods__["a4"] = _swigfaiss.HammingComputer64_a4_get
if _newclass:a4 = _swig_property(_swigfaiss.HammingComputer64_a4_get, _swigfaiss.HammingComputer64_a4_set)
__swig_setmethods__["a5"] = _swigfaiss.HammingComputer64_a5_set
__swig_getmethods__["a5"] = _swigfaiss.HammingComputer64_a5_get
if _newclass:a5 = _swig_property(_swigfaiss.HammingComputer64_a5_get, _swigfaiss.HammingComputer64_a5_set)
__swig_setmethods__["a6"] = _swigfaiss.HammingComputer64_a6_set
__swig_getmethods__["a6"] = _swigfaiss.HammingComputer64_a6_get
if _newclass:a6 = _swig_property(_swigfaiss.HammingComputer64_a6_get, _swigfaiss.HammingComputer64_a6_set)
__swig_setmethods__["a7"] = _swigfaiss.HammingComputer64_a7_set
__swig_getmethods__["a7"] = _swigfaiss.HammingComputer64_a7_get
if _newclass:a7 = _swig_property(_swigfaiss.HammingComputer64_a7_get, _swigfaiss.HammingComputer64_a7_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputer64(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputer64_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputer64
__del__ = lambda self : None;
HammingComputer64_swigregister = _swigfaiss.HammingComputer64_swigregister
HammingComputer64_swigregister(HammingComputer64)
class HammingComputerDefault(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputerDefault, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputerDefault, name)
__repr__ = _swig_repr
__swig_setmethods__["a"] = _swigfaiss.HammingComputerDefault_a_set
__swig_getmethods__["a"] = _swigfaiss.HammingComputerDefault_a_get
if _newclass:a = _swig_property(_swigfaiss.HammingComputerDefault_a_get, _swigfaiss.HammingComputerDefault_a_set)
__swig_setmethods__["n"] = _swigfaiss.HammingComputerDefault_n_set
__swig_getmethods__["n"] = _swigfaiss.HammingComputerDefault_n_get
if _newclass:n = _swig_property(_swigfaiss.HammingComputerDefault_n_get, _swigfaiss.HammingComputerDefault_n_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputerDefault(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputerDefault_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputerDefault
__del__ = lambda self : None;
HammingComputerDefault_swigregister = _swigfaiss.HammingComputerDefault_swigregister
HammingComputerDefault_swigregister(HammingComputerDefault)
class HammingComputerM8(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputerM8, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputerM8, name)
__repr__ = _swig_repr
__swig_setmethods__["a"] = _swigfaiss.HammingComputerM8_a_set
__swig_getmethods__["a"] = _swigfaiss.HammingComputerM8_a_get
if _newclass:a = _swig_property(_swigfaiss.HammingComputerM8_a_get, _swigfaiss.HammingComputerM8_a_set)
__swig_setmethods__["n"] = _swigfaiss.HammingComputerM8_n_set
__swig_getmethods__["n"] = _swigfaiss.HammingComputerM8_n_get
if _newclass:n = _swig_property(_swigfaiss.HammingComputerM8_n_get, _swigfaiss.HammingComputerM8_n_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputerM8(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputerM8_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputerM8
__del__ = lambda self : None;
HammingComputerM8_swigregister = _swigfaiss.HammingComputerM8_swigregister
HammingComputerM8_swigregister(HammingComputerM8)
class HammingComputerM4(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HammingComputerM4, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HammingComputerM4, name)
__repr__ = _swig_repr
__swig_setmethods__["a"] = _swigfaiss.HammingComputerM4_a_set
__swig_getmethods__["a"] = _swigfaiss.HammingComputerM4_a_get
if _newclass:a = _swig_property(_swigfaiss.HammingComputerM4_a_get, _swigfaiss.HammingComputerM4_a_set)
__swig_setmethods__["n"] = _swigfaiss.HammingComputerM4_n_set
__swig_getmethods__["n"] = _swigfaiss.HammingComputerM4_n_get
if _newclass:n = _swig_property(_swigfaiss.HammingComputerM4_n_get, _swigfaiss.HammingComputerM4_n_set)
def __init__(self, *args):
this = _swigfaiss.new_HammingComputerM4(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.HammingComputerM4_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_HammingComputerM4
__del__ = lambda self : None;
HammingComputerM4_swigregister = _swigfaiss.HammingComputerM4_swigregister
HammingComputerM4_swigregister(HammingComputerM4)
def generalized_hamming_64(*args):
return _swigfaiss.generalized_hamming_64(*args)
generalized_hamming_64 = _swigfaiss.generalized_hamming_64
class GenHammingComputer8(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, GenHammingComputer8, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, GenHammingComputer8, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.GenHammingComputer8_a0_set
__swig_getmethods__["a0"] = _swigfaiss.GenHammingComputer8_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.GenHammingComputer8_a0_get, _swigfaiss.GenHammingComputer8_a0_set)
def __init__(self, *args):
this = _swigfaiss.new_GenHammingComputer8(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.GenHammingComputer8_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_GenHammingComputer8
__del__ = lambda self : None;
GenHammingComputer8_swigregister = _swigfaiss.GenHammingComputer8_swigregister
GenHammingComputer8_swigregister(GenHammingComputer8)
class GenHammingComputer16(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, GenHammingComputer16, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, GenHammingComputer16, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.GenHammingComputer16_a0_set
__swig_getmethods__["a0"] = _swigfaiss.GenHammingComputer16_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.GenHammingComputer16_a0_get, _swigfaiss.GenHammingComputer16_a0_set)
__swig_setmethods__["a1"] = _swigfaiss.GenHammingComputer16_a1_set
__swig_getmethods__["a1"] = _swigfaiss.GenHammingComputer16_a1_get
if _newclass:a1 = _swig_property(_swigfaiss.GenHammingComputer16_a1_get, _swigfaiss.GenHammingComputer16_a1_set)
def __init__(self, *args):
this = _swigfaiss.new_GenHammingComputer16(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.GenHammingComputer16_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_GenHammingComputer16
__del__ = lambda self : None;
GenHammingComputer16_swigregister = _swigfaiss.GenHammingComputer16_swigregister
GenHammingComputer16_swigregister(GenHammingComputer16)
class GenHammingComputer32(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, GenHammingComputer32, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, GenHammingComputer32, name)
__repr__ = _swig_repr
__swig_setmethods__["a0"] = _swigfaiss.GenHammingComputer32_a0_set
__swig_getmethods__["a0"] = _swigfaiss.GenHammingComputer32_a0_get
if _newclass:a0 = _swig_property(_swigfaiss.GenHammingComputer32_a0_get, _swigfaiss.GenHammingComputer32_a0_set)
__swig_setmethods__["a1"] = _swigfaiss.GenHammingComputer32_a1_set
__swig_getmethods__["a1"] = _swigfaiss.GenHammingComputer32_a1_get
if _newclass:a1 = _swig_property(_swigfaiss.GenHammingComputer32_a1_get, _swigfaiss.GenHammingComputer32_a1_set)
__swig_setmethods__["a2"] = _swigfaiss.GenHammingComputer32_a2_set
__swig_getmethods__["a2"] = _swigfaiss.GenHammingComputer32_a2_get
if _newclass:a2 = _swig_property(_swigfaiss.GenHammingComputer32_a2_get, _swigfaiss.GenHammingComputer32_a2_set)
__swig_setmethods__["a3"] = _swigfaiss.GenHammingComputer32_a3_set
__swig_getmethods__["a3"] = _swigfaiss.GenHammingComputer32_a3_get
if _newclass:a3 = _swig_property(_swigfaiss.GenHammingComputer32_a3_get, _swigfaiss.GenHammingComputer32_a3_set)
def __init__(self, *args):
this = _swigfaiss.new_GenHammingComputer32(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.GenHammingComputer32_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_GenHammingComputer32
__del__ = lambda self : None;
GenHammingComputer32_swigregister = _swigfaiss.GenHammingComputer32_swigregister
GenHammingComputer32_swigregister(GenHammingComputer32)
class GenHammingComputerM8(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, GenHammingComputerM8, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, GenHammingComputerM8, name)
__repr__ = _swig_repr
__swig_setmethods__["a"] = _swigfaiss.GenHammingComputerM8_a_set
__swig_getmethods__["a"] = _swigfaiss.GenHammingComputerM8_a_get
if _newclass:a = _swig_property(_swigfaiss.GenHammingComputerM8_a_get, _swigfaiss.GenHammingComputerM8_a_set)
__swig_setmethods__["n"] = _swigfaiss.GenHammingComputerM8_n_set
__swig_getmethods__["n"] = _swigfaiss.GenHammingComputerM8_n_get
if _newclass:n = _swig_property(_swigfaiss.GenHammingComputerM8_n_get, _swigfaiss.GenHammingComputerM8_n_set)
def __init__(self, *args):
this = _swigfaiss.new_GenHammingComputerM8(*args)
try: self.this.append(this)
except: self.this = this
def hamming(self, *args): return _swigfaiss.GenHammingComputerM8_hamming(self, *args)
__swig_destroy__ = _swigfaiss.delete_GenHammingComputerM8
__del__ = lambda self : None;
GenHammingComputerM8_swigregister = _swigfaiss.GenHammingComputerM8_swigregister
GenHammingComputerM8_swigregister(GenHammingComputerM8)
def generalized_hammings_knn(*args):
return _swigfaiss.generalized_hammings_knn(*args)
generalized_hammings_knn = _swigfaiss.generalized_hammings_knn
def get_num_gpus():
return _swigfaiss.get_num_gpus()
get_num_gpus = _swigfaiss.get_num_gpus
def getmillisecs():
return _swigfaiss.getmillisecs()
getmillisecs = _swigfaiss.getmillisecs
def get_mem_usage_kb():
return _swigfaiss.get_mem_usage_kb()
get_mem_usage_kb = _swigfaiss.get_mem_usage_kb
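# --- Illustrative sketch (not part of the generated wrapper) --------------
# getmillisecs() and get_mem_usage_kb() bound just above take no arguments,
# so they can be called directly on this module.  The helper below is a
# hypothetical example of a common timing/memory pattern; nothing calls it.
def _sketch_timing():
    t0 = getmillisecs()          # wall-clock time in milliseconds (float)
    # ... code to be timed goes here ...
    elapsed_ms = getmillisecs() - t0
    rss_kb = get_mem_usage_kb()  # resident memory of the process, in kB
    return elapsed_ms, rss_kb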
class RandomGenerator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, RandomGenerator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, RandomGenerator, name)
__repr__ = _swig_repr
def rand_long(self): return _swigfaiss.RandomGenerator_rand_long(self)
def rand_int(self, *args): return _swigfaiss.RandomGenerator_rand_int(self, *args)
def rand_float(self): return _swigfaiss.RandomGenerator_rand_float(self)
def rand_double(self): return _swigfaiss.RandomGenerator_rand_double(self)
def __init__(self, *args):
this = _swigfaiss.new_RandomGenerator(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_RandomGenerator
__del__ = lambda self : None;
RandomGenerator_swigregister = _swigfaiss.RandomGenerator_swigregister
RandomGenerator_swigregister(RandomGenerator)
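# --- Illustrative sketch (not part of the generated wrapper) --------------
# RandomGenerator wraps faiss' seeded RNG.  A minimal sketch, assuming the
# usual constructor-takes-a-seed signature; the helper is never called.
def _sketch_random_generator():
    rng = RandomGenerator(1234)   # deterministic for a fixed seed
    i = rng.rand_int(100)         # integer drawn from [0, 100)
    f = rng.rand_float()          # float drawn from [0, 1)
    return i, f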
def float_rand(*args):
return _swigfaiss.float_rand(*args)
float_rand = _swigfaiss.float_rand
def float_randn(*args):
return _swigfaiss.float_randn(*args)
float_randn = _swigfaiss.float_randn
def long_rand(*args):
return _swigfaiss.long_rand(*args)
long_rand = _swigfaiss.long_rand
def byte_rand(*args):
return _swigfaiss.byte_rand(*args)
byte_rand = _swigfaiss.byte_rand
def rand_perm(*args):
return _swigfaiss.rand_perm(*args)
rand_perm = _swigfaiss.rand_perm
def fvec_L2sqr(*args):
return _swigfaiss.fvec_L2sqr(*args)
fvec_L2sqr = _swigfaiss.fvec_L2sqr
def fvec_inner_product(*args):
return _swigfaiss.fvec_inner_product(*args)
fvec_inner_product = _swigfaiss.fvec_inner_product
def imbalance_factor(*args):
return _swigfaiss.imbalance_factor(*args)
imbalance_factor = _swigfaiss.imbalance_factor
def pairwise_L2sqr(*args):
return _swigfaiss.pairwise_L2sqr(*args)
pairwise_L2sqr = _swigfaiss.pairwise_L2sqr
def fvec_inner_products_ny(*args):
return _swigfaiss.fvec_inner_products_ny(*args)
fvec_inner_products_ny = _swigfaiss.fvec_inner_products_ny
def fvec_L2sqr_ny(*args):
return _swigfaiss.fvec_L2sqr_ny(*args)
fvec_L2sqr_ny = _swigfaiss.fvec_L2sqr_ny
def fvec_norm_L2sqr(*args):
return _swigfaiss.fvec_norm_L2sqr(*args)
fvec_norm_L2sqr = _swigfaiss.fvec_norm_L2sqr
def fvec_norms_L2(*args):
return _swigfaiss.fvec_norms_L2(*args)
fvec_norms_L2 = _swigfaiss.fvec_norms_L2
def fvec_norms_L2sqr(*args):
return _swigfaiss.fvec_norms_L2sqr(*args)
fvec_norms_L2sqr = _swigfaiss.fvec_norms_L2sqr
def fvec_renorm_L2(*args):
return _swigfaiss.fvec_renorm_L2(*args)
fvec_renorm_L2 = _swigfaiss.fvec_renorm_L2
def inner_product_to_L2sqr(*args):
return _swigfaiss.inner_product_to_L2sqr(*args)
inner_product_to_L2sqr = _swigfaiss.inner_product_to_L2sqr
def fvec_inner_products_by_idx(*args):
return _swigfaiss.fvec_inner_products_by_idx(*args)
fvec_inner_products_by_idx = _swigfaiss.fvec_inner_products_by_idx
def fvec_L2sqr_by_idx(*args):
return _swigfaiss.fvec_L2sqr_by_idx(*args)
fvec_L2sqr_by_idx = _swigfaiss.fvec_L2sqr_by_idx
def knn_inner_product(*args):
return _swigfaiss.knn_inner_product(*args)
knn_inner_product = _swigfaiss.knn_inner_product
def knn_L2sqr(*args):
return _swigfaiss.knn_L2sqr(*args)
knn_L2sqr = _swigfaiss.knn_L2sqr
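# --- Illustrative sketch (not part of the generated wrapper) --------------
# The knn_* functions above are thin bindings over raw C pointers: the
# caller passes float* arrays plus their sizes and a heap-array structure
# that receives the results.  The sketch below shows that pattern, assuming
# numpy and that the top-level `faiss` module exposes swig_ptr() and
# float_maxheap_array_t as in standard builds; it is never called here.
def _sketch_knn_L2sqr():
    import numpy as np
    import faiss  # imported lazily; the wrapper module, not _swigfaiss
    d, nb, nq, k = 16, 100, 5, 4
    xb = np.random.rand(nb, d).astype('float32')   # database vectors
    xq = np.random.rand(nq, d).astype('float32')   # query vectors
    D = np.empty((nq, k), dtype='float32')         # output distances
    I = np.empty((nq, k), dtype='int64')           # output ids
    heaps = faiss.float_maxheap_array_t()
    heaps.k, heaps.nh = k, nq
    heaps.val = faiss.swig_ptr(D)
    heaps.ids = faiss.swig_ptr(I)
    faiss.knn_L2sqr(faiss.swig_ptr(xq), faiss.swig_ptr(xb), d, nq, nb, heaps)
    return D, I   # squared L2 distances and ids of the k nearest neighbors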
def knn_L2sqr_base_shift(*args):
return _swigfaiss.knn_L2sqr_base_shift(*args)
knn_L2sqr_base_shift = _swigfaiss.knn_L2sqr_base_shift
def knn_inner_products_by_idx(*args):
return _swigfaiss.knn_inner_products_by_idx(*args)
knn_inner_products_by_idx = _swigfaiss.knn_inner_products_by_idx
def knn_L2sqr_by_idx(*args):
return _swigfaiss.knn_L2sqr_by_idx(*args)
knn_L2sqr_by_idx = _swigfaiss.knn_L2sqr_by_idx
def range_search_L2sqr(*args):
return _swigfaiss.range_search_L2sqr(*args)
range_search_L2sqr = _swigfaiss.range_search_L2sqr
def range_search_inner_product(*args):
return _swigfaiss.range_search_inner_product(*args)
range_search_inner_product = _swigfaiss.range_search_inner_product
def fvec_madd(*args):
return _swigfaiss.fvec_madd(*args)
fvec_madd = _swigfaiss.fvec_madd
def fvec_madd_and_argmin(*args):
return _swigfaiss.fvec_madd_and_argmin(*args)
fvec_madd_and_argmin = _swigfaiss.fvec_madd_and_argmin
def reflection(*args):
return _swigfaiss.reflection(*args)
reflection = _swigfaiss.reflection
def km_update_centroids(*args):
return _swigfaiss.km_update_centroids(*args)
km_update_centroids = _swigfaiss.km_update_centroids
def matrix_qr(*args):
return _swigfaiss.matrix_qr(*args)
matrix_qr = _swigfaiss.matrix_qr
def ranklist_handle_ties(*args):
return _swigfaiss.ranklist_handle_ties(*args)
ranklist_handle_ties = _swigfaiss.ranklist_handle_ties
def ranklist_intersection_size(*args):
return _swigfaiss.ranklist_intersection_size(*args)
ranklist_intersection_size = _swigfaiss.ranklist_intersection_size
def merge_result_table_with(*args):
return _swigfaiss.merge_result_table_with(*args)
merge_result_table_with = _swigfaiss.merge_result_table_with
def fvec_argsort(*args):
return _swigfaiss.fvec_argsort(*args)
fvec_argsort = _swigfaiss.fvec_argsort
def fvec_argsort_parallel(*args):
return _swigfaiss.fvec_argsort_parallel(*args)
fvec_argsort_parallel = _swigfaiss.fvec_argsort_parallel
def ivec_hist(*args):
return _swigfaiss.ivec_hist(*args)
ivec_hist = _swigfaiss.ivec_hist
def bincode_hist(*args):
return _swigfaiss.bincode_hist(*args)
bincode_hist = _swigfaiss.bincode_hist
def ivec_checksum(*args):
return _swigfaiss.ivec_checksum(*args)
ivec_checksum = _swigfaiss.ivec_checksum
def fvecs_maybe_subsample(*args):
return _swigfaiss.fvecs_maybe_subsample(*args)
fvecs_maybe_subsample = _swigfaiss.fvecs_maybe_subsample
METRIC_INNER_PRODUCT = _swigfaiss.METRIC_INNER_PRODUCT
METRIC_L2 = _swigfaiss.METRIC_L2
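# Index (below) is the abstract base class of all faiss indexes: `d` is the
# vector dimension, `ntotal` the number of stored vectors, `is_trained`
# whether train() has been called where needed, and `metric_type` one of the
# METRIC_* constants above.  Concrete subclasses implement add()/search().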
class Index(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Index, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Index, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_setmethods__["d"] = _swigfaiss.Index_d_set
__swig_getmethods__["d"] = _swigfaiss.Index_d_get
if _newclass:d = _swig_property(_swigfaiss.Index_d_get, _swigfaiss.Index_d_set)
__swig_setmethods__["ntotal"] = _swigfaiss.Index_ntotal_set
__swig_getmethods__["ntotal"] = _swigfaiss.Index_ntotal_get
if _newclass:ntotal = _swig_property(_swigfaiss.Index_ntotal_get, _swigfaiss.Index_ntotal_set)
__swig_setmethods__["verbose"] = _swigfaiss.Index_verbose_set
__swig_getmethods__["verbose"] = _swigfaiss.Index_verbose_get
if _newclass:verbose = _swig_property(_swigfaiss.Index_verbose_get, _swigfaiss.Index_verbose_set)
__swig_setmethods__["is_trained"] = _swigfaiss.Index_is_trained_set
__swig_getmethods__["is_trained"] = _swigfaiss.Index_is_trained_get
if _newclass:is_trained = _swig_property(_swigfaiss.Index_is_trained_get, _swigfaiss.Index_is_trained_set)
__swig_setmethods__["metric_type"] = _swigfaiss.Index_metric_type_set
__swig_getmethods__["metric_type"] = _swigfaiss.Index_metric_type_get
if _newclass:metric_type = _swig_property(_swigfaiss.Index_metric_type_get, _swigfaiss.Index_metric_type_set)
__swig_destroy__ = _swigfaiss.delete_Index
__del__ = lambda self : None;
def train(self, *args): return _swigfaiss.Index_train(self, *args)
def add(self, *args): return _swigfaiss.Index_add(self, *args)
def add_with_ids(self, *args): return _swigfaiss.Index_add_with_ids(self, *args)
def search(self, *args): return _swigfaiss.Index_search(self, *args)
def range_search(self, *args): return _swigfaiss.Index_range_search(self, *args)
def assign(self, *args): return _swigfaiss.Index_assign(self, *args)
def reset(self): return _swigfaiss.Index_reset(self)
def remove_ids(self, *args): return _swigfaiss.Index_remove_ids(self, *args)
def reconstruct(self, *args): return _swigfaiss.Index_reconstruct(self, *args)
def reconstruct_n(self, *args): return _swigfaiss.Index_reconstruct_n(self, *args)
def search_and_reconstruct(self, *args): return _swigfaiss.Index_search_and_reconstruct(self, *args)
def compute_residual(self, *args): return _swigfaiss.Index_compute_residual(self, *args)
def display(self): return _swigfaiss.Index_display(self)
Index_swigregister = _swigfaiss.Index_swigregister
Index_swigregister(Index)
cvar = _swigfaiss.cvar
class ClusteringParameters(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ClusteringParameters, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ClusteringParameters, name)
__repr__ = _swig_repr
__swig_setmethods__["niter"] = _swigfaiss.ClusteringParameters_niter_set
__swig_getmethods__["niter"] = _swigfaiss.ClusteringParameters_niter_get
if _newclass:niter = _swig_property(_swigfaiss.ClusteringParameters_niter_get, _swigfaiss.ClusteringParameters_niter_set)
__swig_setmethods__["nredo"] = _swigfaiss.ClusteringParameters_nredo_set
__swig_getmethods__["nredo"] = _swigfaiss.ClusteringParameters_nredo_get
if _newclass:nredo = _swig_property(_swigfaiss.ClusteringParameters_nredo_get, _swigfaiss.ClusteringParameters_nredo_set)
__swig_setmethods__["verbose"] = _swigfaiss.ClusteringParameters_verbose_set
__swig_getmethods__["verbose"] = _swigfaiss.ClusteringParameters_verbose_get
if _newclass:verbose = _swig_property(_swigfaiss.ClusteringParameters_verbose_get, _swigfaiss.ClusteringParameters_verbose_set)
__swig_setmethods__["spherical"] = _swigfaiss.ClusteringParameters_spherical_set
__swig_getmethods__["spherical"] = _swigfaiss.ClusteringParameters_spherical_get
if _newclass:spherical = _swig_property(_swigfaiss.ClusteringParameters_spherical_get, _swigfaiss.ClusteringParameters_spherical_set)
__swig_setmethods__["update_index"] = _swigfaiss.ClusteringParameters_update_index_set
__swig_getmethods__["update_index"] = _swigfaiss.ClusteringParameters_update_index_get
if _newclass:update_index = _swig_property(_swigfaiss.ClusteringParameters_update_index_get, _swigfaiss.ClusteringParameters_update_index_set)
__swig_setmethods__["frozen_centroids"] = _swigfaiss.ClusteringParameters_frozen_centroids_set
__swig_getmethods__["frozen_centroids"] = _swigfaiss.ClusteringParameters_frozen_centroids_get
if _newclass:frozen_centroids = _swig_property(_swigfaiss.ClusteringParameters_frozen_centroids_get, _swigfaiss.ClusteringParameters_frozen_centroids_set)
__swig_setmethods__["min_points_per_centroid"] = _swigfaiss.ClusteringParameters_min_points_per_centroid_set
__swig_getmethods__["min_points_per_centroid"] = _swigfaiss.ClusteringParameters_min_points_per_centroid_get
if _newclass:min_points_per_centroid = _swig_property(_swigfaiss.ClusteringParameters_min_points_per_centroid_get, _swigfaiss.ClusteringParameters_min_points_per_centroid_set)
__swig_setmethods__["max_points_per_centroid"] = _swigfaiss.ClusteringParameters_max_points_per_centroid_set
__swig_getmethods__["max_points_per_centroid"] = _swigfaiss.ClusteringParameters_max_points_per_centroid_get
if _newclass:max_points_per_centroid = _swig_property(_swigfaiss.ClusteringParameters_max_points_per_centroid_get, _swigfaiss.ClusteringParameters_max_points_per_centroid_set)
__swig_setmethods__["seed"] = _swigfaiss.ClusteringParameters_seed_set
__swig_getmethods__["seed"] = _swigfaiss.ClusteringParameters_seed_get
if _newclass:seed = _swig_property(_swigfaiss.ClusteringParameters_seed_get, _swigfaiss.ClusteringParameters_seed_set)
def __init__(self):
this = _swigfaiss.new_ClusteringParameters()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_ClusteringParameters
__del__ = lambda self : None;
ClusteringParameters_swigregister = _swigfaiss.ClusteringParameters_swigregister
ClusteringParameters_swigregister(ClusteringParameters)
class Clustering(ClusteringParameters):
__swig_setmethods__ = {}
for _s in [ClusteringParameters]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, Clustering, name, value)
__swig_getmethods__ = {}
for _s in [ClusteringParameters]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, Clustering, name)
__repr__ = _swig_repr
__swig_setmethods__["d"] = _swigfaiss.Clustering_d_set
__swig_getmethods__["d"] = _swigfaiss.Clustering_d_get
if _newclass:d = _swig_property(_swigfaiss.Clustering_d_get, _swigfaiss.Clustering_d_set)
__swig_setmethods__["k"] = _swigfaiss.Clustering_k_set
__swig_getmethods__["k"] = _swigfaiss.Clustering_k_get
if _newclass:k = _swig_property(_swigfaiss.Clustering_k_get, _swigfaiss.Clustering_k_set)
__swig_setmethods__["centroids"] = _swigfaiss.Clustering_centroids_set
__swig_getmethods__["centroids"] = _swigfaiss.Clustering_centroids_get
if _newclass:centroids = _swig_property(_swigfaiss.Clustering_centroids_get, _swigfaiss.Clustering_centroids_set)
__swig_setmethods__["obj"] = _swigfaiss.Clustering_obj_set
__swig_getmethods__["obj"] = _swigfaiss.Clustering_obj_get
if _newclass:obj = _swig_property(_swigfaiss.Clustering_obj_get, _swigfaiss.Clustering_obj_set)
def __init__(self, *args):
this = _swigfaiss.new_Clustering(*args)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.Clustering_train(self, *args)
__swig_destroy__ = _swigfaiss.delete_Clustering
__del__ = lambda self : None;
Clustering_swigregister = _swigfaiss.Clustering_swigregister
Clustering_swigregister(Clustering)
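# --- Illustrative sketch (not part of the generated wrapper) --------------
# A minimal k-means run with Clustering, assuming numpy and the numpy-aware
# method replacements installed by the top-level `faiss` module (the raw
# train() bound above takes C pointers).  vector_float_to_array is assumed
# to be the standard faiss helper; the function is never called here.
def _sketch_clustering():
    import numpy as np
    import faiss
    d, k = 32, 20
    x = np.random.rand(1000, d).astype('float32')
    clus = faiss.Clustering(d, k)
    clus.niter = 10                  # see ClusteringParameters above
    index = faiss.IndexFlatL2(d)     # assignment index for k-means
    clus.train(x, index)
    return faiss.vector_float_to_array(clus.centroids).reshape(k, d)
# The kmeans_clustering() binding just below runs the same loop in a single
# call for callers that do not need to tune ClusteringParameters.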
def kmeans_clustering(*args):
return _swigfaiss.kmeans_clustering(*args)
kmeans_clustering = _swigfaiss.kmeans_clustering
class ProductQuantizer(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ProductQuantizer, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ProductQuantizer, name)
__repr__ = _swig_repr
__swig_setmethods__["d"] = _swigfaiss.ProductQuantizer_d_set
__swig_getmethods__["d"] = _swigfaiss.ProductQuantizer_d_get
if _newclass:d = _swig_property(_swigfaiss.ProductQuantizer_d_get, _swigfaiss.ProductQuantizer_d_set)
__swig_setmethods__["M"] = _swigfaiss.ProductQuantizer_M_set
__swig_getmethods__["M"] = _swigfaiss.ProductQuantizer_M_get
if _newclass:M = _swig_property(_swigfaiss.ProductQuantizer_M_get, _swigfaiss.ProductQuantizer_M_set)
__swig_setmethods__["nbits"] = _swigfaiss.ProductQuantizer_nbits_set
__swig_getmethods__["nbits"] = _swigfaiss.ProductQuantizer_nbits_get
if _newclass:nbits = _swig_property(_swigfaiss.ProductQuantizer_nbits_get, _swigfaiss.ProductQuantizer_nbits_set)
__swig_setmethods__["dsub"] = _swigfaiss.ProductQuantizer_dsub_set
__swig_getmethods__["dsub"] = _swigfaiss.ProductQuantizer_dsub_get
if _newclass:dsub = _swig_property(_swigfaiss.ProductQuantizer_dsub_get, _swigfaiss.ProductQuantizer_dsub_set)
__swig_setmethods__["byte_per_idx"] = _swigfaiss.ProductQuantizer_byte_per_idx_set
__swig_getmethods__["byte_per_idx"] = _swigfaiss.ProductQuantizer_byte_per_idx_get
if _newclass:byte_per_idx = _swig_property(_swigfaiss.ProductQuantizer_byte_per_idx_get, _swigfaiss.ProductQuantizer_byte_per_idx_set)
__swig_setmethods__["code_size"] = _swigfaiss.ProductQuantizer_code_size_set
__swig_getmethods__["code_size"] = _swigfaiss.ProductQuantizer_code_size_get
if _newclass:code_size = _swig_property(_swigfaiss.ProductQuantizer_code_size_get, _swigfaiss.ProductQuantizer_code_size_set)
__swig_setmethods__["ksub"] = _swigfaiss.ProductQuantizer_ksub_set
__swig_getmethods__["ksub"] = _swigfaiss.ProductQuantizer_ksub_get
if _newclass:ksub = _swig_property(_swigfaiss.ProductQuantizer_ksub_get, _swigfaiss.ProductQuantizer_ksub_set)
__swig_setmethods__["verbose"] = _swigfaiss.ProductQuantizer_verbose_set
__swig_getmethods__["verbose"] = _swigfaiss.ProductQuantizer_verbose_get
if _newclass:verbose = _swig_property(_swigfaiss.ProductQuantizer_verbose_get, _swigfaiss.ProductQuantizer_verbose_set)
Train_default = _swigfaiss.ProductQuantizer_Train_default
Train_hot_start = _swigfaiss.ProductQuantizer_Train_hot_start
Train_shared = _swigfaiss.ProductQuantizer_Train_shared
Train_hypercube = _swigfaiss.ProductQuantizer_Train_hypercube
Train_hypercube_pca = _swigfaiss.ProductQuantizer_Train_hypercube_pca
__swig_setmethods__["train_type"] = _swigfaiss.ProductQuantizer_train_type_set
__swig_getmethods__["train_type"] = _swigfaiss.ProductQuantizer_train_type_get
if _newclass:train_type = _swig_property(_swigfaiss.ProductQuantizer_train_type_get, _swigfaiss.ProductQuantizer_train_type_set)
__swig_setmethods__["cp"] = _swigfaiss.ProductQuantizer_cp_set
__swig_getmethods__["cp"] = _swigfaiss.ProductQuantizer_cp_get
if _newclass:cp = _swig_property(_swigfaiss.ProductQuantizer_cp_get, _swigfaiss.ProductQuantizer_cp_set)
__swig_setmethods__["centroids"] = _swigfaiss.ProductQuantizer_centroids_set
__swig_getmethods__["centroids"] = _swigfaiss.ProductQuantizer_centroids_get
if _newclass:centroids = _swig_property(_swigfaiss.ProductQuantizer_centroids_get, _swigfaiss.ProductQuantizer_centroids_set)
def get_centroids(self, *args): return _swigfaiss.ProductQuantizer_get_centroids(self, *args)
def train(self, *args): return _swigfaiss.ProductQuantizer_train(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_ProductQuantizer(*args)
try: self.this.append(this)
except: self.this = this
def set_derived_values(self): return _swigfaiss.ProductQuantizer_set_derived_values(self)
def set_params(self, *args): return _swigfaiss.ProductQuantizer_set_params(self, *args)
def compute_code(self, *args): return _swigfaiss.ProductQuantizer_compute_code(self, *args)
def compute_codes(self, *args): return _swigfaiss.ProductQuantizer_compute_codes(self, *args)
def decode(self, *args): return _swigfaiss.ProductQuantizer_decode(self, *args)
def compute_code_from_distance_table(self, *args): return _swigfaiss.ProductQuantizer_compute_code_from_distance_table(self, *args)
def compute_distance_table(self, *args): return _swigfaiss.ProductQuantizer_compute_distance_table(self, *args)
def compute_inner_prod_table(self, *args): return _swigfaiss.ProductQuantizer_compute_inner_prod_table(self, *args)
def compute_distance_tables(self, *args): return _swigfaiss.ProductQuantizer_compute_distance_tables(self, *args)
def compute_inner_prod_tables(self, *args): return _swigfaiss.ProductQuantizer_compute_inner_prod_tables(self, *args)
def search(self, *args): return _swigfaiss.ProductQuantizer_search(self, *args)
def search_ip(self, *args): return _swigfaiss.ProductQuantizer_search_ip(self, *args)
__swig_setmethods__["sdc_table"] = _swigfaiss.ProductQuantizer_sdc_table_set
__swig_getmethods__["sdc_table"] = _swigfaiss.ProductQuantizer_sdc_table_get
if _newclass:sdc_table = _swig_property(_swigfaiss.ProductQuantizer_sdc_table_get, _swigfaiss.ProductQuantizer_sdc_table_set)
def compute_sdc_table(self): return _swigfaiss.ProductQuantizer_compute_sdc_table(self)
def search_sdc(self, *args): return _swigfaiss.ProductQuantizer_search_sdc(self, *args)
__swig_destroy__ = _swigfaiss.delete_ProductQuantizer
__del__ = lambda self : None;
ProductQuantizer_swigregister = _swigfaiss.ProductQuantizer_swigregister
ProductQuantizer_swigregister(ProductQuantizer)
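# --- Illustrative sketch (not part of the generated wrapper) --------------
# Encoding and decoding with a ProductQuantizer: d must be a multiple of M,
# and each sub-vector is quantized to nbits bits.  Assumes numpy and the
# numpy-aware replacements installed by the top-level `faiss` module; the
# helper is never called here.
def _sketch_product_quantizer():
    import numpy as np
    import faiss
    d, M, nbits = 32, 4, 8
    pq = faiss.ProductQuantizer(d, M, nbits)
    x = np.random.rand(2000, d).astype('float32')
    pq.train(x)
    codes = pq.compute_codes(x)   # one M-byte code per vector for nbits=8
    x_rec = pq.decode(codes)      # approximate reconstruction of x
    return ((x - x_rec) ** 2).sum(axis=1)   # per-vector quantization error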
class VectorTransform(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, VectorTransform, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, VectorTransform, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_setmethods__["d_in"] = _swigfaiss.VectorTransform_d_in_set
__swig_getmethods__["d_in"] = _swigfaiss.VectorTransform_d_in_get
if _newclass:d_in = _swig_property(_swigfaiss.VectorTransform_d_in_get, _swigfaiss.VectorTransform_d_in_set)
__swig_setmethods__["d_out"] = _swigfaiss.VectorTransform_d_out_set
__swig_getmethods__["d_out"] = _swigfaiss.VectorTransform_d_out_get
if _newclass:d_out = _swig_property(_swigfaiss.VectorTransform_d_out_get, _swigfaiss.VectorTransform_d_out_set)
__swig_setmethods__["is_trained"] = _swigfaiss.VectorTransform_is_trained_set
__swig_getmethods__["is_trained"] = _swigfaiss.VectorTransform_is_trained_get
if _newclass:is_trained = _swig_property(_swigfaiss.VectorTransform_is_trained_get, _swigfaiss.VectorTransform_is_trained_set)
def train(self, *args): return _swigfaiss.VectorTransform_train(self, *args)
def apply(self, *args): return _swigfaiss.VectorTransform_apply(self, *args)
def apply_noalloc(self, *args): return _swigfaiss.VectorTransform_apply_noalloc(self, *args)
def reverse_transform(self, *args): return _swigfaiss.VectorTransform_reverse_transform(self, *args)
__swig_destroy__ = _swigfaiss.delete_VectorTransform
__del__ = lambda self : None;
VectorTransform_swigregister = _swigfaiss.VectorTransform_swigregister
VectorTransform_swigregister(VectorTransform)
class LinearTransform(VectorTransform):
__swig_setmethods__ = {}
for _s in [VectorTransform]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, LinearTransform, name, value)
__swig_getmethods__ = {}
for _s in [VectorTransform]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, LinearTransform, name)
__repr__ = _swig_repr
__swig_setmethods__["have_bias"] = _swigfaiss.LinearTransform_have_bias_set
__swig_getmethods__["have_bias"] = _swigfaiss.LinearTransform_have_bias_get
if _newclass:have_bias = _swig_property(_swigfaiss.LinearTransform_have_bias_get, _swigfaiss.LinearTransform_have_bias_set)
__swig_setmethods__["is_orthonormal"] = _swigfaiss.LinearTransform_is_orthonormal_set
__swig_getmethods__["is_orthonormal"] = _swigfaiss.LinearTransform_is_orthonormal_get
if _newclass:is_orthonormal = _swig_property(_swigfaiss.LinearTransform_is_orthonormal_get, _swigfaiss.LinearTransform_is_orthonormal_set)
__swig_setmethods__["A"] = _swigfaiss.LinearTransform_A_set
__swig_getmethods__["A"] = _swigfaiss.LinearTransform_A_get
if _newclass:A = _swig_property(_swigfaiss.LinearTransform_A_get, _swigfaiss.LinearTransform_A_set)
__swig_setmethods__["b"] = _swigfaiss.LinearTransform_b_set
__swig_getmethods__["b"] = _swigfaiss.LinearTransform_b_get
if _newclass:b = _swig_property(_swigfaiss.LinearTransform_b_get, _swigfaiss.LinearTransform_b_set)
def __init__(self, d_in=0, d_out=0, have_bias=False):
this = _swigfaiss.new_LinearTransform(d_in, d_out, have_bias)
try: self.this.append(this)
except: self.this = this
def apply_noalloc(self, *args): return _swigfaiss.LinearTransform_apply_noalloc(self, *args)
def transform_transpose(self, *args): return _swigfaiss.LinearTransform_transform_transpose(self, *args)
def reverse_transform(self, *args): return _swigfaiss.LinearTransform_reverse_transform(self, *args)
def set_is_orthonormal(self): return _swigfaiss.LinearTransform_set_is_orthonormal(self)
__swig_setmethods__["verbose"] = _swigfaiss.LinearTransform_verbose_set
__swig_getmethods__["verbose"] = _swigfaiss.LinearTransform_verbose_get
if _newclass:verbose = _swig_property(_swigfaiss.LinearTransform_verbose_get, _swigfaiss.LinearTransform_verbose_set)
__swig_destroy__ = _swigfaiss.delete_LinearTransform
__del__ = lambda self : None;
LinearTransform_swigregister = _swigfaiss.LinearTransform_swigregister
LinearTransform_swigregister(LinearTransform)
class RandomRotationMatrix(LinearTransform):
__swig_setmethods__ = {}
for _s in [LinearTransform]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, RandomRotationMatrix, name, value)
__swig_getmethods__ = {}
for _s in [LinearTransform]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, RandomRotationMatrix, name)
__repr__ = _swig_repr
def init(self, *args): return _swigfaiss.RandomRotationMatrix_init(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_RandomRotationMatrix(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_RandomRotationMatrix
__del__ = lambda self : None;
RandomRotationMatrix_swigregister = _swigfaiss.RandomRotationMatrix_swigregister
RandomRotationMatrix_swigregister(RandomRotationMatrix)
class PCAMatrix(LinearTransform):
__swig_setmethods__ = {}
for _s in [LinearTransform]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, PCAMatrix, name, value)
__swig_getmethods__ = {}
for _s in [LinearTransform]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, PCAMatrix, name)
__repr__ = _swig_repr
__swig_setmethods__["eigen_power"] = _swigfaiss.PCAMatrix_eigen_power_set
__swig_getmethods__["eigen_power"] = _swigfaiss.PCAMatrix_eigen_power_get
if _newclass:eigen_power = _swig_property(_swigfaiss.PCAMatrix_eigen_power_get, _swigfaiss.PCAMatrix_eigen_power_set)
__swig_setmethods__["random_rotation"] = _swigfaiss.PCAMatrix_random_rotation_set
__swig_getmethods__["random_rotation"] = _swigfaiss.PCAMatrix_random_rotation_get
if _newclass:random_rotation = _swig_property(_swigfaiss.PCAMatrix_random_rotation_get, _swigfaiss.PCAMatrix_random_rotation_set)
__swig_setmethods__["max_points_per_d"] = _swigfaiss.PCAMatrix_max_points_per_d_set
__swig_getmethods__["max_points_per_d"] = _swigfaiss.PCAMatrix_max_points_per_d_get
if _newclass:max_points_per_d = _swig_property(_swigfaiss.PCAMatrix_max_points_per_d_get, _swigfaiss.PCAMatrix_max_points_per_d_set)
__swig_setmethods__["balanced_bins"] = _swigfaiss.PCAMatrix_balanced_bins_set
__swig_getmethods__["balanced_bins"] = _swigfaiss.PCAMatrix_balanced_bins_get
if _newclass:balanced_bins = _swig_property(_swigfaiss.PCAMatrix_balanced_bins_get, _swigfaiss.PCAMatrix_balanced_bins_set)
__swig_setmethods__["mean"] = _swigfaiss.PCAMatrix_mean_set
__swig_getmethods__["mean"] = _swigfaiss.PCAMatrix_mean_get
if _newclass:mean = _swig_property(_swigfaiss.PCAMatrix_mean_get, _swigfaiss.PCAMatrix_mean_set)
__swig_setmethods__["eigenvalues"] = _swigfaiss.PCAMatrix_eigenvalues_set
__swig_getmethods__["eigenvalues"] = _swigfaiss.PCAMatrix_eigenvalues_get
if _newclass:eigenvalues = _swig_property(_swigfaiss.PCAMatrix_eigenvalues_get, _swigfaiss.PCAMatrix_eigenvalues_set)
__swig_setmethods__["PCAMat"] = _swigfaiss.PCAMatrix_PCAMat_set
__swig_getmethods__["PCAMat"] = _swigfaiss.PCAMatrix_PCAMat_get
if _newclass:PCAMat = _swig_property(_swigfaiss.PCAMatrix_PCAMat_get, _swigfaiss.PCAMatrix_PCAMat_set)
def __init__(self, d_in=0, d_out=0, eigen_power=0, random_rotation=False):
this = _swigfaiss.new_PCAMatrix(d_in, d_out, eigen_power, random_rotation)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.PCAMatrix_train(self, *args)
def copy_from(self, *args): return _swigfaiss.PCAMatrix_copy_from(self, *args)
def prepare_Ab(self): return _swigfaiss.PCAMatrix_prepare_Ab(self)
__swig_destroy__ = _swigfaiss.delete_PCAMatrix
__del__ = lambda self : None;
PCAMatrix_swigregister = _swigfaiss.PCAMatrix_swigregister
PCAMatrix_swigregister(PCAMatrix)
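# --- Illustrative sketch (not part of the generated wrapper) --------------
# Training a PCA reduction from 64 to 16 dimensions.  apply_py is assumed to
# be the numpy-aware wrapper the top-level `faiss` module adds around the
# raw apply() bound above; the helper is never called here.
def _sketch_pca_matrix():
    import numpy as np
    import faiss
    mat = faiss.PCAMatrix(64, 16)
    x = np.random.rand(1000, 64).astype('float32')
    mat.train(x)
    return mat.apply_py(x)   # shape (1000, 16)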
class OPQMatrix(LinearTransform):
__swig_setmethods__ = {}
for _s in [LinearTransform]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, OPQMatrix, name, value)
__swig_getmethods__ = {}
for _s in [LinearTransform]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, OPQMatrix, name)
__repr__ = _swig_repr
__swig_setmethods__["M"] = _swigfaiss.OPQMatrix_M_set
__swig_getmethods__["M"] = _swigfaiss.OPQMatrix_M_get
if _newclass:M = _swig_property(_swigfaiss.OPQMatrix_M_get, _swigfaiss.OPQMatrix_M_set)
__swig_setmethods__["niter"] = _swigfaiss.OPQMatrix_niter_set
__swig_getmethods__["niter"] = _swigfaiss.OPQMatrix_niter_get
if _newclass:niter = _swig_property(_swigfaiss.OPQMatrix_niter_get, _swigfaiss.OPQMatrix_niter_set)
__swig_setmethods__["niter_pq"] = _swigfaiss.OPQMatrix_niter_pq_set
__swig_getmethods__["niter_pq"] = _swigfaiss.OPQMatrix_niter_pq_get
if _newclass:niter_pq = _swig_property(_swigfaiss.OPQMatrix_niter_pq_get, _swigfaiss.OPQMatrix_niter_pq_set)
__swig_setmethods__["niter_pq_0"] = _swigfaiss.OPQMatrix_niter_pq_0_set
__swig_getmethods__["niter_pq_0"] = _swigfaiss.OPQMatrix_niter_pq_0_get
if _newclass:niter_pq_0 = _swig_property(_swigfaiss.OPQMatrix_niter_pq_0_get, _swigfaiss.OPQMatrix_niter_pq_0_set)
__swig_setmethods__["max_train_points"] = _swigfaiss.OPQMatrix_max_train_points_set
__swig_getmethods__["max_train_points"] = _swigfaiss.OPQMatrix_max_train_points_get
if _newclass:max_train_points = _swig_property(_swigfaiss.OPQMatrix_max_train_points_get, _swigfaiss.OPQMatrix_max_train_points_set)
__swig_setmethods__["verbose"] = _swigfaiss.OPQMatrix_verbose_set
__swig_getmethods__["verbose"] = _swigfaiss.OPQMatrix_verbose_get
if _newclass:verbose = _swig_property(_swigfaiss.OPQMatrix_verbose_get, _swigfaiss.OPQMatrix_verbose_set)
def __init__(self, *args):
this = _swigfaiss.new_OPQMatrix(*args)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.OPQMatrix_train(self, *args)
__swig_destroy__ = _swigfaiss.delete_OPQMatrix
__del__ = lambda self : None;
OPQMatrix_swigregister = _swigfaiss.OPQMatrix_swigregister
OPQMatrix_swigregister(OPQMatrix)
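# --- Illustrative sketch (not part of the generated wrapper) --------------
# OPQMatrix learns a rotation that makes the data easier to encode with an
# M-sub-quantizer PQ.  Same numpy/wrapper assumptions as the PCAMatrix
# sketch above; the helper is never called here.
def _sketch_opq_matrix():
    import numpy as np
    import faiss
    mat = faiss.OPQMatrix(64, 4)    # d=64, M=4 sub-quantizers
    mat.niter = 10                  # fewer iterations for a quick sketch
    x = np.random.rand(2000, 64).astype('float32')
    mat.train(x)
    return mat.apply_py(x)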
class RemapDimensionsTransform(VectorTransform):
__swig_setmethods__ = {}
for _s in [VectorTransform]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, RemapDimensionsTransform, name, value)
__swig_getmethods__ = {}
for _s in [VectorTransform]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, RemapDimensionsTransform, name)
__repr__ = _swig_repr
__swig_setmethods__["map"] = _swigfaiss.RemapDimensionsTransform_map_set
__swig_getmethods__["map"] = _swigfaiss.RemapDimensionsTransform_map_get
if _newclass:map = _swig_property(_swigfaiss.RemapDimensionsTransform_map_get, _swigfaiss.RemapDimensionsTransform_map_set)
def apply_noalloc(self, *args): return _swigfaiss.RemapDimensionsTransform_apply_noalloc(self, *args)
def reverse_transform(self, *args): return _swigfaiss.RemapDimensionsTransform_reverse_transform(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_RemapDimensionsTransform(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_RemapDimensionsTransform
__del__ = lambda self : None;
RemapDimensionsTransform_swigregister = _swigfaiss.RemapDimensionsTransform_swigregister
RemapDimensionsTransform_swigregister(RemapDimensionsTransform)
class NormalizationTransform(VectorTransform):
__swig_setmethods__ = {}
for _s in [VectorTransform]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, NormalizationTransform, name, value)
__swig_getmethods__ = {}
for _s in [VectorTransform]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, NormalizationTransform, name)
__repr__ = _swig_repr
__swig_setmethods__["norm"] = _swigfaiss.NormalizationTransform_norm_set
__swig_getmethods__["norm"] = _swigfaiss.NormalizationTransform_norm_get
if _newclass:norm = _swig_property(_swigfaiss.NormalizationTransform_norm_get, _swigfaiss.NormalizationTransform_norm_set)
def __init__(self, *args):
this = _swigfaiss.new_NormalizationTransform(*args)
try: self.this.append(this)
except: self.this = this
def apply_noalloc(self, *args): return _swigfaiss.NormalizationTransform_apply_noalloc(self, *args)
def reverse_transform(self, *args): return _swigfaiss.NormalizationTransform_reverse_transform(self, *args)
__swig_destroy__ = _swigfaiss.delete_NormalizationTransform
__del__ = lambda self : None;
NormalizationTransform_swigregister = _swigfaiss.NormalizationTransform_swigregister
NormalizationTransform_swigregister(NormalizationTransform)
class IndexPreTransform(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexPreTransform, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexPreTransform, name)
__repr__ = _swig_repr
__swig_setmethods__["chain"] = _swigfaiss.IndexPreTransform_chain_set
__swig_getmethods__["chain"] = _swigfaiss.IndexPreTransform_chain_get
if _newclass:chain = _swig_property(_swigfaiss.IndexPreTransform_chain_get, _swigfaiss.IndexPreTransform_chain_set)
__swig_setmethods__["index"] = _swigfaiss.IndexPreTransform_index_set
__swig_getmethods__["index"] = _swigfaiss.IndexPreTransform_index_get
if _newclass:index = _swig_property(_swigfaiss.IndexPreTransform_index_get, _swigfaiss.IndexPreTransform_index_set)
__swig_setmethods__["own_fields"] = _swigfaiss.IndexPreTransform_own_fields_set
__swig_getmethods__["own_fields"] = _swigfaiss.IndexPreTransform_own_fields_get
if _newclass:own_fields = _swig_property(_swigfaiss.IndexPreTransform_own_fields_get, _swigfaiss.IndexPreTransform_own_fields_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexPreTransform(*args)
try: self.this.append(this)
except: self.this = this
def prepend_transform(self, *args): return _swigfaiss.IndexPreTransform_prepend_transform(self, *args)
def train(self, *args): return _swigfaiss.IndexPreTransform_train(self, *args)
def add(self, *args): return _swigfaiss.IndexPreTransform_add(self, *args)
def add_with_ids(self, *args): return _swigfaiss.IndexPreTransform_add_with_ids(self, *args)
def reset(self): return _swigfaiss.IndexPreTransform_reset(self)
def remove_ids(self, *args): return _swigfaiss.IndexPreTransform_remove_ids(self, *args)
def search(self, *args): return _swigfaiss.IndexPreTransform_search(self, *args)
def reconstruct(self, *args): return _swigfaiss.IndexPreTransform_reconstruct(self, *args)
def reconstruct_n(self, *args): return _swigfaiss.IndexPreTransform_reconstruct_n(self, *args)
def search_and_reconstruct(self, *args): return _swigfaiss.IndexPreTransform_search_and_reconstruct(self, *args)
def apply_chain(self, *args): return _swigfaiss.IndexPreTransform_apply_chain(self, *args)
def reverse_chain(self, *args): return _swigfaiss.IndexPreTransform_reverse_chain(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexPreTransform
__del__ = lambda self : None;
IndexPreTransform_swigregister = _swigfaiss.IndexPreTransform_swigregister
IndexPreTransform_swigregister(IndexPreTransform)
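# --- Illustrative sketch (not part of the generated wrapper) --------------
# IndexPreTransform chains a VectorTransform in front of a sub-index, so
# added vectors and queries are transformed transparently.  Assumes numpy
# and the numpy-aware wrappers from the top-level `faiss` module; the helper
# is never called here.
def _sketch_index_pre_transform():
    import numpy as np
    import faiss
    pca = faiss.PCAMatrix(64, 16)
    sub = faiss.IndexFlatL2(16)            # operates in the reduced space
    index = faiss.IndexPreTransform(pca, sub)
    x = np.random.rand(1000, 64).astype('float32')
    index.train(x)                         # trains the PCA, then the sub-index
    index.add(x)
    return index.search(x[:5], 4)          # distances and ids, shape (5, 4)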
class IndexFlat(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexFlat, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexFlat, name)
__repr__ = _swig_repr
__swig_setmethods__["xb"] = _swigfaiss.IndexFlat_xb_set
__swig_getmethods__["xb"] = _swigfaiss.IndexFlat_xb_get
if _newclass:xb = _swig_property(_swigfaiss.IndexFlat_xb_get, _swigfaiss.IndexFlat_xb_set)
def add(self, *args): return _swigfaiss.IndexFlat_add(self, *args)
def reset(self): return _swigfaiss.IndexFlat_reset(self)
def search(self, *args): return _swigfaiss.IndexFlat_search(self, *args)
def range_search(self, *args): return _swigfaiss.IndexFlat_range_search(self, *args)
def reconstruct(self, *args): return _swigfaiss.IndexFlat_reconstruct(self, *args)
def compute_distance_subset(self, *args): return _swigfaiss.IndexFlat_compute_distance_subset(self, *args)
def remove_ids(self, *args): return _swigfaiss.IndexFlat_remove_ids(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_IndexFlat(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_IndexFlat
__del__ = lambda self : None;
IndexFlat_swigregister = _swigfaiss.IndexFlat_swigregister
IndexFlat_swigregister(IndexFlat)
class IndexFlatIP(IndexFlat):
__swig_setmethods__ = {}
for _s in [IndexFlat]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexFlatIP, name, value)
__swig_getmethods__ = {}
for _s in [IndexFlat]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexFlatIP, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _swigfaiss.new_IndexFlatIP(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_IndexFlatIP
__del__ = lambda self : None;
IndexFlatIP_swigregister = _swigfaiss.IndexFlatIP_swigregister
IndexFlatIP_swigregister(IndexFlatIP)
class IndexFlatL2(IndexFlat):
__swig_setmethods__ = {}
for _s in [IndexFlat]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexFlatL2, name, value)
__swig_getmethods__ = {}
for _s in [IndexFlat]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexFlatL2, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _swigfaiss.new_IndexFlatL2(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_IndexFlatL2
__del__ = lambda self : None;
IndexFlatL2_swigregister = _swigfaiss.IndexFlatL2_swigregister
IndexFlatL2_swigregister(IndexFlatL2)
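# --- Illustrative sketch (not part of the generated wrapper) --------------
# The canonical exact-search loop with IndexFlatL2.  Assumes numpy and the
# numpy-aware add()/search() replacements installed by the top-level `faiss`
# module (the raw methods bound above take C pointers and counts); the
# helper is never called here.
def _sketch_flat_l2_search():
    import numpy as np
    import faiss
    d = 64
    xb = np.random.rand(10000, d).astype('float32')   # database vectors
    xq = np.random.rand(5, d).astype('float32')       # query vectors
    index = faiss.IndexFlatL2(d)   # exact L2; no training phase
    index.add(xb)
    D, I = index.search(xq, 4)     # squared L2 distances and neighbor ids
    return D, I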
class IndexFlatL2BaseShift(IndexFlatL2):
__swig_setmethods__ = {}
for _s in [IndexFlatL2]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexFlatL2BaseShift, name, value)
__swig_getmethods__ = {}
for _s in [IndexFlatL2]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexFlatL2BaseShift, name)
__repr__ = _swig_repr
__swig_setmethods__["shift"] = _swigfaiss.IndexFlatL2BaseShift_shift_set
__swig_getmethods__["shift"] = _swigfaiss.IndexFlatL2BaseShift_shift_get
if _newclass:shift = _swig_property(_swigfaiss.IndexFlatL2BaseShift_shift_get, _swigfaiss.IndexFlatL2BaseShift_shift_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexFlatL2BaseShift(*args)
try: self.this.append(this)
except: self.this = this
def search(self, *args): return _swigfaiss.IndexFlatL2BaseShift_search(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexFlatL2BaseShift
__del__ = lambda self : None;
IndexFlatL2BaseShift_swigregister = _swigfaiss.IndexFlatL2BaseShift_swigregister
IndexFlatL2BaseShift_swigregister(IndexFlatL2BaseShift)
class IndexRefineFlat(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexRefineFlat, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexRefineFlat, name)
__repr__ = _swig_repr
__swig_setmethods__["refine_index"] = _swigfaiss.IndexRefineFlat_refine_index_set
__swig_getmethods__["refine_index"] = _swigfaiss.IndexRefineFlat_refine_index_get
if _newclass:refine_index = _swig_property(_swigfaiss.IndexRefineFlat_refine_index_get, _swigfaiss.IndexRefineFlat_refine_index_set)
__swig_setmethods__["base_index"] = _swigfaiss.IndexRefineFlat_base_index_set
__swig_getmethods__["base_index"] = _swigfaiss.IndexRefineFlat_base_index_get
if _newclass:base_index = _swig_property(_swigfaiss.IndexRefineFlat_base_index_get, _swigfaiss.IndexRefineFlat_base_index_set)
__swig_setmethods__["own_fields"] = _swigfaiss.IndexRefineFlat_own_fields_set
__swig_getmethods__["own_fields"] = _swigfaiss.IndexRefineFlat_own_fields_get
if _newclass:own_fields = _swig_property(_swigfaiss.IndexRefineFlat_own_fields_get, _swigfaiss.IndexRefineFlat_own_fields_set)
__swig_setmethods__["k_factor"] = _swigfaiss.IndexRefineFlat_k_factor_set
__swig_getmethods__["k_factor"] = _swigfaiss.IndexRefineFlat_k_factor_get
if _newclass:k_factor = _swig_property(_swigfaiss.IndexRefineFlat_k_factor_get, _swigfaiss.IndexRefineFlat_k_factor_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexRefineFlat(*args)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.IndexRefineFlat_train(self, *args)
def add(self, *args): return _swigfaiss.IndexRefineFlat_add(self, *args)
def reset(self): return _swigfaiss.IndexRefineFlat_reset(self)
def search(self, *args): return _swigfaiss.IndexRefineFlat_search(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexRefineFlat
__del__ = lambda self : None;
IndexRefineFlat_swigregister = _swigfaiss.IndexRefineFlat_swigregister
IndexRefineFlat_swigregister(IndexRefineFlat)
class IndexFlat1D(IndexFlatL2):
__swig_setmethods__ = {}
for _s in [IndexFlatL2]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexFlat1D, name, value)
__swig_getmethods__ = {}
for _s in [IndexFlatL2]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexFlat1D, name)
__repr__ = _swig_repr
__swig_setmethods__["continuous_update"] = _swigfaiss.IndexFlat1D_continuous_update_set
__swig_getmethods__["continuous_update"] = _swigfaiss.IndexFlat1D_continuous_update_get
if _newclass:continuous_update = _swig_property(_swigfaiss.IndexFlat1D_continuous_update_get, _swigfaiss.IndexFlat1D_continuous_update_set)
__swig_setmethods__["perm"] = _swigfaiss.IndexFlat1D_perm_set
__swig_getmethods__["perm"] = _swigfaiss.IndexFlat1D_perm_get
if _newclass:perm = _swig_property(_swigfaiss.IndexFlat1D_perm_get, _swigfaiss.IndexFlat1D_perm_set)
def __init__(self, continuous_update=True):
this = _swigfaiss.new_IndexFlat1D(continuous_update)
try: self.this.append(this)
except: self.this = this
def update_permutation(self): return _swigfaiss.IndexFlat1D_update_permutation(self)
def add(self, *args): return _swigfaiss.IndexFlat1D_add(self, *args)
def reset(self): return _swigfaiss.IndexFlat1D_reset(self)
def search(self, *args): return _swigfaiss.IndexFlat1D_search(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexFlat1D
__del__ = lambda self : None;
IndexFlat1D_swigregister = _swigfaiss.IndexFlat1D_swigregister
IndexFlat1D_swigregister(IndexFlat1D)
class IndexLSH(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexLSH, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexLSH, name)
__repr__ = _swig_repr
__swig_setmethods__["nbits"] = _swigfaiss.IndexLSH_nbits_set
__swig_getmethods__["nbits"] = _swigfaiss.IndexLSH_nbits_get
if _newclass:nbits = _swig_property(_swigfaiss.IndexLSH_nbits_get, _swigfaiss.IndexLSH_nbits_set)
__swig_setmethods__["bytes_per_vec"] = _swigfaiss.IndexLSH_bytes_per_vec_set
__swig_getmethods__["bytes_per_vec"] = _swigfaiss.IndexLSH_bytes_per_vec_get
if _newclass:bytes_per_vec = _swig_property(_swigfaiss.IndexLSH_bytes_per_vec_get, _swigfaiss.IndexLSH_bytes_per_vec_set)
__swig_setmethods__["rotate_data"] = _swigfaiss.IndexLSH_rotate_data_set
__swig_getmethods__["rotate_data"] = _swigfaiss.IndexLSH_rotate_data_get
if _newclass:rotate_data = _swig_property(_swigfaiss.IndexLSH_rotate_data_get, _swigfaiss.IndexLSH_rotate_data_set)
__swig_setmethods__["train_thresholds"] = _swigfaiss.IndexLSH_train_thresholds_set
__swig_getmethods__["train_thresholds"] = _swigfaiss.IndexLSH_train_thresholds_get
if _newclass:train_thresholds = _swig_property(_swigfaiss.IndexLSH_train_thresholds_get, _swigfaiss.IndexLSH_train_thresholds_set)
__swig_setmethods__["rrot"] = _swigfaiss.IndexLSH_rrot_set
__swig_getmethods__["rrot"] = _swigfaiss.IndexLSH_rrot_get
if _newclass:rrot = _swig_property(_swigfaiss.IndexLSH_rrot_get, _swigfaiss.IndexLSH_rrot_set)
__swig_setmethods__["thresholds"] = _swigfaiss.IndexLSH_thresholds_set
__swig_getmethods__["thresholds"] = _swigfaiss.IndexLSH_thresholds_get
if _newclass:thresholds = _swig_property(_swigfaiss.IndexLSH_thresholds_get, _swigfaiss.IndexLSH_thresholds_set)
__swig_setmethods__["codes"] = _swigfaiss.IndexLSH_codes_set
__swig_getmethods__["codes"] = _swigfaiss.IndexLSH_codes_get
if _newclass:codes = _swig_property(_swigfaiss.IndexLSH_codes_get, _swigfaiss.IndexLSH_codes_set)
def apply_preprocess(self, *args): return _swigfaiss.IndexLSH_apply_preprocess(self, *args)
def train(self, *args): return _swigfaiss.IndexLSH_train(self, *args)
def add(self, *args): return _swigfaiss.IndexLSH_add(self, *args)
def search(self, *args): return _swigfaiss.IndexLSH_search(self, *args)
def reset(self): return _swigfaiss.IndexLSH_reset(self)
def transfer_thresholds(self, *args): return _swigfaiss.IndexLSH_transfer_thresholds(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexLSH
__del__ = lambda self : None;
def __init__(self, *args):
this = _swigfaiss.new_IndexLSH(*args)
try: self.this.append(this)
except: self.this = this
IndexLSH_swigregister = _swigfaiss.IndexLSH_swigregister
IndexLSH_swigregister(IndexLSH)
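# --- Illustrative sketch (not part of the generated wrapper) --------------
# IndexLSH stores nbits-bit binary signatures obtained from (optionally
# rotated) random projections and searches them by Hamming distance.  Same
# numpy/wrapper assumptions as the sketches above; never called here.
def _sketch_index_lsh():
    import numpy as np
    import faiss
    d, nbits = 64, 256
    # train() only matters when train_thresholds is enabled on the index
    index = faiss.IndexLSH(d, nbits)
    x = np.random.rand(1000, d).astype('float32')
    index.add(x)
    return index.search(x[:5], 4)   # distances over the binary signatures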
class SimulatedAnnealingParameters(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SimulatedAnnealingParameters, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SimulatedAnnealingParameters, name)
__repr__ = _swig_repr
__swig_setmethods__["init_temperature"] = _swigfaiss.SimulatedAnnealingParameters_init_temperature_set
__swig_getmethods__["init_temperature"] = _swigfaiss.SimulatedAnnealingParameters_init_temperature_get
if _newclass:init_temperature = _swig_property(_swigfaiss.SimulatedAnnealingParameters_init_temperature_get, _swigfaiss.SimulatedAnnealingParameters_init_temperature_set)
__swig_setmethods__["temperature_decay"] = _swigfaiss.SimulatedAnnealingParameters_temperature_decay_set
__swig_getmethods__["temperature_decay"] = _swigfaiss.SimulatedAnnealingParameters_temperature_decay_get
if _newclass:temperature_decay = _swig_property(_swigfaiss.SimulatedAnnealingParameters_temperature_decay_get, _swigfaiss.SimulatedAnnealingParameters_temperature_decay_set)
__swig_setmethods__["n_iter"] = _swigfaiss.SimulatedAnnealingParameters_n_iter_set
__swig_getmethods__["n_iter"] = _swigfaiss.SimulatedAnnealingParameters_n_iter_get
if _newclass:n_iter = _swig_property(_swigfaiss.SimulatedAnnealingParameters_n_iter_get, _swigfaiss.SimulatedAnnealingParameters_n_iter_set)
__swig_setmethods__["n_redo"] = _swigfaiss.SimulatedAnnealingParameters_n_redo_set
__swig_getmethods__["n_redo"] = _swigfaiss.SimulatedAnnealingParameters_n_redo_get
if _newclass:n_redo = _swig_property(_swigfaiss.SimulatedAnnealingParameters_n_redo_get, _swigfaiss.SimulatedAnnealingParameters_n_redo_set)
__swig_setmethods__["seed"] = _swigfaiss.SimulatedAnnealingParameters_seed_set
__swig_getmethods__["seed"] = _swigfaiss.SimulatedAnnealingParameters_seed_get
if _newclass:seed = _swig_property(_swigfaiss.SimulatedAnnealingParameters_seed_get, _swigfaiss.SimulatedAnnealingParameters_seed_set)
__swig_setmethods__["verbose"] = _swigfaiss.SimulatedAnnealingParameters_verbose_set
__swig_getmethods__["verbose"] = _swigfaiss.SimulatedAnnealingParameters_verbose_get
if _newclass:verbose = _swig_property(_swigfaiss.SimulatedAnnealingParameters_verbose_get, _swigfaiss.SimulatedAnnealingParameters_verbose_set)
__swig_setmethods__["only_bit_flips"] = _swigfaiss.SimulatedAnnealingParameters_only_bit_flips_set
__swig_getmethods__["only_bit_flips"] = _swigfaiss.SimulatedAnnealingParameters_only_bit_flips_get
if _newclass:only_bit_flips = _swig_property(_swigfaiss.SimulatedAnnealingParameters_only_bit_flips_get, _swigfaiss.SimulatedAnnealingParameters_only_bit_flips_set)
__swig_setmethods__["init_random"] = _swigfaiss.SimulatedAnnealingParameters_init_random_set
__swig_getmethods__["init_random"] = _swigfaiss.SimulatedAnnealingParameters_init_random_get
if _newclass:init_random = _swig_property(_swigfaiss.SimulatedAnnealingParameters_init_random_get, _swigfaiss.SimulatedAnnealingParameters_init_random_set)
def __init__(self):
this = _swigfaiss.new_SimulatedAnnealingParameters()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_SimulatedAnnealingParameters
__del__ = lambda self : None;
SimulatedAnnealingParameters_swigregister = _swigfaiss.SimulatedAnnealingParameters_swigregister
SimulatedAnnealingParameters_swigregister(SimulatedAnnealingParameters)
class PermutationObjective(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PermutationObjective, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PermutationObjective, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_setmethods__["n"] = _swigfaiss.PermutationObjective_n_set
__swig_getmethods__["n"] = _swigfaiss.PermutationObjective_n_get
if _newclass:n = _swig_property(_swigfaiss.PermutationObjective_n_get, _swigfaiss.PermutationObjective_n_set)
def compute_cost(self, *args): return _swigfaiss.PermutationObjective_compute_cost(self, *args)
def cost_update(self, *args): return _swigfaiss.PermutationObjective_cost_update(self, *args)
__swig_destroy__ = _swigfaiss.delete_PermutationObjective
__del__ = lambda self : None;
PermutationObjective_swigregister = _swigfaiss.PermutationObjective_swigregister
PermutationObjective_swigregister(PermutationObjective)
class ReproduceDistancesObjective(PermutationObjective):
__swig_setmethods__ = {}
for _s in [PermutationObjective]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ReproduceDistancesObjective, name, value)
__swig_getmethods__ = {}
for _s in [PermutationObjective]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, ReproduceDistancesObjective, name)
__repr__ = _swig_repr
__swig_setmethods__["dis_weight_factor"] = _swigfaiss.ReproduceDistancesObjective_dis_weight_factor_set
__swig_getmethods__["dis_weight_factor"] = _swigfaiss.ReproduceDistancesObjective_dis_weight_factor_get
if _newclass:dis_weight_factor = _swig_property(_swigfaiss.ReproduceDistancesObjective_dis_weight_factor_get, _swigfaiss.ReproduceDistancesObjective_dis_weight_factor_set)
__swig_getmethods__["sqr"] = lambda x: _swigfaiss.ReproduceDistancesObjective_sqr
if _newclass:sqr = staticmethod(_swigfaiss.ReproduceDistancesObjective_sqr)
def dis_weight(self, *args): return _swigfaiss.ReproduceDistancesObjective_dis_weight(self, *args)
__swig_setmethods__["source_dis"] = _swigfaiss.ReproduceDistancesObjective_source_dis_set
__swig_getmethods__["source_dis"] = _swigfaiss.ReproduceDistancesObjective_source_dis_get
if _newclass:source_dis = _swig_property(_swigfaiss.ReproduceDistancesObjective_source_dis_get, _swigfaiss.ReproduceDistancesObjective_source_dis_set)
__swig_setmethods__["target_dis"] = _swigfaiss.ReproduceDistancesObjective_target_dis_set
__swig_getmethods__["target_dis"] = _swigfaiss.ReproduceDistancesObjective_target_dis_get
if _newclass:target_dis = _swig_property(_swigfaiss.ReproduceDistancesObjective_target_dis_get, _swigfaiss.ReproduceDistancesObjective_target_dis_set)
__swig_setmethods__["weights"] = _swigfaiss.ReproduceDistancesObjective_weights_set
__swig_getmethods__["weights"] = _swigfaiss.ReproduceDistancesObjective_weights_get
if _newclass:weights = _swig_property(_swigfaiss.ReproduceDistancesObjective_weights_get, _swigfaiss.ReproduceDistancesObjective_weights_set)
def get_source_dis(self, *args): return _swigfaiss.ReproduceDistancesObjective_get_source_dis(self, *args)
def compute_cost(self, *args): return _swigfaiss.ReproduceDistancesObjective_compute_cost(self, *args)
def cost_update(self, *args): return _swigfaiss.ReproduceDistancesObjective_cost_update(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_ReproduceDistancesObjective(*args)
try: self.this.append(this)
except: self.this = this
__swig_getmethods__["compute_mean_stdev"] = lambda x: _swigfaiss.ReproduceDistancesObjective_compute_mean_stdev
if _newclass:compute_mean_stdev = staticmethod(_swigfaiss.ReproduceDistancesObjective_compute_mean_stdev)
def set_affine_target_dis(self, *args): return _swigfaiss.ReproduceDistancesObjective_set_affine_target_dis(self, *args)
__swig_destroy__ = _swigfaiss.delete_ReproduceDistancesObjective
__del__ = lambda self : None;
ReproduceDistancesObjective_swigregister = _swigfaiss.ReproduceDistancesObjective_swigregister
ReproduceDistancesObjective_swigregister(ReproduceDistancesObjective)
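# SWIG also emits module-level aliases for static member functions: a *args
# forwarder is defined first, then rebound to the raw C entry point, so
# ReproduceDistancesObjective.sqr and the free functions below are equivalent.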
def ReproduceDistancesObjective_sqr(*args):
return _swigfaiss.ReproduceDistancesObjective_sqr(*args)
ReproduceDistancesObjective_sqr = _swigfaiss.ReproduceDistancesObjective_sqr
def ReproduceDistancesObjective_compute_mean_stdev(*args):
return _swigfaiss.ReproduceDistancesObjective_compute_mean_stdev(*args)
ReproduceDistancesObjective_compute_mean_stdev = _swigfaiss.ReproduceDistancesObjective_compute_mean_stdev
class SimulatedAnnealingOptimizer(SimulatedAnnealingParameters):
__swig_setmethods__ = {}
for _s in [SimulatedAnnealingParameters]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimulatedAnnealingOptimizer, name, value)
__swig_getmethods__ = {}
for _s in [SimulatedAnnealingParameters]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, SimulatedAnnealingOptimizer, name)
__repr__ = _swig_repr
__swig_setmethods__["obj"] = _swigfaiss.SimulatedAnnealingOptimizer_obj_set
__swig_getmethods__["obj"] = _swigfaiss.SimulatedAnnealingOptimizer_obj_get
if _newclass:obj = _swig_property(_swigfaiss.SimulatedAnnealingOptimizer_obj_get, _swigfaiss.SimulatedAnnealingOptimizer_obj_set)
__swig_setmethods__["n"] = _swigfaiss.SimulatedAnnealingOptimizer_n_set
__swig_getmethods__["n"] = _swigfaiss.SimulatedAnnealingOptimizer_n_get
if _newclass:n = _swig_property(_swigfaiss.SimulatedAnnealingOptimizer_n_get, _swigfaiss.SimulatedAnnealingOptimizer_n_set)
__swig_setmethods__["logfile"] = _swigfaiss.SimulatedAnnealingOptimizer_logfile_set
__swig_getmethods__["logfile"] = _swigfaiss.SimulatedAnnealingOptimizer_logfile_get
if _newclass:logfile = _swig_property(_swigfaiss.SimulatedAnnealingOptimizer_logfile_get, _swigfaiss.SimulatedAnnealingOptimizer_logfile_set)
def __init__(self, *args):
this = _swigfaiss.new_SimulatedAnnealingOptimizer(*args)
try: self.this.append(this)
except: self.this = this
__swig_setmethods__["rnd"] = _swigfaiss.SimulatedAnnealingOptimizer_rnd_set
__swig_getmethods__["rnd"] = _swigfaiss.SimulatedAnnealingOptimizer_rnd_get
if _newclass:rnd = _swig_property(_swigfaiss.SimulatedAnnealingOptimizer_rnd_get, _swigfaiss.SimulatedAnnealingOptimizer_rnd_set)
__swig_setmethods__["init_cost"] = _swigfaiss.SimulatedAnnealingOptimizer_init_cost_set
__swig_getmethods__["init_cost"] = _swigfaiss.SimulatedAnnealingOptimizer_init_cost_get
if _newclass:init_cost = _swig_property(_swigfaiss.SimulatedAnnealingOptimizer_init_cost_get, _swigfaiss.SimulatedAnnealingOptimizer_init_cost_set)
def optimize(self, *args): return _swigfaiss.SimulatedAnnealingOptimizer_optimize(self, *args)
def run_optimization(self, *args): return _swigfaiss.SimulatedAnnealingOptimizer_run_optimization(self, *args)
__swig_destroy__ = _swigfaiss.delete_SimulatedAnnealingOptimizer
__del__ = lambda self : None;
SimulatedAnnealingOptimizer_swigregister = _swigfaiss.SimulatedAnnealingOptimizer_swigregister
SimulatedAnnealingOptimizer_swigregister(SimulatedAnnealingOptimizer)
class PolysemousTraining(SimulatedAnnealingParameters):
__swig_setmethods__ = {}
for _s in [SimulatedAnnealingParameters]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, PolysemousTraining, name, value)
__swig_getmethods__ = {}
for _s in [SimulatedAnnealingParameters]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, PolysemousTraining, name)
__repr__ = _swig_repr
OT_None = _swigfaiss.PolysemousTraining_OT_None
OT_ReproduceDistances_affine = _swigfaiss.PolysemousTraining_OT_ReproduceDistances_affine
OT_Ranking_weighted_diff = _swigfaiss.PolysemousTraining_OT_Ranking_weighted_diff
__swig_setmethods__["optimization_type"] = _swigfaiss.PolysemousTraining_optimization_type_set
__swig_getmethods__["optimization_type"] = _swigfaiss.PolysemousTraining_optimization_type_get
if _newclass:optimization_type = _swig_property(_swigfaiss.PolysemousTraining_optimization_type_get, _swigfaiss.PolysemousTraining_optimization_type_set)
__swig_setmethods__["ntrain_permutation"] = _swigfaiss.PolysemousTraining_ntrain_permutation_set
__swig_getmethods__["ntrain_permutation"] = _swigfaiss.PolysemousTraining_ntrain_permutation_get
if _newclass:ntrain_permutation = _swig_property(_swigfaiss.PolysemousTraining_ntrain_permutation_get, _swigfaiss.PolysemousTraining_ntrain_permutation_set)
__swig_setmethods__["dis_weight_factor"] = _swigfaiss.PolysemousTraining_dis_weight_factor_set
__swig_getmethods__["dis_weight_factor"] = _swigfaiss.PolysemousTraining_dis_weight_factor_get
if _newclass:dis_weight_factor = _swig_property(_swigfaiss.PolysemousTraining_dis_weight_factor_get, _swigfaiss.PolysemousTraining_dis_weight_factor_set)
__swig_setmethods__["log_pattern"] = _swigfaiss.PolysemousTraining_log_pattern_set
__swig_getmethods__["log_pattern"] = _swigfaiss.PolysemousTraining_log_pattern_get
if _newclass:log_pattern = _swig_property(_swigfaiss.PolysemousTraining_log_pattern_get, _swigfaiss.PolysemousTraining_log_pattern_set)
def __init__(self):
this = _swigfaiss.new_PolysemousTraining()
try: self.this.append(this)
except: self.this = this
def optimize_pq_for_hamming(self, *args): return _swigfaiss.PolysemousTraining_optimize_pq_for_hamming(self, *args)
def optimize_ranking(self, *args): return _swigfaiss.PolysemousTraining_optimize_ranking(self, *args)
def optimize_reproduce_distances(self, *args): return _swigfaiss.PolysemousTraining_optimize_reproduce_distances(self, *args)
__swig_destroy__ = _swigfaiss.delete_PolysemousTraining
__del__ = lambda self : None;
PolysemousTraining_swigregister = _swigfaiss.PolysemousTraining_swigregister
PolysemousTraining_swigregister(PolysemousTraining)
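# Usage sketch for PolysemousTraining: tunes the optimizer that reorders PQ
# centroids so that Hamming distances between codes mimic true distances.
# Hedged sketch, assuming the numpy-aware `faiss` wrapper module (not this raw
# module); IndexPQ is wrapped just below and owns a PolysemousTraining member.
#
#   import faiss
#   index = faiss.IndexPQ(64, 8, 8)
#   index.do_polysemous_training = True       # must be set before train()
#   pt = index.polysemous_training            # object owned by the index
#   pt.optimization_type = faiss.PolysemousTraining.OT_ReproduceDistances_affine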
class IndexPQ(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexPQ, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexPQ, name)
__repr__ = _swig_repr
__swig_setmethods__["pq"] = _swigfaiss.IndexPQ_pq_set
__swig_getmethods__["pq"] = _swigfaiss.IndexPQ_pq_get
if _newclass:pq = _swig_property(_swigfaiss.IndexPQ_pq_get, _swigfaiss.IndexPQ_pq_set)
__swig_setmethods__["codes"] = _swigfaiss.IndexPQ_codes_set
__swig_getmethods__["codes"] = _swigfaiss.IndexPQ_codes_get
if _newclass:codes = _swig_property(_swigfaiss.IndexPQ_codes_get, _swigfaiss.IndexPQ_codes_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexPQ(*args)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.IndexPQ_train(self, *args)
def add(self, *args): return _swigfaiss.IndexPQ_add(self, *args)
def search(self, *args): return _swigfaiss.IndexPQ_search(self, *args)
def reset(self): return _swigfaiss.IndexPQ_reset(self)
def reconstruct_n(self, *args): return _swigfaiss.IndexPQ_reconstruct_n(self, *args)
def reconstruct(self, *args): return _swigfaiss.IndexPQ_reconstruct(self, *args)
__swig_setmethods__["do_polysemous_training"] = _swigfaiss.IndexPQ_do_polysemous_training_set
__swig_getmethods__["do_polysemous_training"] = _swigfaiss.IndexPQ_do_polysemous_training_get
if _newclass:do_polysemous_training = _swig_property(_swigfaiss.IndexPQ_do_polysemous_training_get, _swigfaiss.IndexPQ_do_polysemous_training_set)
__swig_setmethods__["polysemous_training"] = _swigfaiss.IndexPQ_polysemous_training_set
__swig_getmethods__["polysemous_training"] = _swigfaiss.IndexPQ_polysemous_training_get
if _newclass:polysemous_training = _swig_property(_swigfaiss.IndexPQ_polysemous_training_get, _swigfaiss.IndexPQ_polysemous_training_set)
ST_PQ = _swigfaiss.IndexPQ_ST_PQ
ST_HE = _swigfaiss.IndexPQ_ST_HE
ST_generalized_HE = _swigfaiss.IndexPQ_ST_generalized_HE
ST_SDC = _swigfaiss.IndexPQ_ST_SDC
ST_polysemous = _swigfaiss.IndexPQ_ST_polysemous
ST_polysemous_generalize = _swigfaiss.IndexPQ_ST_polysemous_generalize
__swig_setmethods__["search_type"] = _swigfaiss.IndexPQ_search_type_set
__swig_getmethods__["search_type"] = _swigfaiss.IndexPQ_search_type_get
if _newclass:search_type = _swig_property(_swigfaiss.IndexPQ_search_type_get, _swigfaiss.IndexPQ_search_type_set)
__swig_setmethods__["encode_signs"] = _swigfaiss.IndexPQ_encode_signs_set
__swig_getmethods__["encode_signs"] = _swigfaiss.IndexPQ_encode_signs_get
if _newclass:encode_signs = _swig_property(_swigfaiss.IndexPQ_encode_signs_get, _swigfaiss.IndexPQ_encode_signs_set)
__swig_setmethods__["polysemous_ht"] = _swigfaiss.IndexPQ_polysemous_ht_set
__swig_getmethods__["polysemous_ht"] = _swigfaiss.IndexPQ_polysemous_ht_get
if _newclass:polysemous_ht = _swig_property(_swigfaiss.IndexPQ_polysemous_ht_get, _swigfaiss.IndexPQ_polysemous_ht_set)
def search_core_polysemous(self, *args): return _swigfaiss.IndexPQ_search_core_polysemous(self, *args)
def hamming_distance_histogram(self, *args): return _swigfaiss.IndexPQ_hamming_distance_histogram(self, *args)
def hamming_distance_table(self, *args): return _swigfaiss.IndexPQ_hamming_distance_table(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexPQ
__del__ = lambda self : None;
IndexPQ_swigregister = _swigfaiss.IndexPQ_swigregister
IndexPQ_swigregister(IndexPQ)
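# Usage sketch for IndexPQ (exhaustive search over product-quantizer codes).
# Assumes the numpy-aware `faiss` wrapper module, which adds array conversion
# around these generated methods; `faiss`/`numpy` are not imported here.
#
#   import numpy as np, faiss
#   d = 64
#   xb = np.random.random((10000, d)).astype('float32')
#   index = faiss.IndexPQ(d, 8, 8)            # 8 sub-quantizers, 8 bits each
#   index.train(xb)                           # learn the PQ codebooks
#   index.add(xb)                             # encode and store
#   D, I = index.search(xb[:5], 4)            # 4 nearest neighbours per query
#
#   # Optional polysemous filtering, using the fields defined above
#   # (requires index.do_polysemous_training = True before train()):
#   index.search_type = faiss.IndexPQ.ST_polysemous
#   index.polysemous_ht = 54                  # Hamming threshold, heuristic value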
class IndexPQStats(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexPQStats, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IndexPQStats, name)
__repr__ = _swig_repr
__swig_setmethods__["nq"] = _swigfaiss.IndexPQStats_nq_set
__swig_getmethods__["nq"] = _swigfaiss.IndexPQStats_nq_get
if _newclass:nq = _swig_property(_swigfaiss.IndexPQStats_nq_get, _swigfaiss.IndexPQStats_nq_set)
__swig_setmethods__["ncode"] = _swigfaiss.IndexPQStats_ncode_set
__swig_getmethods__["ncode"] = _swigfaiss.IndexPQStats_ncode_get
if _newclass:ncode = _swig_property(_swigfaiss.IndexPQStats_ncode_get, _swigfaiss.IndexPQStats_ncode_set)
__swig_setmethods__["n_hamming_pass"] = _swigfaiss.IndexPQStats_n_hamming_pass_set
__swig_getmethods__["n_hamming_pass"] = _swigfaiss.IndexPQStats_n_hamming_pass_get
if _newclass:n_hamming_pass = _swig_property(_swigfaiss.IndexPQStats_n_hamming_pass_get, _swigfaiss.IndexPQStats_n_hamming_pass_set)
def __init__(self):
this = _swigfaiss.new_IndexPQStats()
try: self.this.append(this)
except: self.this = this
def reset(self): return _swigfaiss.IndexPQStats_reset(self)
__swig_destroy__ = _swigfaiss.delete_IndexPQStats
__del__ = lambda self : None;
IndexPQStats_swigregister = _swigfaiss.IndexPQStats_swigregister
IndexPQStats_swigregister(IndexPQStats)
class MultiIndexQuantizer(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, MultiIndexQuantizer, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, MultiIndexQuantizer, name)
__repr__ = _swig_repr
__swig_setmethods__["pq"] = _swigfaiss.MultiIndexQuantizer_pq_set
__swig_getmethods__["pq"] = _swigfaiss.MultiIndexQuantizer_pq_get
if _newclass:pq = _swig_property(_swigfaiss.MultiIndexQuantizer_pq_get, _swigfaiss.MultiIndexQuantizer_pq_set)
def train(self, *args): return _swigfaiss.MultiIndexQuantizer_train(self, *args)
def search(self, *args): return _swigfaiss.MultiIndexQuantizer_search(self, *args)
def add(self, *args): return _swigfaiss.MultiIndexQuantizer_add(self, *args)
def reset(self): return _swigfaiss.MultiIndexQuantizer_reset(self)
def __init__(self, *args):
this = _swigfaiss.new_MultiIndexQuantizer(*args)
try: self.this.append(this)
except: self.this = this
def reconstruct(self, *args): return _swigfaiss.MultiIndexQuantizer_reconstruct(self, *args)
__swig_destroy__ = _swigfaiss.delete_MultiIndexQuantizer
__del__ = lambda self : None;
MultiIndexQuantizer_swigregister = _swigfaiss.MultiIndexQuantizer_swigregister
MultiIndexQuantizer_swigregister(MultiIndexQuantizer)
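# Usage sketch for MultiIndexQuantizer: a PQ viewed as a multi-index coarse
# quantizer over the cross-product of sub-codebooks; it stores no vectors
# (the underlying C++ add() is not supported). Hedged sketch, assuming the
# numpy-aware `faiss` wrapper module.
#
#   import numpy as np, faiss
#   d = 64
#   xt = np.random.random((10000, d)).astype('float32')
#   mi = faiss.MultiIndexQuantizer(d, 2, 8)   # 2 sub-vocabularies of 256 cells
#   mi.train(xt)
#   D, I = mi.search(xt[:5], 1)               # ids of nearest composite centroids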
class Level1Quantizer(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Level1Quantizer, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Level1Quantizer, name)
__repr__ = _swig_repr
__swig_setmethods__["quantizer"] = _swigfaiss.Level1Quantizer_quantizer_set
__swig_getmethods__["quantizer"] = _swigfaiss.Level1Quantizer_quantizer_get
if _newclass:quantizer = _swig_property(_swigfaiss.Level1Quantizer_quantizer_get, _swigfaiss.Level1Quantizer_quantizer_set)
__swig_setmethods__["nlist"] = _swigfaiss.Level1Quantizer_nlist_set
__swig_getmethods__["nlist"] = _swigfaiss.Level1Quantizer_nlist_get
if _newclass:nlist = _swig_property(_swigfaiss.Level1Quantizer_nlist_get, _swigfaiss.Level1Quantizer_nlist_set)
__swig_setmethods__["quantizer_trains_alone"] = _swigfaiss.Level1Quantizer_quantizer_trains_alone_set
__swig_getmethods__["quantizer_trains_alone"] = _swigfaiss.Level1Quantizer_quantizer_trains_alone_get
if _newclass:quantizer_trains_alone = _swig_property(_swigfaiss.Level1Quantizer_quantizer_trains_alone_get, _swigfaiss.Level1Quantizer_quantizer_trains_alone_set)
__swig_setmethods__["own_fields"] = _swigfaiss.Level1Quantizer_own_fields_set
__swig_getmethods__["own_fields"] = _swigfaiss.Level1Quantizer_own_fields_get
if _newclass:own_fields = _swig_property(_swigfaiss.Level1Quantizer_own_fields_get, _swigfaiss.Level1Quantizer_own_fields_set)
__swig_setmethods__["cp"] = _swigfaiss.Level1Quantizer_cp_set
__swig_getmethods__["cp"] = _swigfaiss.Level1Quantizer_cp_get
if _newclass:cp = _swig_property(_swigfaiss.Level1Quantizer_cp_get, _swigfaiss.Level1Quantizer_cp_set)
__swig_setmethods__["clustering_index"] = _swigfaiss.Level1Quantizer_clustering_index_set
__swig_getmethods__["clustering_index"] = _swigfaiss.Level1Quantizer_clustering_index_get
if _newclass:clustering_index = _swig_property(_swigfaiss.Level1Quantizer_clustering_index_get, _swigfaiss.Level1Quantizer_clustering_index_set)
def train_q1(self, *args): return _swigfaiss.Level1Quantizer_train_q1(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_Level1Quantizer(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_Level1Quantizer
__del__ = lambda self : None;
Level1Quantizer_swigregister = _swigfaiss.Level1Quantizer_swigregister
Level1Quantizer_swigregister(Level1Quantizer)
class IndexIVF(Index,Level1Quantizer):
__swig_setmethods__ = {}
for _s in [Index,Level1Quantizer]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVF, name, value)
__swig_getmethods__ = {}
for _s in [Index,Level1Quantizer]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVF, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_setmethods__["nprobe"] = _swigfaiss.IndexIVF_nprobe_set
__swig_getmethods__["nprobe"] = _swigfaiss.IndexIVF_nprobe_get
if _newclass:nprobe = _swig_property(_swigfaiss.IndexIVF_nprobe_get, _swigfaiss.IndexIVF_nprobe_set)
__swig_setmethods__["max_codes"] = _swigfaiss.IndexIVF_max_codes_set
__swig_getmethods__["max_codes"] = _swigfaiss.IndexIVF_max_codes_get
if _newclass:max_codes = _swig_property(_swigfaiss.IndexIVF_max_codes_get, _swigfaiss.IndexIVF_max_codes_set)
__swig_setmethods__["ids"] = _swigfaiss.IndexIVF_ids_set
__swig_getmethods__["ids"] = _swigfaiss.IndexIVF_ids_get
if _newclass:ids = _swig_property(_swigfaiss.IndexIVF_ids_get, _swigfaiss.IndexIVF_ids_set)
__swig_setmethods__["code_size"] = _swigfaiss.IndexIVF_code_size_set
__swig_getmethods__["code_size"] = _swigfaiss.IndexIVF_code_size_get
if _newclass:code_size = _swig_property(_swigfaiss.IndexIVF_code_size_get, _swigfaiss.IndexIVF_code_size_set)
__swig_setmethods__["codes"] = _swigfaiss.IndexIVF_codes_set
__swig_getmethods__["codes"] = _swigfaiss.IndexIVF_codes_get
if _newclass:codes = _swig_property(_swigfaiss.IndexIVF_codes_get, _swigfaiss.IndexIVF_codes_set)
__swig_setmethods__["maintain_direct_map"] = _swigfaiss.IndexIVF_maintain_direct_map_set
__swig_getmethods__["maintain_direct_map"] = _swigfaiss.IndexIVF_maintain_direct_map_get
if _newclass:maintain_direct_map = _swig_property(_swigfaiss.IndexIVF_maintain_direct_map_get, _swigfaiss.IndexIVF_maintain_direct_map_set)
__swig_setmethods__["direct_map"] = _swigfaiss.IndexIVF_direct_map_set
__swig_getmethods__["direct_map"] = _swigfaiss.IndexIVF_direct_map_get
if _newclass:direct_map = _swig_property(_swigfaiss.IndexIVF_direct_map_get, _swigfaiss.IndexIVF_direct_map_set)
def reset(self): return _swigfaiss.IndexIVF_reset(self)
def train(self, *args): return _swigfaiss.IndexIVF_train(self, *args)
def add(self, *args): return _swigfaiss.IndexIVF_add(self, *args)
def train_residual(self, *args): return _swigfaiss.IndexIVF_train_residual(self, *args)
def search_preassigned(self, *args): return _swigfaiss.IndexIVF_search_preassigned(self, *args)
def search(self, *args): return _swigfaiss.IndexIVF_search(self, *args)
def reconstruct(self, *args): return _swigfaiss.IndexIVF_reconstruct(self, *args)
def reconstruct_n(self, *args): return _swigfaiss.IndexIVF_reconstruct_n(self, *args)
def search_and_reconstruct(self, *args): return _swigfaiss.IndexIVF_search_and_reconstruct(self, *args)
def reconstruct_from_offset(self, *args): return _swigfaiss.IndexIVF_reconstruct_from_offset(self, *args)
def remove_ids(self, *args): return _swigfaiss.IndexIVF_remove_ids(self, *args)
def merge_from(self, *args): return _swigfaiss.IndexIVF_merge_from(self, *args)
def copy_subset_to(self, *args): return _swigfaiss.IndexIVF_copy_subset_to(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexIVF
__del__ = lambda self : None;
def get_list_size(self, *args): return _swigfaiss.IndexIVF_get_list_size(self, *args)
def make_direct_map(self, new_maintain_direct_map=True): return _swigfaiss.IndexIVF_make_direct_map(self, new_maintain_direct_map)
def imbalance_factor(self): return _swigfaiss.IndexIVF_imbalance_factor(self)
def print_stats(self): return _swigfaiss.IndexIVF_print_stats(self)
IndexIVF_swigregister = _swigfaiss.IndexIVF_swigregister
IndexIVF_swigregister(IndexIVF)
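# Note: IndexIVF is abstract (see the constructor stub above) and is used via
# subclasses such as IndexIVFFlat and IndexIVFPQ below. The fields most often
# touched from Python are `nprobe` (inverted lists scanned per query) and
# `make_direct_map()`, which must be called before `reconstruct()` works on
# an IVF index.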
class IndexIVFStats(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVFStats, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVFStats, name)
__repr__ = _swig_repr
__swig_setmethods__["nq"] = _swigfaiss.IndexIVFStats_nq_set
__swig_getmethods__["nq"] = _swigfaiss.IndexIVFStats_nq_get
if _newclass:nq = _swig_property(_swigfaiss.IndexIVFStats_nq_get, _swigfaiss.IndexIVFStats_nq_set)
__swig_setmethods__["nlist"] = _swigfaiss.IndexIVFStats_nlist_set
__swig_getmethods__["nlist"] = _swigfaiss.IndexIVFStats_nlist_get
if _newclass:nlist = _swig_property(_swigfaiss.IndexIVFStats_nlist_get, _swigfaiss.IndexIVFStats_nlist_set)
__swig_setmethods__["ndis"] = _swigfaiss.IndexIVFStats_ndis_set
__swig_getmethods__["ndis"] = _swigfaiss.IndexIVFStats_ndis_get
if _newclass:ndis = _swig_property(_swigfaiss.IndexIVFStats_ndis_get, _swigfaiss.IndexIVFStats_ndis_set)
def __init__(self):
this = _swigfaiss.new_IndexIVFStats()
try: self.this.append(this)
except: self.this = this
def reset(self): return _swigfaiss.IndexIVFStats_reset(self)
__swig_destroy__ = _swigfaiss.delete_IndexIVFStats
__del__ = lambda self : None;
IndexIVFStats_swigregister = _swigfaiss.IndexIVFStats_swigregister
IndexIVFStats_swigregister(IndexIVFStats)
class IndexIVFFlat(IndexIVF):
__swig_setmethods__ = {}
for _s in [IndexIVF]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVFFlat, name, value)
__swig_getmethods__ = {}
for _s in [IndexIVF]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVFFlat, name)
__repr__ = _swig_repr
def add_core(self, *args): return _swigfaiss.IndexIVFFlat_add_core(self, *args)
def add_with_ids(self, *args): return _swigfaiss.IndexIVFFlat_add_with_ids(self, *args)
def search_preassigned(self, *args): return _swigfaiss.IndexIVFFlat_search_preassigned(self, *args)
def range_search(self, *args): return _swigfaiss.IndexIVFFlat_range_search(self, *args)
def update_vectors(self, *args): return _swigfaiss.IndexIVFFlat_update_vectors(self, *args)
def reconstruct_from_offset(self, *args): return _swigfaiss.IndexIVFFlat_reconstruct_from_offset(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_IndexIVFFlat(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_IndexIVFFlat
__del__ = lambda self : None;
IndexIVFFlat_swigregister = _swigfaiss.IndexIVFFlat_swigregister
IndexIVFFlat_swigregister(IndexIVFFlat)
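# Usage sketch for IndexIVFFlat (inverted lists storing raw vectors). Hedged
# sketch, assuming the numpy-aware `faiss` wrapper module; IndexFlatL2 is
# wrapped earlier in this file.
#
#   import numpy as np, faiss
#   d, nlist = 64, 100
#   xb = np.random.random((10000, d)).astype('float32')
#   quantizer = faiss.IndexFlatL2(d)          # coarse quantizer
#   index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_L2)
#   index.train(xb)                           # k-means for the coarse centroids
#   index.add(xb)
#   index.nprobe = 8                          # IndexIVF field defined above
#   D, I = index.search(xb[:5], 4)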
class ScalarQuantizer(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ScalarQuantizer, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ScalarQuantizer, name)
__repr__ = _swig_repr
QT_8bit = _swigfaiss.ScalarQuantizer_QT_8bit
QT_4bit = _swigfaiss.ScalarQuantizer_QT_4bit
QT_8bit_uniform = _swigfaiss.ScalarQuantizer_QT_8bit_uniform
QT_4bit_uniform = _swigfaiss.ScalarQuantizer_QT_4bit_uniform
__swig_setmethods__["qtype"] = _swigfaiss.ScalarQuantizer_qtype_set
__swig_getmethods__["qtype"] = _swigfaiss.ScalarQuantizer_qtype_get
if _newclass:qtype = _swig_property(_swigfaiss.ScalarQuantizer_qtype_get, _swigfaiss.ScalarQuantizer_qtype_set)
RS_minmax = _swigfaiss.ScalarQuantizer_RS_minmax
RS_meanstd = _swigfaiss.ScalarQuantizer_RS_meanstd
RS_quantiles = _swigfaiss.ScalarQuantizer_RS_quantiles
RS_optim = _swigfaiss.ScalarQuantizer_RS_optim
__swig_setmethods__["rangestat"] = _swigfaiss.ScalarQuantizer_rangestat_set
__swig_getmethods__["rangestat"] = _swigfaiss.ScalarQuantizer_rangestat_get
if _newclass:rangestat = _swig_property(_swigfaiss.ScalarQuantizer_rangestat_get, _swigfaiss.ScalarQuantizer_rangestat_set)
__swig_setmethods__["rangestat_arg"] = _swigfaiss.ScalarQuantizer_rangestat_arg_set
__swig_getmethods__["rangestat_arg"] = _swigfaiss.ScalarQuantizer_rangestat_arg_get
if _newclass:rangestat_arg = _swig_property(_swigfaiss.ScalarQuantizer_rangestat_arg_get, _swigfaiss.ScalarQuantizer_rangestat_arg_set)
__swig_setmethods__["d"] = _swigfaiss.ScalarQuantizer_d_set
__swig_getmethods__["d"] = _swigfaiss.ScalarQuantizer_d_get
if _newclass:d = _swig_property(_swigfaiss.ScalarQuantizer_d_get, _swigfaiss.ScalarQuantizer_d_set)
__swig_setmethods__["code_size"] = _swigfaiss.ScalarQuantizer_code_size_set
__swig_getmethods__["code_size"] = _swigfaiss.ScalarQuantizer_code_size_get
if _newclass:code_size = _swig_property(_swigfaiss.ScalarQuantizer_code_size_get, _swigfaiss.ScalarQuantizer_code_size_set)
__swig_setmethods__["trained"] = _swigfaiss.ScalarQuantizer_trained_set
__swig_getmethods__["trained"] = _swigfaiss.ScalarQuantizer_trained_get
if _newclass:trained = _swig_property(_swigfaiss.ScalarQuantizer_trained_get, _swigfaiss.ScalarQuantizer_trained_set)
def __init__(self, *args):
this = _swigfaiss.new_ScalarQuantizer(*args)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.ScalarQuantizer_train(self, *args)
def compute_codes(self, *args): return _swigfaiss.ScalarQuantizer_compute_codes(self, *args)
def decode(self, *args): return _swigfaiss.ScalarQuantizer_decode(self, *args)
def get_distance_computer(self, *args): return _swigfaiss.ScalarQuantizer_get_distance_computer(self, *args)
__swig_destroy__ = _swigfaiss.delete_ScalarQuantizer
__del__ = lambda self : None;
ScalarQuantizer_swigregister = _swigfaiss.ScalarQuantizer_swigregister
ScalarQuantizer_swigregister(ScalarQuantizer)
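# Note: ScalarQuantizer is the codec shared by IndexScalarQuantizer and
# IndexIVFScalarQuantizer below; `qtype` selects per-component precision
# (QT_8bit, QT_4bit, ...) and `rangestat` how the value range is estimated
# (RS_minmax, RS_meanstd, ...), per the enums wrapped above.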
class IndexScalarQuantizer(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexScalarQuantizer, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexScalarQuantizer, name)
__repr__ = _swig_repr
__swig_setmethods__["sq"] = _swigfaiss.IndexScalarQuantizer_sq_set
__swig_getmethods__["sq"] = _swigfaiss.IndexScalarQuantizer_sq_get
if _newclass:sq = _swig_property(_swigfaiss.IndexScalarQuantizer_sq_get, _swigfaiss.IndexScalarQuantizer_sq_set)
__swig_setmethods__["codes"] = _swigfaiss.IndexScalarQuantizer_codes_set
__swig_getmethods__["codes"] = _swigfaiss.IndexScalarQuantizer_codes_get
if _newclass:codes = _swig_property(_swigfaiss.IndexScalarQuantizer_codes_get, _swigfaiss.IndexScalarQuantizer_codes_set)
__swig_setmethods__["code_size"] = _swigfaiss.IndexScalarQuantizer_code_size_set
__swig_getmethods__["code_size"] = _swigfaiss.IndexScalarQuantizer_code_size_get
if _newclass:code_size = _swig_property(_swigfaiss.IndexScalarQuantizer_code_size_get, _swigfaiss.IndexScalarQuantizer_code_size_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexScalarQuantizer(*args)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.IndexScalarQuantizer_train(self, *args)
def add(self, *args): return _swigfaiss.IndexScalarQuantizer_add(self, *args)
def search(self, *args): return _swigfaiss.IndexScalarQuantizer_search(self, *args)
def reset(self): return _swigfaiss.IndexScalarQuantizer_reset(self)
def reconstruct_n(self, *args): return _swigfaiss.IndexScalarQuantizer_reconstruct_n(self, *args)
def reconstruct(self, *args): return _swigfaiss.IndexScalarQuantizer_reconstruct(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexScalarQuantizer
__del__ = lambda self : None;
IndexScalarQuantizer_swigregister = _swigfaiss.IndexScalarQuantizer_swigregister
IndexScalarQuantizer_swigregister(IndexScalarQuantizer)
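# Usage sketch for IndexScalarQuantizer (flat search over scalar-quantized
# codes). Hedged sketch, assuming the numpy-aware `faiss` wrapper module.
#
#   import numpy as np, faiss
#   d = 64
#   xb = np.random.random((10000, d)).astype('float32')
#   index = faiss.IndexScalarQuantizer(d, faiss.ScalarQuantizer.QT_8bit)
#   index.train(xb)                           # estimate per-dimension ranges
#   index.add(xb)
#   D, I = index.search(xb[:5], 4)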
class IndexIVFScalarQuantizer(IndexIVF):
__swig_setmethods__ = {}
for _s in [IndexIVF]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVFScalarQuantizer, name, value)
__swig_getmethods__ = {}
for _s in [IndexIVF]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVFScalarQuantizer, name)
__repr__ = _swig_repr
__swig_setmethods__["sq"] = _swigfaiss.IndexIVFScalarQuantizer_sq_set
__swig_getmethods__["sq"] = _swigfaiss.IndexIVFScalarQuantizer_sq_get
if _newclass:sq = _swig_property(_swigfaiss.IndexIVFScalarQuantizer_sq_get, _swigfaiss.IndexIVFScalarQuantizer_sq_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexIVFScalarQuantizer(*args)
try: self.this.append(this)
except: self.this = this
def train_residual(self, *args): return _swigfaiss.IndexIVFScalarQuantizer_train_residual(self, *args)
def add_with_ids(self, *args): return _swigfaiss.IndexIVFScalarQuantizer_add_with_ids(self, *args)
def search_preassigned(self, *args): return _swigfaiss.IndexIVFScalarQuantizer_search_preassigned(self, *args)
def reconstruct_from_offset(self, *args): return _swigfaiss.IndexIVFScalarQuantizer_reconstruct_from_offset(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexIVFScalarQuantizer
__del__ = lambda self : None;
IndexIVFScalarQuantizer_swigregister = _swigfaiss.IndexIVFScalarQuantizer_swigregister
IndexIVFScalarQuantizer_swigregister(IndexIVFScalarQuantizer)
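# Usage sketch for IndexIVFScalarQuantizer (inverted lists storing SQ codes).
# Hedged sketch, assuming the numpy-aware `faiss` wrapper module; argument
# order follows the C++ constructor (quantizer, d, nlist, qtype).
#
#   quantizer = faiss.IndexFlatL2(d)          # d, xb as in the sketches above
#   index = faiss.IndexIVFScalarQuantizer(quantizer, d, 100,
#                                         faiss.ScalarQuantizer.QT_8bit)
#   index.train(xb); index.add(xb)
#   index.nprobe = 8
#   D, I = index.search(xb[:5], 4)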
class HNSW(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HNSW, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HNSW, name)
__repr__ = _swig_repr
__swig_setmethods__["assign_probas"] = _swigfaiss.HNSW_assign_probas_set
__swig_getmethods__["assign_probas"] = _swigfaiss.HNSW_assign_probas_get
if _newclass:assign_probas = _swig_property(_swigfaiss.HNSW_assign_probas_get, _swigfaiss.HNSW_assign_probas_set)
__swig_setmethods__["cum_nneighbor_per_level"] = _swigfaiss.HNSW_cum_nneighbor_per_level_set
__swig_getmethods__["cum_nneighbor_per_level"] = _swigfaiss.HNSW_cum_nneighbor_per_level_get
if _newclass:cum_nneighbor_per_level = _swig_property(_swigfaiss.HNSW_cum_nneighbor_per_level_get, _swigfaiss.HNSW_cum_nneighbor_per_level_set)
__swig_setmethods__["levels"] = _swigfaiss.HNSW_levels_set
__swig_getmethods__["levels"] = _swigfaiss.HNSW_levels_get
if _newclass:levels = _swig_property(_swigfaiss.HNSW_levels_get, _swigfaiss.HNSW_levels_set)
__swig_setmethods__["offsets"] = _swigfaiss.HNSW_offsets_set
__swig_getmethods__["offsets"] = _swigfaiss.HNSW_offsets_get
if _newclass:offsets = _swig_property(_swigfaiss.HNSW_offsets_get, _swigfaiss.HNSW_offsets_set)
__swig_setmethods__["neighbors"] = _swigfaiss.HNSW_neighbors_set
__swig_getmethods__["neighbors"] = _swigfaiss.HNSW_neighbors_get
if _newclass:neighbors = _swig_property(_swigfaiss.HNSW_neighbors_get, _swigfaiss.HNSW_neighbors_set)
__swig_setmethods__["entry_point"] = _swigfaiss.HNSW_entry_point_set
__swig_getmethods__["entry_point"] = _swigfaiss.HNSW_entry_point_get
if _newclass:entry_point = _swig_property(_swigfaiss.HNSW_entry_point_get, _swigfaiss.HNSW_entry_point_set)
__swig_setmethods__["rng"] = _swigfaiss.HNSW_rng_set
__swig_getmethods__["rng"] = _swigfaiss.HNSW_rng_get
if _newclass:rng = _swig_property(_swigfaiss.HNSW_rng_get, _swigfaiss.HNSW_rng_set)
__swig_setmethods__["max_level"] = _swigfaiss.HNSW_max_level_set
__swig_getmethods__["max_level"] = _swigfaiss.HNSW_max_level_get
if _newclass:max_level = _swig_property(_swigfaiss.HNSW_max_level_get, _swigfaiss.HNSW_max_level_set)
__swig_setmethods__["efConstruction"] = _swigfaiss.HNSW_efConstruction_set
__swig_getmethods__["efConstruction"] = _swigfaiss.HNSW_efConstruction_get
if _newclass:efConstruction = _swig_property(_swigfaiss.HNSW_efConstruction_get, _swigfaiss.HNSW_efConstruction_set)
__swig_setmethods__["efSearch"] = _swigfaiss.HNSW_efSearch_set
__swig_getmethods__["efSearch"] = _swigfaiss.HNSW_efSearch_get
if _newclass:efSearch = _swig_property(_swigfaiss.HNSW_efSearch_get, _swigfaiss.HNSW_efSearch_set)
__swig_setmethods__["check_relative_distance"] = _swigfaiss.HNSW_check_relative_distance_set
__swig_getmethods__["check_relative_distance"] = _swigfaiss.HNSW_check_relative_distance_get
if _newclass:check_relative_distance = _swig_property(_swigfaiss.HNSW_check_relative_distance_get, _swigfaiss.HNSW_check_relative_distance_set)
__swig_setmethods__["upper_beam"] = _swigfaiss.HNSW_upper_beam_set
__swig_getmethods__["upper_beam"] = _swigfaiss.HNSW_upper_beam_get
if _newclass:upper_beam = _swig_property(_swigfaiss.HNSW_upper_beam_get, _swigfaiss.HNSW_upper_beam_set)
def set_default_probas(self, *args): return _swigfaiss.HNSW_set_default_probas(self, *args)
def set_nb_neighbors(self, *args): return _swigfaiss.HNSW_set_nb_neighbors(self, *args)
def nb_neighbors(self, *args): return _swigfaiss.HNSW_nb_neighbors(self, *args)
def cum_nb_neighbors(self, *args): return _swigfaiss.HNSW_cum_nb_neighbors(self, *args)
def neighbor_range(self, *args): return _swigfaiss.HNSW_neighbor_range(self, *args)
def __init__(self, M=32):
this = _swigfaiss.new_HNSW(M)
try: self.this.append(this)
except: self.this = this
def random_level(self): return _swigfaiss.HNSW_random_level(self)
def fill_with_random_links(self, *args): return _swigfaiss.HNSW_fill_with_random_links(self, *args)
def add_with_locks(self, *args): return _swigfaiss.HNSW_add_with_locks(self, *args)
def search(self, *args): return _swigfaiss.HNSW_search(self, *args)
def reset(self): return _swigfaiss.HNSW_reset(self)
def clear_neighbor_tables(self, *args): return _swigfaiss.HNSW_clear_neighbor_tables(self, *args)
def print_neighbor_stats(self, *args): return _swigfaiss.HNSW_print_neighbor_stats(self, *args)
__swig_destroy__ = _swigfaiss.delete_HNSW
__del__ = lambda self : None;
HNSW_swigregister = _swigfaiss.HNSW_swigregister
HNSW_swigregister(HNSW)
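# Note: HNSW is the raw graph structure embedded in the IndexHNSW* classes
# below. The knobs usually tuned from Python are the fields wrapped above:
# `efConstruction` (beam width while building) and `efSearch` (beam width at
# query time), reached as `index.hnsw.efSearch` on an IndexHNSW instance.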
class HNSWStats(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HNSWStats, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HNSWStats, name)
__repr__ = _swig_repr
__swig_setmethods__["n1"] = _swigfaiss.HNSWStats_n1_set
__swig_getmethods__["n1"] = _swigfaiss.HNSWStats_n1_get
if _newclass:n1 = _swig_property(_swigfaiss.HNSWStats_n1_get, _swigfaiss.HNSWStats_n1_set)
__swig_setmethods__["n2"] = _swigfaiss.HNSWStats_n2_set
__swig_getmethods__["n2"] = _swigfaiss.HNSWStats_n2_get
if _newclass:n2 = _swig_property(_swigfaiss.HNSWStats_n2_get, _swigfaiss.HNSWStats_n2_set)
__swig_setmethods__["n3"] = _swigfaiss.HNSWStats_n3_set
__swig_getmethods__["n3"] = _swigfaiss.HNSWStats_n3_get
if _newclass:n3 = _swig_property(_swigfaiss.HNSWStats_n3_get, _swigfaiss.HNSWStats_n3_set)
__swig_setmethods__["ndis"] = _swigfaiss.HNSWStats_ndis_set
__swig_getmethods__["ndis"] = _swigfaiss.HNSWStats_ndis_get
if _newclass:ndis = _swig_property(_swigfaiss.HNSWStats_ndis_get, _swigfaiss.HNSWStats_ndis_set)
__swig_setmethods__["nreorder"] = _swigfaiss.HNSWStats_nreorder_set
__swig_getmethods__["nreorder"] = _swigfaiss.HNSWStats_nreorder_get
if _newclass:nreorder = _swig_property(_swigfaiss.HNSWStats_nreorder_get, _swigfaiss.HNSWStats_nreorder_set)
__swig_setmethods__["view"] = _swigfaiss.HNSWStats_view_set
__swig_getmethods__["view"] = _swigfaiss.HNSWStats_view_get
if _newclass:view = _swig_property(_swigfaiss.HNSWStats_view_get, _swigfaiss.HNSWStats_view_set)
def __init__(self):
this = _swigfaiss.new_HNSWStats()
try: self.this.append(this)
except: self.this = this
def reset(self): return _swigfaiss.HNSWStats_reset(self)
__swig_destroy__ = _swigfaiss.delete_HNSWStats
__del__ = lambda self : None;
HNSWStats_swigregister = _swigfaiss.HNSWStats_swigregister
HNSWStats_swigregister(HNSWStats)
class ReconstructFromNeighbors(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ReconstructFromNeighbors, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ReconstructFromNeighbors, name)
__repr__ = _swig_repr
__swig_getmethods__["index"] = _swigfaiss.ReconstructFromNeighbors_index_get
if _newclass:index = _swig_property(_swigfaiss.ReconstructFromNeighbors_index_get)
__swig_setmethods__["M"] = _swigfaiss.ReconstructFromNeighbors_M_set
__swig_getmethods__["M"] = _swigfaiss.ReconstructFromNeighbors_M_get
if _newclass:M = _swig_property(_swigfaiss.ReconstructFromNeighbors_M_get, _swigfaiss.ReconstructFromNeighbors_M_set)
__swig_setmethods__["k"] = _swigfaiss.ReconstructFromNeighbors_k_set
__swig_getmethods__["k"] = _swigfaiss.ReconstructFromNeighbors_k_get
if _newclass:k = _swig_property(_swigfaiss.ReconstructFromNeighbors_k_get, _swigfaiss.ReconstructFromNeighbors_k_set)
__swig_setmethods__["nsq"] = _swigfaiss.ReconstructFromNeighbors_nsq_set
__swig_getmethods__["nsq"] = _swigfaiss.ReconstructFromNeighbors_nsq_get
if _newclass:nsq = _swig_property(_swigfaiss.ReconstructFromNeighbors_nsq_get, _swigfaiss.ReconstructFromNeighbors_nsq_set)
__swig_setmethods__["code_size"] = _swigfaiss.ReconstructFromNeighbors_code_size_set
__swig_getmethods__["code_size"] = _swigfaiss.ReconstructFromNeighbors_code_size_get
if _newclass:code_size = _swig_property(_swigfaiss.ReconstructFromNeighbors_code_size_get, _swigfaiss.ReconstructFromNeighbors_code_size_set)
__swig_setmethods__["k_reorder"] = _swigfaiss.ReconstructFromNeighbors_k_reorder_set
__swig_getmethods__["k_reorder"] = _swigfaiss.ReconstructFromNeighbors_k_reorder_get
if _newclass:k_reorder = _swig_property(_swigfaiss.ReconstructFromNeighbors_k_reorder_get, _swigfaiss.ReconstructFromNeighbors_k_reorder_set)
__swig_setmethods__["codebook"] = _swigfaiss.ReconstructFromNeighbors_codebook_set
__swig_getmethods__["codebook"] = _swigfaiss.ReconstructFromNeighbors_codebook_get
if _newclass:codebook = _swig_property(_swigfaiss.ReconstructFromNeighbors_codebook_get, _swigfaiss.ReconstructFromNeighbors_codebook_set)
__swig_setmethods__["codes"] = _swigfaiss.ReconstructFromNeighbors_codes_set
__swig_getmethods__["codes"] = _swigfaiss.ReconstructFromNeighbors_codes_get
if _newclass:codes = _swig_property(_swigfaiss.ReconstructFromNeighbors_codes_get, _swigfaiss.ReconstructFromNeighbors_codes_set)
__swig_setmethods__["ntotal"] = _swigfaiss.ReconstructFromNeighbors_ntotal_set
__swig_getmethods__["ntotal"] = _swigfaiss.ReconstructFromNeighbors_ntotal_get
if _newclass:ntotal = _swig_property(_swigfaiss.ReconstructFromNeighbors_ntotal_get, _swigfaiss.ReconstructFromNeighbors_ntotal_set)
__swig_setmethods__["d"] = _swigfaiss.ReconstructFromNeighbors_d_set
__swig_getmethods__["d"] = _swigfaiss.ReconstructFromNeighbors_d_get
if _newclass:d = _swig_property(_swigfaiss.ReconstructFromNeighbors_d_get, _swigfaiss.ReconstructFromNeighbors_d_set)
__swig_setmethods__["dsub"] = _swigfaiss.ReconstructFromNeighbors_dsub_set
__swig_getmethods__["dsub"] = _swigfaiss.ReconstructFromNeighbors_dsub_get
if _newclass:dsub = _swig_property(_swigfaiss.ReconstructFromNeighbors_dsub_get, _swigfaiss.ReconstructFromNeighbors_dsub_set)
def __init__(self, *args):
this = _swigfaiss.new_ReconstructFromNeighbors(*args)
try: self.this.append(this)
except: self.this = this
def add_codes(self, *args): return _swigfaiss.ReconstructFromNeighbors_add_codes(self, *args)
def compute_distances(self, *args): return _swigfaiss.ReconstructFromNeighbors_compute_distances(self, *args)
def estimate_code(self, *args): return _swigfaiss.ReconstructFromNeighbors_estimate_code(self, *args)
def reconstruct(self, *args): return _swigfaiss.ReconstructFromNeighbors_reconstruct(self, *args)
def reconstruct_n(self, *args): return _swigfaiss.ReconstructFromNeighbors_reconstruct_n(self, *args)
def get_neighbor_table(self, *args): return _swigfaiss.ReconstructFromNeighbors_get_neighbor_table(self, *args)
__swig_destroy__ = _swigfaiss.delete_ReconstructFromNeighbors
__del__ = lambda self : None;
ReconstructFromNeighbors_swigregister = _swigfaiss.ReconstructFromNeighbors_swigregister
ReconstructFromNeighbors_swigregister(ReconstructFromNeighbors)
class IndexHNSW(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexHNSW, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexHNSW, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_setmethods__["hnsw"] = _swigfaiss.IndexHNSW_hnsw_set
__swig_getmethods__["hnsw"] = _swigfaiss.IndexHNSW_hnsw_get
if _newclass:hnsw = _swig_property(_swigfaiss.IndexHNSW_hnsw_get, _swigfaiss.IndexHNSW_hnsw_set)
__swig_setmethods__["own_fields"] = _swigfaiss.IndexHNSW_own_fields_set
__swig_getmethods__["own_fields"] = _swigfaiss.IndexHNSW_own_fields_get
if _newclass:own_fields = _swig_property(_swigfaiss.IndexHNSW_own_fields_get, _swigfaiss.IndexHNSW_own_fields_set)
__swig_setmethods__["storage"] = _swigfaiss.IndexHNSW_storage_set
__swig_getmethods__["storage"] = _swigfaiss.IndexHNSW_storage_get
if _newclass:storage = _swig_property(_swigfaiss.IndexHNSW_storage_get, _swigfaiss.IndexHNSW_storage_set)
__swig_setmethods__["reconstruct_from_neighbors"] = _swigfaiss.IndexHNSW_reconstruct_from_neighbors_set
__swig_getmethods__["reconstruct_from_neighbors"] = _swigfaiss.IndexHNSW_reconstruct_from_neighbors_get
if _newclass:reconstruct_from_neighbors = _swig_property(_swigfaiss.IndexHNSW_reconstruct_from_neighbors_get, _swigfaiss.IndexHNSW_reconstruct_from_neighbors_set)
__swig_destroy__ = _swigfaiss.delete_IndexHNSW
__del__ = lambda self : None;
def get_distance_computer(self): return _swigfaiss.IndexHNSW_get_distance_computer(self)
def add(self, *args): return _swigfaiss.IndexHNSW_add(self, *args)
def train(self, *args): return _swigfaiss.IndexHNSW_train(self, *args)
def search(self, *args): return _swigfaiss.IndexHNSW_search(self, *args)
def reconstruct(self, *args): return _swigfaiss.IndexHNSW_reconstruct(self, *args)
def reset(self): return _swigfaiss.IndexHNSW_reset(self)
def shrink_level_0_neighbors(self, *args): return _swigfaiss.IndexHNSW_shrink_level_0_neighbors(self, *args)
def search_level_0(self, *args): return _swigfaiss.IndexHNSW_search_level_0(self, *args)
def init_level_0_from_knngraph(self, *args): return _swigfaiss.IndexHNSW_init_level_0_from_knngraph(self, *args)
def init_level_0_from_entry_points(self, *args): return _swigfaiss.IndexHNSW_init_level_0_from_entry_points(self, *args)
def reorder_links(self): return _swigfaiss.IndexHNSW_reorder_links(self)
def link_singletons(self): return _swigfaiss.IndexHNSW_link_singletons(self)
IndexHNSW_swigregister = _swigfaiss.IndexHNSW_swigregister
IndexHNSW_swigregister(IndexHNSW)
class IndexHNSWFlat(IndexHNSW):
__swig_setmethods__ = {}
for _s in [IndexHNSW]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexHNSWFlat, name, value)
__swig_getmethods__ = {}
for _s in [IndexHNSW]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexHNSWFlat, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _swigfaiss.new_IndexHNSWFlat(*args)
try: self.this.append(this)
except: self.this = this
def get_distance_computer(self): return _swigfaiss.IndexHNSWFlat_get_distance_computer(self)
__swig_destroy__ = _swigfaiss.delete_IndexHNSWFlat
__del__ = lambda self : None;
IndexHNSWFlat_swigregister = _swigfaiss.IndexHNSWFlat_swigregister
IndexHNSWFlat_swigregister(IndexHNSWFlat)
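# Usage sketch for IndexHNSWFlat (HNSW graph over raw vectors; no separate
# training step). Hedged sketch, assuming the numpy-aware `faiss` wrapper.
#
#   import numpy as np, faiss
#   d = 64
#   xb = np.random.random((10000, d)).astype('float32')
#   index = faiss.IndexHNSWFlat(d, 32)        # 32 neighbours per node
#   index.hnsw.efConstruction = 40            # build-time accuracy/speed trade-off
#   index.add(xb)                             # graph is built incrementally
#   index.hnsw.efSearch = 64                  # query-time accuracy/speed trade-off
#   D, I = index.search(xb[:5], 4)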
class IndexHNSWPQ(IndexHNSW):
__swig_setmethods__ = {}
for _s in [IndexHNSW]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexHNSWPQ, name, value)
__swig_getmethods__ = {}
for _s in [IndexHNSW]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexHNSWPQ, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _swigfaiss.new_IndexHNSWPQ(*args)
try: self.this.append(this)
except: self.this = this
def train(self, *args): return _swigfaiss.IndexHNSWPQ_train(self, *args)
def get_distance_computer(self): return _swigfaiss.IndexHNSWPQ_get_distance_computer(self)
__swig_destroy__ = _swigfaiss.delete_IndexHNSWPQ
__del__ = lambda self : None;
IndexHNSWPQ_swigregister = _swigfaiss.IndexHNSWPQ_swigregister
IndexHNSWPQ_swigregister(IndexHNSWPQ)
class IndexHNSWSQ(IndexHNSW):
__swig_setmethods__ = {}
for _s in [IndexHNSW]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexHNSWSQ, name, value)
__swig_getmethods__ = {}
for _s in [IndexHNSW]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexHNSWSQ, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _swigfaiss.new_IndexHNSWSQ(*args)
try: self.this.append(this)
except: self.this = this
def get_distance_computer(self): return _swigfaiss.IndexHNSWSQ_get_distance_computer(self)
__swig_destroy__ = _swigfaiss.delete_IndexHNSWSQ
__del__ = lambda self : None;
IndexHNSWSQ_swigregister = _swigfaiss.IndexHNSWSQ_swigregister
IndexHNSWSQ_swigregister(IndexHNSWSQ)
class IndexHNSW2Level(IndexHNSW):
__swig_setmethods__ = {}
for _s in [IndexHNSW]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexHNSW2Level, name, value)
__swig_getmethods__ = {}
for _s in [IndexHNSW]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexHNSW2Level, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _swigfaiss.new_IndexHNSW2Level(*args)
try: self.this.append(this)
except: self.this = this
def get_distance_computer(self): return _swigfaiss.IndexHNSW2Level_get_distance_computer(self)
def flip_to_ivf(self): return _swigfaiss.IndexHNSW2Level_flip_to_ivf(self)
def search(self, *args): return _swigfaiss.IndexHNSW2Level_search(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexHNSW2Level
__del__ = lambda self : None;
IndexHNSW2Level_swigregister = _swigfaiss.IndexHNSW2Level_swigregister
IndexHNSW2Level_swigregister(IndexHNSW2Level)
class IndexIVFPQ(IndexIVF):
__swig_setmethods__ = {}
for _s in [IndexIVF]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVFPQ, name, value)
__swig_getmethods__ = {}
for _s in [IndexIVF]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVFPQ, name)
__repr__ = _swig_repr
__swig_setmethods__["by_residual"] = _swigfaiss.IndexIVFPQ_by_residual_set
__swig_getmethods__["by_residual"] = _swigfaiss.IndexIVFPQ_by_residual_get
if _newclass:by_residual = _swig_property(_swigfaiss.IndexIVFPQ_by_residual_get, _swigfaiss.IndexIVFPQ_by_residual_set)
__swig_setmethods__["use_precomputed_table"] = _swigfaiss.IndexIVFPQ_use_precomputed_table_set
__swig_getmethods__["use_precomputed_table"] = _swigfaiss.IndexIVFPQ_use_precomputed_table_get
if _newclass:use_precomputed_table = _swig_property(_swigfaiss.IndexIVFPQ_use_precomputed_table_get, _swigfaiss.IndexIVFPQ_use_precomputed_table_set)
__swig_setmethods__["pq"] = _swigfaiss.IndexIVFPQ_pq_set
__swig_getmethods__["pq"] = _swigfaiss.IndexIVFPQ_pq_get
if _newclass:pq = _swig_property(_swigfaiss.IndexIVFPQ_pq_get, _swigfaiss.IndexIVFPQ_pq_set)
__swig_setmethods__["do_polysemous_training"] = _swigfaiss.IndexIVFPQ_do_polysemous_training_set
__swig_getmethods__["do_polysemous_training"] = _swigfaiss.IndexIVFPQ_do_polysemous_training_get
if _newclass:do_polysemous_training = _swig_property(_swigfaiss.IndexIVFPQ_do_polysemous_training_get, _swigfaiss.IndexIVFPQ_do_polysemous_training_set)
__swig_setmethods__["polysemous_training"] = _swigfaiss.IndexIVFPQ_polysemous_training_set
__swig_getmethods__["polysemous_training"] = _swigfaiss.IndexIVFPQ_polysemous_training_get
if _newclass:polysemous_training = _swig_property(_swigfaiss.IndexIVFPQ_polysemous_training_get, _swigfaiss.IndexIVFPQ_polysemous_training_set)
__swig_setmethods__["scan_table_threshold"] = _swigfaiss.IndexIVFPQ_scan_table_threshold_set
__swig_getmethods__["scan_table_threshold"] = _swigfaiss.IndexIVFPQ_scan_table_threshold_get
if _newclass:scan_table_threshold = _swig_property(_swigfaiss.IndexIVFPQ_scan_table_threshold_get, _swigfaiss.IndexIVFPQ_scan_table_threshold_set)
__swig_setmethods__["polysemous_ht"] = _swigfaiss.IndexIVFPQ_polysemous_ht_set
__swig_getmethods__["polysemous_ht"] = _swigfaiss.IndexIVFPQ_polysemous_ht_get
if _newclass:polysemous_ht = _swig_property(_swigfaiss.IndexIVFPQ_polysemous_ht_get, _swigfaiss.IndexIVFPQ_polysemous_ht_set)
__swig_setmethods__["precomputed_table"] = _swigfaiss.IndexIVFPQ_precomputed_table_set
__swig_getmethods__["precomputed_table"] = _swigfaiss.IndexIVFPQ_precomputed_table_get
if _newclass:precomputed_table = _swig_property(_swigfaiss.IndexIVFPQ_precomputed_table_get, _swigfaiss.IndexIVFPQ_precomputed_table_set)
def add_with_ids(self, *args): return _swigfaiss.IndexIVFPQ_add_with_ids(self, *args)
def add_core_o(self, *args): return _swigfaiss.IndexIVFPQ_add_core_o(self, *args)
def train_residual(self, *args): return _swigfaiss.IndexIVFPQ_train_residual(self, *args)
def train_residual_o(self, *args): return _swigfaiss.IndexIVFPQ_train_residual_o(self, *args)
def reconstruct_from_offset(self, *args): return _swigfaiss.IndexIVFPQ_reconstruct_from_offset(self, *args)
def find_duplicates(self, *args): return _swigfaiss.IndexIVFPQ_find_duplicates(self, *args)
def encode(self, *args): return _swigfaiss.IndexIVFPQ_encode(self, *args)
def encode_multiple(self, *args): return _swigfaiss.IndexIVFPQ_encode_multiple(self, *args)
def decode_multiple(self, *args): return _swigfaiss.IndexIVFPQ_decode_multiple(self, *args)
def search_preassigned(self, *args): return _swigfaiss.IndexIVFPQ_search_preassigned(self, *args)
def precompute_table(self): return _swigfaiss.IndexIVFPQ_precompute_table(self)
def __init__(self, *args):
this = _swigfaiss.new_IndexIVFPQ(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_IndexIVFPQ
__del__ = lambda self : None;
IndexIVFPQ_swigregister = _swigfaiss.IndexIVFPQ_swigregister
IndexIVFPQ_swigregister(IndexIVFPQ)
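# Usage sketch for IndexIVFPQ (inverted lists storing PQ codes, the classic
# large-scale setup). Hedged sketch, assuming the numpy-aware `faiss` wrapper.
#
#   import numpy as np, faiss
#   d, nlist = 64, 100
#   xb = np.random.random((10000, d)).astype('float32')
#   quantizer = faiss.IndexFlatL2(d)
#   index = faiss.IndexIVFPQ(quantizer, d, nlist, 8, 8)  # 8 sub-quantizers x 8 bits
#   index.train(xb)                           # trains coarse quantizer and PQ
#   index.add(xb)
#   index.nprobe = 8
#   D, I = index.search(xb[:5], 4)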
class IndexIVFPQStats(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVFPQStats, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVFPQStats, name)
__repr__ = _swig_repr
__swig_setmethods__["nq"] = _swigfaiss.IndexIVFPQStats_nq_set
__swig_getmethods__["nq"] = _swigfaiss.IndexIVFPQStats_nq_get
if _newclass:nq = _swig_property(_swigfaiss.IndexIVFPQStats_nq_get, _swigfaiss.IndexIVFPQStats_nq_set)
__swig_setmethods__["nlist"] = _swigfaiss.IndexIVFPQStats_nlist_set
__swig_getmethods__["nlist"] = _swigfaiss.IndexIVFPQStats_nlist_get
if _newclass:nlist = _swig_property(_swigfaiss.IndexIVFPQStats_nlist_get, _swigfaiss.IndexIVFPQStats_nlist_set)
__swig_setmethods__["ncode"] = _swigfaiss.IndexIVFPQStats_ncode_set
__swig_getmethods__["ncode"] = _swigfaiss.IndexIVFPQStats_ncode_get
if _newclass:ncode = _swig_property(_swigfaiss.IndexIVFPQStats_ncode_get, _swigfaiss.IndexIVFPQStats_ncode_set)
__swig_setmethods__["nrefine"] = _swigfaiss.IndexIVFPQStats_nrefine_set
__swig_getmethods__["nrefine"] = _swigfaiss.IndexIVFPQStats_nrefine_get
if _newclass:nrefine = _swig_property(_swigfaiss.IndexIVFPQStats_nrefine_get, _swigfaiss.IndexIVFPQStats_nrefine_set)
__swig_setmethods__["n_hamming_pass"] = _swigfaiss.IndexIVFPQStats_n_hamming_pass_set
__swig_getmethods__["n_hamming_pass"] = _swigfaiss.IndexIVFPQStats_n_hamming_pass_get
if _newclass:n_hamming_pass = _swig_property(_swigfaiss.IndexIVFPQStats_n_hamming_pass_get, _swigfaiss.IndexIVFPQStats_n_hamming_pass_set)
__swig_setmethods__["assign_cycles"] = _swigfaiss.IndexIVFPQStats_assign_cycles_set
__swig_getmethods__["assign_cycles"] = _swigfaiss.IndexIVFPQStats_assign_cycles_get
if _newclass:assign_cycles = _swig_property(_swigfaiss.IndexIVFPQStats_assign_cycles_get, _swigfaiss.IndexIVFPQStats_assign_cycles_set)
__swig_setmethods__["search_cycles"] = _swigfaiss.IndexIVFPQStats_search_cycles_set
__swig_getmethods__["search_cycles"] = _swigfaiss.IndexIVFPQStats_search_cycles_get
if _newclass:search_cycles = _swig_property(_swigfaiss.IndexIVFPQStats_search_cycles_get, _swigfaiss.IndexIVFPQStats_search_cycles_set)
__swig_setmethods__["refine_cycles"] = _swigfaiss.IndexIVFPQStats_refine_cycles_set
__swig_getmethods__["refine_cycles"] = _swigfaiss.IndexIVFPQStats_refine_cycles_get
if _newclass:refine_cycles = _swig_property(_swigfaiss.IndexIVFPQStats_refine_cycles_get, _swigfaiss.IndexIVFPQStats_refine_cycles_set)
__swig_setmethods__["init_query_cycles"] = _swigfaiss.IndexIVFPQStats_init_query_cycles_set
__swig_getmethods__["init_query_cycles"] = _swigfaiss.IndexIVFPQStats_init_query_cycles_get
if _newclass:init_query_cycles = _swig_property(_swigfaiss.IndexIVFPQStats_init_query_cycles_get, _swigfaiss.IndexIVFPQStats_init_query_cycles_set)
__swig_setmethods__["init_list_cycles"] = _swigfaiss.IndexIVFPQStats_init_list_cycles_set
__swig_getmethods__["init_list_cycles"] = _swigfaiss.IndexIVFPQStats_init_list_cycles_get
if _newclass:init_list_cycles = _swig_property(_swigfaiss.IndexIVFPQStats_init_list_cycles_get, _swigfaiss.IndexIVFPQStats_init_list_cycles_set)
__swig_setmethods__["scan_cycles"] = _swigfaiss.IndexIVFPQStats_scan_cycles_set
__swig_getmethods__["scan_cycles"] = _swigfaiss.IndexIVFPQStats_scan_cycles_get
if _newclass:scan_cycles = _swig_property(_swigfaiss.IndexIVFPQStats_scan_cycles_get, _swigfaiss.IndexIVFPQStats_scan_cycles_set)
__swig_setmethods__["heap_cycles"] = _swigfaiss.IndexIVFPQStats_heap_cycles_set
__swig_getmethods__["heap_cycles"] = _swigfaiss.IndexIVFPQStats_heap_cycles_get
if _newclass:heap_cycles = _swig_property(_swigfaiss.IndexIVFPQStats_heap_cycles_get, _swigfaiss.IndexIVFPQStats_heap_cycles_set)
def __init__(self):
this = _swigfaiss.new_IndexIVFPQStats()
try: self.this.append(this)
except: self.this = this
def reset(self): return _swigfaiss.IndexIVFPQStats_reset(self)
__swig_destroy__ = _swigfaiss.delete_IndexIVFPQStats
__del__ = lambda self : None;
IndexIVFPQStats_swigregister = _swigfaiss.IndexIVFPQStats_swigregister
IndexIVFPQStats_swigregister(IndexIVFPQStats)
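# Note: the library keeps a process-wide instance of these counters. In the
# Python wrapper it is conventionally reached through SWIG's `cvar` object,
# e.g. `faiss.cvar.indexIVFPQ_stats` (the exact variable name is an
# assumption), and can be cleared with `.reset()` between measurements.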
class IndexIVFPQR(IndexIVFPQ):
__swig_setmethods__ = {}
for _s in [IndexIVFPQ]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVFPQR, name, value)
__swig_getmethods__ = {}
for _s in [IndexIVFPQ]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVFPQR, name)
__repr__ = _swig_repr
__swig_setmethods__["refine_pq"] = _swigfaiss.IndexIVFPQR_refine_pq_set
__swig_getmethods__["refine_pq"] = _swigfaiss.IndexIVFPQR_refine_pq_get
if _newclass:refine_pq = _swig_property(_swigfaiss.IndexIVFPQR_refine_pq_get, _swigfaiss.IndexIVFPQR_refine_pq_set)
__swig_setmethods__["refine_codes"] = _swigfaiss.IndexIVFPQR_refine_codes_set
__swig_getmethods__["refine_codes"] = _swigfaiss.IndexIVFPQR_refine_codes_get
if _newclass:refine_codes = _swig_property(_swigfaiss.IndexIVFPQR_refine_codes_get, _swigfaiss.IndexIVFPQR_refine_codes_set)
__swig_setmethods__["k_factor"] = _swigfaiss.IndexIVFPQR_k_factor_set
__swig_getmethods__["k_factor"] = _swigfaiss.IndexIVFPQR_k_factor_get
if _newclass:k_factor = _swig_property(_swigfaiss.IndexIVFPQR_k_factor_get, _swigfaiss.IndexIVFPQR_k_factor_set)
def reset(self): return _swigfaiss.IndexIVFPQR_reset(self)
def remove_ids(self, *args): return _swigfaiss.IndexIVFPQR_remove_ids(self, *args)
def train_residual(self, *args): return _swigfaiss.IndexIVFPQR_train_residual(self, *args)
def add_with_ids(self, *args): return _swigfaiss.IndexIVFPQR_add_with_ids(self, *args)
def add_core(self, *args): return _swigfaiss.IndexIVFPQR_add_core(self, *args)
def reconstruct_from_offset(self, *args): return _swigfaiss.IndexIVFPQR_reconstruct_from_offset(self, *args)
def merge_from(self, *args): return _swigfaiss.IndexIVFPQR_merge_from(self, *args)
def search_preassigned(self, *args): return _swigfaiss.IndexIVFPQR_search_preassigned(self, *args)
def __init__(self, *args):
this = _swigfaiss.new_IndexIVFPQR(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_IndexIVFPQR
__del__ = lambda self : None;
IndexIVFPQR_swigregister = _swigfaiss.IndexIVFPQR_swigregister
IndexIVFPQR_swigregister(IndexIVFPQR)
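# Usage sketch for IndexIVFPQR (IVFPQ plus a second PQ re-ranking the
# residuals). Hedged: the argument order below follows the C++ constructor
# (quantizer, d, nlist, M, nbits, M_refine, nbits_refine) and may vary
# across versions.
#
#   index = faiss.IndexIVFPQR(quantizer, d, 100, 8, 8, 16, 8)  # quantizer, d, xb as above
#   index.train(xb); index.add(xb)
#   index.k_factor = 4.0                      # shortlist = k_factor * k before refine
#   D, I = index.search(xb[:5], 4)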
class IndexIVFPQCompact(IndexIVFPQ):
__swig_setmethods__ = {}
for _s in [IndexIVFPQ]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIVFPQCompact, name, value)
__swig_getmethods__ = {}
for _s in [IndexIVFPQ]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIVFPQCompact, name)
__repr__ = _swig_repr
Alloc_type_none = _swigfaiss.IndexIVFPQCompact_Alloc_type_none
Alloc_type_new = _swigfaiss.IndexIVFPQCompact_Alloc_type_new
Alloc_type_mmap = _swigfaiss.IndexIVFPQCompact_Alloc_type_mmap
__swig_setmethods__["limits"] = _swigfaiss.IndexIVFPQCompact_limits_set
__swig_getmethods__["limits"] = _swigfaiss.IndexIVFPQCompact_limits_get
if _newclass:limits = _swig_property(_swigfaiss.IndexIVFPQCompact_limits_get, _swigfaiss.IndexIVFPQCompact_limits_set)
__swig_setmethods__["compact_ids"] = _swigfaiss.IndexIVFPQCompact_compact_ids_set
__swig_getmethods__["compact_ids"] = _swigfaiss.IndexIVFPQCompact_compact_ids_get
if _newclass:compact_ids = _swig_property(_swigfaiss.IndexIVFPQCompact_compact_ids_get, _swigfaiss.IndexIVFPQCompact_compact_ids_set)
__swig_setmethods__["compact_codes"] = _swigfaiss.IndexIVFPQCompact_compact_codes_set
__swig_getmethods__["compact_codes"] = _swigfaiss.IndexIVFPQCompact_compact_codes_get
if _newclass:compact_codes = _swig_property(_swigfaiss.IndexIVFPQCompact_compact_codes_get, _swigfaiss.IndexIVFPQCompact_compact_codes_set)
__swig_setmethods__["mmap_buffer"] = _swigfaiss.IndexIVFPQCompact_mmap_buffer_set
__swig_getmethods__["mmap_buffer"] = _swigfaiss.IndexIVFPQCompact_mmap_buffer_get
if _newclass:mmap_buffer = _swig_property(_swigfaiss.IndexIVFPQCompact_mmap_buffer_get, _swigfaiss.IndexIVFPQCompact_mmap_buffer_set)
__swig_setmethods__["mmap_length"] = _swigfaiss.IndexIVFPQCompact_mmap_length_set
__swig_getmethods__["mmap_length"] = _swigfaiss.IndexIVFPQCompact_mmap_length_get
if _newclass:mmap_length = _swig_property(_swigfaiss.IndexIVFPQCompact_mmap_length_get, _swigfaiss.IndexIVFPQCompact_mmap_length_set)
def search_preassigned(self, *args): return _swigfaiss.IndexIVFPQCompact_search_preassigned(self, *args)
def add(self, *args): return _swigfaiss.IndexIVFPQCompact_add(self, *args)
def reset(self): return _swigfaiss.IndexIVFPQCompact_reset(self)
def train(self, *args): return _swigfaiss.IndexIVFPQCompact_train(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexIVFPQCompact
__del__ = lambda self : None;
def __init__(self, *args):
this = _swigfaiss.new_IndexIVFPQCompact(*args)
try: self.this.append(this)
except: self.this = this
IndexIVFPQCompact_swigregister = _swigfaiss.IndexIVFPQCompact_swigregister
IndexIVFPQCompact_swigregister(IndexIVFPQCompact)
class Index2Layer(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, Index2Layer, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, Index2Layer, name)
__repr__ = _swig_repr
__swig_setmethods__["q1"] = _swigfaiss.Index2Layer_q1_set
__swig_getmethods__["q1"] = _swigfaiss.Index2Layer_q1_get
if _newclass:q1 = _swig_property(_swigfaiss.Index2Layer_q1_get, _swigfaiss.Index2Layer_q1_set)
__swig_setmethods__["pq"] = _swigfaiss.Index2Layer_pq_set
__swig_getmethods__["pq"] = _swigfaiss.Index2Layer_pq_get
if _newclass:pq = _swig_property(_swigfaiss.Index2Layer_pq_get, _swigfaiss.Index2Layer_pq_set)
__swig_setmethods__["codes"] = _swigfaiss.Index2Layer_codes_set
__swig_getmethods__["codes"] = _swigfaiss.Index2Layer_codes_get
if _newclass:codes = _swig_property(_swigfaiss.Index2Layer_codes_get, _swigfaiss.Index2Layer_codes_set)
__swig_setmethods__["code_size_1"] = _swigfaiss.Index2Layer_code_size_1_set
__swig_getmethods__["code_size_1"] = _swigfaiss.Index2Layer_code_size_1_get
if _newclass:code_size_1 = _swig_property(_swigfaiss.Index2Layer_code_size_1_get, _swigfaiss.Index2Layer_code_size_1_set)
__swig_setmethods__["code_size_2"] = _swigfaiss.Index2Layer_code_size_2_set
__swig_getmethods__["code_size_2"] = _swigfaiss.Index2Layer_code_size_2_get
if _newclass:code_size_2 = _swig_property(_swigfaiss.Index2Layer_code_size_2_get, _swigfaiss.Index2Layer_code_size_2_set)
__swig_setmethods__["code_size"] = _swigfaiss.Index2Layer_code_size_set
__swig_getmethods__["code_size"] = _swigfaiss.Index2Layer_code_size_get
if _newclass:code_size = _swig_property(_swigfaiss.Index2Layer_code_size_get, _swigfaiss.Index2Layer_code_size_set)
def __init__(self, *args):
this = _swigfaiss.new_Index2Layer(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_Index2Layer
__del__ = lambda self : None;
def train(self, *args): return _swigfaiss.Index2Layer_train(self, *args)
def add(self, *args): return _swigfaiss.Index2Layer_add(self, *args)
def search(self, *args): return _swigfaiss.Index2Layer_search(self, *args)
def reconstruct_n(self, *args): return _swigfaiss.Index2Layer_reconstruct_n(self, *args)
def reconstruct(self, *args): return _swigfaiss.Index2Layer_reconstruct(self, *args)
def reset(self): return _swigfaiss.Index2Layer_reset(self)
def transfer_to_IVFPQ(self, *args): return _swigfaiss.Index2Layer_transfer_to_IVFPQ(self, *args)
Index2Layer_swigregister = _swigfaiss.Index2Layer_swigregister
Index2Layer_swigregister(Index2Layer)
class IndexIDMap(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIDMap, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIDMap, name)
__repr__ = _swig_repr
__swig_setmethods__["index"] = _swigfaiss.IndexIDMap_index_set
__swig_getmethods__["index"] = _swigfaiss.IndexIDMap_index_get
if _newclass:index = _swig_property(_swigfaiss.IndexIDMap_index_get, _swigfaiss.IndexIDMap_index_set)
__swig_setmethods__["own_fields"] = _swigfaiss.IndexIDMap_own_fields_set
__swig_getmethods__["own_fields"] = _swigfaiss.IndexIDMap_own_fields_get
if _newclass:own_fields = _swig_property(_swigfaiss.IndexIDMap_own_fields_get, _swigfaiss.IndexIDMap_own_fields_set)
__swig_setmethods__["id_map"] = _swigfaiss.IndexIDMap_id_map_set
__swig_getmethods__["id_map"] = _swigfaiss.IndexIDMap_id_map_get
if _newclass:id_map = _swig_property(_swigfaiss.IndexIDMap_id_map_get, _swigfaiss.IndexIDMap_id_map_set)
def add_with_ids(self, *args): return _swigfaiss.IndexIDMap_add_with_ids(self, *args)
def add(self, *args): return _swigfaiss.IndexIDMap_add(self, *args)
def search(self, *args): return _swigfaiss.IndexIDMap_search(self, *args)
def train(self, *args): return _swigfaiss.IndexIDMap_train(self, *args)
def reset(self): return _swigfaiss.IndexIDMap_reset(self)
def remove_ids(self, *args): return _swigfaiss.IndexIDMap_remove_ids(self, *args)
def range_search(self, *args): return _swigfaiss.IndexIDMap_range_search(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexIDMap
__del__ = lambda self : None;
def __init__(self, *args):
this = _swigfaiss.new_IndexIDMap(*args)
try: self.this.append(this)
except: self.this = this
IndexIDMap_swigregister = _swigfaiss.IndexIDMap_swigregister
IndexIDMap_swigregister(IndexIDMap)
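# Hedged usage sketch (illustrative comments only, not part of the generated
# bindings; d, n, xb and the id values are hypothetical, and numpy is assumed
# imported as np): IndexIDMap wraps another index so that searches report
# caller-chosen 64-bit ids instead of sequential insertion order.
#
#   sub = IndexFlatL2(d)
#   index = IndexIDMap(sub)
#   ids = np.arange(100, 100 + n).astype('int64')
#   index.add_with_ids(n, swig_ptr(xb), swig_ptr(ids))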
class IndexIDMap2(IndexIDMap):
__swig_setmethods__ = {}
for _s in [IndexIDMap]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexIDMap2, name, value)
__swig_getmethods__ = {}
for _s in [IndexIDMap]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexIDMap2, name)
__repr__ = _swig_repr
__swig_setmethods__["rev_map"] = _swigfaiss.IndexIDMap2_rev_map_set
__swig_getmethods__["rev_map"] = _swigfaiss.IndexIDMap2_rev_map_get
if _newclass:rev_map = _swig_property(_swigfaiss.IndexIDMap2_rev_map_get, _swigfaiss.IndexIDMap2_rev_map_set)
def construct_rev_map(self): return _swigfaiss.IndexIDMap2_construct_rev_map(self)
def add_with_ids(self, *args): return _swigfaiss.IndexIDMap2_add_with_ids(self, *args)
def remove_ids(self, *args): return _swigfaiss.IndexIDMap2_remove_ids(self, *args)
def reconstruct(self, *args): return _swigfaiss.IndexIDMap2_reconstruct(self, *args)
__swig_destroy__ = _swigfaiss.delete_IndexIDMap2
__del__ = lambda self : None;
def __init__(self, *args):
this = _swigfaiss.new_IndexIDMap2(*args)
try: self.this.append(this)
except: self.this = this
IndexIDMap2_swigregister = _swigfaiss.IndexIDMap2_swigregister
IndexIDMap2_swigregister(IndexIDMap2)
class IndexShards(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexShards, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexShards, name)
__repr__ = _swig_repr
__swig_setmethods__["shard_indexes"] = _swigfaiss.IndexShards_shard_indexes_set
__swig_getmethods__["shard_indexes"] = _swigfaiss.IndexShards_shard_indexes_get
if _newclass:shard_indexes = _swig_property(_swigfaiss.IndexShards_shard_indexes_get, _swigfaiss.IndexShards_shard_indexes_set)
__swig_setmethods__["own_fields"] = _swigfaiss.IndexShards_own_fields_set
__swig_getmethods__["own_fields"] = _swigfaiss.IndexShards_own_fields_get
if _newclass:own_fields = _swig_property(_swigfaiss.IndexShards_own_fields_get, _swigfaiss.IndexShards_own_fields_set)
__swig_setmethods__["threaded"] = _swigfaiss.IndexShards_threaded_set
__swig_getmethods__["threaded"] = _swigfaiss.IndexShards_threaded_get
if _newclass:threaded = _swig_property(_swigfaiss.IndexShards_threaded_get, _swigfaiss.IndexShards_threaded_set)
__swig_setmethods__["successive_ids"] = _swigfaiss.IndexShards_successive_ids_set
__swig_getmethods__["successive_ids"] = _swigfaiss.IndexShards_successive_ids_get
if _newclass:successive_ids = _swig_property(_swigfaiss.IndexShards_successive_ids_get, _swigfaiss.IndexShards_successive_ids_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexShards(*args)
try: self.this.append(this)
except: self.this = this
def add_shard(self, *args): return _swigfaiss.IndexShards_add_shard(self, *args)
def sync_with_shard_indexes(self): return _swigfaiss.IndexShards_sync_with_shard_indexes(self)
def at(self, *args): return _swigfaiss.IndexShards_at(self, *args)
def add(self, *args): return _swigfaiss.IndexShards_add(self, *args)
def add_with_ids(self, *args): return _swigfaiss.IndexShards_add_with_ids(self, *args)
def search(self, *args): return _swigfaiss.IndexShards_search(self, *args)
def train(self, *args): return _swigfaiss.IndexShards_train(self, *args)
def reset(self): return _swigfaiss.IndexShards_reset(self)
__swig_destroy__ = _swigfaiss.delete_IndexShards
__del__ = lambda self : None;
IndexShards_swigregister = _swigfaiss.IndexShards_swigregister
IndexShards_swigregister(IndexShards)
class IndexSplitVectors(Index):
__swig_setmethods__ = {}
for _s in [Index]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IndexSplitVectors, name, value)
__swig_getmethods__ = {}
for _s in [Index]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IndexSplitVectors, name)
__repr__ = _swig_repr
__swig_setmethods__["own_fields"] = _swigfaiss.IndexSplitVectors_own_fields_set
__swig_getmethods__["own_fields"] = _swigfaiss.IndexSplitVectors_own_fields_get
if _newclass:own_fields = _swig_property(_swigfaiss.IndexSplitVectors_own_fields_get, _swigfaiss.IndexSplitVectors_own_fields_set)
__swig_setmethods__["threaded"] = _swigfaiss.IndexSplitVectors_threaded_set
__swig_getmethods__["threaded"] = _swigfaiss.IndexSplitVectors_threaded_get
if _newclass:threaded = _swig_property(_swigfaiss.IndexSplitVectors_threaded_get, _swigfaiss.IndexSplitVectors_threaded_set)
__swig_setmethods__["sub_indexes"] = _swigfaiss.IndexSplitVectors_sub_indexes_set
__swig_getmethods__["sub_indexes"] = _swigfaiss.IndexSplitVectors_sub_indexes_get
if _newclass:sub_indexes = _swig_property(_swigfaiss.IndexSplitVectors_sub_indexes_get, _swigfaiss.IndexSplitVectors_sub_indexes_set)
__swig_setmethods__["sum_d"] = _swigfaiss.IndexSplitVectors_sum_d_set
__swig_getmethods__["sum_d"] = _swigfaiss.IndexSplitVectors_sum_d_get
if _newclass:sum_d = _swig_property(_swigfaiss.IndexSplitVectors_sum_d_get, _swigfaiss.IndexSplitVectors_sum_d_set)
def __init__(self, *args):
this = _swigfaiss.new_IndexSplitVectors(*args)
try: self.this.append(this)
except: self.this = this
def add_sub_index(self, *args): return _swigfaiss.IndexSplitVectors_add_sub_index(self, *args)
def sync_with_sub_indexes(self): return _swigfaiss.IndexSplitVectors_sync_with_sub_indexes(self)
def add(self, *args): return _swigfaiss.IndexSplitVectors_add(self, *args)
def search(self, *args): return _swigfaiss.IndexSplitVectors_search(self, *args)
def train(self, *args): return _swigfaiss.IndexSplitVectors_train(self, *args)
def reset(self): return _swigfaiss.IndexSplitVectors_reset(self)
__swig_destroy__ = _swigfaiss.delete_IndexSplitVectors
__del__ = lambda self : None;
IndexSplitVectors_swigregister = _swigfaiss.IndexSplitVectors_swigregister
IndexSplitVectors_swigregister(IndexSplitVectors)
def downcast_index(*args):
return _swigfaiss.downcast_index(*args)
downcast_index = _swigfaiss.downcast_index
def downcast_VectorTransform(*args):
return _swigfaiss.downcast_VectorTransform(*args)
downcast_VectorTransform = _swigfaiss.downcast_VectorTransform
def write_index(*args):
return _swigfaiss.write_index(*args)
write_index = _swigfaiss.write_index
def read_index(*args):
return _swigfaiss.read_index(*args)
read_index = _swigfaiss.read_index
def write_VectorTransform(*args):
return _swigfaiss.write_VectorTransform(*args)
write_VectorTransform = _swigfaiss.write_VectorTransform
def read_VectorTransform(*args):
return _swigfaiss.read_VectorTransform(*args)
read_VectorTransform = _swigfaiss.read_VectorTransform
def read_ProductQuantizer(*args):
return _swigfaiss.read_ProductQuantizer(*args)
read_ProductQuantizer = _swigfaiss.read_ProductQuantizer
def write_ProductQuantizer(*args):
return _swigfaiss.write_ProductQuantizer(*args)
write_ProductQuantizer = _swigfaiss.write_ProductQuantizer
def clone_index(*args):
return _swigfaiss.clone_index(*args)
clone_index = _swigfaiss.clone_index
class Cloner(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Cloner, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Cloner, name)
__repr__ = _swig_repr
def clone_VectorTransform(self, *args): return _swigfaiss.Cloner_clone_VectorTransform(self, *args)
def clone_Index(self, *args): return _swigfaiss.Cloner_clone_Index(self, *args)
def clone_IndexIVF(self, *args): return _swigfaiss.Cloner_clone_IndexIVF(self, *args)
__swig_destroy__ = _swigfaiss.delete_Cloner
__del__ = lambda self : None;
def __init__(self):
this = _swigfaiss.new_Cloner()
try: self.this.append(this)
except: self.this = this
Cloner_swigregister = _swigfaiss.Cloner_swigregister
Cloner_swigregister(Cloner)
class AutoTuneCriterion(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, AutoTuneCriterion, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, AutoTuneCriterion, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_setmethods__["nq"] = _swigfaiss.AutoTuneCriterion_nq_set
__swig_getmethods__["nq"] = _swigfaiss.AutoTuneCriterion_nq_get
if _newclass:nq = _swig_property(_swigfaiss.AutoTuneCriterion_nq_get, _swigfaiss.AutoTuneCriterion_nq_set)
__swig_setmethods__["nnn"] = _swigfaiss.AutoTuneCriterion_nnn_set
__swig_getmethods__["nnn"] = _swigfaiss.AutoTuneCriterion_nnn_get
if _newclass:nnn = _swig_property(_swigfaiss.AutoTuneCriterion_nnn_get, _swigfaiss.AutoTuneCriterion_nnn_set)
__swig_setmethods__["gt_nnn"] = _swigfaiss.AutoTuneCriterion_gt_nnn_set
__swig_getmethods__["gt_nnn"] = _swigfaiss.AutoTuneCriterion_gt_nnn_get
if _newclass:gt_nnn = _swig_property(_swigfaiss.AutoTuneCriterion_gt_nnn_get, _swigfaiss.AutoTuneCriterion_gt_nnn_set)
__swig_setmethods__["gt_D"] = _swigfaiss.AutoTuneCriterion_gt_D_set
__swig_getmethods__["gt_D"] = _swigfaiss.AutoTuneCriterion_gt_D_get
if _newclass:gt_D = _swig_property(_swigfaiss.AutoTuneCriterion_gt_D_get, _swigfaiss.AutoTuneCriterion_gt_D_set)
__swig_setmethods__["gt_I"] = _swigfaiss.AutoTuneCriterion_gt_I_set
__swig_getmethods__["gt_I"] = _swigfaiss.AutoTuneCriterion_gt_I_get
if _newclass:gt_I = _swig_property(_swigfaiss.AutoTuneCriterion_gt_I_get, _swigfaiss.AutoTuneCriterion_gt_I_set)
def set_groundtruth(self, *args): return _swigfaiss.AutoTuneCriterion_set_groundtruth(self, *args)
def evaluate(self, *args): return _swigfaiss.AutoTuneCriterion_evaluate(self, *args)
__swig_destroy__ = _swigfaiss.delete_AutoTuneCriterion
__del__ = lambda self : None;
AutoTuneCriterion_swigregister = _swigfaiss.AutoTuneCriterion_swigregister
AutoTuneCriterion_swigregister(AutoTuneCriterion)
class OneRecallAtRCriterion(AutoTuneCriterion):
__swig_setmethods__ = {}
for _s in [AutoTuneCriterion]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, OneRecallAtRCriterion, name, value)
__swig_getmethods__ = {}
for _s in [AutoTuneCriterion]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, OneRecallAtRCriterion, name)
__repr__ = _swig_repr
__swig_setmethods__["R"] = _swigfaiss.OneRecallAtRCriterion_R_set
__swig_getmethods__["R"] = _swigfaiss.OneRecallAtRCriterion_R_get
if _newclass:R = _swig_property(_swigfaiss.OneRecallAtRCriterion_R_get, _swigfaiss.OneRecallAtRCriterion_R_set)
def __init__(self, *args):
this = _swigfaiss.new_OneRecallAtRCriterion(*args)
try: self.this.append(this)
except: self.this = this
def evaluate(self, *args): return _swigfaiss.OneRecallAtRCriterion_evaluate(self, *args)
__swig_destroy__ = _swigfaiss.delete_OneRecallAtRCriterion
__del__ = lambda self : None;
OneRecallAtRCriterion_swigregister = _swigfaiss.OneRecallAtRCriterion_swigregister
OneRecallAtRCriterion_swigregister(OneRecallAtRCriterion)
class IntersectionCriterion(AutoTuneCriterion):
__swig_setmethods__ = {}
for _s in [AutoTuneCriterion]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IntersectionCriterion, name, value)
__swig_getmethods__ = {}
for _s in [AutoTuneCriterion]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IntersectionCriterion, name)
__repr__ = _swig_repr
__swig_setmethods__["R"] = _swigfaiss.IntersectionCriterion_R_set
__swig_getmethods__["R"] = _swigfaiss.IntersectionCriterion_R_get
if _newclass:R = _swig_property(_swigfaiss.IntersectionCriterion_R_get, _swigfaiss.IntersectionCriterion_R_set)
def __init__(self, *args):
this = _swigfaiss.new_IntersectionCriterion(*args)
try: self.this.append(this)
except: self.this = this
def evaluate(self, *args): return _swigfaiss.IntersectionCriterion_evaluate(self, *args)
__swig_destroy__ = _swigfaiss.delete_IntersectionCriterion
__del__ = lambda self : None;
IntersectionCriterion_swigregister = _swigfaiss.IntersectionCriterion_swigregister
IntersectionCriterion_swigregister(IntersectionCriterion)
class OperatingPoint(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, OperatingPoint, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, OperatingPoint, name)
__repr__ = _swig_repr
__swig_setmethods__["perf"] = _swigfaiss.OperatingPoint_perf_set
__swig_getmethods__["perf"] = _swigfaiss.OperatingPoint_perf_get
if _newclass:perf = _swig_property(_swigfaiss.OperatingPoint_perf_get, _swigfaiss.OperatingPoint_perf_set)
__swig_setmethods__["t"] = _swigfaiss.OperatingPoint_t_set
__swig_getmethods__["t"] = _swigfaiss.OperatingPoint_t_get
if _newclass:t = _swig_property(_swigfaiss.OperatingPoint_t_get, _swigfaiss.OperatingPoint_t_set)
__swig_setmethods__["key"] = _swigfaiss.OperatingPoint_key_set
__swig_getmethods__["key"] = _swigfaiss.OperatingPoint_key_get
if _newclass:key = _swig_property(_swigfaiss.OperatingPoint_key_get, _swigfaiss.OperatingPoint_key_set)
__swig_setmethods__["cno"] = _swigfaiss.OperatingPoint_cno_set
__swig_getmethods__["cno"] = _swigfaiss.OperatingPoint_cno_get
if _newclass:cno = _swig_property(_swigfaiss.OperatingPoint_cno_get, _swigfaiss.OperatingPoint_cno_set)
def __init__(self):
this = _swigfaiss.new_OperatingPoint()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_OperatingPoint
__del__ = lambda self : None;
OperatingPoint_swigregister = _swigfaiss.OperatingPoint_swigregister
OperatingPoint_swigregister(OperatingPoint)
class OperatingPoints(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, OperatingPoints, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, OperatingPoints, name)
__repr__ = _swig_repr
__swig_setmethods__["all_pts"] = _swigfaiss.OperatingPoints_all_pts_set
__swig_getmethods__["all_pts"] = _swigfaiss.OperatingPoints_all_pts_get
if _newclass:all_pts = _swig_property(_swigfaiss.OperatingPoints_all_pts_get, _swigfaiss.OperatingPoints_all_pts_set)
__swig_setmethods__["optimal_pts"] = _swigfaiss.OperatingPoints_optimal_pts_set
__swig_getmethods__["optimal_pts"] = _swigfaiss.OperatingPoints_optimal_pts_get
if _newclass:optimal_pts = _swig_property(_swigfaiss.OperatingPoints_optimal_pts_get, _swigfaiss.OperatingPoints_optimal_pts_set)
def __init__(self):
this = _swigfaiss.new_OperatingPoints()
try: self.this.append(this)
except: self.this = this
def merge_with(self, *args): return _swigfaiss.OperatingPoints_merge_with(self, *args)
def clear(self): return _swigfaiss.OperatingPoints_clear(self)
def add(self, *args): return _swigfaiss.OperatingPoints_add(self, *args)
def t_for_perf(self, *args): return _swigfaiss.OperatingPoints_t_for_perf(self, *args)
def display(self, only_optimal=True): return _swigfaiss.OperatingPoints_display(self, only_optimal)
def all_to_gnuplot(self, *args): return _swigfaiss.OperatingPoints_all_to_gnuplot(self, *args)
def optimal_to_gnuplot(self, *args): return _swigfaiss.OperatingPoints_optimal_to_gnuplot(self, *args)
__swig_destroy__ = _swigfaiss.delete_OperatingPoints
__del__ = lambda self : None;
OperatingPoints_swigregister = _swigfaiss.OperatingPoints_swigregister
OperatingPoints_swigregister(OperatingPoints)
class ParameterRange(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ParameterRange, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ParameterRange, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _swigfaiss.ParameterRange_name_set
__swig_getmethods__["name"] = _swigfaiss.ParameterRange_name_get
if _newclass:name = _swig_property(_swigfaiss.ParameterRange_name_get, _swigfaiss.ParameterRange_name_set)
__swig_setmethods__["values"] = _swigfaiss.ParameterRange_values_set
__swig_getmethods__["values"] = _swigfaiss.ParameterRange_values_get
if _newclass:values = _swig_property(_swigfaiss.ParameterRange_values_get, _swigfaiss.ParameterRange_values_set)
def __init__(self):
this = _swigfaiss.new_ParameterRange()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_ParameterRange
__del__ = lambda self : None;
ParameterRange_swigregister = _swigfaiss.ParameterRange_swigregister
ParameterRange_swigregister(ParameterRange)
class ParameterSpace(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ParameterSpace, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ParameterSpace, name)
__repr__ = _swig_repr
__swig_setmethods__["parameter_ranges"] = _swigfaiss.ParameterSpace_parameter_ranges_set
__swig_getmethods__["parameter_ranges"] = _swigfaiss.ParameterSpace_parameter_ranges_get
if _newclass:parameter_ranges = _swig_property(_swigfaiss.ParameterSpace_parameter_ranges_get, _swigfaiss.ParameterSpace_parameter_ranges_set)
__swig_setmethods__["verbose"] = _swigfaiss.ParameterSpace_verbose_set
__swig_getmethods__["verbose"] = _swigfaiss.ParameterSpace_verbose_get
if _newclass:verbose = _swig_property(_swigfaiss.ParameterSpace_verbose_get, _swigfaiss.ParameterSpace_verbose_set)
__swig_setmethods__["n_experiments"] = _swigfaiss.ParameterSpace_n_experiments_set
__swig_getmethods__["n_experiments"] = _swigfaiss.ParameterSpace_n_experiments_get
if _newclass:n_experiments = _swig_property(_swigfaiss.ParameterSpace_n_experiments_get, _swigfaiss.ParameterSpace_n_experiments_set)
__swig_setmethods__["batchsize"] = _swigfaiss.ParameterSpace_batchsize_set
__swig_getmethods__["batchsize"] = _swigfaiss.ParameterSpace_batchsize_get
if _newclass:batchsize = _swig_property(_swigfaiss.ParameterSpace_batchsize_get, _swigfaiss.ParameterSpace_batchsize_set)
__swig_setmethods__["thread_over_batches"] = _swigfaiss.ParameterSpace_thread_over_batches_set
__swig_getmethods__["thread_over_batches"] = _swigfaiss.ParameterSpace_thread_over_batches_get
if _newclass:thread_over_batches = _swig_property(_swigfaiss.ParameterSpace_thread_over_batches_get, _swigfaiss.ParameterSpace_thread_over_batches_set)
def __init__(self):
this = _swigfaiss.new_ParameterSpace()
try: self.this.append(this)
except: self.this = this
def n_combinations(self): return _swigfaiss.ParameterSpace_n_combinations(self)
def combination_ge(self, *args): return _swigfaiss.ParameterSpace_combination_ge(self, *args)
def combination_name(self, *args): return _swigfaiss.ParameterSpace_combination_name(self, *args)
def display(self): return _swigfaiss.ParameterSpace_display(self)
def add_range(self, *args): return _swigfaiss.ParameterSpace_add_range(self, *args)
def initialize(self, *args): return _swigfaiss.ParameterSpace_initialize(self, *args)
def set_index_parameters(self, *args): return _swigfaiss.ParameterSpace_set_index_parameters(self, *args)
def set_index_parameter(self, *args): return _swigfaiss.ParameterSpace_set_index_parameter(self, *args)
def update_bounds(self, *args): return _swigfaiss.ParameterSpace_update_bounds(self, *args)
def explore(self, *args): return _swigfaiss.ParameterSpace_explore(self, *args)
__swig_destroy__ = _swigfaiss.delete_ParameterSpace
__del__ = lambda self : None;
ParameterSpace_swigregister = _swigfaiss.ParameterSpace_swigregister
ParameterSpace_swigregister(ParameterSpace)
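# Hedged auto-tuning sketch (illustrative comments; index, nq, k, xq, gt_D and
# gt_I are hypothetical): a criterion scores each parameter combination against
# ground truth, while ParameterSpace.explore sweeps the combinations and keeps
# the Pareto-optimal (perf, time) operating points.
#
#   crit = OneRecallAtRCriterion(nq, 1)
#   crit.set_groundtruth(k, swig_ptr(gt_D), swig_ptr(gt_I))
#   ps = ParameterSpace()
#   ps.initialize(index)
#   ops = OperatingPoints()
#   ps.explore(index, nq, swig_ptr(xq), crit, ops)
#   ops.display()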
def index_factory(*args):
return _swigfaiss.index_factory(*args)
index_factory = _swigfaiss.index_factory
def swig_ptr(*args):
return _swigfaiss.swig_ptr(*args)
swig_ptr = _swigfaiss.swig_ptr
def rev_swig_ptr(*args):
return _swigfaiss.rev_swig_ptr(*args)
rev_swig_ptr = _swigfaiss.rev_swig_ptr
class float_minheap_array_t(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, float_minheap_array_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, float_minheap_array_t, name)
__repr__ = _swig_repr
__swig_setmethods__["nh"] = _swigfaiss.float_minheap_array_t_nh_set
__swig_getmethods__["nh"] = _swigfaiss.float_minheap_array_t_nh_get
if _newclass:nh = _swig_property(_swigfaiss.float_minheap_array_t_nh_get, _swigfaiss.float_minheap_array_t_nh_set)
__swig_setmethods__["k"] = _swigfaiss.float_minheap_array_t_k_set
__swig_getmethods__["k"] = _swigfaiss.float_minheap_array_t_k_get
if _newclass:k = _swig_property(_swigfaiss.float_minheap_array_t_k_get, _swigfaiss.float_minheap_array_t_k_set)
__swig_setmethods__["ids"] = _swigfaiss.float_minheap_array_t_ids_set
__swig_getmethods__["ids"] = _swigfaiss.float_minheap_array_t_ids_get
if _newclass:ids = _swig_property(_swigfaiss.float_minheap_array_t_ids_get, _swigfaiss.float_minheap_array_t_ids_set)
__swig_setmethods__["val"] = _swigfaiss.float_minheap_array_t_val_set
__swig_getmethods__["val"] = _swigfaiss.float_minheap_array_t_val_get
if _newclass:val = _swig_property(_swigfaiss.float_minheap_array_t_val_get, _swigfaiss.float_minheap_array_t_val_set)
def get_val(self, *args): return _swigfaiss.float_minheap_array_t_get_val(self, *args)
def get_ids(self, *args): return _swigfaiss.float_minheap_array_t_get_ids(self, *args)
def heapify(self): return _swigfaiss.float_minheap_array_t_heapify(self)
def addn(self, *args): return _swigfaiss.float_minheap_array_t_addn(self, *args)
def addn_with_ids(self, *args): return _swigfaiss.float_minheap_array_t_addn_with_ids(self, *args)
def reorder(self): return _swigfaiss.float_minheap_array_t_reorder(self)
def per_line_extrema(self, *args): return _swigfaiss.float_minheap_array_t_per_line_extrema(self, *args)
def __init__(self):
this = _swigfaiss.new_float_minheap_array_t()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_float_minheap_array_t
__del__ = lambda self : None;
float_minheap_array_t_swigregister = _swigfaiss.float_minheap_array_t_swigregister
float_minheap_array_t_swigregister(float_minheap_array_t)
class int_minheap_array_t(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, int_minheap_array_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, int_minheap_array_t, name)
__repr__ = _swig_repr
__swig_setmethods__["nh"] = _swigfaiss.int_minheap_array_t_nh_set
__swig_getmethods__["nh"] = _swigfaiss.int_minheap_array_t_nh_get
if _newclass:nh = _swig_property(_swigfaiss.int_minheap_array_t_nh_get, _swigfaiss.int_minheap_array_t_nh_set)
__swig_setmethods__["k"] = _swigfaiss.int_minheap_array_t_k_set
__swig_getmethods__["k"] = _swigfaiss.int_minheap_array_t_k_get
if _newclass:k = _swig_property(_swigfaiss.int_minheap_array_t_k_get, _swigfaiss.int_minheap_array_t_k_set)
__swig_setmethods__["ids"] = _swigfaiss.int_minheap_array_t_ids_set
__swig_getmethods__["ids"] = _swigfaiss.int_minheap_array_t_ids_get
if _newclass:ids = _swig_property(_swigfaiss.int_minheap_array_t_ids_get, _swigfaiss.int_minheap_array_t_ids_set)
__swig_setmethods__["val"] = _swigfaiss.int_minheap_array_t_val_set
__swig_getmethods__["val"] = _swigfaiss.int_minheap_array_t_val_get
if _newclass:val = _swig_property(_swigfaiss.int_minheap_array_t_val_get, _swigfaiss.int_minheap_array_t_val_set)
def get_val(self, *args): return _swigfaiss.int_minheap_array_t_get_val(self, *args)
def get_ids(self, *args): return _swigfaiss.int_minheap_array_t_get_ids(self, *args)
def heapify(self): return _swigfaiss.int_minheap_array_t_heapify(self)
def addn(self, *args): return _swigfaiss.int_minheap_array_t_addn(self, *args)
def addn_with_ids(self, *args): return _swigfaiss.int_minheap_array_t_addn_with_ids(self, *args)
def reorder(self): return _swigfaiss.int_minheap_array_t_reorder(self)
def per_line_extrema(self, *args): return _swigfaiss.int_minheap_array_t_per_line_extrema(self, *args)
def __init__(self):
this = _swigfaiss.new_int_minheap_array_t()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_int_minheap_array_t
__del__ = lambda self : None;
int_minheap_array_t_swigregister = _swigfaiss.int_minheap_array_t_swigregister
int_minheap_array_t_swigregister(int_minheap_array_t)
class float_maxheap_array_t(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, float_maxheap_array_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, float_maxheap_array_t, name)
__repr__ = _swig_repr
__swig_setmethods__["nh"] = _swigfaiss.float_maxheap_array_t_nh_set
__swig_getmethods__["nh"] = _swigfaiss.float_maxheap_array_t_nh_get
if _newclass:nh = _swig_property(_swigfaiss.float_maxheap_array_t_nh_get, _swigfaiss.float_maxheap_array_t_nh_set)
__swig_setmethods__["k"] = _swigfaiss.float_maxheap_array_t_k_set
__swig_getmethods__["k"] = _swigfaiss.float_maxheap_array_t_k_get
if _newclass:k = _swig_property(_swigfaiss.float_maxheap_array_t_k_get, _swigfaiss.float_maxheap_array_t_k_set)
__swig_setmethods__["ids"] = _swigfaiss.float_maxheap_array_t_ids_set
__swig_getmethods__["ids"] = _swigfaiss.float_maxheap_array_t_ids_get
if _newclass:ids = _swig_property(_swigfaiss.float_maxheap_array_t_ids_get, _swigfaiss.float_maxheap_array_t_ids_set)
__swig_setmethods__["val"] = _swigfaiss.float_maxheap_array_t_val_set
__swig_getmethods__["val"] = _swigfaiss.float_maxheap_array_t_val_get
if _newclass:val = _swig_property(_swigfaiss.float_maxheap_array_t_val_get, _swigfaiss.float_maxheap_array_t_val_set)
def get_val(self, *args): return _swigfaiss.float_maxheap_array_t_get_val(self, *args)
def get_ids(self, *args): return _swigfaiss.float_maxheap_array_t_get_ids(self, *args)
def heapify(self): return _swigfaiss.float_maxheap_array_t_heapify(self)
def addn(self, *args): return _swigfaiss.float_maxheap_array_t_addn(self, *args)
def addn_with_ids(self, *args): return _swigfaiss.float_maxheap_array_t_addn_with_ids(self, *args)
def reorder(self): return _swigfaiss.float_maxheap_array_t_reorder(self)
def per_line_extrema(self, *args): return _swigfaiss.float_maxheap_array_t_per_line_extrema(self, *args)
def __init__(self):
this = _swigfaiss.new_float_maxheap_array_t()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_float_maxheap_array_t
__del__ = lambda self : None;
float_maxheap_array_t_swigregister = _swigfaiss.float_maxheap_array_t_swigregister
float_maxheap_array_t_swigregister(float_maxheap_array_t)
class int_maxheap_array_t(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, int_maxheap_array_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, int_maxheap_array_t, name)
__repr__ = _swig_repr
__swig_setmethods__["nh"] = _swigfaiss.int_maxheap_array_t_nh_set
__swig_getmethods__["nh"] = _swigfaiss.int_maxheap_array_t_nh_get
if _newclass:nh = _swig_property(_swigfaiss.int_maxheap_array_t_nh_get, _swigfaiss.int_maxheap_array_t_nh_set)
__swig_setmethods__["k"] = _swigfaiss.int_maxheap_array_t_k_set
__swig_getmethods__["k"] = _swigfaiss.int_maxheap_array_t_k_get
if _newclass:k = _swig_property(_swigfaiss.int_maxheap_array_t_k_get, _swigfaiss.int_maxheap_array_t_k_set)
__swig_setmethods__["ids"] = _swigfaiss.int_maxheap_array_t_ids_set
__swig_getmethods__["ids"] = _swigfaiss.int_maxheap_array_t_ids_get
if _newclass:ids = _swig_property(_swigfaiss.int_maxheap_array_t_ids_get, _swigfaiss.int_maxheap_array_t_ids_set)
__swig_setmethods__["val"] = _swigfaiss.int_maxheap_array_t_val_set
__swig_getmethods__["val"] = _swigfaiss.int_maxheap_array_t_val_get
if _newclass:val = _swig_property(_swigfaiss.int_maxheap_array_t_val_get, _swigfaiss.int_maxheap_array_t_val_set)
def get_val(self, *args): return _swigfaiss.int_maxheap_array_t_get_val(self, *args)
def get_ids(self, *args): return _swigfaiss.int_maxheap_array_t_get_ids(self, *args)
def heapify(self): return _swigfaiss.int_maxheap_array_t_heapify(self)
def addn(self, *args): return _swigfaiss.int_maxheap_array_t_addn(self, *args)
def addn_with_ids(self, *args): return _swigfaiss.int_maxheap_array_t_addn_with_ids(self, *args)
def reorder(self): return _swigfaiss.int_maxheap_array_t_reorder(self)
def per_line_extrema(self, *args): return _swigfaiss.int_maxheap_array_t_per_line_extrema(self, *args)
def __init__(self):
this = _swigfaiss.new_int_maxheap_array_t()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_int_maxheap_array_t
__del__ = lambda self : None;
int_maxheap_array_t_swigregister = _swigfaiss.int_maxheap_array_t_swigregister
int_maxheap_array_t_swigregister(int_maxheap_array_t)
def omp_set_num_threads(*args):
return _swigfaiss.omp_set_num_threads(*args)
omp_set_num_threads = _swigfaiss.omp_set_num_threads
def omp_get_max_threads():
return _swigfaiss.omp_get_max_threads()
omp_get_max_threads = _swigfaiss.omp_get_max_threads
def memcpy(*args):
return _swigfaiss.memcpy(*args)
memcpy = _swigfaiss.memcpy
def cast_integer_to_float_ptr(*args):
return _swigfaiss.cast_integer_to_float_ptr(*args)
cast_integer_to_float_ptr = _swigfaiss.cast_integer_to_float_ptr
def cast_integer_to_long_ptr(*args):
return _swigfaiss.cast_integer_to_long_ptr(*args)
cast_integer_to_long_ptr = _swigfaiss.cast_integer_to_long_ptr
def cast_integer_to_int_ptr(*args):
return _swigfaiss.cast_integer_to_int_ptr(*args)
cast_integer_to_int_ptr = _swigfaiss.cast_integer_to_int_ptr
class RangeSearchResult(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, RangeSearchResult, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, RangeSearchResult, name)
__repr__ = _swig_repr
__swig_setmethods__["nq"] = _swigfaiss.RangeSearchResult_nq_set
__swig_getmethods__["nq"] = _swigfaiss.RangeSearchResult_nq_get
if _newclass:nq = _swig_property(_swigfaiss.RangeSearchResult_nq_get, _swigfaiss.RangeSearchResult_nq_set)
__swig_setmethods__["lims"] = _swigfaiss.RangeSearchResult_lims_set
__swig_getmethods__["lims"] = _swigfaiss.RangeSearchResult_lims_get
if _newclass:lims = _swig_property(_swigfaiss.RangeSearchResult_lims_get, _swigfaiss.RangeSearchResult_lims_set)
__swig_setmethods__["labels"] = _swigfaiss.RangeSearchResult_labels_set
__swig_getmethods__["labels"] = _swigfaiss.RangeSearchResult_labels_get
if _newclass:labels = _swig_property(_swigfaiss.RangeSearchResult_labels_get, _swigfaiss.RangeSearchResult_labels_set)
__swig_setmethods__["distances"] = _swigfaiss.RangeSearchResult_distances_set
__swig_getmethods__["distances"] = _swigfaiss.RangeSearchResult_distances_get
if _newclass:distances = _swig_property(_swigfaiss.RangeSearchResult_distances_get, _swigfaiss.RangeSearchResult_distances_set)
__swig_setmethods__["buffer_size"] = _swigfaiss.RangeSearchResult_buffer_size_set
__swig_getmethods__["buffer_size"] = _swigfaiss.RangeSearchResult_buffer_size_get
if _newclass:buffer_size = _swig_property(_swigfaiss.RangeSearchResult_buffer_size_get, _swigfaiss.RangeSearchResult_buffer_size_set)
def __init__(self, *args):
this = _swigfaiss.new_RangeSearchResult(*args)
try: self.this.append(this)
except: self.this = this
def do_allocation(self): return _swigfaiss.RangeSearchResult_do_allocation(self)
__swig_destroy__ = _swigfaiss.delete_RangeSearchResult
__del__ = lambda self : None;
RangeSearchResult_swigregister = _swigfaiss.RangeSearchResult_swigregister
RangeSearchResult_swigregister(RangeSearchResult)
class IDSelector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IDSelector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IDSelector, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def is_member(self, *args): return _swigfaiss.IDSelector_is_member(self, *args)
__swig_destroy__ = _swigfaiss.delete_IDSelector
__del__ = lambda self : None;
IDSelector_swigregister = _swigfaiss.IDSelector_swigregister
IDSelector_swigregister(IDSelector)
class IDSelectorRange(IDSelector):
__swig_setmethods__ = {}
for _s in [IDSelector]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IDSelectorRange, name, value)
__swig_getmethods__ = {}
for _s in [IDSelector]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IDSelectorRange, name)
__repr__ = _swig_repr
__swig_setmethods__["imin"] = _swigfaiss.IDSelectorRange_imin_set
__swig_getmethods__["imin"] = _swigfaiss.IDSelectorRange_imin_get
if _newclass:imin = _swig_property(_swigfaiss.IDSelectorRange_imin_get, _swigfaiss.IDSelectorRange_imin_set)
__swig_setmethods__["imax"] = _swigfaiss.IDSelectorRange_imax_set
__swig_getmethods__["imax"] = _swigfaiss.IDSelectorRange_imax_get
if _newclass:imax = _swig_property(_swigfaiss.IDSelectorRange_imax_get, _swigfaiss.IDSelectorRange_imax_set)
def __init__(self, *args):
this = _swigfaiss.new_IDSelectorRange(*args)
try: self.this.append(this)
except: self.this = this
def is_member(self, *args): return _swigfaiss.IDSelectorRange_is_member(self, *args)
__swig_destroy__ = _swigfaiss.delete_IDSelectorRange
__del__ = lambda self : None;
IDSelectorRange_swigregister = _swigfaiss.IDSelectorRange_swigregister
IDSelectorRange_swigregister(IDSelectorRange)
class IDSelectorBatch(IDSelector):
__swig_setmethods__ = {}
for _s in [IDSelector]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IDSelectorBatch, name, value)
__swig_getmethods__ = {}
for _s in [IDSelector]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, IDSelectorBatch, name)
__repr__ = _swig_repr
__swig_setmethods__["nbits"] = _swigfaiss.IDSelectorBatch_nbits_set
__swig_getmethods__["nbits"] = _swigfaiss.IDSelectorBatch_nbits_get
if _newclass:nbits = _swig_property(_swigfaiss.IDSelectorBatch_nbits_get, _swigfaiss.IDSelectorBatch_nbits_set)
__swig_setmethods__["mask"] = _swigfaiss.IDSelectorBatch_mask_set
__swig_getmethods__["mask"] = _swigfaiss.IDSelectorBatch_mask_get
if _newclass:mask = _swig_property(_swigfaiss.IDSelectorBatch_mask_get, _swigfaiss.IDSelectorBatch_mask_set)
def __init__(self, *args):
this = _swigfaiss.new_IDSelectorBatch(*args)
try: self.this.append(this)
except: self.this = this
def is_member(self, *args): return _swigfaiss.IDSelectorBatch_is_member(self, *args)
__swig_destroy__ = _swigfaiss.delete_IDSelectorBatch
__del__ = lambda self : None;
IDSelectorBatch_swigregister = _swigfaiss.IDSelectorBatch_swigregister
IDSelectorBatch_swigregister(IDSelectorBatch)
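# Hedged sketch (illustrative comments; index, n_rm and ids_to_remove are
# hypothetical): IDSelector subclasses drive Index.remove_ids(), which drops
# the matching vectors and returns how many were removed.
#
#   sel = IDSelectorBatch(n_rm, swig_ptr(ids_to_remove))
#   n_removed = index.remove_ids(sel)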
class BufferList(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BufferList, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BufferList, name)
__repr__ = _swig_repr
__swig_setmethods__["buffer_size"] = _swigfaiss.BufferList_buffer_size_set
__swig_getmethods__["buffer_size"] = _swigfaiss.BufferList_buffer_size_get
if _newclass:buffer_size = _swig_property(_swigfaiss.BufferList_buffer_size_get, _swigfaiss.BufferList_buffer_size_set)
__swig_setmethods__["buffers"] = _swigfaiss.BufferList_buffers_set
__swig_getmethods__["buffers"] = _swigfaiss.BufferList_buffers_get
if _newclass:buffers = _swig_property(_swigfaiss.BufferList_buffers_get, _swigfaiss.BufferList_buffers_set)
__swig_setmethods__["wp"] = _swigfaiss.BufferList_wp_set
__swig_getmethods__["wp"] = _swigfaiss.BufferList_wp_get
if _newclass:wp = _swig_property(_swigfaiss.BufferList_wp_get, _swigfaiss.BufferList_wp_set)
def __init__(self, *args):
this = _swigfaiss.new_BufferList(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_BufferList
__del__ = lambda self : None;
def append_buffer(self): return _swigfaiss.BufferList_append_buffer(self)
def add(self, *args): return _swigfaiss.BufferList_add(self, *args)
def copy_range(self, *args): return _swigfaiss.BufferList_copy_range(self, *args)
BufferList_swigregister = _swigfaiss.BufferList_swigregister
BufferList_swigregister(BufferList)
class RangeSearchPartialResult(BufferList):
__swig_setmethods__ = {}
for _s in [BufferList]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, RangeSearchPartialResult, name, value)
__swig_getmethods__ = {}
for _s in [BufferList]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, RangeSearchPartialResult, name)
__repr__ = _swig_repr
__swig_setmethods__["res"] = _swigfaiss.RangeSearchPartialResult_res_set
__swig_getmethods__["res"] = _swigfaiss.RangeSearchPartialResult_res_get
if _newclass:res = _swig_property(_swigfaiss.RangeSearchPartialResult_res_get, _swigfaiss.RangeSearchPartialResult_res_set)
def __init__(self, *args):
this = _swigfaiss.new_RangeSearchPartialResult(*args)
try: self.this.append(this)
except: self.this = this
__swig_setmethods__["queries"] = _swigfaiss.RangeSearchPartialResult_queries_set
__swig_getmethods__["queries"] = _swigfaiss.RangeSearchPartialResult_queries_get
if _newclass:queries = _swig_property(_swigfaiss.RangeSearchPartialResult_queries_get, _swigfaiss.RangeSearchPartialResult_queries_set)
def new_result(self, *args): return _swigfaiss.RangeSearchPartialResult_new_result(self, *args)
def finalize(self): return _swigfaiss.RangeSearchPartialResult_finalize(self)
def set_lims(self): return _swigfaiss.RangeSearchPartialResult_set_lims(self)
def set_result(self, incremental=False): return _swigfaiss.RangeSearchPartialResult_set_result(self, incremental)
__swig_destroy__ = _swigfaiss.delete_RangeSearchPartialResult
__del__ = lambda self : None;
RangeSearchPartialResult_swigregister = _swigfaiss.RangeSearchPartialResult_swigregister
RangeSearchPartialResult_swigregister(RangeSearchPartialResult)
def ignore_SIGTTIN():
return _swigfaiss.ignore_SIGTTIN()
ignore_SIGTTIN = _swigfaiss.ignore_SIGTTIN
class MapLong2Long(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, MapLong2Long, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, MapLong2Long, name)
__repr__ = _swig_repr
__swig_setmethods__["map"] = _swigfaiss.MapLong2Long_map_set
__swig_getmethods__["map"] = _swigfaiss.MapLong2Long_map_get
if _newclass:map = _swig_property(_swigfaiss.MapLong2Long_map_get, _swigfaiss.MapLong2Long_map_set)
def add(self, *args): return _swigfaiss.MapLong2Long_add(self, *args)
def search(self, *args): return _swigfaiss.MapLong2Long_search(self, *args)
def search_multiple(self, *args): return _swigfaiss.MapLong2Long_search_multiple(self, *args)
def __init__(self):
this = _swigfaiss.new_MapLong2Long()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _swigfaiss.delete_MapLong2Long
__del__ = lambda self : None;
MapLong2Long_swigregister = _swigfaiss.MapLong2Long_swigregister
MapLong2Long_swigregister(MapLong2Long)
# This file is compatible with both classic and new-style classes.
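# Hedged end-to-end sketch of the raw SWIG layer above (illustrative comments;
# the high-level faiss package normally wraps these calls with automatic numpy
# conversion). Assumes numpy imported as np; d, nb, nq and k are hypothetical.
#
#   xb = np.random.rand(nb, d).astype('float32')
#   xq = np.random.rand(nq, d).astype('float32')
#   index = index_factory(d, "Flat")
#   index.add(nb, swig_ptr(xb))
#   D = np.empty((nq, k), dtype='float32')
#   I = np.empty((nq, k), dtype='int64')
#   index.search(nq, swig_ptr(xq), k, swig_ptr(D), swig_ptr(I))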
|
py | 7df6c5fe8b3e5eab40b5f875d3d0d0aaae9a40e2 | from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any
import pytest
from datamodel_code_generator.model.base import (
DataModel,
DataModelFieldBase,
TemplateBase,
)
from datamodel_code_generator.types import DataType, Types
class A(TemplateBase):
def render(self) -> str:
return ''
class B(DataModel):
@classmethod
def get_data_type(cls, types: Types, **kwargs: Any) -> DataType:
pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
TEMPLATE_FILE_PATH = ''
class C(DataModel):
@classmethod
def get_data_type(cls, types: Types, **kwargs: Any) -> DataType:
pass
template: str = '''{%- for decorator in decorators -%}
{{ decorator }}
{%- endfor %}
@dataclass
class {{ class_name }}:
{%- for field in fields -%}
{%- if field.required %}
{{ field.name }}: {{ field.type_hint }}
{%- else %}
{{ field.name }}: {{ field.type_hint }} = {{field.default}}
{%- endif %}
{%- endfor -%}'''
def test_template_base():
with NamedTemporaryFile('w') as dummy_template:
dummy_template.write('abc')
dummy_template.seek(0)
a: TemplateBase = A(Path(dummy_template.name))
assert str(a.template_file_path) == dummy_template.name
assert a._render() == 'abc'
assert str(a) == ''
def test_data_model():
field = DataModelFieldBase(
name='a', data_type=DataType(type='str'), default="" 'abc' "", required=True
)
with NamedTemporaryFile('w') as dummy_template:
dummy_template.write(template)
dummy_template.seek(0)
B.TEMPLATE_FILE_PATH = dummy_template.name
data_model = B(
name='test_model',
fields=[field],
decorators=['@validate'],
base_classes=['Base'],
)
assert data_model.name == 'test_model'
assert data_model.fields == [field]
assert data_model.decorators == ['@validate']
assert data_model.base_class == 'Base'
assert (
data_model.render() == '@validate\n'
'@dataclass\n'
'class test_model:\n'
' a: str'
)
def test_data_model_exception():
field = DataModelFieldBase(
name='a', data_type=DataType(type='str'), default="" 'abc' "", required=True
)
with pytest.raises(Exception, match='TEMPLATE_FILE_PATH is undefined'):
C(name='abc', fields=[field])
def test_data_field():
# field = DataModelField(name='a', data_types=[], required=True)
# assert field.type_hint == ''
field = DataModelFieldBase(
name='a',
data_type=DataType(is_list=True),
required=True,
is_list=True,
is_union=True,
)
assert field.type_hint == 'List'
# field = DataModelField(
# name='a', data_types=[], required=True, is_list=False, is_union=True
# )
# assert field.type_hint == ''
# field = DataModelField(
# name='a', data_types=[], required=True, is_list=False, is_union=False
# )
# assert field.type_hint == ''
field = DataModelFieldBase(
name='a',
data_type=DataType(is_list=True),
required=True,
is_list=True,
is_union=False,
)
assert field.type_hint == 'List'
field = DataModelFieldBase(name='a', data_type=DataType(), required=False)
assert field.type_hint == 'Optional'
field = DataModelFieldBase(
name='a',
data_type=DataType(is_list=True),
required=False,
is_list=True,
is_union=True,
)
assert field.type_hint == 'Optional[List]'
field = DataModelFieldBase(
name='a', data_type=DataType(), required=False, is_list=False, is_union=True
)
assert field.type_hint == 'Optional'
field = DataModelFieldBase(
name='a', data_type=DataType(), required=False, is_list=False, is_union=False
)
assert field.type_hint == 'Optional'
field = DataModelFieldBase(
name='a',
data_type=DataType(is_list=True),
required=False,
is_list=True,
is_union=False,
)
assert field.type_hint == 'Optional[List]'
field = DataModelFieldBase(name='a', data_type=DataType(type='str'), required=True)
assert field.type_hint == 'str'
field = DataModelFieldBase(
name='a',
data_type=DataType(type='str', is_list=True),
required=True,
)
assert field.type_hint == 'List[str]'
field = DataModelFieldBase(name='a', data_type=DataType(type='str'), required=True)
assert field.type_hint == 'str'
field = DataModelFieldBase(
name='a',
data_type=DataType(type='str'),
required=True,
)
assert field.type_hint == 'str'
field = DataModelFieldBase(
name='a',
data_type=DataType(type='str', is_list=True),
required=True,
)
assert field.type_hint == 'List[str]'
field = DataModelFieldBase(name='a', data_type=DataType(type='str'), required=False)
assert field.type_hint == 'Optional[str]'
field = DataModelFieldBase(
name='a',
data_type=DataType(
type='str',
is_list=True,
),
required=False,
)
assert field.type_hint == 'Optional[List[str]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(type='str'),
required=False,
)
assert field.type_hint == 'Optional[str]'
field = DataModelFieldBase(
name='a',
data_type=DataType(type='str'),
required=False,
)
assert field.type_hint == 'Optional[str]'
field = DataModelFieldBase(
name='a',
data_type=DataType(
type='str',
is_list=True,
),
required=False,
)
assert field.type_hint == 'Optional[List[str]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(data_types=[DataType(type='str'), DataType(type='int')]),
required=True,
)
assert field.type_hint == 'Union[str, int]'
field = DataModelFieldBase(
name='a',
data_type=DataType(
data_types=[DataType(type='str'), DataType(type='int')],
is_list=True,
),
required=True,
)
assert field.type_hint == 'List[Union[str, int]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(data_types=[DataType(type='str'), DataType(type='int')]),
required=True,
)
assert field.type_hint == 'Union[str, int]'
field = DataModelFieldBase(
name='a',
data_type=DataType(data_types=[DataType(type='str'), DataType(type='int')]),
required=True,
)
assert field.type_hint == 'Union[str, int]'
field = DataModelFieldBase(
name='a',
data_type=DataType(
data_types=[DataType(type='str'), DataType(type='int')], is_list=True
),
required=True,
)
assert field.type_hint == 'List[Union[str, int]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(data_types=[DataType(type='str'), DataType(type='int')]),
required=False,
)
assert field.type_hint == 'Optional[Union[str, int]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(
data_types=[DataType(type='str'), DataType(type='int')],
is_list=True,
),
required=False,
)
assert field.type_hint == 'Optional[List[Union[str, int]]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(data_types=[DataType(type='str'), DataType(type='int')]),
required=False,
)
assert field.type_hint == 'Optional[Union[str, int]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(data_types=[DataType(type='str'), DataType(type='int')]),
required=False,
)
assert field.type_hint == 'Optional[Union[str, int]]'
field = DataModelFieldBase(
name='a',
data_type=DataType(
data_types=[DataType(type='str'), DataType(type='int')], is_list=True
),
required=False,
)
assert field.type_hint == 'Optional[List[Union[str, int]]]'
field = DataModelFieldBase(
name='a', data_type=DataType(is_list=True), required=False
)
assert field.type_hint == 'Optional[List]'
|
py | 7df6c851cc98deaef17b35bda43a358e6af64316 | import datetime
import astropy.units as u
from astropy.coordinates import BaseCoordinateFrame, CoordinateAttribute, SkyCoord, TimeAttribute
from astropy.time import Time
from sunpy.time import parse_time
__all__ = ['TimeFrameAttributeSunPy', 'ObserverCoordinateAttribute']
class TimeFrameAttributeSunPy(TimeAttribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value
if ``default`` is ``None`` and no value was supplied during initialization.
Returns
-------
frame_attr : descriptor
A new data descriptor to hold a frame attribute
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through the
Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, Time):
out = value
converted = False
elif isinstance(value, str):
if value == 'now':
return Time(datetime.datetime.now()), True
try:
out = Time(parse_time(value))
except Exception as err:
raise ValueError(f'Invalid time input {self.name}={value!r}\n{err}')
converted = True
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(f'Invalid time input {self.name}={value!r}\n{err}')
converted = True
return out, converted
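# Hedged usage sketch (illustrative comment; ``MyFrame`` is hypothetical): a
# frame class declares the descriptor, and string inputs are normalized to
# `~astropy.time.Time` when the attribute is accessed.
#
#   class MyFrame(BaseCoordinateFrame):
#       obstime = TimeFrameAttributeSunPy(default='now')
#
#   MyFrame(obstime='2020-01-01').obstime  # -> Time object for that date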
class ObserverCoordinateAttribute(CoordinateAttribute):
"""
An Attribute to describe the location of the observer in the solar system.
The observer location can be given as a string of a known observer, which
will be converted to a coordinate as long as the ``obstime`` attribute is
valid on the instance of the frame. Alternatively a low-level frame class
*or* a `~astropy.coordinates.SkyCoord` can be provided to specify the
location of the observer. If a `~astropy.coordinates.SkyCoord` is passed it
will always be converted to the low-level frame class when accessed.
Parameters
----------
frame : a coordinate frame class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
# Keep string here.
if isinstance(value, str):
return value, False
else:
# Upgrade the coordinate to a `SkyCoord` so that frame attributes will be merged
if isinstance(value, BaseCoordinateFrame) and not isinstance(value, self._frame):
value = SkyCoord(value)
return super().convert_input(value)
def _convert_string_to_coord(self, out, obstime):
"""
        Given a value and a frame instance, calculate the position of the
        object given as a string.
"""
# Import here to prevent circular import
from .ephemeris import get_body_heliographic_stonyhurst
obscoord = get_body_heliographic_stonyhurst(out, obstime)
if out == "earth":
rep = obscoord.spherical
rep.lon[()] = 0*u.deg
obscoord = obscoord.realize_frame(rep)
return obscoord
def __get__(self, instance, frame_cls=None):
# If instance is None then we can't get obstime so it doesn't matter.
if instance is not None:
observer = getattr(instance, '_' + self.name)
obstime = getattr(instance, 'obstime', None) # TODO: Why is this `None` needed?
# If the observer is a string and we have obstime then calculate
# the position of the observer.
if isinstance(observer, str):
if observer != "self" and obstime is not None:
new_observer = self._convert_string_to_coord(observer.lower(), obstime)
new_observer.object_name = observer
setattr(instance, '_' + self.name, new_observer)
else:
return observer
return super().__get__(instance, frame_cls=frame_cls)
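# Hedged usage sketch (illustrative comment; ``Helioprojective`` stands in for
# any frame that declares ``observer = ObserverCoordinateAttribute(...)``): a
# string observer is resolved into a body position once ``obstime`` is known.
#
#   frame = Helioprojective(observer='earth', obstime='2020-01-01')
#   frame.observer  # -> Heliographic Stonyhurst coordinate of Earth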
|
py | 7df6c853c91bd00d0df99237aac5aaf2c46bf499 | # Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os.path import abspath, dirname, join
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.6.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
'sphinx.ext.extlinks',
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
extlinks = {
'commit': ('https://github.com/django/django/commit/%s', ''),
'cve': ('https://nvd.nist.gov/view/vuln/detail?vulnId=%s', 'CVE-'),
'ticket': ('https://code.djangoproject.com/ticket/%s', '#'),
}
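# With the mapping above, writing :ticket:`12345` in the docs renders as a
# link titled "#12345" that points at https://code.djangoproject.com/ticket/12345.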
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep440ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
return pep440ver + '.dev'
return pep440ver
release = django_release()
# The "development version" of Django
django_next_version = '3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_theme']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://www.sphinx-doc.org/en/master/', None),
'psycopg2': ('http://initd.org/psycopg/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ['app.add_directive']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': (
'\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}'
'\\DeclareUnicodeCharacter{2665}{[unicode-heart]}'
'\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}'
),
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
'ref/django-admin',
'django-admin',
'Utility script for the Django Web framework',
['Django Software Foundation'],
1
)]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
|
py | 7df6ca0b10eb0b8b7815ef64520cc62307a16aea | import random
from collections import namedtuple
import numpy as np
import simpy
LatencyDatum = namedtuple(
'LatencyDatum',
('t_queued', 't_processing', 't_total')
)
class SpeculatingRequestExecutor(object):
""" Simulates a M/G/k process common in request processing (computing) but
with always on speculation to another host
:param worker_desc: A tuple of (count, capacity) to construct workers with
    :param load_balancer: A function which takes the current request number
and the list of workers and returns the index of the worker to
send the next request to
    :param latency_fn: A function which takes the current request number and
        returns the number of milliseconds that request takes to process
:param number_of_requests: The number of requests to run through the
simulator
:param request_per_s: The rate of requests per second.
"""
def __init__(
self, worker_desc, load_balancer, latency_fn,
number_of_requests, request_per_s):
self.worker_desc = worker_desc
self.load_balancer = load_balancer
self.latency_fn = latency_fn
self.number_of_requests = int(number_of_requests)
self.request_interval_ms = 1. / (request_per_s / 1000.)
self.received_first = {'1': 0, '2': 0}
self.data = []
def simulate(self):
# Setup and start the simulation
random.seed(1)
np.random.seed(1)
self.env = simpy.Environment()
count, cap = self.worker_desc
self.workers = [
simpy.Resource(self.env, capacity=cap) for i in range(count)
]
self.env.process(self.generate_requests())
self.env.run()
def generate_requests(self):
for i in range(self.number_of_requests):
workers = []
for j in range(2):
idx = self.load_balancer(i, self.workers)
workers.append(self.workers[idx])
response = self.process_request(
i, workers[0], workers[1],
)
self.env.process(response)
# Exponential inter-arrival times == Poisson
arrival_interval = random.expovariate(
1.0 / self.request_interval_ms
)
yield self.env.timeout(arrival_interval)
def process_request(self, request_id, worker1, worker2):
""" Request arrives, possibly queues, and then processes"""
t_arrive = self.env.now
req1 = worker1.request()
req2 = worker2.request()
try:
result = yield req1 | req2
if req1 in result:
self.received_first['1'] += 1
req2.cancel()
req2.resource.release(req2)
else:
self.received_first['2'] += 1
req1.cancel()
req1.resource.release(req1)
t_start = self.env.now
t_queued = t_start - t_arrive
# Let the operation take w.e. amount of time the latency
# function tells us to
yield self.env.timeout(self.latency_fn(request_id))
t_done = self.env.now
t_processing = t_done - t_start
t_total_response = t_done - t_arrive
self.data.append(LatencyDatum(
t_queued, t_processing, t_total_response))
finally:
worker1.release(req1)
worker2.release(req2)
def run_speculation(
worker_desc, load_balancer, num_requests, request_per_s, latency_fn):
simulator = SpeculatingRequestExecutor(
worker_desc, load_balancer, latency_fn,
num_requests, request_per_s
)
simulator.simulate()
return simulator.data, simulator.received_first
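# Illustrative driver (not part of the original module): exercise the
# simulator with a uniformly random balancer and lognormal service times.
# All parameter values below are assumptions chosen for demonstration.
def _random_balancer(request_num, workers):
    return random.randint(0, len(workers) - 1)

def _lognormal_latency_ms(request_id):
    # roughly 7.4 ms median with a heavy right tail
    return np.random.lognormal(mean=2.0, sigma=0.5)

if __name__ == '__main__':
    data, received_first = run_speculation(
        worker_desc=(10, 1),
        load_balancer=_random_balancer,
        num_requests=10000,
        request_per_s=1000,
        latency_fn=_lognormal_latency_ms,
    )
    totals = np.array([d.t_total for d in data])
    print('p50 = %.1f ms, p99 = %.1f ms' % (
        np.percentile(totals, 50), np.percentile(totals, 99)))
    print('first responder counts:', received_first)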
|
py | 7df6ca4b486143d0d6833bfcfe8fdb5cb837393c | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import PresidentielcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from io import BytesIO
class DecodeScriptTest(PresidentielcoinTestFramework):
"""Tests decoding scripts via RPC command "decodescript"."""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
self.is_network_split = False
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
push_public_key_hash = '14' + public_key_hash
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
def decoderawtransaction_asm_sighashtype(self):
"""Tests decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
|
py | 7df6ca51f3f38d74d66f8f68a71af634be8c8c61 | import csv
from sklearn import tree, svm
from sklearn.neural_network import MLPClassifier
import random
gameSituations_train = []
playCalls_train = []
gameSituations_test = []
playCalls_test = []
# initialize different models
treeModel = tree.DecisionTreeClassifier()
linearSVC = svm.LinearSVC()
neuralNetwork = MLPClassifier()
# get data for 1st-3rd down calls
print('Importing 2012 NFL season play by play data...\n')
with open("2012_nfl_pbp_data_rp.csv") as f:
reader = csv.reader(f)
    header = next(reader)  # Skip the header row
for row in reader:
# row = gameid, qtr, min, sec, off, def, down, togo, ydline, description, offscore, defscore, season,,,
if(int(row[6]) > 0 and int(row[6]) <=3):
down = int(row[6])
distance = int(row[7])
spot = int(row[8])
play = str(row[9])
            # Assuming row[2]/row[3] hold minutes/seconds remaining in the
            # game, convert to minutes elapsed out of 60.
            time = 60.0 - (int(row[2]) + int(row[3]) / 60.0)
# use roughly 80% of data to train / 20% of data to test
if(random.random() < .80):
gameSituations_train.append([down, distance, spot, time])
playCalls_train.append(play)
else:
gameSituations_test.append([down, distance, spot, time])
playCalls_test.append(play)
# train models
print("Training Decision Tree...")
treeModel = treeModel.fit(gameSituations_train, playCalls_train)
print("Training Linear SVC...")
linearSVC = linearSVC.fit(gameSituations_train, playCalls_train)
print("Training MLP Neural Network...")
neuralNetwork = neuralNetwork.fit(gameSituations_train, playCalls_train)
# test models
testCounter = 0
treeModelHits = 0
linearSVCHits = 0
neuralNetworkHits = 0
for situation in gameSituations_test:
if(playCalls_test[testCounter] == treeModel.predict([situation])):
treeModelHits += 1
if(playCalls_test[testCounter] == linearSVC.predict([situation])):
linearSVCHits += 1
if(playCalls_test[testCounter] == neuralNetwork.predict([situation])):
neuralNetworkHits += 1
testCounter += 1
# Output Results
print("\nModels and respective percentage of correct predictions out of ", testCounter + 1, " tested game situations:\n")
print("Decision Tree: ", "%.2f" % (treeModelHits * 100 / (testCounter + 1)), " %")
print("Linear SVC: ", "%.2f" % (linearSVCHits * 100 / (testCounter + 1)), " %")
print("MLP Neural Network: ", "%.2f" % (neuralNetworkHits * 100 / (testCounter + 1)), " %") |
py | 7df6ca96a5d8eba1414200e2a86040b03c825db5 | # Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted SSL support.
"""
from twisted.trial import unittest
from twisted.internet import protocol, reactor, interfaces, defer
from twisted.protocols import basic
from twisted.python import util
from twisted.python.reflect import getClass, fullyQualifiedName
from twisted.python.runtime import platform
from twisted.test.test_tcp import WriteDataTestCase, ProperlyCloseFilesMixin
import os, errno
try:
from OpenSSL import SSL, crypto
from twisted.internet import ssl
from twisted.test.ssl_helpers import ClientTLSContext
except ImportError:
def _noSSL():
# ugh, make pyflakes happy.
global SSL
global ssl
SSL = ssl = None
_noSSL()
certPath = util.sibpath(__file__, "server.pem")
class UnintelligentProtocol(basic.LineReceiver):
"""
@ivar deferred: a deferred that will fire at connection lost.
@type deferred: L{defer.Deferred}
@cvar pretext: text sent before TLS is set up.
@type pretext: C{str}
@cvar posttext: text sent after TLS is set up.
@type posttext: C{str}
"""
pretext = [
"first line",
"last thing before tls starts",
"STARTTLS"]
posttext = [
"first thing after tls started",
"last thing ever"]
def __init__(self):
self.deferred = defer.Deferred()
def connectionMade(self):
for l in self.pretext:
self.sendLine(l)
def lineReceived(self, line):
if line == "READY":
self.transport.startTLS(ClientTLSContext(), self.factory.client)
for l in self.posttext:
self.sendLine(l)
self.transport.loseConnection()
def connectionLost(self, reason):
self.deferred.callback(None)
class LineCollector(basic.LineReceiver):
"""
@ivar deferred: a deferred that will fire at connection lost.
@type deferred: L{defer.Deferred}
    @ivar doTLS: whether the protocol should initiate TLS or not.
@type doTLS: C{bool}
@ivar fillBuffer: if set to True, it will send lots of data once
C{STARTTLS} is received.
@type fillBuffer: C{bool}
"""
def __init__(self, doTLS, fillBuffer=False):
self.doTLS = doTLS
self.fillBuffer = fillBuffer
self.deferred = defer.Deferred()
def connectionMade(self):
self.factory.rawdata = ''
self.factory.lines = []
def lineReceived(self, line):
self.factory.lines.append(line)
if line == 'STARTTLS':
if self.fillBuffer:
for x in range(500):
self.sendLine('X' * 1000)
self.sendLine('READY')
if self.doTLS:
ctx = ServerTLSContext(
privateKeyFileName=certPath,
certificateFileName=certPath,
)
self.transport.startTLS(ctx, self.factory.server)
else:
self.setRawMode()
def rawDataReceived(self, data):
self.factory.rawdata += data
self.transport.loseConnection()
def connectionLost(self, reason):
self.deferred.callback(None)
class SingleLineServerProtocol(protocol.Protocol):
"""
A protocol that sends a single line of data at C{connectionMade}.
"""
def connectionMade(self):
self.transport.write("+OK <some crap>\r\n")
self.transport.getPeerCertificate()
class RecordingClientProtocol(protocol.Protocol):
"""
@ivar deferred: a deferred that will fire with first received content.
@type deferred: L{defer.Deferred}
"""
def __init__(self):
self.deferred = defer.Deferred()
def connectionMade(self):
self.transport.getPeerCertificate()
def dataReceived(self, data):
self.deferred.callback(data)
class ImmediatelyDisconnectingProtocol(protocol.Protocol):
"""
    A protocol that disconnects immediately on connection. It fires the
    C{connectionDisconnected} deferred of its factory on connection lost.
"""
def connectionMade(self):
self.transport.loseConnection()
def connectionLost(self, reason):
self.factory.connectionDisconnected.callback(None)
def generateCertificateObjects(organization, organizationalUnit):
"""
Create a certificate for given C{organization} and C{organizationalUnit}.
@return: a tuple of (key, request, certificate) objects.
"""
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 512)
req = crypto.X509Req()
subject = req.get_subject()
subject.O = organization
subject.OU = organizationalUnit
req.set_pubkey(pkey)
req.sign(pkey, "md5")
# Here comes the actual certificate
cert = crypto.X509()
cert.set_serial_number(1)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60) # Testing certificates need not be long lived
cert.set_issuer(req.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(pkey, "md5")
return pkey, req, cert
def generateCertificateFiles(basename, organization, organizationalUnit):
"""
Create certificate files key, req and cert prefixed by C{basename} for
given C{organization} and C{organizationalUnit}.
"""
pkey, req, cert = generateCertificateObjects(organization, organizationalUnit)
for ext, obj, dumpFunc in [
('key', pkey, crypto.dump_privatekey),
('req', req, crypto.dump_certificate_request),
('cert', cert, crypto.dump_certificate)]:
fName = os.extsep.join((basename, ext))
fObj = file(fName, 'w')
fObj.write(dumpFunc(crypto.FILETYPE_PEM, obj))
fObj.close()
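# Example (illustrative): generateCertificateFiles('server', 'Example Org',
# 'Testing') writes 'server.key', 'server.req' and 'server.cert' into the
# current working directory of the test run.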
class ContextGeneratingMixin:
"""
Offer methods to create L{ssl.DefaultOpenSSLContextFactory} for both client
and server.
@ivar clientBase: prefix of client certificate files.
@type clientBase: C{str}
@ivar serverBase: prefix of server certificate files.
@type serverBase: C{str}
@ivar clientCtxFactory: a generated context factory to be used in
C{reactor.connectSSL}.
@type clientCtxFactory: L{ssl.DefaultOpenSSLContextFactory}
@ivar serverCtxFactory: a generated context factory to be used in
C{reactor.listenSSL}.
@type serverCtxFactory: L{ssl.DefaultOpenSSLContextFactory}
"""
def makeContextFactory(self, org, orgUnit, *args, **kwArgs):
base = self.mktemp()
generateCertificateFiles(base, org, orgUnit)
serverCtxFactory = ssl.DefaultOpenSSLContextFactory(
os.extsep.join((base, 'key')),
os.extsep.join((base, 'cert')),
*args, **kwArgs)
return base, serverCtxFactory
def setupServerAndClient(self, clientArgs, clientKwArgs, serverArgs,
serverKwArgs):
self.clientBase, self.clientCtxFactory = self.makeContextFactory(
*clientArgs, **clientKwArgs)
self.serverBase, self.serverCtxFactory = self.makeContextFactory(
*serverArgs, **serverKwArgs)
if SSL is not None:
class ServerTLSContext(ssl.DefaultOpenSSLContextFactory):
"""
A context factory with a default method set to L{SSL.TLSv1_METHOD}.
"""
isClient = False
def __init__(self, *args, **kw):
kw['sslmethod'] = SSL.TLSv1_METHOD
ssl.DefaultOpenSSLContextFactory.__init__(self, *args, **kw)
class StolenTCPTestCase(ProperlyCloseFilesMixin, unittest.TestCase):
"""
For SSL transports, test many of the same things which are tested for
TCP transports.
"""
def createServer(self, address, portNumber, factory):
"""
Create an SSL server with a certificate using L{IReactorSSL.listenSSL}.
"""
cert = ssl.PrivateCertificate.loadPEM(file(certPath).read())
contextFactory = cert.options()
return reactor.listenSSL(
portNumber, factory, contextFactory, interface=address)
def connectClient(self, address, portNumber, clientCreator):
"""
Create an SSL client using L{IReactorSSL.connectSSL}.
"""
contextFactory = ssl.CertificateOptions()
return clientCreator.connectSSL(address, portNumber, contextFactory)
def getHandleExceptionType(self):
"""
Return L{SSL.Error} as the expected error type which will be raised by
a write to the L{OpenSSL.SSL.Connection} object after it has been
closed.
"""
return SSL.Error
_iocp = 'twisted.internet.iocpreactor.reactor.IOCPReactor'
def getHandleErrorCode(self):
"""
Return the argument L{SSL.Error} will be constructed with for this
case. This is basically just a random OpenSSL implementation detail.
It would be better if this test worked in a way which did not require
this.
"""
# Windows 2000 SP 4 and Windows XP SP 2 give back WSAENOTSOCK for
# SSL.Connection.write for some reason. The twisted.protocols.tls
# implementation of IReactorSSL doesn't suffer from this imprecation,
# though, since it is isolated from the Windows I/O layer (I suppose?).
# If test_properlyCloseFiles waited for the SSL handshake to complete
# and performed an orderly shutdown, then this would probably be a
# little less weird: writing to a shutdown SSL connection has a more
# well-defined failure mode (or at least it should).
name = fullyQualifiedName(getClass(reactor))
if platform.getType() == 'win32' and name != self._iocp:
return errno.WSAENOTSOCK
# This is terribly implementation-specific.
return [('SSL routines', 'SSL_write', 'protocol is shutdown')]
class TLSTestCase(unittest.TestCase):
"""
Tests for startTLS support.
@ivar fillBuffer: forwarded to L{LineCollector.fillBuffer}
@type fillBuffer: C{bool}
"""
fillBuffer = False
clientProto = None
serverProto = None
def tearDown(self):
if self.clientProto.transport is not None:
self.clientProto.transport.loseConnection()
if self.serverProto.transport is not None:
self.serverProto.transport.loseConnection()
def _runTest(self, clientProto, serverProto, clientIsServer=False):
"""
Helper method to run TLS tests.
@param clientProto: protocol instance attached to the client
connection.
@param serverProto: protocol instance attached to the server
connection.
@param clientIsServer: flag indicated if client should initiate
startTLS instead of server.
@return: a L{defer.Deferred} that will fire when both connections are
lost.
"""
self.clientProto = clientProto
cf = self.clientFactory = protocol.ClientFactory()
cf.protocol = lambda: clientProto
if clientIsServer:
cf.server = False
else:
cf.client = True
self.serverProto = serverProto
sf = self.serverFactory = protocol.ServerFactory()
sf.protocol = lambda: serverProto
if clientIsServer:
sf.client = False
else:
sf.server = True
port = reactor.listenTCP(0, sf, interface="127.0.0.1")
self.addCleanup(port.stopListening)
reactor.connectTCP('127.0.0.1', port.getHost().port, cf)
return defer.gatherResults([clientProto.deferred, serverProto.deferred])
def test_TLS(self):
"""
Test for server and client startTLS: client should received data both
before and after the startTLS.
"""
def check(ignore):
self.assertEquals(
self.serverFactory.lines,
UnintelligentProtocol.pretext + UnintelligentProtocol.posttext
)
d = self._runTest(UnintelligentProtocol(),
LineCollector(True, self.fillBuffer))
return d.addCallback(check)
def test_unTLS(self):
"""
Test for server startTLS not followed by a startTLS in client: the data
received after server startTLS should be received as raw.
"""
def check(ignored):
self.assertEquals(
self.serverFactory.lines,
UnintelligentProtocol.pretext
)
self.failUnless(self.serverFactory.rawdata,
"No encrypted bytes received")
d = self._runTest(UnintelligentProtocol(),
LineCollector(False, self.fillBuffer))
return d.addCallback(check)
def test_backwardsTLS(self):
"""
Test startTLS first initiated by client.
"""
def check(ignored):
self.assertEquals(
self.clientFactory.lines,
UnintelligentProtocol.pretext + UnintelligentProtocol.posttext
)
d = self._runTest(LineCollector(True, self.fillBuffer),
UnintelligentProtocol(), True)
return d.addCallback(check)
class SpammyTLSTestCase(TLSTestCase):
"""
Test TLS features with bytes sitting in the out buffer.
"""
fillBuffer = True
class BufferingTestCase(unittest.TestCase):
serverProto = None
clientProto = None
def tearDown(self):
if self.serverProto.transport is not None:
self.serverProto.transport.loseConnection()
if self.clientProto.transport is not None:
self.clientProto.transport.loseConnection()
def test_openSSLBuffering(self):
serverProto = self.serverProto = SingleLineServerProtocol()
clientProto = self.clientProto = RecordingClientProtocol()
server = protocol.ServerFactory()
client = self.client = protocol.ClientFactory()
server.protocol = lambda: serverProto
client.protocol = lambda: clientProto
sCTX = ssl.DefaultOpenSSLContextFactory(certPath, certPath)
cCTX = ssl.ClientContextFactory()
port = reactor.listenSSL(0, server, sCTX, interface='127.0.0.1')
self.addCleanup(port.stopListening)
reactor.connectSSL('127.0.0.1', port.getHost().port, client, cCTX)
return clientProto.deferred.addCallback(
self.assertEquals, "+OK <some crap>\r\n")
class ConnectionLostTestCase(unittest.TestCase, ContextGeneratingMixin):
def testImmediateDisconnect(self):
org = "twisted.test.test_ssl"
self.setupServerAndClient(
(org, org + ", client"), {},
(org, org + ", server"), {})
# Set up a server, connect to it with a client, which should work since our verifiers
# allow anything, then disconnect.
serverProtocolFactory = protocol.ServerFactory()
serverProtocolFactory.protocol = protocol.Protocol
self.serverPort = serverPort = reactor.listenSSL(0,
serverProtocolFactory, self.serverCtxFactory)
clientProtocolFactory = protocol.ClientFactory()
clientProtocolFactory.protocol = ImmediatelyDisconnectingProtocol
clientProtocolFactory.connectionDisconnected = defer.Deferred()
clientConnector = reactor.connectSSL('127.0.0.1',
serverPort.getHost().port, clientProtocolFactory, self.clientCtxFactory)
return clientProtocolFactory.connectionDisconnected.addCallback(
lambda ignoredResult: self.serverPort.stopListening())
def testFailedVerify(self):
org = "twisted.test.test_ssl"
self.setupServerAndClient(
(org, org + ", client"), {},
(org, org + ", server"), {})
def verify(*a):
return False
self.clientCtxFactory.getContext().set_verify(SSL.VERIFY_PEER, verify)
serverConnLost = defer.Deferred()
serverProtocol = protocol.Protocol()
serverProtocol.connectionLost = serverConnLost.callback
serverProtocolFactory = protocol.ServerFactory()
serverProtocolFactory.protocol = lambda: serverProtocol
self.serverPort = serverPort = reactor.listenSSL(0,
serverProtocolFactory, self.serverCtxFactory)
clientConnLost = defer.Deferred()
clientProtocol = protocol.Protocol()
clientProtocol.connectionLost = clientConnLost.callback
clientProtocolFactory = protocol.ClientFactory()
clientProtocolFactory.protocol = lambda: clientProtocol
clientConnector = reactor.connectSSL('127.0.0.1',
serverPort.getHost().port, clientProtocolFactory, self.clientCtxFactory)
dl = defer.DeferredList([serverConnLost, clientConnLost], consumeErrors=True)
return dl.addCallback(self._cbLostConns)
def _cbLostConns(self, results):
(sSuccess, sResult), (cSuccess, cResult) = results
self.failIf(sSuccess)
self.failIf(cSuccess)
acceptableErrors = [SSL.Error]
# Rather than getting a verification failure on Windows, we are getting
# a connection failure. Without something like sslverify proxying
# in-between we can't fix up the platform's errors, so let's just
# specifically say it is only OK in this one case to keep the tests
# passing. Normally we'd like to be as strict as possible here, so
# we're not going to allow this to report errors incorrectly on any
# other platforms.
if platform.isWindows():
from twisted.internet.error import ConnectionLost
acceptableErrors.append(ConnectionLost)
sResult.trap(*acceptableErrors)
cResult.trap(*acceptableErrors)
return self.serverPort.stopListening()
class FakeContext:
"""
L{OpenSSL.SSL.Context} double which can more easily be inspected.
"""
def __init__(self, method):
self._method = method
self._options = 0
def set_options(self, options):
self._options |= options
def use_certificate_file(self, fileName):
pass
def use_privatekey_file(self, fileName):
pass
class DefaultOpenSSLContextFactoryTests(unittest.TestCase):
"""
Tests for L{ssl.DefaultOpenSSLContextFactory}.
"""
def setUp(self):
# pyOpenSSL Context objects aren't introspectable enough. Pass in
# an alternate context factory so we can inspect what is done to it.
self.contextFactory = ssl.DefaultOpenSSLContextFactory(
certPath, certPath, _contextFactory=FakeContext)
self.context = self.contextFactory.getContext()
def test_method(self):
"""
L{ssl.DefaultOpenSSLContextFactory.getContext} returns an SSL context
which can use SSLv3 or TLSv1 but not SSLv2.
"""
# SSLv23_METHOD allows SSLv2, SSLv3, or TLSv1
self.assertEqual(self.context._method, SSL.SSLv23_METHOD)
# And OP_NO_SSLv2 disables the SSLv2 support.
self.assertTrue(self.context._options & SSL.OP_NO_SSLv2)
# Make sure SSLv3 and TLSv1 aren't disabled though.
self.assertFalse(self.context._options & SSL.OP_NO_SSLv3)
self.assertFalse(self.context._options & SSL.OP_NO_TLSv1)
def test_missingCertificateFile(self):
"""
Instantiating L{ssl.DefaultOpenSSLContextFactory} with a certificate
filename which does not identify an existing file results in the
initializer raising L{OpenSSL.SSL.Error}.
"""
self.assertRaises(
SSL.Error,
ssl.DefaultOpenSSLContextFactory, certPath, self.mktemp())
def test_missingPrivateKeyFile(self):
"""
Instantiating L{ssl.DefaultOpenSSLContextFactory} with a private key
filename which does not identify an existing file results in the
initializer raising L{OpenSSL.SSL.Error}.
"""
self.assertRaises(
SSL.Error,
ssl.DefaultOpenSSLContextFactory, self.mktemp(), certPath)
class ClientContextFactoryTests(unittest.TestCase):
"""
Tests for L{ssl.ClientContextFactory}.
"""
def setUp(self):
self.contextFactory = ssl.ClientContextFactory()
self.contextFactory._contextFactory = FakeContext
self.context = self.contextFactory.getContext()
def test_method(self):
"""
L{ssl.ClientContextFactory.getContext} returns a context which can use
SSLv3 or TLSv1 but not SSLv2.
"""
self.assertEqual(self.context._method, SSL.SSLv23_METHOD)
self.assertTrue(self.context._options & SSL.OP_NO_SSLv2)
self.assertFalse(self.context._options & SSL.OP_NO_SSLv3)
self.assertFalse(self.context._options & SSL.OP_NO_TLSv1)
if interfaces.IReactorSSL(reactor, None) is None:
for tCase in [StolenTCPTestCase, TLSTestCase, SpammyTLSTestCase,
BufferingTestCase, ConnectionLostTestCase,
DefaultOpenSSLContextFactoryTests,
ClientContextFactoryTests]:
tCase.skip = "Reactor does not support SSL, cannot run SSL tests"
# Otherwise trial will run this test here
del WriteDataTestCase
|
py | 7df6cb3e1320c7f2d8ffdd580ee8d74266e1ca5c | # -*- coding: utf-8 -*-
import sys
import unittest
import datetime
class BaseTestCase(unittest.TestCase):
def tap(self, out):
sys.stderr.write("--- tap output start ---\n")
for line in out.splitlines():
sys.stderr.write(line + '\n')
sys.stderr.write("--- tap output end ---\n")
class TestCase(BaseTestCase):
def assertOpenInterval(self, interval,
expectedStart=None,
expectedTags=None,
expectedAnnotation=None,
description="interval"):
self.assertKeyExists(interval, "start", description, "{} does not contain a start date")
self.assertKeyNotExists(interval, "end", description, "{} does contain an end date")
return self.assertInterval(interval,
expectedStart=expectedStart,
expectedEnd=None,
expectedTags=expectedTags,
expectedAnnotation=expectedAnnotation,
description=description)
def assertClosedInterval(self, interval,
expectedStart=None,
expectedEnd=None,
expectedTags=None,
expectedAnnotation=None,
description="interval"):
self.assertKeyExists(interval, "start", description, "{} does not contain a start date")
self.assertKeyExists(interval, "end", description, "{} does not contain an end date")
return self.assertInterval(interval,
expectedStart=expectedStart,
expectedEnd=expectedEnd,
expectedTags=expectedTags,
expectedAnnotation=expectedAnnotation,
description=description)
def assertInterval(self, interval,
expectedStart=None,
expectedEnd=None,
expectedTags=None,
expectedAnnotation=None,
description="interval"):
if expectedStart:
self.assertIntervalTimestamp(interval, "start", expectedStart, description)
if expectedEnd:
self.assertIntervalTimestamp(interval, "end", expectedEnd, description)
if expectedTags:
self.assertKeyExists(interval, "tags", description, "{} does not contain tags")
self.assertIntervalValue(interval,
"tags",
expectedTags,
description,
"{} of {} do not match (expected: '{}', actual: '{}')")
if expectedAnnotation:
self.assertKeyExists(interval, "annotation", description, "{} is not annotated")
self.assertIntervalValue(interval,
"annotation",
expectedAnnotation,
description,
"{} of {} do not match (expected: '{}', actual: '{}')")
def assertKeyExists(self, interval, key, description, message):
self.assertTrue(key in interval, message.format(description))
def assertKeyNotExists(self, interval, key, description, message):
self.assertFalse(key in interval, message.format(description))
def assertIntervalTimestamp(self, interval, key, expected, description):
if isinstance(expected, datetime.datetime):
expected = "{:%Y%m%dT%H%M%SZ}".format(expected)
self.assertIntervalValue(interval,
key,
expected,
description,
"{} time of {} does not match (expected: '{}', actual: '{}')")
def assertIntervalValue(self, interval, key, expected, description, message):
actual = interval[key]
if isinstance(actual, list):
self.assertItemsEqual(actual,
expected,
message.format(key, description, expected, actual))
else:
self.assertEqual(actual,
expected,
message.format(key, description, expected, actual))
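    # Illustrative use inside a test case (hypothetical interval dict):
    #
    #     self.assertClosedInterval(
    #         {"start": "20180101T120000Z",
    #          "end": "20180101T130000Z",
    #          "tags": ["foo"]},
    #         expectedTags=["foo"])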
# vim: ai sts=4 et sw=4
|
py | 7df6cce3476273e2fcddded34a369e575a7fbedc | from .main import inform, Dingbot |
py | 7df6cd045f0c5d15277a05916433528ecd767ed3 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._C._te as te
import torch.fx as fx
from torch.fx import map_arg
from torch.fx.passes.shape_prop import ShapeProp
import operator
scope = te.KernelScope()
def truncate(model, k):
model = fx.symbolic_trace(model)
    new_graph = fx.Graph()
env = {}
cnt = 0
for node in list(model.graph.nodes):
new_node = new_graph.node_copy(node, lambda x: env[x.name])
env[node.name] = new_node
cnt += 1
if cnt == k:
new_graph.output(env[node.name])
break
return fx.GraphModule(model, new_graph)
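# Illustrative sketch (``MyModel`` is an assumed nn.Module): keep the graph up
# to its k-th FX node (placeholders count toward k) and return that node's value.
#
#     head = truncate(MyModel(), k=5)
#     partial = head(torch.randn(1, 16))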
# NNC Lowering Pass
def remove_args(model: torch.nn.Module):
fx_model = fx.symbolic_trace(model)
    # Iterate over a reversed snapshot since we mutate the graph while
    # walking it (reverse order also removes chains of dead nodes).
    for node in reversed(list(fx_model.graph.nodes)):
        if len(node.users) == 0 and node.op != 'output':
            fx_model.graph.erase_node(node)
fx_model.recompile()
return fx_model
class kernel_arena_scope(object):
def __enter__(self):
self.scope = te.KernelScope()
def __exit__(self, typ, val, traceback):
self.scope = None
def get_dim_args(dims):
dim_args = []
for dim in dims:
dim_args.append(te.DimArg(te.ExprHandle.int(dim), 'i' + str(len(dim_args))))
return dim_args
def get_te_shapes(shape):
return [te.ExprHandle.int(i) for i in shape]
def to_expr(x):
if isinstance(x, int):
return te.ExprHandle.int(x)
elif isinstance(x, float):
return te.ExprHandle.float(x)
else:
raise RuntimeError(f"type {type(x)} not supported")
def get_nnc_type(dtype):
if dtype == torch.float:
return te.Dtype.Float
elif dtype == torch.long:
return te.Dtype.Long
elif dtype == torch.float64:
return te.Dtype.Double
else:
raise RuntimeError("nyi")
lowering_functions = { }
def index_or_broadcast(shape, *args):
out = []
for idx, arg in enumerate(args):
if idx >= len(shape): continue
if shape[idx] == 1:
out.append(to_expr(0))
else:
out.append(arg)
return out
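# e.g. with shape == [1, 4], index_or_broadcast(shape, i, j) -> [to_expr(0), j]:
# size-1 dimensions are always loaded at index 0, which is how NumPy-style
# broadcasting is realized on buffer loads.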
def ones_like_lower(name, out_shape, inp_shapes, args):
def f(*idxs):
return to_expr(1.0)
res = te.Compute(name, get_dim_args(out_shape), f)
return res
# def select_lower(name, out_shape, inp_shapes, args):
# A = args[0]
# dim = args[1]
# idx = args[2]
# import pdb; pdb.set_trace()
# def f(*idxs):
# # idxs = list(idxs)
# idxs.insert(dim, to_expr(idx))
# # idxs = [to_expr(0)]
# return A.load(idxs)
# res = te.Compute(name, get_dim_args(out_shape), f)
# return res
def dot_lower(name, out_shape, inp_shapes, args):
mul_te = te.lower('aten::mul', list(args), get_te_shapes(inp_shapes[0][0]), get_nnc_type(inp_shapes[0][1]))
res = te.lower('aten::sum', [mul_te.buf()], get_te_shapes(out_shape), get_nnc_type(inp_shapes[0][1]))
return (res.buf(), [mul_te.stmt(), res.stmt()])
def mv_lower(name, out_shape, inp_shapes, args):
A = args[0]
B = args[1]
N, M = inp_shapes[0][0]
def f(n, m):
return A.load([n, m]) * B.load([m])
# mm = te.Compute('mm', get_dim_args([N,M]), f)
# out = te.Reduce(name, get_dim_args([N]), te.Sum(), mm, get_dim_args([M]))
# return out.buf(), [mm.stmt(), out.stmt()]
C = torch._C._te.BufHandle('C', get_te_shapes([N]), get_nnc_type(inp_shapes[0][1]))
s = torch._C._te.ExternalCall(C, "nnc_aten_mv", [A, B], [])
return C, [s]
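# Note: the commented-out Compute/Reduce pair above would express mv as a
# pointwise multiply followed by a sum reduction; the ExternalCall path used
# instead dispatches to ATen's mv kernel (registered as "nnc_aten_mv") at
# runtime, trading fusion opportunities for simplicity.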
lowering_functions[torch.ops.aten.ones_like] = ones_like_lower
lowering_functions[torch.ops.aten.dot] = dot_lower
# lowering_functions[torch.ops.aten.select] = select_lower
lowering_functions[torch.ops.aten.mv] = mv_lower
func_to_aten = {
operator.getitem: torch.ops.aten.slice,
operator.add: torch.ops.aten.add,
operator.mul: torch.ops.aten.mul,
torch.mul: torch.ops.aten.mul,
torch.sin: torch.ops.aten.sin,
torch.cos: torch.ops.aten.cos,
}
def process_shape(x):
if len(x) == 0:
return [1]
return x
def lower_function(node, op, nnc_args, args):
inp_shapes = fx.node.map_aggregate(args, lambda arg: (process_shape(arg.meta['tensor_meta'].shape), arg.meta['tensor_meta'].dtype) if isinstance(arg, fx.Node) and 'tensor_meta' in arg.meta else None)
if op in lowering_functions:
out = lowering_functions[op](node.name, process_shape(node.meta['tensor_meta'].shape), inp_shapes, nnc_args)
else:
if op in func_to_aten:
op = func_to_aten[op]
aten_str = f'aten::{op.__name__}'
out_shape = get_te_shapes(process_shape(node.meta['tensor_meta'].shape))
out = te.lower(aten_str, list(nnc_args), out_shape, get_nnc_type(node.meta['tensor_meta'].dtype))
if isinstance(out, te.Tensor):
return out.buf(), [out.stmt()]
else:
return out[0], out[1]
def nnc_compile(model: torch.nn.Module, example_inputs) -> torch.nn.Module:
"""
nnc_compile(model, example_inputs) returns a function with the same args
as `model.forward`, with an extra argument corresponding to where the
output is stored. This function takes the inputs (which must be PyTorch
tensors with the same shapes as example_inputs), and passes them to an
NNC executor.
"""
fx_model = fx.symbolic_trace(model)
ShapeProp(fx_model).propagate(*example_inputs)
# This env maps from nodes to `te.ExprHandle`, which represent the output
# of an NNC computation.
env = {}
def get_te_type(node):
return get_nnc_type(node.meta['tensor_meta'].dtype)
def gen_compute(args):
te_args = [env[arg.name] for arg in args]
def lookup_env(l):
res = fx.node.map_aggregate(l, lambda x: env[x.name] if isinstance(x, fx.Node) else x)
return res
def fetch_attr(target : str):
target_atoms = target.split('.')
attr_itr = fx_model
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
outs = None
inputs = []
module_attrs = []
compute_stmts = []
for node in fx_model.graph.nodes:
if node.op == 'placeholder':
# We simply map the input placeholder to a `te.Placeholder`, which
# also represents an input to the NNC computation.
shapes = get_te_shapes(node.meta['tensor_meta'].shape)
placeholder = te.Placeholder(node.name, get_te_type(node), shapes)
env[node.name] = placeholder.data()
inputs.append(placeholder)
elif node.op == 'call_function':
# This does the bulk of the work - we call `lower_function`, which
# returns a `te.ExprHandle` (the output of a NNC computation), and
# put it in our environment.
if 'tensor_meta' in node.meta:
# todo: fix kwargs handling
if node.kwargs:
raise RuntimeError("kwargs nyi")
buf, stmt = lower_function(node, node.target, lookup_env(node.args), node.args)
# if isinstance(stmt, list)
compute_stmts.extend(stmt)
env[node.name] = buf
elif node.target == getattr or node.target == operator.getitem:
# todo: handle non-tensor computations correctly
continue
elif node.op == 'output':
args = node.args
if not isinstance(args, tuple):
args = (args,)
if isinstance(args[0], tuple):
args = args[0]
te_args = lookup_env(args)
outs = (list(te_args), [(i.meta['tensor_meta'].shape, i.meta['tensor_meta'].dtype) for i in args])
elif node.op == 'get_attr':
# As NNC doesn't have any concept of state, we pull out the module
# attributes and pass them in as inputs to NNC.
module_attrs.append(node)
shapes = get_te_shapes(process_shape(node.meta['tensor_meta'].shape))
placeholder = te.Placeholder(node.name, get_te_type(node), shapes)
env[node.name] = placeholder.data()
else:
            raise RuntimeError(f"node op {node.op} with target {node.target} is not yet implemented")
loopnest = te.LoopNest(te.Stmt(compute_stmts), outs[0])
# loopnest.inline_intermediate_bufs(True)
loopnest.simplify()
loopnest.prepare_for_codegen()
stmt = te.simplify(loopnest.root_stmt())
cg = te.construct_codegen('llvm', stmt, [te.BufferArg(x) for x in [env[i.name] for i in module_attrs] + inputs + outs[0]])
alloc_results = [torch.empty(shape, dtype=dtype) for shape,dtype in outs[1]]
if module_attrs:
module_stuff = [fetch_attr(i.target).contiguous().data for i in module_attrs]
else:
module_stuff = []
def f(*inps, out_tensors=None):
if out_tensors is None:
results = alloc_results
else:
results = out_tensors
cg.call(module_stuff + list(inps) + results)
if out_tensors is None:
if len(results) == 1:
return results[0]
return results
return f
################################
# Example usage and Benchmarking
################################
def bench(f, warmup=3, iters=1000):
for _ in range(warmup):
f()
begin = time.time()
for _ in range(iters):
f()
print(time.time()-begin)
if __name__ == '__main__':
def f(a, b):
        return (torch.cos(a) * torch.sin(b))[:2000]
mod = fx.symbolic_trace(f)
inps = (torch.randn(5000), torch.randn(5000))
ShapeProp(mod).propagate(*inps)
cg = nnc_compile(mod, inps)
bench(lambda: cg(*inps))
bench(lambda: f(*inps))
exit(0) |
py | 7df6cd349268c1a72c227a901985079f6d2bf971 | from typing import List
class Solution:
def findNumbers(self, nums: List[int]) -> int:
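        # A number has an even digit count exactly when its decimal string length is even.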
evens = 0
for num in nums:
evens += 1 if len(str(num)) % 2 == 0 else 0
return evens |
py | 7df6cdad5609b355fbef42b1ddbfe5bcb2ffd3b8 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generator, List, Optional, Tuple, Type, TypeVar, Union
from . import enums, flags, utils
from .asset import Asset
from .colour import Colour
from .invite import Invite
from .mixins import Hashable
from .object import Object
from .permissions import PermissionOverwrite, Permissions
__all__ = (
'AuditLogDiff',
'AuditLogChanges',
'AuditLogEntry',
)
if TYPE_CHECKING:
import datetime
from . import abc
from .emoji import Emoji
from .guild import Guild
from .member import Member
from .role import Role
from .scheduled_event import ScheduledEvent
from .state import ConnectionState
from .types.audit_log import (
AuditLogChange as AuditLogChangePayload,
AuditLogEntry as AuditLogEntryPayload,
)
from .types.channel import (
PermissionOverwrite as PermissionOverwritePayload,
)
from .types.invite import Invite as InvitePayload
from .types.role import Role as RolePayload
from .types.snowflake import Snowflake
from .types.command import ApplicationCommandPermissions
from .user import User
from .stage_instance import StageInstance
from .sticker import GuildSticker
from .threads import Thread
from .integrations import PartialIntegration
from .app_commands import AppCommand
TargetType = Union[
Guild,
abc.GuildChannel,
Member,
User,
Role,
Invite,
Emoji,
StageInstance,
GuildSticker,
Thread,
Object,
PartialIntegration,
None,
]
def _transform_timestamp(entry: AuditLogEntry, data: Optional[str]) -> Optional[datetime.datetime]:
return utils.parse_time(data)
def _transform_color(entry: AuditLogEntry, data: int) -> Colour:
return Colour(data)
def _transform_snowflake(entry: AuditLogEntry, data: Snowflake) -> int:
return int(data)
def _transform_channel(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Union[abc.GuildChannel, Object]]:
if data is None:
return None
return entry.guild.get_channel(int(data)) or Object(id=data)
def _transform_member_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Union[Member, User, None]:
if data is None:
return None
return entry._get_member(int(data))
def _transform_guild_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Guild]:
if data is None:
return None
return entry._state._get_guild(int(data))
def _transform_overwrites(
entry: AuditLogEntry, data: List[PermissionOverwritePayload]
) -> List[Tuple[Object, PermissionOverwrite]]:
overwrites = []
for elem in data:
allow = Permissions(int(elem['allow']))
deny = Permissions(int(elem['deny']))
ow = PermissionOverwrite.from_pair(allow, deny)
ow_type = elem['type']
ow_id = int(elem['id'])
target = None
if ow_type == '0':
target = entry.guild.get_role(ow_id)
elif ow_type == '1':
target = entry._get_member(ow_id)
if target is None:
target = Object(id=ow_id)
overwrites.append((target, ow))
return overwrites
def _transform_icon(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
if data is None:
return None
if entry.action is enums.AuditLogAction.guild_update:
return Asset._from_guild_icon(entry._state, entry.guild.id, data)
else:
return Asset._from_icon(entry._state, entry._target_id, data, path='role') # type: ignore # target_id won't be None in this case
def _transform_avatar(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
if data is None:
return None
return Asset._from_avatar(entry._state, entry._target_id, data) # type: ignore # target_id won't be None in this case
def _transform_cover_image(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
if data is None:
return None
return Asset._from_scheduled_event_cover_image(entry._state, entry._target_id, data) # type: ignore # target_id won't be None in this case
def _guild_hash_transformer(path: str) -> Callable[[AuditLogEntry, Optional[str]], Optional[Asset]]:
def _transform(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
if data is None:
return None
return Asset._from_guild_image(entry._state, entry.guild.id, data, path=path)
return _transform
E = TypeVar('E', bound=enums.Enum)
def _enum_transformer(enum: Type[E]) -> Callable[[AuditLogEntry, int], E]:
def _transform(entry: AuditLogEntry, data: int) -> E:
return enums.try_enum(enum, data)
return _transform
F = TypeVar('F', bound=flags.BaseFlags)
def _flag_transformer(cls: Type[F]) -> Callable[[AuditLogEntry, Union[int, str]], F]:
def _transform(entry: AuditLogEntry, data: Union[int, str]) -> F:
return cls._from_value(int(data))
return _transform
def _transform_type(entry: AuditLogEntry, data: int) -> Union[enums.ChannelType, enums.StickerType]:
if entry.action.name.startswith('sticker_'):
return enums.try_enum(enums.StickerType, data)
else:
return enums.try_enum(enums.ChannelType, data)
class AuditLogDiff:
def __len__(self) -> int:
return len(self.__dict__)
def __iter__(self) -> Generator[Tuple[str, Any], None, None]:
yield from self.__dict__.items()
def __repr__(self) -> str:
values = ' '.join('%s=%r' % item for item in self.__dict__.items())
return f'<AuditLogDiff {values}>'
if TYPE_CHECKING:
def __getattr__(self, item: str) -> Any:
...
def __setattr__(self, key: str, value: Any) -> Any:
...
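# Note: a diff exposes each changed audit-log field as a plain attribute, so
# callers typically probe with hasattr(), e.g. `hasattr(diff, 'name')`, before
# reading a field that may not have changed.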
Transformer = Callable[["AuditLogEntry", Any], Any]
class AuditLogChanges:
# fmt: off
TRANSFORMERS: ClassVar[Dict[str, Tuple[Optional[str], Optional[Transformer]]]] = {
'verification_level': (None, _enum_transformer(enums.VerificationLevel)),
'explicit_content_filter': (None, _enum_transformer(enums.ContentFilter)),
'allow': (None, _flag_transformer(Permissions)),
'deny': (None, _flag_transformer(Permissions)),
'permissions': (None, _flag_transformer(Permissions)),
'id': (None, _transform_snowflake),
'color': ('colour', _transform_color),
'owner_id': ('owner', _transform_member_id),
'inviter_id': ('inviter', _transform_member_id),
'channel_id': ('channel', _transform_channel),
'afk_channel_id': ('afk_channel', _transform_channel),
'system_channel_id': ('system_channel', _transform_channel),
'system_channel_flags': (None, _flag_transformer(flags.SystemChannelFlags)),
'widget_channel_id': ('widget_channel', _transform_channel),
'rules_channel_id': ('rules_channel', _transform_channel),
'public_updates_channel_id': ('public_updates_channel', _transform_channel),
'permission_overwrites': ('overwrites', _transform_overwrites),
'splash_hash': ('splash', _guild_hash_transformer('splashes')),
'banner_hash': ('banner', _guild_hash_transformer('banners')),
'discovery_splash_hash': ('discovery_splash', _guild_hash_transformer('discovery-splashes')),
'icon_hash': ('icon', _transform_icon),
'avatar_hash': ('avatar', _transform_avatar),
'rate_limit_per_user': ('slowmode_delay', None),
'guild_id': ('guild', _transform_guild_id),
'tags': ('emoji', None),
'default_message_notifications': ('default_notifications', _enum_transformer(enums.NotificationLevel)),
'video_quality_mode': (None, _enum_transformer(enums.VideoQualityMode)),
'privacy_level': (None, _enum_transformer(enums.PrivacyLevel)),
'format_type': (None, _enum_transformer(enums.StickerFormatType)),
'type': (None, _transform_type),
'communication_disabled_until': ('timed_out_until', _transform_timestamp),
'expire_behavior': (None, _enum_transformer(enums.ExpireBehaviour)),
'mfa_level': (None, _enum_transformer(enums.MFALevel)),
'status': (None, _enum_transformer(enums.EventStatus)),
'entity_type': (None, _enum_transformer(enums.EntityType)),
'preferred_locale': (None, _enum_transformer(enums.Locale)),
'image_hash': ('cover_image', _transform_cover_image),
}
# fmt: on
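    # Each entry above maps a raw audit-log key to (renamed attribute or None,
    # transformer or None); e.g. the raw 'color' key is exposed as `colour`
    # after passing through _transform_color.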
def __init__(self, entry: AuditLogEntry, data: List[AuditLogChangePayload]):
self.before: AuditLogDiff = AuditLogDiff()
self.after: AuditLogDiff = AuditLogDiff()
if entry.action is enums.AuditLogAction.app_command_permission_update:
# special case entire process since each
# element in data is a different target
self.before.app_command_permissions = []
self.after.app_command_permissions = []
for d in data:
self._handle_app_command_permissions(
self.before,
self.after,
entry,
int(d['key']),
d.get('old_value'), # type: ignore # old value will be an ApplicationCommandPermissions if present
d.get('new_value'), # type: ignore # new value will be an ApplicationCommandPermissions if present
)
return
for elem in data:
attr = elem['key']
# special cases for role add/remove
if attr == '$add':
self._handle_role(self.before, self.after, entry, elem['new_value']) # type: ignore # new_value is a list of roles in this case
continue
elif attr == '$remove':
self._handle_role(self.after, self.before, entry, elem['new_value']) # type: ignore # new_value is a list of roles in this case
continue
try:
key, transformer = self.TRANSFORMERS[attr]
except (ValueError, KeyError):
transformer = None
else:
if key:
attr = key
transformer: Optional[Transformer]
try:
before = elem['old_value']
except KeyError:
before = None
else:
if transformer:
before = transformer(entry, before)
setattr(self.before, attr, before)
try:
after = elem['new_value']
except KeyError:
after = None
else:
if transformer:
after = transformer(entry, after)
setattr(self.after, attr, after)
# add an alias
if hasattr(self.after, 'colour'):
self.after.color = self.after.colour
self.before.color = self.before.colour
if hasattr(self.after, 'expire_behavior'):
self.after.expire_behaviour = self.after.expire_behavior
self.before.expire_behaviour = self.before.expire_behavior
def __repr__(self) -> str:
return f'<AuditLogChanges before={self.before!r} after={self.after!r}>'
def _handle_role(self, first: AuditLogDiff, second: AuditLogDiff, entry: AuditLogEntry, elem: List[RolePayload]) -> None:
if not hasattr(first, 'roles'):
setattr(first, 'roles', [])
data = []
g: Guild = entry.guild
for e in elem:
role_id = int(e['id'])
role = g.get_role(role_id)
if role is None:
role = Object(id=role_id)
role.name = e['name'] # type: ignore # Object doesn't usually have name
data.append(role)
setattr(second, 'roles', data)
def _handle_app_command_permissions(
self,
before: AuditLogDiff,
after: AuditLogDiff,
entry: AuditLogEntry,
target_id: int,
old_value: Optional[ApplicationCommandPermissions],
new_value: Optional[ApplicationCommandPermissions],
):
guild = entry.guild
old_permission = new_permission = target = None
if target_id == (guild.id - 1):
# avoid circular import
from .app_commands import AllChannels
# all channels
target = AllChannels(guild)
else:
# get type and determine role, user or channel
_value = old_value or new_value
if _value is None:
return
permission_type = _value['type']
if permission_type == 1:
# role
target = guild.get_role(target_id)
elif permission_type == 2:
# user
target = entry._get_member(target_id)
elif permission_type == 3:
# channel
target = guild.get_channel(target_id)
if target is None:
target = Object(target_id)
if old_value is not None:
old_permission = old_value['permission']
before.app_command_permissions.append((target, old_permission))
if new_value is not None:
new_permission = new_value['permission']
after.app_command_permissions.append((target, new_permission))
class _AuditLogProxy:
def __init__(self, **kwargs: Any) -> None:
for k, v in kwargs.items():
setattr(self, k, v)
class _AuditLogProxyMemberPrune(_AuditLogProxy):
delete_member_days: int
members_removed: int
class _AuditLogProxyMemberMoveOrMessageDelete(_AuditLogProxy):
channel: Union[abc.GuildChannel, Thread]
count: int
class _AuditLogProxyMemberDisconnect(_AuditLogProxy):
count: int
class _AuditLogProxyPinAction(_AuditLogProxy):
channel: Union[abc.GuildChannel, Thread]
message_id: int
class _AuditLogProxyStageInstanceAction(_AuditLogProxy):
channel: abc.GuildChannel
class _AuditLogProxyMessageBulkDelete(_AuditLogProxy):
count: int
class AuditLogEntry(Hashable):
r"""Represents an Audit Log entry.
You retrieve these via :meth:`Guild.audit_logs`.
.. container:: operations
.. describe:: x == y
Checks if two entries are equal.
.. describe:: x != y
Checks if two entries are not equal.
.. describe:: hash(x)
Returns the entry's hash.
.. versionchanged:: 1.7
Audit log entries are now comparable and hashable.
Attributes
-----------
action: :class:`AuditLogAction`
The action that was done.
user: :class:`abc.User`
        The user who initiated this action. Usually a :class:`Member`\, unless
        the user has since left the guild, in which case it's a :class:`User`.
id: :class:`int`
The entry ID.
target: Any
The target that got changed. The exact type of this depends on
the action being done.
reason: Optional[:class:`str`]
The reason this action was done.
extra: Any
Extra information that this entry has that might be useful.
For most actions, this is ``None``. However in some cases it
contains extra information. See :class:`AuditLogAction` for
which actions have this field filled out.
"""
def __init__(
self,
*,
users: Dict[int, User],
integrations: Dict[int, PartialIntegration],
app_commands: Dict[int, AppCommand],
data: AuditLogEntryPayload,
guild: Guild,
):
self._state: ConnectionState = guild._state
self.guild: Guild = guild
self._users: Dict[int, User] = users
self._integrations: Dict[int, PartialIntegration] = integrations
self._app_commands: Dict[int, AppCommand] = app_commands
self._from_data(data)
def _from_data(self, data: AuditLogEntryPayload) -> None:
self.action: enums.AuditLogAction = enums.try_enum(enums.AuditLogAction, data['action_type'])
self.id: int = int(data['id'])
# this key is technically not usually present
self.reason: Optional[str] = data.get('reason')
extra = data.get('options')
# fmt: off
self.extra: Union[
_AuditLogProxyMemberPrune,
_AuditLogProxyMemberMoveOrMessageDelete,
_AuditLogProxyMemberDisconnect,
_AuditLogProxyPinAction,
_AuditLogProxyStageInstanceAction,
_AuditLogProxyMessageBulkDelete,
Member, User, None, PartialIntegration,
Role, Object
] = None
# fmt: on
if isinstance(self.action, enums.AuditLogAction) and extra:
if self.action is enums.AuditLogAction.member_prune:
# member prune has two keys with useful information
self.extra = _AuditLogProxyMemberPrune(
delete_member_days=int(extra['delete_member_days']),
members_removed=int(extra['members_removed']),
)
elif self.action is enums.AuditLogAction.member_move or self.action is enums.AuditLogAction.message_delete:
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyMemberMoveOrMessageDelete(
count=int(extra['count']),
channel=self.guild.get_channel_or_thread(channel_id) or Object(id=channel_id),
)
elif self.action is enums.AuditLogAction.member_disconnect:
# The member disconnect action has a dict with some information
self.extra = _AuditLogProxyMemberDisconnect(count=int(extra['count']))
elif self.action is enums.AuditLogAction.message_bulk_delete:
# The bulk message delete action has the number of messages deleted
self.extra = _AuditLogProxyMessageBulkDelete(count=int(extra['count']))
elif self.action.name.endswith('pin'):
# the pin actions have a dict with some information
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyPinAction(
channel=self.guild.get_channel_or_thread(channel_id) or Object(id=channel_id),
message_id=int(extra['message_id']),
)
elif self.action.name.startswith('overwrite_'):
# the overwrite_ actions have a dict with some information
instance_id = int(extra['id'])
the_type = extra.get('type')
if the_type == '1':
self.extra = self._get_member(instance_id)
elif the_type == '0':
role = self.guild.get_role(instance_id)
if role is None:
role = Object(id=instance_id)
role.name = extra.get('role_name') # type: ignore # Object doesn't usually have name
self.extra = role
elif self.action.name.startswith('stage_instance'):
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyStageInstanceAction(
channel=self.guild.get_channel(channel_id) or Object(id=channel_id)
)
elif self.action.name.startswith('app_command'):
application_id = int(extra['application_id'])
self.extra = self._get_integration_by_app_id(application_id) or Object(application_id)
        # this key is typically not present when the above is present.
# It's a list of { new_value: a, old_value: b, key: c }
# where new_value and old_value are not guaranteed to be there depending
# on the action type, so let's just fetch it for now and only turn it
# into meaningful data when requested
self._changes = data.get('changes', [])
user_id = utils._get_as_snowflake(data, 'user_id')
self.user: Optional[Union[User, Member]] = self._get_member(user_id)
self._target_id = utils._get_as_snowflake(data, 'target_id')
def _get_member(self, user_id: Optional[int]) -> Union[Member, User, None]:
if user_id is None:
return None
return self.guild.get_member(user_id) or self._users.get(user_id)
def _get_integration(self, integration_id: Optional[int]) -> Optional[PartialIntegration]:
if integration_id is None:
return None
return self._integrations.get(integration_id)
def _get_integration_by_app_id(self, application_id: Optional[int]) -> Optional[PartialIntegration]:
if application_id is None:
return None
# get PartialIntegration by application id
return utils.get(self._integrations.values(), application_id=application_id)
def _get_app_command(self, app_command_id: Optional[int]) -> Optional[AppCommand]:
if app_command_id is None:
return None
return self._app_commands.get(app_command_id)
def __repr__(self) -> str:
return f'<AuditLogEntry id={self.id} action={self.action} user={self.user!r}>'
@utils.cached_property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the entry's creation time in UTC."""
return utils.snowflake_time(self.id)
@utils.cached_property
def target(self) -> TargetType:
if self.action.target_type is None:
return None
try:
converter = getattr(self, '_convert_target_' + self.action.target_type)
except AttributeError:
if self._target_id is None:
return None
return Object(id=self._target_id)
else:
return converter(self._target_id)
@utils.cached_property
def category(self) -> Optional[enums.AuditLogActionCategory]:
"""Optional[:class:`AuditLogActionCategory`]: The category of the action, if applicable."""
return self.action.category
@utils.cached_property
def changes(self) -> AuditLogChanges:
""":class:`AuditLogChanges`: The list of changes this entry has."""
obj = AuditLogChanges(self, self._changes)
del self._changes
return obj
@utils.cached_property
def before(self) -> AuditLogDiff:
""":class:`AuditLogDiff`: The target's prior state."""
return self.changes.before
@utils.cached_property
def after(self) -> AuditLogDiff:
""":class:`AuditLogDiff`: The target's subsequent state."""
return self.changes.after
def _convert_target_guild(self, target_id: int) -> Guild:
return self.guild
def _convert_target_channel(self, target_id: int) -> Union[abc.GuildChannel, Object]:
return self.guild.get_channel(target_id) or Object(id=target_id)
def _convert_target_user(self, target_id: int) -> Union[Member, User, None]:
return self._get_member(target_id)
def _convert_target_role(self, target_id: int) -> Union[Role, Object]:
return self.guild.get_role(target_id) or Object(id=target_id)
def _convert_target_invite(self, target_id: None) -> Invite:
# invites have target_id set to null
# so figure out which change has the full invite data
changeset = self.before if self.action is enums.AuditLogAction.invite_delete else self.after
fake_payload: InvitePayload = {
'max_age': changeset.max_age,
'max_uses': changeset.max_uses,
'code': changeset.code,
'temporary': changeset.temporary,
'uses': changeset.uses,
'channel': None, # type: ignore # the channel is passed to the Invite constructor directly
}
obj = Invite(state=self._state, data=fake_payload, guild=self.guild, channel=changeset.channel)
try:
obj.inviter = changeset.inviter
except AttributeError:
pass
return obj
def _convert_target_emoji(self, target_id: int) -> Union[Emoji, Object]:
return self._state.get_emoji(target_id) or Object(id=target_id)
def _convert_target_message(self, target_id: int) -> Union[Member, User, None]:
return self._get_member(target_id)
def _convert_target_stage_instance(self, target_id: int) -> Union[StageInstance, Object]:
return self.guild.get_stage_instance(target_id) or Object(id=target_id)
def _convert_target_sticker(self, target_id: int) -> Union[GuildSticker, Object]:
return self._state.get_sticker(target_id) or Object(id=target_id)
def _convert_target_thread(self, target_id: int) -> Union[Thread, Object]:
return self.guild.get_thread(target_id) or Object(id=target_id)
def _convert_target_guild_scheduled_event(self, target_id: int) -> Union[ScheduledEvent, Object]:
return self.guild.get_scheduled_event(target_id) or Object(id=target_id)
def _convert_target_integration(self, target_id: int) -> Union[PartialIntegration, Object]:
return self._get_integration(target_id) or Object(target_id)
def _convert_target_app_command(self, target_id: int) -> Union[AppCommand, Object]:
return self._get_app_command(target_id) or Object(target_id)
def _convert_target_integration_or_app_command(self, target_id: int) -> Union[PartialIntegration, AppCommand, Object]:
return self._get_integration_by_app_id(target_id) or self._get_app_command(target_id) or Object(target_id)
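# Usage sketch (requires a connected client with the View Audit Log permission;
# `guild` is a hypothetical Guild instance, not part of this module):
#     async for entry in guild.audit_logs(limit=5):
#         print(entry.action, entry.user, entry.created_at)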
|
py | 7df6ce7e104dccfe9e0a2d3e4cea154c4931f80b | #!/usr/bin/env python
import rospy
import numpy as np
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
MAX_DECEL = .5
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
        # TODO: Subscribe to /obstacle_waypoint once obstacle detection is available:
        #rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.base_lane = None
self.pose = None
self.stopline_wp_idx = -1
self.waypoints_2d = None
self.waypoint_tree = None
self.loop()
def loop(self):
rate = rospy.Rate(25)
while not rospy.is_shutdown():
if self.pose and self.base_lane:
self.publish_waypoints()
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
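        # KDTree.query returns a (distance, index) pair; [1] selects the index
        # of the waypoint nearest to the car's current position.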
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# Check if closest behind vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx-1]
# Hyperplane through closest_coord
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect-prev_vect, pos_vect-cl_vect)
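        # A positive dot product means the closest waypoint lies behind the car,
        # so advance to the next waypoint along the track (modulo wrap-around).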
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self):
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]
# If no light detected, publish base_waypoints
        rospy.logdebug('stopline_wp_idx=%s farthest_idx=%s', self.stopline_wp_idx, farthest_idx)
if (self.stopline_wp_idx == -1) or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = base_waypoints
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
def decelerate_waypoints(self, waypoints, closest_idx):
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
            stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0)  # two waypoints back from the stop line so the front of the car stops at the line
dist = self.distance(waypoints, i, stop_idx)
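            # v = sqrt(2*a*d): the largest speed from which the car can still
            # come to rest within dist at a constant deceleration of MAX_DECEL.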
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.:
vel = 0.
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.base_lane = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
|
py | 7df6cebb8ef28b2e9f3c7555d3cf84ff4908c844 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pyrdf
----------------------------------
Tests for `pyrdf` module.
"""
import unittest
from pyrdf import pyrdf
class TestPyrdf(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
py | 7df6cfa50a6eab2cc3facbc9a2c220faf47a569e | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Sprint P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a sprint node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# sprint/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from .util import hex_str_to_bytes, bytes_to_hex_str
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
from test_framework.siphash import siphash256
import sprint_hash
BIP0031_VERSION = 60000
MY_VERSION = 70209 # SHORT_IDS_BLOCKS_VERSION to support cmpct blocks
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def sprinthash(s):
return sprint_hash.getPoWHash(s)
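# hash256 (double-SHA256) is used here for transaction hashes, while sprinthash
# is the coin-specific proof-of-work hash used for block header hashes.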
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
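# e.g. ser_compact_size(252) == b'\xfc' and ser_compact_size(253) == b'\xfd\xfd\x00':
# values below 253 fit in a single byte; larger ones get a 0xfd/0xfe/0xff marker
# followed by a 2/4/8-byte little-endian length.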
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
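# e.g. the well-known genesis nBits 0x1d00ffff expand to 0x00ffff << (8 * (0x1d - 3)).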
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to sprintd objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
20: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(sprinthash(r))
self.hash = encode(sprinthash(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
def get_merkle_root(self, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self):
r = b""
r += ser_compact_size(self.index)
r += self.tx.serialize()
return r
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
def serialize(self):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
    def initialize_from_block(self, block, nonce=0, prefill_list=None):
        # Avoid a mutable default argument; default to prefilling the coinbase (index 0).
        if prefill_list is None:
            prefill_list = [0]
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
self.shortids.append(calculate_shortid(k0, k1, block.vtx[i].sha256))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a mutable default argument: a shared CTransaction() default
        # would leak state between msg_tx instances.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in sprintd indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
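# Usage sketch (hypothetical `cb` is a NodeConnCB instance):
#     wait_until(lambda: cb.verack_received, timeout=10)
# The predicate is re-evaluated under mininode_lock roughly every 50ms until it
# returns True or the attempt/timeout bounds are exceeded.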
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_open(self, conn): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
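# Subclassing sketch (illustrative): tests typically override a handful of
# on_* handlers and leave the rest as no-ops; this counter simply tallies
# block headers announced by the peer.
class _ExampleHeaderCounter(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.headers_count = 0
    def on_headers(self, conn, message):
        self.headers_count += len(message.headers)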
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout=timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xbf\x0c\x6b\xbd", # mainnet
"testnet3": b"\xce\xe2\xca\xff", # testnet3
"regtest": b"\xfc\xc1\xb7\xdc", # regtest
"devnet": b"\xe2\xca\xff\xce", # devnet
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print('MiniNode: Connecting to Sprint Node IP # ' + dstaddr + ':' \
+ str(dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
if self.state != "connected":
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
            # asyncore does not expose the socket connection state, only the
            # first read/write event, so we must check the connection manually
            # here to know when we actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + str(command) + "' " +
repr(msg))
except Exception as e:
print('got_data:', repr(e))
# import traceback
# traceback.print_tb(sys.exc_info()[2])
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
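# Bootstrap sketch (hypothetical host/port; rpc may be None when only raw p2p
# traffic is exercised): wire a callback into a NodeConn and start the
# asyncore event loop on the NetworkThread.
def _example_connect():
    callback = SingleNodeConnCB()
    conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=callback)
    callback.add_connection(conn)
    NetworkThread().start()
    callback.wait_for_verack()
    return conn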
|
py | 7df6cfc5d00ad91c1df431c4f43197d1fddb7e1a | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_200spin/eco"):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
    # SET UP ENVIRONMENT AND VARIABLES
####################################################
gamma=0.95
step_fact = 2
env_args = {'observables':DEFAULT_OBSERVABLES,
'reward_signal':RewardSignal.BLS,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':1./200,
'reversible_spins':True}
####################################################
# SET UP TRAINING AND TEST GRAPHS
####################################################
n_spins_train = 200
train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train,p_connection=0.15,edge_type=EdgeType.DISCRETE)
####
# Pre-generated test graphs
####
graph_save_loc = "_graphs/testing/ER_200spin_p15_50graphs.pkl"
graphs_test = load_graph_set(graph_save_loc)
n_tests = len(graphs_test)
test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)
####################################################
# SET UP TRAINING AND TEST ENVIRONMENTS
####################################################
train_envs = [ising_env.make("SpinSystem",
train_graph_generator,
int(n_spins_train*step_fact),
**env_args)]
n_spins_test = train_graph_generator.get().shape[0]
test_envs = [ising_env.make("SpinSystem",
test_graph_generator,
int(n_spins_test*step_fact),
**env_args)]
####################################################
# SET UP FOLDERS FOR SAVING DATA
####################################################
data_folder = os.path.join(save_loc,'data')
network_folder = os.path.join(save_loc, 'network')
mk_dir(data_folder)
mk_dir(network_folder)
# print(data_folder)
network_save_path = os.path.join(network_folder,'network.pth')
test_save_path = os.path.join(network_folder,'test_scores.pkl')
loss_save_path = os.path.join(network_folder, 'losses.pkl')
####################################################
# SET UP AGENT
####################################################
nb_steps = 10000000
network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
n_layers=3,
n_features=64,
n_hid_readout=[],
tied_weights=False)
agent = DQN(train_envs,
network_fn,
init_network_params=None,
init_weight_std=0.01,
double_dqn=True,
clip_Q_targets=False,
replay_start_size=3000,
replay_buffer_size=15000, # 20000
gamma=gamma, # 1
update_target_frequency=4000, # 500
update_learning_rate=False,
initial_learning_rate=1e-4,
peak_learning_rate=1e-4,
peak_learning_rate_step=20000,
final_learning_rate=1e-4,
final_learning_rate_step=200000,
update_frequency=32, # 1
minibatch_size=64, # 128
max_grad_norm=None,
weight_decay=0,
update_exploration=True,
initial_exploration_rate=1,
final_exploration_rate=0.05, # 0.05
final_exploration_step=800000, # 40000
adam_epsilon=1e-8,
logging=False,
loss="mse",
save_network_frequency=400000,
network_save_path=network_save_path,
evaluate=True,
test_envs=test_envs,
test_episodes=n_tests,
test_frequency=50000, # 10000
test_save_path=test_save_path,
test_metric=TestMetric.MAX_CUT,
seed=None
)
print("\n Created DQN agent with network:\n\n", agent.network)
#############
# TRAIN AGENT
#############
start = time.time()
agent.learn(timesteps=nb_steps, verbose=True)
print(time.time() - start)
agent.save()
############
# PLOT - learning curve
############
data = pickle.load(open(test_save_path,'rb'))
data = np.array(data)
fig_fname = os.path.join(network_folder,"training_curve")
plt.plot(data[:,0],data[:,1])
plt.xlabel("Timestep")
plt.ylabel("Mean reward")
if agent.test_metric==TestMetric.ENERGY_ERROR:
plt.ylabel("Energy Error")
elif agent.test_metric==TestMetric.BEST_ENERGY:
plt.ylabel("Best Energy")
elif agent.test_metric==TestMetric.CUMULATIVE_REWARD:
plt.ylabel("Cumulative Reward")
elif agent.test_metric==TestMetric.MAX_CUT:
plt.ylabel("Max Cut")
elif agent.test_metric==TestMetric.FINAL_CUT:
plt.ylabel("Final Cut")
plt.savefig(fig_fname + ".png", bbox_inches='tight')
plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
plt.clf()
############
# PLOT - losses
############
data = pickle.load(open(loss_save_path,'rb'))
data = np.array(data)
fig_fname = os.path.join(network_folder,"loss")
N=50
data_x = np.convolve(data[:,0], np.ones((N,))/N, mode='valid')
data_y = np.convolve(data[:,1], np.ones((N,))/N, mode='valid')
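    # (np.convolve with a length-N uniform kernel is a simple N-point moving
    # average; mode='valid' trims the edges where the window is incomplete.)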
plt.plot(data_x,data_y)
plt.xlabel("Timestep")
plt.ylabel("Loss")
plt.yscale("log")
plt.grid(True)
plt.savefig(fig_fname + ".png", bbox_inches='tight')
plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() |
py | 7df6cfcac95d7c53470df188c0baa28e84d6426e | from .base import BaseModule
from .wrappers import Residual, ReZero
from .normalization import LayerNormSimple
__all__ = ['BaseModule', 'Residual', 'ReZero', 'LayerNormSimple']
|
py | 7df6d04ae6b8aebd179e54cc6ba1efeef02bba9e | # -*- coding: utf-8 -*-
"""
Created on Saturday, March 31st 2018
@author: Sagar Kishore
API wrapper that fetches word definitions, example sentences, synonyms,
antonyms, and the audio file link for a given word. Also handles exceptions
for specific failure cases.
"""
import requests
from requests.exceptions import HTTPError
from word import Word
from pprint import PrettyPrinter
def getdefinitions(word_object, word_id):
"""
Adds definitions to the word_object. Returns an error status indicating
if the GET failed.
"""
    # Maybe encrypt this information?
APP_ID = '5c0f7093'
APP_KEY = 'd10576f4a34a64e8236472e947af906f'
LANGUAGE = 'en'
url_head = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/'
url = url_head + LANGUAGE + '/' + word_id.lower()
r = requests.get(url, headers={'app_id': APP_ID, 'app_key': APP_KEY})
try:
r.raise_for_status()
status = True, r.status_code
except HTTPError:
status = False, r.status_code
return status
response_dict = r.json()
header = response_dict['results'][0]
name = header['word']
lex_entries = {}
AudioLink = ''
lex_list = header['lexicalEntries']
for lex in lex_list:
lexentry = {}
category = lex['lexicalCategory']
try:
AudioLink = lex['pronunciations'][0]['audioFile']
except KeyError:
pass
try:
sense = lex['entries'][0]['senses'][0]
try:
lexentry['definition'] = sense['definitions']
except KeyError:
continue
except KeyError:
derivativeOf = lex['derivativeOf'][0]['text']
status = False, r.status_code, derivativeOf
return status
lexentry['examples'] = []
try:
for examples in sense['examples']:
lexentry['examples'].append(examples['text'])
except KeyError:
pass
lex_entries[category] = lexentry
word_object.name = name
word_object.definitions = lex_entries
word_object.audio = AudioLink
return status
def getsynant(word_object, word_id):
"""
Adds Synonyms and Antonyms to the word_object. Returns an error status
indicating if the GET failed.
"""
app_id = '5c0f7093'
app_key = 'd10576f4a34a64e8236472e947af906f'
language = 'en'
url_head = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/'
url = url_head + language + '/' + word_id.lower() + '/synonyms;antonyms'
r = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
try:
r.raise_for_status()
status = True, r.status_code
except HTTPError:
status = False, r.status_code
return status
response_dict = r.json()
header = response_dict['results'][0]
lex_entries = {}
lex_list = header['lexicalEntries']
for lex in lex_list:
lexentry = {}
category = lex['lexicalCategory']
sense = lex['entries'][0]['senses'][0]
try:
lex_ant_list = sense['antonyms']
except KeyError:
lex_ant_list = []
try:
lex_syn_list = sense['synonyms']
except KeyError:
lex_syn_list = []
antonyms_list = []
for antonyms in lex_ant_list:
antonyms_list.append(antonyms['text'])
synonyms_list = []
for synonyms in lex_syn_list:
synonyms_list.append(synonyms['text'])
lexentry['synonyms'] = synonyms_list
lexentry['antonyms'] = antonyms_list
lex_entries[category] = lexentry
word_object.synant = lex_entries
return status
def addword(word_object):
word_id = word_object.name
status1 = getdefinitions(word_object, word_id)
status2 = getsynant(word_object, word_id)
return (word_object, status1, status2)
# DEBUGGING
def main(name=None):
pp = PrettyPrinter(indent=4)
if name is None:
name = 'hypochondriac'
a = Word(name)
a, status1, status2 = addword(a)
pp.pprint(a)
pp.pprint(status1)
pp.pprint(status2)
if __name__ == '__main__':
main('venality')
# -i debugging
# name = 'precipitate'
# pp = PrettyPrinter(indent=4)
# a = Word(name)
# a, status1, status2 = addword(a)
# repr(a)
# pp.pprint(a)
# if(status1[2]):
# a = Word(status1[2])
# a, s1, s2 = addword(a)
|
py | 7df6d075366a7c64916bb6c1922048fa935b6ea7 | # coding: utf-8
import numpy as np
from itertools import product
from mpi4py import MPI
from psydac.ddm.partition import mpi_compute_dims
__all__ = ['find_mpi_type', 'CartDecomposition', 'CartDataExchanger']
#===============================================================================
def find_mpi_type( dtype ):
"""
Find correct MPI datatype that corresponds to user-provided datatype.
Parameters
----------
dtype : [type | str | numpy.dtype | mpi4py.MPI.Datatype]
Datatype for which the corresponding MPI datatype is requested.
Returns
-------
mpi_type : mpi4py.MPI.Datatype
MPI datatype to be used for communication.
"""
if isinstance( dtype, MPI.Datatype ):
mpi_type = dtype
else:
nt = np.dtype( dtype )
mpi_type = MPI._typedict[nt.char]
return mpi_type
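# Usage sketch (illustrative): numpy dtype objects, dtype strings and
# ready-made MPI datatypes are all accepted and resolve to the same handle.
def _example_find_mpi_type():
    assert find_mpi_type(np.float64) == find_mpi_type('float64')
    assert find_mpi_type(MPI.DOUBLE) == MPI.DOUBLE
    return find_mpi_type(np.float64)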
#===============================================================================
class CartDecomposition():
"""
Cartesian decomposition of a tensor-product grid of coefficients.
This is built on top of an MPI communicator with multi-dimensional
Cartesian topology.
Parameters
----------
npts : list or tuple of int
Number of coefficients in the global grid along each dimension.
pads : list or tuple of int
Padding along each grid dimension.
In 1D, this is the number of extra coefficients added at each boundary
of the local domain to permit non-local operations with compact support;
this concept extends to multiple dimensions through a tensor product.
periods : list or tuple of bool
Periodicity (True|False) along each grid dimension.
reorder : bool
Whether individual ranks can be changed in the new Cartesian communicator.
comm : mpi4py.MPI.Comm
MPI communicator that will be used to spawn a new Cartesian communicator
(optional: default is MPI_COMM_WORLD).
"""
def __init__( self, npts, pads, periods, reorder, comm=MPI.COMM_WORLD ):
# Check input arguments
# TODO: check that arguments are identical across all processes
assert len( npts ) == len( pads ) == len( periods )
assert all( n >=1 for n in npts )
assert all( p >=0 for p in pads )
assert all( isinstance( period, bool ) for period in periods )
assert isinstance( reorder, bool )
assert isinstance( comm, MPI.Comm )
# Store input arguments
self._npts = tuple( npts )
self._pads = tuple( pads )
self._periods = tuple( periods )
self._reorder = reorder
self._comm = comm
# ...
self._ndims = len( npts )
# ...
# ...
self._size = comm.Get_size()
self._rank = comm.Get_rank()
# ...
# ...
# Know the number of processes along each direction
# self._dims = MPI.Compute_dims( self._size, self._ndims )
mpi_dims, block_shape = mpi_compute_dims( self._size, npts, pads )
self._dims = mpi_dims
# ...
# ...
        # Create an N-dimensional Cartesian MPI communicator
self._comm_cart = comm.Create_cart(
dims = self._dims,
periods = self._periods,
reorder = self._reorder
)
# Know my coordinates in the topology
self._rank_in_topo = self._comm_cart.Get_rank()
self._coords = self._comm_cart.Get_coords( rank=self._rank_in_topo )
# Start/end values of global indices (without ghost regions)
self._starts = tuple( ( c *n)//d for n,d,c in zip( npts, self._dims, self._coords ) )
self._ends = tuple( ((c+1)*n)//d-1 for n,d,c in zip( npts, self._dims, self._coords ) )
# List of 1D global indices (without ghost regions)
self._grids = tuple( range(s,e+1) for s,e in zip( self._starts, self._ends ) )
# N-dimensional global indices (without ghost regions)
self._indices = product( *self._grids )
# Compute shape of local arrays in topology (with ghost regions)
self._shape = tuple( e-s+1+2*p for s,e,p in zip( self._starts, self._ends, self._pads ) )
# Extended grids with ghost regions
self._extended_grids = tuple( range(s-p,e+p+1) for s,e,p in zip( self._starts, self._ends, self._pads ) )
# N-dimensional global indices with ghost regions
self._extended_indices = product( *self._extended_grids )
# Create (N-1)-dimensional communicators within the Cartesian topology
self._subcomm = [None]*self._ndims
for i in range(self._ndims):
remain_dims = [i==j for j in range( self._ndims )]
self._subcomm[i] = self._comm_cart.Sub( remain_dims )
# Compute/store information for communicating with neighbors
self._shift_info = {}
for axis in range( self._ndims ):
for disp in [-1,1]:
self._shift_info[ axis, disp ] = \
self._compute_shift_info( axis, disp )
# Store arrays with all the starts and ends along each direction
self._global_starts = [None]*self._ndims
self._global_ends = [None]*self._ndims
for axis in range( self._ndims ):
n = npts[axis]
d = mpi_dims[axis]
self._global_starts[axis] = np.array( [( c *n)//d for c in range( d )] )
self._global_ends [axis] = np.array( [((c+1)*n)//d-1 for c in range( d )] )
#---------------------------------------------------------------------------
# Global properties (same for each process)
#---------------------------------------------------------------------------
@property
def ndim( self ):
return self._ndims
@property
def npts( self ):
return self._npts
@property
def pads( self ):
return self._pads
@property
def periods( self ):
return self._periods
@property
def reorder( self ):
return self._reorder
@property
def comm( self ):
return self._comm
@property
def comm_cart( self ):
return self._comm_cart
@property
def nprocs( self ):
return self._dims
@property
def global_starts( self ):
return self._global_starts
@property
def global_ends( self ):
return self._global_ends
#---------------------------------------------------------------------------
# Local properties
#---------------------------------------------------------------------------
@property
def starts( self ):
return self._starts
@property
def ends( self ):
return self._ends
@property
def coords( self ):
return self._coords
@property
def shape( self ):
return self._shape
@property
def subcomm( self ):
return self._subcomm
#---------------------------------------------------------------------------
def coords_exist( self, coords ):
return all( P or (0 <= c < d) for P,c,d in zip( self._periods, coords, self._dims ) )
#---------------------------------------------------------------------------
def get_shift_info( self, direction, disp ):
return self._shift_info[ direction, disp ]
#---------------------------------------------------------------------------
def _compute_shift_info( self, direction, disp ):
assert( 0 <= direction < self._ndims )
assert( isinstance( disp, int ) )
# Process ranks for data shifting with MPI_SENDRECV
(rank_source, rank_dest) = self.comm_cart.Shift( direction, disp )
        # Mesh info along given direction
s = self._starts[direction]
e = self._ends [direction]
p = self._pads [direction]
# Shape of send/recv subarrays
buf_shape = np.array( self._shape )
buf_shape[direction] = p
# Start location of send/recv subarrays
send_starts = np.zeros( self._ndims, dtype=int )
recv_starts = np.zeros( self._ndims, dtype=int )
if disp > 0:
recv_starts[direction] = 0
send_starts[direction] = e-s+1
elif disp < 0:
recv_starts[direction] = e-s+1+p
send_starts[direction] = p
# Store all information into dictionary
info = {'rank_dest' : rank_dest,
'rank_source': rank_source,
'buf_shape' : tuple( buf_shape ),
'send_starts': tuple( send_starts ),
'recv_starts': tuple( recv_starts )}
return info
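    # Illustration (hypothetical 1D case with pads=(p,)): for disp=+1 the last
    # p owned entries are sent to the right neighbour (send_starts equals the
    # local size e-s+1), while the local left ghost slab (recv_starts = 0) is
    # filled by data arriving from the left neighbour; disp=-1 mirrors this.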
def remove_last_element( self, axes):
if isinstance(axes, int):
axes = [axes]
cart = CartDecomposition(self._npts, self._pads, self._periods, self._reorder)
cart._dims = self._dims
cart._comm_cart = self._comm_cart
cart._coords = self._coords
coords = cart.coords
nprocs = cart.nprocs
starts = cart._starts
ends = list(cart._ends)
for axis in axes:
assert(axis<cart._ndims)
# Recalculate start/end values of global indices (without ghost regions)
if not cart.periods[axis] and coords[axis] == nprocs[axis]-1:
ends[axis] -= 1
# set pads and npts
cart._npts = tuple(n - 1 if axis == i and not cart.periods[i] else n for i,n in enumerate(cart.npts))
cart._starts = starts
cart._ends = tuple(ends)
# List of 1D global indices (without ghost regions)
cart._grids = tuple( range(s,e+1) for s,e in zip( cart._starts, cart._ends ) )
# N-dimensional global indices (without ghost regions)
cart._indices = product( *cart._grids )
# Compute shape of local arrays in topology (with ghost regions)
cart._shape = tuple( e-s+1+2*p for s,e,p in zip( cart._starts, cart._ends, cart._pads ) )
# Extended grids with ghost regions
cart._extended_grids = tuple( range(s-p,e+p+1) for s,e,p in zip( cart._starts, cart._ends, cart._pads ) )
# N-dimensional global indices with ghost regions
cart._extended_indices = product( *cart._extended_grids )
        # Create (N-1)-dimensional communicators within the Cartesian topology
cart._subcomm = [None]*cart._ndims
for i in range(cart._ndims):
remain_dims = [i==j for j in range( cart._ndims )]
cart._subcomm[i] = cart._comm_cart.Sub( remain_dims )
# Compute/store information for communicating with neighbors
cart._shift_info = {}
for axis in range( cart._ndims ):
for disp in [-1,1]:
cart._shift_info[ axis, disp ] = \
cart._compute_shift_info( axis, disp )
# Store arrays with all the starts and ends along each direction
cart._global_starts = [None]*cart._ndims
cart._global_ends = [None]*cart._ndims
for axis in range( cart._ndims ):
n = cart.npts[axis]
d = cart._dims[axis]
cart._global_starts[axis] = np.array( [( c *n)//d for c in range( d )] )
cart._global_ends [axis] = np.array( [((c+1)*n)//d-1 for c in range( d )] )
return cart
def reduce_grid(self, global_starts, global_ends):
"""
        Returns a new CartDecomposition object with a coarser grid than the
        original one. This is done by providing new global_starts and
        global_ends for the coefficients in each dimension.
        Parameters
        ----------
        global_starts : list/tuple
            the list of the new global_starts in each dimension.
        global_ends : list/tuple
            the list of the new global_ends in each dimension.
"""
# Make a copy
cart = CartDecomposition(self.npts, self.pads, self.periods, self.reorder, comm=self.comm)
cart._npts = tuple(end[-1] + 1 for end in global_ends)
cart._dims = self._dims
        # Reuse the parent Cartesian communicator
cart._comm_cart = self._comm_cart
# Know my coordinates in the topology
cart._rank_in_topo = self._rank_in_topo
cart._coords = self._coords
# Start/end values of global indices (without ghost regions)
cart._starts = tuple( starts[i] for i,starts in zip( self._coords, global_starts) )
cart._ends = tuple( ends[i] for i,ends in zip( self._coords, global_ends ) )
# List of 1D global indices (without ghost regions)
cart._grids = tuple( range(s,e+1) for s,e in zip( cart._starts, cart._ends ) )
# N-dimensional global indices (without ghost regions)
cart._indices = product( *cart._grids )
# Compute shape of local arrays in topology (with ghost regions)
cart._shape = tuple( e-s+1+2*p for s,e,p in zip( cart._starts, cart._ends, cart._pads ) )
# Extended grids with ghost regions
cart._extended_grids = tuple( range(s-p,e+p+1) for s,e,p in zip( cart._starts, cart._ends, cart._pads ) )
# N-dimensional global indices with ghost regions
cart._extended_indices = product( *cart._extended_grids )
# Compute/store information for communicating with neighbors
cart._shift_info = {}
for dimension in range( cart._ndims ):
for disp in [-1,1]:
cart._shift_info[ dimension, disp ] = \
cart._compute_shift_info( dimension, disp )
# Store arrays with all the starts and ends along each direction
cart._global_starts = global_starts
cart._global_ends = global_ends
return cart
#===============================================================================
class CartDataExchanger:
"""
Type that takes care of updating the ghost regions (padding) of a
multi-dimensional array distributed according to the given Cartesian
decomposition of a tensor-product grid of coefficients.
Each coefficient in the decomposed grid may have multiple components,
contiguous in memory.
Parameters
----------
cart : psydac.ddm.CartDecomposition
Object that contains all information about the Cartesian decomposition
of a tensor-product grid of coefficients.
dtype : [type | str | numpy.dtype | mpi4py.MPI.Datatype]
Datatype of single coefficient (if scalar) or of each of its
components (if vector).
coeff_shape : [tuple(int) | list(int)]
Shape of a single coefficient, if this is multi-dimensional
(optional: by default, we assume scalar coefficients).
"""
def __init__( self, cart, dtype, *, coeff_shape=() ):
self._send_types, self._recv_types = self._create_buffer_types(
cart, dtype, coeff_shape=coeff_shape )
self._cart = cart
self._comm = cart.comm_cart
#---------------------------------------------------------------------------
# Public interface
#---------------------------------------------------------------------------
def get_send_type( self, direction, disp ):
return self._send_types[direction, disp]
# ...
def get_recv_type( self, direction, disp ):
return self._recv_types[direction, disp]
# ...
def update_ghost_regions( self, array, *, direction=None ):
"""
Update ghost regions in a numpy array with dimensions compatible with
CartDecomposition (and coeff_shape) provided at initialization.
Parameters
----------
array : numpy.ndarray
Multidimensional array corresponding to local subdomain in
decomposed tensor grid, including padding.
direction : int
Index of dimension over which ghost regions should be updated
(optional: by default all ghost regions are updated).
"""
if direction is None:
for d in range( self._cart.ndim ):
self.update_ghost_regions( array, direction=d )
return
assert isinstance( array, np.ndarray )
assert isinstance( direction, int )
# Shortcuts
cart = self._cart
comm = self._comm
# Choose non-negative invertible function tag(disp) >= 0
# NOTES:
# . different values of disp must return different tags!
# . tag at receiver must match message tag at sender
tag = lambda disp: 42+disp
# Requests' handles
requests = []
# Start receiving data (MPI_IRECV)
for disp in [-1,1]:
info = cart.get_shift_info( direction, disp )
recv_typ = self.get_recv_type ( direction, disp )
recv_buf = (array, 1, recv_typ)
recv_req = comm.Irecv( recv_buf, info['rank_source'], tag(disp) )
requests.append( recv_req )
# Start sending data (MPI_ISEND)
for disp in [-1,1]:
info = cart.get_shift_info( direction, disp )
send_typ = self.get_send_type ( direction, disp )
send_buf = (array, 1, send_typ)
send_req = comm.Isend( send_buf, info['rank_dest'], tag(disp) )
requests.append( send_req )
# Wait for end of data exchange (MPI_WAITALL)
MPI.Request.Waitall( requests )
comm.Barrier()
#---------------------------------------------------------------------------
# Private methods
#---------------------------------------------------------------------------
@staticmethod
def _create_buffer_types( cart, dtype, *, coeff_shape=() ):
"""
Create MPI subarray datatypes for updating the ghost regions (padding)
of a multi-dimensional array distributed according to the given Cartesian
decomposition of a tensor-product grid of coefficients.
MPI requires a subarray datatype for accessing non-contiguous slices of
a multi-dimensional array; this is a typical situation when updating the
ghost regions.
Each coefficient in the decomposed grid may have multiple components,
contiguous in memory.
Parameters
----------
cart : psydac.ddm.CartDecomposition
Object that contains all information about the Cartesian decomposition
of a tensor-product grid of coefficients.
dtype : [type | str | numpy.dtype | mpi4py.MPI.Datatype]
Datatype of single coefficient (if scalar) or of each of its
components (if vector).
coeff_shape : [tuple(int) | list(int)]
Shape of a single coefficient, if this is multidimensional
(optional: by default, we assume scalar coefficients).
Returns
-------
send_types : dict
Dictionary of MPI subarray datatypes for SEND BUFFERS, accessed
through the integer pair (direction, displacement) as key;
'direction' takes values from 0 to ndim, 'disp' is -1 or +1.
recv_types : dict
Dictionary of MPI subarray datatypes for RECEIVE BUFFERS, accessed
through the integer pair (direction, displacement) as key;
'direction' takes values from 0 to ndim, 'disp' is -1 or +1.
"""
assert isinstance( cart, CartDecomposition )
mpi_type = find_mpi_type( dtype )
# Possibly, each coefficient could have multiple components
coeff_shape = list( coeff_shape )
coeff_start = [0] * len( coeff_shape )
data_shape = list( cart.shape ) + coeff_shape
send_types = {}
recv_types = {}
for direction in range( cart.ndim ):
for disp in [-1, 1]:
info = cart.get_shift_info( direction, disp )
buf_shape = list( info[ 'buf_shape' ] ) + coeff_shape
send_starts = list( info['send_starts'] ) + coeff_start
recv_starts = list( info['recv_starts'] ) + coeff_start
send_types[direction,disp] = mpi_type.Create_subarray(
sizes = data_shape ,
subsizes = buf_shape ,
starts = send_starts,
).Commit()
recv_types[direction,disp] = mpi_type.Create_subarray(
sizes = data_shape ,
subsizes = buf_shape ,
starts = recv_starts,
).Commit()
return send_types, recv_types
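# End-to-end sketch (illustrative sizes; run under MPI, e.g.
# `mpiexec -n 4 python script.py`): decompose a 2D grid of coefficients,
# allocate the padded local block and synchronize its ghost regions once.
def _example_ghost_update():
    cart = CartDecomposition(npts=[64, 64], pads=[1, 1],
                             periods=[True, False], reorder=False)
    u = np.zeros(cart.shape)  # local block, ghost layers included
    sync = CartDataExchanger(cart, u.dtype)
    sync.update_ghost_regions(u)
    return u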
|
py | 7df6d18c5c365fd50d93343bff72dc33d084afc2 | #!/usr/bin/python3
# coding: utf-8
import time
from tkinter import *
import os
#-------------------------------------------------------------------------
def keyup(e):
print ("STOP")
def keydown(e):
key = e.keysym
if key == "Up":
print ("Vorwaerts")
elif key == "Down":
print ("Rueckwaerts")
elif key == "Left":
print ("Links")
elif key == "Right":
print ("Rechts")
#-------------------------------------------------------------------------
os.system('xset r off')
root = Tk()
frame = Frame(root, width=600, height=400)
frame.bind("<KeyPress>", keydown)
frame.bind("<KeyRelease>", keyup)
frame.pack()
frame.focus_set()
root.mainloop()
|
py | 7df6d29e4ba3176a11636774f21d83fe5e2ccfb1 | from flask_restful import Resource, current_app, request
from schematics.exceptions import DataError
from server.models.dtos.grid_dto import GridDTO
from server.services.grid.grid_service import GridService
from server.services.project_admin_service import InvalidGeoJson
from server.services.users.authentication_service import token_auth, tm
class IntersectingTilesAPI(Resource):
@tm.pm_only()
@token_auth.login_required
def put(self):
"""
Gets the tiles intersecting the aoi
---
tags:
- grid
produces:
- application/json
parameters:
- in: header
name: Authorization
description: Base64 encoded session token
required: true
type: string
default: Token sessionTokenHere==
- in: body
name: body
required: true
              description: JSON object containing the AOI and grid, plus a bool flag controlling whether to clip the grid to the AOI
schema:
properties:
clipToAoi:
type: boolean
default: true
areaOfInterest:
schema:
properties:
type:
type: string
default: FeatureCollection
features:
type: array
items:
schema:
$ref: "#/definitions/GeoJsonFeature"
grid:
schema:
properties:
type:
type: string
default: FeatureCollection
features:
type: array
items:
schema:
$ref: "#/definitions/GeoJsonFeature"
responses:
200:
description: Intersecting tasks found successfully
400:
description: Client Error - Invalid Request
500:
description: Internal Server Error
"""
try:
grid_dto = GridDTO(request.get_json())
grid_dto.validate()
except DataError as e:
current_app.logger.error(f'error validating request: {str(e)}')
return str(e), 400
try:
grid = GridService.trim_grid_to_aoi(grid_dto)
return grid, 200
except InvalidGeoJson as e:
return {"error": f'{str(e)}'}, 400
except Exception as e:
error_msg = f'IntersectingTiles GET API - unhandled error: {str(e)}'
current_app.logger.critical(error_msg)
return {"error": error_msg}, 500
|
py | 7df6d3cb42a608fa4d745938b4f693db88c69724 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AS2AcknowledgementConnectionSettingsResponse',
'AS2AgreementContentResponse',
'AS2EnvelopeSettingsResponse',
'AS2ErrorSettingsResponse',
'AS2MdnSettingsResponse',
'AS2MessageConnectionSettingsResponse',
'AS2OneWayAgreementResponse',
'AS2ProtocolSettingsResponse',
'AS2SecuritySettingsResponse',
'AS2ValidationSettingsResponse',
'AgreementContentResponse',
'B2BPartnerContentResponse',
'BusinessIdentityResponse',
'EdifactAcknowledgementSettingsResponse',
'EdifactAgreementContentResponse',
'EdifactDelimiterOverrideResponse',
'EdifactEnvelopeOverrideResponse',
'EdifactEnvelopeSettingsResponse',
'EdifactFramingSettingsResponse',
'EdifactMessageFilterResponse',
'EdifactMessageIdentifierResponse',
'EdifactOneWayAgreementResponse',
'EdifactProcessingSettingsResponse',
'EdifactProtocolSettingsResponse',
'EdifactSchemaReferenceResponse',
'EdifactValidationOverrideResponse',
'EdifactValidationSettingsResponse',
'IntegrationAccountContentHashResponse',
'IntegrationAccountContentLinkResponse',
'IntegrationAccountSkuResponse',
'KeyVaultKeyReferenceResponse',
'KeyVaultKeyReferenceResponseKeyVault',
'PartnerContentResponse',
'X12AcknowledgementSettingsResponse',
'X12AgreementContentResponse',
'X12DelimiterOverridesResponse',
'X12EnvelopeOverrideResponse',
'X12EnvelopeSettingsResponse',
'X12FramingSettingsResponse',
'X12MessageFilterResponse',
'X12MessageIdentifierResponse',
'X12OneWayAgreementResponse',
'X12ProcessingSettingsResponse',
'X12ProtocolSettingsResponse',
'X12SchemaReferenceResponse',
'X12SecuritySettingsResponse',
'X12ValidationOverrideResponse',
'X12ValidationSettingsResponse',
]
@pulumi.output_type
class AS2AcknowledgementConnectionSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ignoreCertificateNameMismatch":
suggest = "ignore_certificate_name_mismatch"
elif key == "keepHttpConnectionAlive":
suggest = "keep_http_connection_alive"
elif key == "supportHttpStatusCodeContinue":
suggest = "support_http_status_code_continue"
elif key == "unfoldHttpHeaders":
suggest = "unfold_http_headers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2AcknowledgementConnectionSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2AcknowledgementConnectionSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2AcknowledgementConnectionSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ignore_certificate_name_mismatch: Optional[bool] = None,
keep_http_connection_alive: Optional[bool] = None,
support_http_status_code_continue: Optional[bool] = None,
unfold_http_headers: Optional[bool] = None):
"""
:param bool ignore_certificate_name_mismatch: The value indicating whether to ignore mismatch in certificate name.
:param bool keep_http_connection_alive: The value indicating whether to keep the connection alive.
:param bool support_http_status_code_continue: The value indicating whether to support HTTP status code 'CONTINUE'.
:param bool unfold_http_headers: The value indicating whether to unfold the HTTP headers.
"""
if ignore_certificate_name_mismatch is not None:
pulumi.set(__self__, "ignore_certificate_name_mismatch", ignore_certificate_name_mismatch)
if keep_http_connection_alive is not None:
pulumi.set(__self__, "keep_http_connection_alive", keep_http_connection_alive)
if support_http_status_code_continue is not None:
pulumi.set(__self__, "support_http_status_code_continue", support_http_status_code_continue)
if unfold_http_headers is not None:
pulumi.set(__self__, "unfold_http_headers", unfold_http_headers)
@property
@pulumi.getter(name="ignoreCertificateNameMismatch")
def ignore_certificate_name_mismatch(self) -> Optional[bool]:
"""
The value indicating whether to ignore mismatch in certificate name.
"""
return pulumi.get(self, "ignore_certificate_name_mismatch")
@property
@pulumi.getter(name="keepHttpConnectionAlive")
def keep_http_connection_alive(self) -> Optional[bool]:
"""
The value indicating whether to keep the connection alive.
"""
return pulumi.get(self, "keep_http_connection_alive")
@property
@pulumi.getter(name="supportHttpStatusCodeContinue")
def support_http_status_code_continue(self) -> Optional[bool]:
"""
The value indicating whether to support HTTP status code 'CONTINUE'.
"""
return pulumi.get(self, "support_http_status_code_continue")
@property
@pulumi.getter(name="unfoldHttpHeaders")
def unfold_http_headers(self) -> Optional[bool]:
"""
The value indicating whether to unfold the HTTP headers.
"""
return pulumi.get(self, "unfold_http_headers")
@pulumi.output_type
class AS2AgreementContentResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "receiveAgreement":
suggest = "receive_agreement"
elif key == "sendAgreement":
suggest = "send_agreement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2AgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2AgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2AgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
receive_agreement: Optional['outputs.AS2OneWayAgreementResponse'] = None,
send_agreement: Optional['outputs.AS2OneWayAgreementResponse'] = None):
"""
:param 'AS2OneWayAgreementResponse' receive_agreement: The AS2 one-way receive agreement.
:param 'AS2OneWayAgreementResponse' send_agreement: The AS2 one-way send agreement.
"""
if receive_agreement is not None:
pulumi.set(__self__, "receive_agreement", receive_agreement)
if send_agreement is not None:
pulumi.set(__self__, "send_agreement", send_agreement)
@property
@pulumi.getter(name="receiveAgreement")
def receive_agreement(self) -> Optional['outputs.AS2OneWayAgreementResponse']:
"""
The AS2 one-way receive agreement.
"""
return pulumi.get(self, "receive_agreement")
@property
@pulumi.getter(name="sendAgreement")
def send_agreement(self) -> Optional['outputs.AS2OneWayAgreementResponse']:
"""
The AS2 one-way send agreement.
"""
return pulumi.get(self, "send_agreement")
@pulumi.output_type
class AS2EnvelopeSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "autogenerateFileName":
suggest = "autogenerate_file_name"
elif key == "fileNameTemplate":
suggest = "file_name_template"
elif key == "messageContentType":
suggest = "message_content_type"
elif key == "suspendMessageOnFileNameGenerationError":
suggest = "suspend_message_on_file_name_generation_error"
elif key == "transmitFileNameInMimeHeader":
suggest = "transmit_file_name_in_mime_header"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2EnvelopeSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2EnvelopeSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2EnvelopeSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
autogenerate_file_name: Optional[bool] = None,
file_name_template: Optional[str] = None,
message_content_type: Optional[str] = None,
suspend_message_on_file_name_generation_error: Optional[bool] = None,
transmit_file_name_in_mime_header: Optional[bool] = None):
"""
:param bool autogenerate_file_name: The value indicating whether to auto generate file name.
:param str file_name_template: The template for file name.
:param str message_content_type: The message content type.
:param bool suspend_message_on_file_name_generation_error: The value indicating whether to suspend message on file name generation error.
:param bool transmit_file_name_in_mime_header: The value indicating whether to transmit file name in mime header.
"""
if autogenerate_file_name is not None:
pulumi.set(__self__, "autogenerate_file_name", autogenerate_file_name)
if file_name_template is not None:
pulumi.set(__self__, "file_name_template", file_name_template)
if message_content_type is not None:
pulumi.set(__self__, "message_content_type", message_content_type)
if suspend_message_on_file_name_generation_error is not None:
pulumi.set(__self__, "suspend_message_on_file_name_generation_error", suspend_message_on_file_name_generation_error)
if transmit_file_name_in_mime_header is not None:
pulumi.set(__self__, "transmit_file_name_in_mime_header", transmit_file_name_in_mime_header)
@property
@pulumi.getter(name="autogenerateFileName")
def autogenerate_file_name(self) -> Optional[bool]:
"""
The value indicating whether to auto generate file name.
"""
return pulumi.get(self, "autogenerate_file_name")
@property
@pulumi.getter(name="fileNameTemplate")
def file_name_template(self) -> Optional[str]:
"""
The template for file name.
"""
return pulumi.get(self, "file_name_template")
@property
@pulumi.getter(name="messageContentType")
def message_content_type(self) -> Optional[str]:
"""
The message content type.
"""
return pulumi.get(self, "message_content_type")
@property
@pulumi.getter(name="suspendMessageOnFileNameGenerationError")
def suspend_message_on_file_name_generation_error(self) -> Optional[bool]:
"""
The value indicating whether to suspend message on file name generation error.
"""
return pulumi.get(self, "suspend_message_on_file_name_generation_error")
@property
@pulumi.getter(name="transmitFileNameInMimeHeader")
def transmit_file_name_in_mime_header(self) -> Optional[bool]:
"""
The value indicating whether to transmit file name in mime header.
"""
return pulumi.get(self, "transmit_file_name_in_mime_header")
@pulumi.output_type
class AS2ErrorSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "resendIfMdnNotReceived":
suggest = "resend_if_mdn_not_received"
elif key == "suspendDuplicateMessage":
suggest = "suspend_duplicate_message"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2ErrorSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2ErrorSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2ErrorSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
resend_if_mdn_not_received: Optional[bool] = None,
suspend_duplicate_message: Optional[bool] = None):
"""
:param bool resend_if_mdn_not_received: The value indicating whether to resend message If MDN is not received.
:param bool suspend_duplicate_message: The value indicating whether to suspend duplicate message.
"""
if resend_if_mdn_not_received is not None:
pulumi.set(__self__, "resend_if_mdn_not_received", resend_if_mdn_not_received)
if suspend_duplicate_message is not None:
pulumi.set(__self__, "suspend_duplicate_message", suspend_duplicate_message)
@property
@pulumi.getter(name="resendIfMdnNotReceived")
def resend_if_mdn_not_received(self) -> Optional[bool]:
"""
The value indicating whether to resend message If MDN is not received.
"""
return pulumi.get(self, "resend_if_mdn_not_received")
@property
@pulumi.getter(name="suspendDuplicateMessage")
def suspend_duplicate_message(self) -> Optional[bool]:
"""
The value indicating whether to suspend duplicate message.
"""
return pulumi.get(self, "suspend_duplicate_message")
@pulumi.output_type
class AS2MdnSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dispositionNotificationTo":
suggest = "disposition_notification_to"
elif key == "mdnText":
suggest = "mdn_text"
elif key == "micHashingAlgorithm":
suggest = "mic_hashing_algorithm"
elif key == "needMdn":
suggest = "need_mdn"
elif key == "receiptDeliveryUrl":
suggest = "receipt_delivery_url"
elif key == "sendInboundMdnToMessageBox":
suggest = "send_inbound_mdn_to_message_box"
elif key == "sendMdnAsynchronously":
suggest = "send_mdn_asynchronously"
elif key == "signMdn":
suggest = "sign_mdn"
elif key == "signOutboundMdnIfOptional":
suggest = "sign_outbound_mdn_if_optional"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2MdnSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2MdnSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2MdnSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
disposition_notification_to: Optional[str] = None,
mdn_text: Optional[str] = None,
mic_hashing_algorithm: Optional[str] = None,
need_mdn: Optional[bool] = None,
receipt_delivery_url: Optional[str] = None,
send_inbound_mdn_to_message_box: Optional[bool] = None,
send_mdn_asynchronously: Optional[bool] = None,
sign_mdn: Optional[bool] = None,
sign_outbound_mdn_if_optional: Optional[bool] = None):
"""
:param str disposition_notification_to: The disposition notification to header value.
:param str mdn_text: The MDN text.
:param str mic_hashing_algorithm: The signing or hashing algorithm.
:param bool need_mdn: The value indicating whether to send or request a MDN.
:param str receipt_delivery_url: The receipt delivery URL.
:param bool send_inbound_mdn_to_message_box: The value indicating whether to send inbound MDN to message box.
:param bool send_mdn_asynchronously: The value indicating whether to send the asynchronous MDN.
:param bool sign_mdn: The value indicating whether the MDN needs to be signed or not.
:param bool sign_outbound_mdn_if_optional: The value indicating whether to sign the outbound MDN if optional.
"""
if disposition_notification_to is not None:
pulumi.set(__self__, "disposition_notification_to", disposition_notification_to)
if mdn_text is not None:
pulumi.set(__self__, "mdn_text", mdn_text)
if mic_hashing_algorithm is not None:
pulumi.set(__self__, "mic_hashing_algorithm", mic_hashing_algorithm)
if need_mdn is not None:
pulumi.set(__self__, "need_mdn", need_mdn)
if receipt_delivery_url is not None:
pulumi.set(__self__, "receipt_delivery_url", receipt_delivery_url)
if send_inbound_mdn_to_message_box is not None:
pulumi.set(__self__, "send_inbound_mdn_to_message_box", send_inbound_mdn_to_message_box)
if send_mdn_asynchronously is not None:
pulumi.set(__self__, "send_mdn_asynchronously", send_mdn_asynchronously)
if sign_mdn is not None:
pulumi.set(__self__, "sign_mdn", sign_mdn)
if sign_outbound_mdn_if_optional is not None:
pulumi.set(__self__, "sign_outbound_mdn_if_optional", sign_outbound_mdn_if_optional)
@property
@pulumi.getter(name="dispositionNotificationTo")
def disposition_notification_to(self) -> Optional[str]:
"""
The disposition notification to header value.
"""
return pulumi.get(self, "disposition_notification_to")
@property
@pulumi.getter(name="mdnText")
def mdn_text(self) -> Optional[str]:
"""
The MDN text.
"""
return pulumi.get(self, "mdn_text")
@property
@pulumi.getter(name="micHashingAlgorithm")
def mic_hashing_algorithm(self) -> Optional[str]:
"""
The signing or hashing algorithm.
"""
return pulumi.get(self, "mic_hashing_algorithm")
@property
@pulumi.getter(name="needMdn")
def need_mdn(self) -> Optional[bool]:
"""
The value indicating whether to send or request a MDN.
"""
return pulumi.get(self, "need_mdn")
@property
@pulumi.getter(name="receiptDeliveryUrl")
def receipt_delivery_url(self) -> Optional[str]:
"""
The receipt delivery URL.
"""
return pulumi.get(self, "receipt_delivery_url")
@property
@pulumi.getter(name="sendInboundMdnToMessageBox")
def send_inbound_mdn_to_message_box(self) -> Optional[bool]:
"""
The value indicating whether to send inbound MDN to message box.
"""
return pulumi.get(self, "send_inbound_mdn_to_message_box")
@property
@pulumi.getter(name="sendMdnAsynchronously")
def send_mdn_asynchronously(self) -> Optional[bool]:
"""
The value indicating whether to send the asynchronous MDN.
"""
return pulumi.get(self, "send_mdn_asynchronously")
@property
@pulumi.getter(name="signMdn")
def sign_mdn(self) -> Optional[bool]:
"""
The value indicating whether the MDN needs to be signed or not.
"""
return pulumi.get(self, "sign_mdn")
@property
@pulumi.getter(name="signOutboundMdnIfOptional")
def sign_outbound_mdn_if_optional(self) -> Optional[bool]:
"""
The value indicating whether to sign the outbound MDN if optional.
"""
return pulumi.get(self, "sign_outbound_mdn_if_optional")
@pulumi.output_type
class AS2MessageConnectionSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ignoreCertificateNameMismatch":
suggest = "ignore_certificate_name_mismatch"
elif key == "keepHttpConnectionAlive":
suggest = "keep_http_connection_alive"
elif key == "supportHttpStatusCodeContinue":
suggest = "support_http_status_code_continue"
elif key == "unfoldHttpHeaders":
suggest = "unfold_http_headers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2MessageConnectionSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2MessageConnectionSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2MessageConnectionSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ignore_certificate_name_mismatch: Optional[bool] = None,
keep_http_connection_alive: Optional[bool] = None,
support_http_status_code_continue: Optional[bool] = None,
unfold_http_headers: Optional[bool] = None):
"""
:param bool ignore_certificate_name_mismatch: The value indicating whether to ignore mismatch in certificate name.
:param bool keep_http_connection_alive: The value indicating whether to keep the connection alive.
:param bool support_http_status_code_continue: The value indicating whether to support HTTP status code 'CONTINUE'.
:param bool unfold_http_headers: The value indicating whether to unfold the HTTP headers.
"""
if ignore_certificate_name_mismatch is not None:
pulumi.set(__self__, "ignore_certificate_name_mismatch", ignore_certificate_name_mismatch)
if keep_http_connection_alive is not None:
pulumi.set(__self__, "keep_http_connection_alive", keep_http_connection_alive)
if support_http_status_code_continue is not None:
pulumi.set(__self__, "support_http_status_code_continue", support_http_status_code_continue)
if unfold_http_headers is not None:
pulumi.set(__self__, "unfold_http_headers", unfold_http_headers)
@property
@pulumi.getter(name="ignoreCertificateNameMismatch")
def ignore_certificate_name_mismatch(self) -> Optional[bool]:
"""
The value indicating whether to ignore mismatch in certificate name.
"""
return pulumi.get(self, "ignore_certificate_name_mismatch")
@property
@pulumi.getter(name="keepHttpConnectionAlive")
def keep_http_connection_alive(self) -> Optional[bool]:
"""
The value indicating whether to keep the connection alive.
"""
return pulumi.get(self, "keep_http_connection_alive")
@property
@pulumi.getter(name="supportHttpStatusCodeContinue")
def support_http_status_code_continue(self) -> Optional[bool]:
"""
The value indicating whether to support HTTP status code 'CONTINUE'.
"""
return pulumi.get(self, "support_http_status_code_continue")
@property
@pulumi.getter(name="unfoldHttpHeaders")
def unfold_http_headers(self) -> Optional[bool]:
"""
The value indicating whether to unfold the HTTP headers.
"""
return pulumi.get(self, "unfold_http_headers")
@pulumi.output_type
class AS2OneWayAgreementResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "protocolSettings":
suggest = "protocol_settings"
elif key == "receiverBusinessIdentity":
suggest = "receiver_business_identity"
elif key == "senderBusinessIdentity":
suggest = "sender_business_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2OneWayAgreementResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2OneWayAgreementResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2OneWayAgreementResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
protocol_settings: Optional['outputs.AS2ProtocolSettingsResponse'] = None,
receiver_business_identity: Optional['outputs.BusinessIdentityResponse'] = None,
sender_business_identity: Optional['outputs.BusinessIdentityResponse'] = None):
"""
:param 'AS2ProtocolSettingsResponse' protocol_settings: The AS2 protocol settings.
:param 'BusinessIdentityResponse' receiver_business_identity: The receiver business identity
:param 'BusinessIdentityResponse' sender_business_identity: The sender business identity
"""
if protocol_settings is not None:
pulumi.set(__self__, "protocol_settings", protocol_settings)
if receiver_business_identity is not None:
pulumi.set(__self__, "receiver_business_identity", receiver_business_identity)
if sender_business_identity is not None:
pulumi.set(__self__, "sender_business_identity", sender_business_identity)
@property
@pulumi.getter(name="protocolSettings")
def protocol_settings(self) -> Optional['outputs.AS2ProtocolSettingsResponse']:
"""
The AS2 protocol settings.
"""
return pulumi.get(self, "protocol_settings")
@property
@pulumi.getter(name="receiverBusinessIdentity")
def receiver_business_identity(self) -> Optional['outputs.BusinessIdentityResponse']:
"""
The receiver business identity
"""
return pulumi.get(self, "receiver_business_identity")
@property
@pulumi.getter(name="senderBusinessIdentity")
def sender_business_identity(self) -> Optional['outputs.BusinessIdentityResponse']:
"""
The sender business identity
"""
return pulumi.get(self, "sender_business_identity")
@pulumi.output_type
class AS2ProtocolSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementConnectionSettings":
suggest = "acknowledgement_connection_settings"
elif key == "envelopeSettings":
suggest = "envelope_settings"
elif key == "errorSettings":
suggest = "error_settings"
elif key == "mdnSettings":
suggest = "mdn_settings"
elif key == "messageConnectionSettings":
suggest = "message_connection_settings"
elif key == "securitySettings":
suggest = "security_settings"
elif key == "validationSettings":
suggest = "validation_settings"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2ProtocolSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2ProtocolSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2ProtocolSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_connection_settings: Optional['outputs.AS2AcknowledgementConnectionSettingsResponse'] = None,
envelope_settings: Optional['outputs.AS2EnvelopeSettingsResponse'] = None,
error_settings: Optional['outputs.AS2ErrorSettingsResponse'] = None,
mdn_settings: Optional['outputs.AS2MdnSettingsResponse'] = None,
message_connection_settings: Optional['outputs.AS2MessageConnectionSettingsResponse'] = None,
security_settings: Optional['outputs.AS2SecuritySettingsResponse'] = None,
validation_settings: Optional['outputs.AS2ValidationSettingsResponse'] = None):
"""
:param 'AS2AcknowledgementConnectionSettingsResponse' acknowledgement_connection_settings: The acknowledgement connection settings.
:param 'AS2EnvelopeSettingsResponse' envelope_settings: The envelope settings.
:param 'AS2ErrorSettingsResponse' error_settings: The error settings.
:param 'AS2MdnSettingsResponse' mdn_settings: The MDN settings.
:param 'AS2MessageConnectionSettingsResponse' message_connection_settings: The message connection settings.
:param 'AS2SecuritySettingsResponse' security_settings: The security settings.
:param 'AS2ValidationSettingsResponse' validation_settings: The validation settings.
"""
if acknowledgement_connection_settings is not None:
pulumi.set(__self__, "acknowledgement_connection_settings", acknowledgement_connection_settings)
if envelope_settings is not None:
pulumi.set(__self__, "envelope_settings", envelope_settings)
if error_settings is not None:
pulumi.set(__self__, "error_settings", error_settings)
if mdn_settings is not None:
pulumi.set(__self__, "mdn_settings", mdn_settings)
if message_connection_settings is not None:
pulumi.set(__self__, "message_connection_settings", message_connection_settings)
if security_settings is not None:
pulumi.set(__self__, "security_settings", security_settings)
if validation_settings is not None:
pulumi.set(__self__, "validation_settings", validation_settings)
@property
@pulumi.getter(name="acknowledgementConnectionSettings")
def acknowledgement_connection_settings(self) -> Optional['outputs.AS2AcknowledgementConnectionSettingsResponse']:
"""
The acknowledgement connection settings.
"""
return pulumi.get(self, "acknowledgement_connection_settings")
@property
@pulumi.getter(name="envelopeSettings")
def envelope_settings(self) -> Optional['outputs.AS2EnvelopeSettingsResponse']:
"""
The envelope settings.
"""
return pulumi.get(self, "envelope_settings")
@property
@pulumi.getter(name="errorSettings")
def error_settings(self) -> Optional['outputs.AS2ErrorSettingsResponse']:
"""
The error settings.
"""
return pulumi.get(self, "error_settings")
@property
@pulumi.getter(name="mdnSettings")
def mdn_settings(self) -> Optional['outputs.AS2MdnSettingsResponse']:
"""
The MDN settings.
"""
return pulumi.get(self, "mdn_settings")
@property
@pulumi.getter(name="messageConnectionSettings")
def message_connection_settings(self) -> Optional['outputs.AS2MessageConnectionSettingsResponse']:
"""
The message connection settings.
"""
return pulumi.get(self, "message_connection_settings")
@property
@pulumi.getter(name="securitySettings")
def security_settings(self) -> Optional['outputs.AS2SecuritySettingsResponse']:
"""
The security settings.
"""
return pulumi.get(self, "security_settings")
@property
@pulumi.getter(name="validationSettings")
def validation_settings(self) -> Optional['outputs.AS2ValidationSettingsResponse']:
"""
The validation settings.
"""
return pulumi.get(self, "validation_settings")
@pulumi.output_type
class AS2SecuritySettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enableNrrForInboundDecodedMessages":
suggest = "enable_nrr_for_inbound_decoded_messages"
elif key == "enableNrrForInboundEncodedMessages":
suggest = "enable_nrr_for_inbound_encoded_messages"
elif key == "enableNrrForInboundMdn":
suggest = "enable_nrr_for_inbound_mdn"
elif key == "enableNrrForOutboundDecodedMessages":
suggest = "enable_nrr_for_outbound_decoded_messages"
elif key == "enableNrrForOutboundEncodedMessages":
suggest = "enable_nrr_for_outbound_encoded_messages"
elif key == "enableNrrForOutboundMdn":
suggest = "enable_nrr_for_outbound_mdn"
elif key == "encryptionCertificateName":
suggest = "encryption_certificate_name"
elif key == "overrideGroupSigningCertificate":
suggest = "override_group_signing_certificate"
elif key == "signingCertificateName":
suggest = "signing_certificate_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2SecuritySettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2SecuritySettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2SecuritySettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enable_nrr_for_inbound_decoded_messages: Optional[bool] = None,
enable_nrr_for_inbound_encoded_messages: Optional[bool] = None,
enable_nrr_for_inbound_mdn: Optional[bool] = None,
enable_nrr_for_outbound_decoded_messages: Optional[bool] = None,
enable_nrr_for_outbound_encoded_messages: Optional[bool] = None,
enable_nrr_for_outbound_mdn: Optional[bool] = None,
encryption_certificate_name: Optional[str] = None,
override_group_signing_certificate: Optional[bool] = None,
signing_certificate_name: Optional[str] = None):
"""
:param bool enable_nrr_for_inbound_decoded_messages: The value indicating whether to enable NRR for inbound decoded messages.
:param bool enable_nrr_for_inbound_encoded_messages: The value indicating whether to enable NRR for inbound encoded messages.
:param bool enable_nrr_for_inbound_mdn: The value indicating whether to enable NRR for inbound MDN.
:param bool enable_nrr_for_outbound_decoded_messages: The value indicating whether to enable NRR for outbound decoded messages.
:param bool enable_nrr_for_outbound_encoded_messages: The value indicating whether to enable NRR for outbound encoded messages.
:param bool enable_nrr_for_outbound_mdn: The value indicating whether to enable NRR for outbound MDN.
:param str encryption_certificate_name: The name of the encryption certificate.
        :param bool override_group_signing_certificate: The value indicating whether to override the group signing certificate.
:param str signing_certificate_name: The name of the signing certificate.
"""
if enable_nrr_for_inbound_decoded_messages is not None:
pulumi.set(__self__, "enable_nrr_for_inbound_decoded_messages", enable_nrr_for_inbound_decoded_messages)
if enable_nrr_for_inbound_encoded_messages is not None:
pulumi.set(__self__, "enable_nrr_for_inbound_encoded_messages", enable_nrr_for_inbound_encoded_messages)
if enable_nrr_for_inbound_mdn is not None:
pulumi.set(__self__, "enable_nrr_for_inbound_mdn", enable_nrr_for_inbound_mdn)
if enable_nrr_for_outbound_decoded_messages is not None:
pulumi.set(__self__, "enable_nrr_for_outbound_decoded_messages", enable_nrr_for_outbound_decoded_messages)
if enable_nrr_for_outbound_encoded_messages is not None:
pulumi.set(__self__, "enable_nrr_for_outbound_encoded_messages", enable_nrr_for_outbound_encoded_messages)
if enable_nrr_for_outbound_mdn is not None:
pulumi.set(__self__, "enable_nrr_for_outbound_mdn", enable_nrr_for_outbound_mdn)
if encryption_certificate_name is not None:
pulumi.set(__self__, "encryption_certificate_name", encryption_certificate_name)
if override_group_signing_certificate is not None:
pulumi.set(__self__, "override_group_signing_certificate", override_group_signing_certificate)
if signing_certificate_name is not None:
pulumi.set(__self__, "signing_certificate_name", signing_certificate_name)
@property
@pulumi.getter(name="enableNrrForInboundDecodedMessages")
def enable_nrr_for_inbound_decoded_messages(self) -> Optional[bool]:
"""
The value indicating whether to enable NRR for inbound decoded messages.
"""
return pulumi.get(self, "enable_nrr_for_inbound_decoded_messages")
@property
@pulumi.getter(name="enableNrrForInboundEncodedMessages")
def enable_nrr_for_inbound_encoded_messages(self) -> Optional[bool]:
"""
The value indicating whether to enable NRR for inbound encoded messages.
"""
return pulumi.get(self, "enable_nrr_for_inbound_encoded_messages")
@property
@pulumi.getter(name="enableNrrForInboundMdn")
def enable_nrr_for_inbound_mdn(self) -> Optional[bool]:
"""
The value indicating whether to enable NRR for inbound MDN.
"""
return pulumi.get(self, "enable_nrr_for_inbound_mdn")
@property
@pulumi.getter(name="enableNrrForOutboundDecodedMessages")
def enable_nrr_for_outbound_decoded_messages(self) -> Optional[bool]:
"""
The value indicating whether to enable NRR for outbound decoded messages.
"""
return pulumi.get(self, "enable_nrr_for_outbound_decoded_messages")
@property
@pulumi.getter(name="enableNrrForOutboundEncodedMessages")
def enable_nrr_for_outbound_encoded_messages(self) -> Optional[bool]:
"""
The value indicating whether to enable NRR for outbound encoded messages.
"""
return pulumi.get(self, "enable_nrr_for_outbound_encoded_messages")
@property
@pulumi.getter(name="enableNrrForOutboundMdn")
def enable_nrr_for_outbound_mdn(self) -> Optional[bool]:
"""
The value indicating whether to enable NRR for outbound MDN.
"""
return pulumi.get(self, "enable_nrr_for_outbound_mdn")
@property
@pulumi.getter(name="encryptionCertificateName")
def encryption_certificate_name(self) -> Optional[str]:
"""
The name of the encryption certificate.
"""
return pulumi.get(self, "encryption_certificate_name")
@property
@pulumi.getter(name="overrideGroupSigningCertificate")
def override_group_signing_certificate(self) -> Optional[bool]:
"""
        The value indicating whether to override the group signing certificate.
"""
return pulumi.get(self, "override_group_signing_certificate")
@property
@pulumi.getter(name="signingCertificateName")
def signing_certificate_name(self) -> Optional[str]:
"""
The name of the signing certificate.
"""
return pulumi.get(self, "signing_certificate_name")
@pulumi.output_type
class AS2ValidationSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "checkCertificateRevocationListOnReceive":
suggest = "check_certificate_revocation_list_on_receive"
elif key == "checkCertificateRevocationListOnSend":
suggest = "check_certificate_revocation_list_on_send"
elif key == "checkDuplicateMessage":
suggest = "check_duplicate_message"
elif key == "compressMessage":
suggest = "compress_message"
elif key == "encryptMessage":
suggest = "encrypt_message"
elif key == "encryptionAlgorithm":
suggest = "encryption_algorithm"
elif key == "interchangeDuplicatesValidityDays":
suggest = "interchange_duplicates_validity_days"
elif key == "overrideMessageProperties":
suggest = "override_message_properties"
elif key == "signMessage":
suggest = "sign_message"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2ValidationSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2ValidationSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2ValidationSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
check_certificate_revocation_list_on_receive: Optional[bool] = None,
check_certificate_revocation_list_on_send: Optional[bool] = None,
check_duplicate_message: Optional[bool] = None,
compress_message: Optional[bool] = None,
encrypt_message: Optional[bool] = None,
encryption_algorithm: Optional[str] = None,
interchange_duplicates_validity_days: Optional[int] = None,
override_message_properties: Optional[bool] = None,
sign_message: Optional[bool] = None):
"""
:param bool check_certificate_revocation_list_on_receive: The value indicating whether to check for certificate revocation list on receive.
:param bool check_certificate_revocation_list_on_send: The value indicating whether to check for certificate revocation list on send.
:param bool check_duplicate_message: The value indicating whether to check for duplicate message.
:param bool compress_message: The value indicating whether the message has to be compressed.
:param bool encrypt_message: The value indicating whether the message has to be encrypted.
:param str encryption_algorithm: The encryption algorithm.
        :param int interchange_duplicates_validity_days: The number of days to look back for a duplicate interchange.
:param bool override_message_properties: The value indicating whether to override incoming message properties with those in agreement.
:param bool sign_message: The value indicating whether the message has to be signed.
"""
if check_certificate_revocation_list_on_receive is not None:
pulumi.set(__self__, "check_certificate_revocation_list_on_receive", check_certificate_revocation_list_on_receive)
if check_certificate_revocation_list_on_send is not None:
pulumi.set(__self__, "check_certificate_revocation_list_on_send", check_certificate_revocation_list_on_send)
if check_duplicate_message is not None:
pulumi.set(__self__, "check_duplicate_message", check_duplicate_message)
if compress_message is not None:
pulumi.set(__self__, "compress_message", compress_message)
if encrypt_message is not None:
pulumi.set(__self__, "encrypt_message", encrypt_message)
if encryption_algorithm is not None:
pulumi.set(__self__, "encryption_algorithm", encryption_algorithm)
if interchange_duplicates_validity_days is not None:
pulumi.set(__self__, "interchange_duplicates_validity_days", interchange_duplicates_validity_days)
if override_message_properties is not None:
pulumi.set(__self__, "override_message_properties", override_message_properties)
if sign_message is not None:
pulumi.set(__self__, "sign_message", sign_message)
@property
@pulumi.getter(name="checkCertificateRevocationListOnReceive")
def check_certificate_revocation_list_on_receive(self) -> Optional[bool]:
"""
The value indicating whether to check for certificate revocation list on receive.
"""
return pulumi.get(self, "check_certificate_revocation_list_on_receive")
@property
@pulumi.getter(name="checkCertificateRevocationListOnSend")
def check_certificate_revocation_list_on_send(self) -> Optional[bool]:
"""
The value indicating whether to check for certificate revocation list on send.
"""
return pulumi.get(self, "check_certificate_revocation_list_on_send")
@property
@pulumi.getter(name="checkDuplicateMessage")
def check_duplicate_message(self) -> Optional[bool]:
"""
The value indicating whether to check for duplicate message.
"""
return pulumi.get(self, "check_duplicate_message")
@property
@pulumi.getter(name="compressMessage")
def compress_message(self) -> Optional[bool]:
"""
The value indicating whether the message has to be compressed.
"""
return pulumi.get(self, "compress_message")
@property
@pulumi.getter(name="encryptMessage")
def encrypt_message(self) -> Optional[bool]:
"""
The value indicating whether the message has to be encrypted.
"""
return pulumi.get(self, "encrypt_message")
@property
@pulumi.getter(name="encryptionAlgorithm")
def encryption_algorithm(self) -> Optional[str]:
"""
The encryption algorithm.
"""
return pulumi.get(self, "encryption_algorithm")
@property
@pulumi.getter(name="interchangeDuplicatesValidityDays")
def interchange_duplicates_validity_days(self) -> Optional[int]:
"""
        The number of days to look back for a duplicate interchange.
"""
return pulumi.get(self, "interchange_duplicates_validity_days")
@property
@pulumi.getter(name="overrideMessageProperties")
def override_message_properties(self) -> Optional[bool]:
"""
The value indicating whether to override incoming message properties with those in agreement.
"""
return pulumi.get(self, "override_message_properties")
@property
@pulumi.getter(name="signMessage")
def sign_message(self) -> Optional[bool]:
"""
The value indicating whether the message has to be signed.
"""
return pulumi.get(self, "sign_message")
@pulumi.output_type
class AgreementContentResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "aS2":
suggest = "a_s2"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
a_s2: Optional['outputs.AS2AgreementContentResponse'] = None,
edifact: Optional['outputs.EdifactAgreementContentResponse'] = None,
x12: Optional['outputs.X12AgreementContentResponse'] = None):
"""
:param 'AS2AgreementContentResponse' a_s2: The AS2 agreement content.
:param 'EdifactAgreementContentResponse' edifact: The EDIFACT agreement content.
:param 'X12AgreementContentResponse' x12: The X12 agreement content.
"""
if a_s2 is not None:
pulumi.set(__self__, "a_s2", a_s2)
if edifact is not None:
pulumi.set(__self__, "edifact", edifact)
if x12 is not None:
pulumi.set(__self__, "x12", x12)
@property
@pulumi.getter(name="aS2")
def a_s2(self) -> Optional['outputs.AS2AgreementContentResponse']:
"""
The AS2 agreement content.
"""
return pulumi.get(self, "a_s2")
@property
@pulumi.getter
def edifact(self) -> Optional['outputs.EdifactAgreementContentResponse']:
"""
The EDIFACT agreement content.
"""
return pulumi.get(self, "edifact")
@property
@pulumi.getter
def x12(self) -> Optional['outputs.X12AgreementContentResponse']:
"""
The X12 agreement content.
"""
return pulumi.get(self, "x12")
@pulumi.output_type
class B2BPartnerContentResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "businessIdentities":
suggest = "business_identities"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in B2BPartnerContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
B2BPartnerContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
B2BPartnerContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
business_identities: Optional[Sequence['outputs.BusinessIdentityResponse']] = None):
"""
:param Sequence['BusinessIdentityResponse'] business_identities: The list of partner business identities.
"""
if business_identities is not None:
pulumi.set(__self__, "business_identities", business_identities)
@property
@pulumi.getter(name="businessIdentities")
def business_identities(self) -> Optional[Sequence['outputs.BusinessIdentityResponse']]:
"""
The list of partner business identities.
"""
return pulumi.get(self, "business_identities")
@pulumi.output_type
class BusinessIdentityResponse(dict):
def __init__(__self__, *,
qualifier: Optional[str] = None,
value: Optional[str] = None):
"""
:param str qualifier: The business identity qualifier.
:param str value: The business identity value.
"""
if qualifier is not None:
pulumi.set(__self__, "qualifier", qualifier)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def qualifier(self) -> Optional[str]:
"""
The business identity qualifier.
"""
return pulumi.get(self, "qualifier")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
The business identity value.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class EdifactAcknowledgementSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementControlNumberLowerBound":
suggest = "acknowledgement_control_number_lower_bound"
elif key == "acknowledgementControlNumberPrefix":
suggest = "acknowledgement_control_number_prefix"
elif key == "acknowledgementControlNumberSuffix":
suggest = "acknowledgement_control_number_suffix"
elif key == "acknowledgementControlNumberUpperBound":
suggest = "acknowledgement_control_number_upper_bound"
elif key == "batchFunctionalAcknowledgements":
suggest = "batch_functional_acknowledgements"
elif key == "batchTechnicalAcknowledgements":
suggest = "batch_technical_acknowledgements"
elif key == "needFunctionalAcknowledgement":
suggest = "need_functional_acknowledgement"
elif key == "needLoopForValidMessages":
suggest = "need_loop_for_valid_messages"
elif key == "needTechnicalAcknowledgement":
suggest = "need_technical_acknowledgement"
elif key == "rolloverAcknowledgementControlNumber":
suggest = "rollover_acknowledgement_control_number"
elif key == "sendSynchronousAcknowledgement":
suggest = "send_synchronous_acknowledgement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactAcknowledgementSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactAcknowledgementSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactAcknowledgementSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_control_number_lower_bound: Optional[int] = None,
acknowledgement_control_number_prefix: Optional[str] = None,
acknowledgement_control_number_suffix: Optional[str] = None,
acknowledgement_control_number_upper_bound: Optional[int] = None,
batch_functional_acknowledgements: Optional[bool] = None,
batch_technical_acknowledgements: Optional[bool] = None,
need_functional_acknowledgement: Optional[bool] = None,
need_loop_for_valid_messages: Optional[bool] = None,
need_technical_acknowledgement: Optional[bool] = None,
rollover_acknowledgement_control_number: Optional[bool] = None,
send_synchronous_acknowledgement: Optional[bool] = None):
"""
:param int acknowledgement_control_number_lower_bound: The acknowledgement control number lower bound.
:param str acknowledgement_control_number_prefix: The acknowledgement control number prefix.
:param str acknowledgement_control_number_suffix: The acknowledgement control number suffix.
:param int acknowledgement_control_number_upper_bound: The acknowledgement control number upper bound.
:param bool batch_functional_acknowledgements: The value indicating whether to batch functional acknowledgements.
:param bool batch_technical_acknowledgements: The value indicating whether to batch the technical acknowledgements.
:param bool need_functional_acknowledgement: The value indicating whether functional acknowledgement is needed.
:param bool need_loop_for_valid_messages: The value indicating whether a loop is needed for valid messages.
:param bool need_technical_acknowledgement: The value indicating whether technical acknowledgement is needed.
        :param bool rollover_acknowledgement_control_number: The value indicating whether to roll over the acknowledgement control number.
        :param bool send_synchronous_acknowledgement: The value indicating whether to send a synchronous acknowledgement.
"""
if acknowledgement_control_number_lower_bound is not None:
pulumi.set(__self__, "acknowledgement_control_number_lower_bound", acknowledgement_control_number_lower_bound)
if acknowledgement_control_number_prefix is not None:
pulumi.set(__self__, "acknowledgement_control_number_prefix", acknowledgement_control_number_prefix)
if acknowledgement_control_number_suffix is not None:
pulumi.set(__self__, "acknowledgement_control_number_suffix", acknowledgement_control_number_suffix)
if acknowledgement_control_number_upper_bound is not None:
pulumi.set(__self__, "acknowledgement_control_number_upper_bound", acknowledgement_control_number_upper_bound)
if batch_functional_acknowledgements is not None:
pulumi.set(__self__, "batch_functional_acknowledgements", batch_functional_acknowledgements)
if batch_technical_acknowledgements is not None:
pulumi.set(__self__, "batch_technical_acknowledgements", batch_technical_acknowledgements)
if need_functional_acknowledgement is not None:
pulumi.set(__self__, "need_functional_acknowledgement", need_functional_acknowledgement)
if need_loop_for_valid_messages is not None:
pulumi.set(__self__, "need_loop_for_valid_messages", need_loop_for_valid_messages)
if need_technical_acknowledgement is not None:
pulumi.set(__self__, "need_technical_acknowledgement", need_technical_acknowledgement)
if rollover_acknowledgement_control_number is not None:
pulumi.set(__self__, "rollover_acknowledgement_control_number", rollover_acknowledgement_control_number)
if send_synchronous_acknowledgement is not None:
pulumi.set(__self__, "send_synchronous_acknowledgement", send_synchronous_acknowledgement)
@property
@pulumi.getter(name="acknowledgementControlNumberLowerBound")
def acknowledgement_control_number_lower_bound(self) -> Optional[int]:
"""
The acknowledgement control number lower bound.
"""
return pulumi.get(self, "acknowledgement_control_number_lower_bound")
@property
@pulumi.getter(name="acknowledgementControlNumberPrefix")
def acknowledgement_control_number_prefix(self) -> Optional[str]:
"""
The acknowledgement control number prefix.
"""
return pulumi.get(self, "acknowledgement_control_number_prefix")
@property
@pulumi.getter(name="acknowledgementControlNumberSuffix")
def acknowledgement_control_number_suffix(self) -> Optional[str]:
"""
The acknowledgement control number suffix.
"""
return pulumi.get(self, "acknowledgement_control_number_suffix")
@property
@pulumi.getter(name="acknowledgementControlNumberUpperBound")
def acknowledgement_control_number_upper_bound(self) -> Optional[int]:
"""
The acknowledgement control number upper bound.
"""
return pulumi.get(self, "acknowledgement_control_number_upper_bound")
@property
@pulumi.getter(name="batchFunctionalAcknowledgements")
def batch_functional_acknowledgements(self) -> Optional[bool]:
"""
The value indicating whether to batch functional acknowledgements.
"""
return pulumi.get(self, "batch_functional_acknowledgements")
@property
@pulumi.getter(name="batchTechnicalAcknowledgements")
def batch_technical_acknowledgements(self) -> Optional[bool]:
"""
The value indicating whether to batch the technical acknowledgements.
"""
return pulumi.get(self, "batch_technical_acknowledgements")
@property
@pulumi.getter(name="needFunctionalAcknowledgement")
def need_functional_acknowledgement(self) -> Optional[bool]:
"""
The value indicating whether functional acknowledgement is needed.
"""
return pulumi.get(self, "need_functional_acknowledgement")
@property
@pulumi.getter(name="needLoopForValidMessages")
def need_loop_for_valid_messages(self) -> Optional[bool]:
"""
The value indicating whether a loop is needed for valid messages.
"""
return pulumi.get(self, "need_loop_for_valid_messages")
@property
@pulumi.getter(name="needTechnicalAcknowledgement")
def need_technical_acknowledgement(self) -> Optional[bool]:
"""
The value indicating whether technical acknowledgement is needed.
"""
return pulumi.get(self, "need_technical_acknowledgement")
@property
@pulumi.getter(name="rolloverAcknowledgementControlNumber")
def rollover_acknowledgement_control_number(self) -> Optional[bool]:
"""
        The value indicating whether to roll over the acknowledgement control number.
"""
return pulumi.get(self, "rollover_acknowledgement_control_number")
@property
@pulumi.getter(name="sendSynchronousAcknowledgement")
def send_synchronous_acknowledgement(self) -> Optional[bool]:
"""
        The value indicating whether to send a synchronous acknowledgement.
"""
return pulumi.get(self, "send_synchronous_acknowledgement")
@pulumi.output_type
class EdifactAgreementContentResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "receiveAgreement":
suggest = "receive_agreement"
elif key == "sendAgreement":
suggest = "send_agreement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactAgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactAgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactAgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
receive_agreement: Optional['outputs.EdifactOneWayAgreementResponse'] = None,
send_agreement: Optional['outputs.EdifactOneWayAgreementResponse'] = None):
"""
:param 'EdifactOneWayAgreementResponse' receive_agreement: The EDIFACT one-way receive agreement.
:param 'EdifactOneWayAgreementResponse' send_agreement: The EDIFACT one-way send agreement.
"""
if receive_agreement is not None:
pulumi.set(__self__, "receive_agreement", receive_agreement)
if send_agreement is not None:
pulumi.set(__self__, "send_agreement", send_agreement)
@property
@pulumi.getter(name="receiveAgreement")
def receive_agreement(self) -> Optional['outputs.EdifactOneWayAgreementResponse']:
"""
The EDIFACT one-way receive agreement.
"""
return pulumi.get(self, "receive_agreement")
@property
@pulumi.getter(name="sendAgreement")
def send_agreement(self) -> Optional['outputs.EdifactOneWayAgreementResponse']:
"""
The EDIFACT one-way send agreement.
"""
return pulumi.get(self, "send_agreement")
@pulumi.output_type
class EdifactDelimiterOverrideResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "decimalPointIndicator":
suggest = "decimal_point_indicator"
elif key == "messageAssociationAssignedCode":
suggest = "message_association_assigned_code"
elif key == "messageId":
suggest = "message_id"
elif key == "messageRelease":
suggest = "message_release"
elif key == "messageVersion":
suggest = "message_version"
elif key == "releaseIndicator":
suggest = "release_indicator"
elif key == "repetitionSeparator":
suggest = "repetition_separator"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
elif key == "targetNamespace":
suggest = "target_namespace"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactDelimiterOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactDelimiterOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactDelimiterOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
component_separator: Optional[int] = None,
data_element_separator: Optional[int] = None,
decimal_point_indicator: Optional[str] = None,
message_association_assigned_code: Optional[str] = None,
message_id: Optional[str] = None,
message_release: Optional[str] = None,
message_version: Optional[str] = None,
release_indicator: Optional[int] = None,
repetition_separator: Optional[int] = None,
segment_terminator: Optional[int] = None,
segment_terminator_suffix: Optional[str] = None,
target_namespace: Optional[str] = None):
"""
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
:param str decimal_point_indicator: The decimal point indicator.
:param str message_association_assigned_code: The message association assigned code.
:param str message_id: The message id.
:param str message_release: The message release version.
:param str message_version: The message version.
:param int release_indicator: The release indicator.
:param int repetition_separator: The repetition separator.
:param int segment_terminator: The segment terminator.
:param str segment_terminator_suffix: The segment terminator suffix.
        :param str target_namespace: The target namespace on which these delimiter settings have to be applied.
"""
if component_separator is not None:
pulumi.set(__self__, "component_separator", component_separator)
if data_element_separator is not None:
pulumi.set(__self__, "data_element_separator", data_element_separator)
if decimal_point_indicator is not None:
pulumi.set(__self__, "decimal_point_indicator", decimal_point_indicator)
if message_association_assigned_code is not None:
pulumi.set(__self__, "message_association_assigned_code", message_association_assigned_code)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if message_release is not None:
pulumi.set(__self__, "message_release", message_release)
if message_version is not None:
pulumi.set(__self__, "message_version", message_version)
if release_indicator is not None:
pulumi.set(__self__, "release_indicator", release_indicator)
if repetition_separator is not None:
pulumi.set(__self__, "repetition_separator", repetition_separator)
if segment_terminator is not None:
pulumi.set(__self__, "segment_terminator", segment_terminator)
if segment_terminator_suffix is not None:
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> Optional[int]:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> Optional[int]:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="decimalPointIndicator")
def decimal_point_indicator(self) -> Optional[str]:
"""
The decimal point indicator.
"""
return pulumi.get(self, "decimal_point_indicator")
@property
@pulumi.getter(name="messageAssociationAssignedCode")
def message_association_assigned_code(self) -> Optional[str]:
"""
The message association assigned code.
"""
return pulumi.get(self, "message_association_assigned_code")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> Optional[str]:
"""
The message release version.
"""
return pulumi.get(self, "message_release")
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> Optional[str]:
"""
The message version.
"""
return pulumi.get(self, "message_version")
@property
@pulumi.getter(name="releaseIndicator")
def release_indicator(self) -> Optional[int]:
"""
The release indicator.
"""
return pulumi.get(self, "release_indicator")
@property
@pulumi.getter(name="repetitionSeparator")
def repetition_separator(self) -> Optional[int]:
"""
The repetition separator.
"""
return pulumi.get(self, "repetition_separator")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> Optional[int]:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> Optional[str]:
"""
The segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[str]:
"""
        The target namespace on which these delimiter settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
@pulumi.output_type
class EdifactEnvelopeOverrideResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "applicationPassword":
suggest = "application_password"
elif key == "associationAssignedCode":
suggest = "association_assigned_code"
elif key == "controllingAgencyCode":
suggest = "controlling_agency_code"
elif key == "functionalGroupId":
suggest = "functional_group_id"
elif key == "groupHeaderMessageRelease":
suggest = "group_header_message_release"
elif key == "groupHeaderMessageVersion":
suggest = "group_header_message_version"
elif key == "messageAssociationAssignedCode":
suggest = "message_association_assigned_code"
elif key == "messageId":
suggest = "message_id"
elif key == "messageRelease":
suggest = "message_release"
elif key == "messageVersion":
suggest = "message_version"
elif key == "receiverApplicationId":
suggest = "receiver_application_id"
elif key == "receiverApplicationQualifier":
suggest = "receiver_application_qualifier"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "senderApplicationQualifier":
suggest = "sender_application_qualifier"
elif key == "targetNamespace":
suggest = "target_namespace"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactEnvelopeOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactEnvelopeOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactEnvelopeOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
application_password: Optional[str] = None,
association_assigned_code: Optional[str] = None,
controlling_agency_code: Optional[str] = None,
functional_group_id: Optional[str] = None,
group_header_message_release: Optional[str] = None,
group_header_message_version: Optional[str] = None,
message_association_assigned_code: Optional[str] = None,
message_id: Optional[str] = None,
message_release: Optional[str] = None,
message_version: Optional[str] = None,
receiver_application_id: Optional[str] = None,
receiver_application_qualifier: Optional[str] = None,
sender_application_id: Optional[str] = None,
sender_application_qualifier: Optional[str] = None,
target_namespace: Optional[str] = None):
"""
:param str application_password: The application password.
:param str association_assigned_code: The association assigned code.
:param str controlling_agency_code: The controlling agency code.
:param str functional_group_id: The functional group id.
:param str group_header_message_release: The group header message release.
:param str group_header_message_version: The group header message version.
:param str message_association_assigned_code: The message association assigned code.
        :param str message_id: The message id on which these envelope settings have to be applied.
        :param str message_release: The message release version on which these envelope settings have to be applied.
        :param str message_version: The message version on which these envelope settings have to be applied.
:param str receiver_application_id: The receiver application id.
:param str receiver_application_qualifier: The receiver application qualifier.
:param str sender_application_id: The sender application id.
:param str sender_application_qualifier: The sender application qualifier.
        :param str target_namespace: The target namespace on which these envelope settings have to be applied.
"""
if application_password is not None:
pulumi.set(__self__, "application_password", application_password)
if association_assigned_code is not None:
pulumi.set(__self__, "association_assigned_code", association_assigned_code)
if controlling_agency_code is not None:
pulumi.set(__self__, "controlling_agency_code", controlling_agency_code)
if functional_group_id is not None:
pulumi.set(__self__, "functional_group_id", functional_group_id)
if group_header_message_release is not None:
pulumi.set(__self__, "group_header_message_release", group_header_message_release)
if group_header_message_version is not None:
pulumi.set(__self__, "group_header_message_version", group_header_message_version)
if message_association_assigned_code is not None:
pulumi.set(__self__, "message_association_assigned_code", message_association_assigned_code)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if message_release is not None:
pulumi.set(__self__, "message_release", message_release)
if message_version is not None:
pulumi.set(__self__, "message_version", message_version)
if receiver_application_id is not None:
pulumi.set(__self__, "receiver_application_id", receiver_application_id)
if receiver_application_qualifier is not None:
pulumi.set(__self__, "receiver_application_qualifier", receiver_application_qualifier)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
if sender_application_qualifier is not None:
pulumi.set(__self__, "sender_application_qualifier", sender_application_qualifier)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="applicationPassword")
def application_password(self) -> Optional[str]:
"""
The application password.
"""
return pulumi.get(self, "application_password")
@property
@pulumi.getter(name="associationAssignedCode")
def association_assigned_code(self) -> Optional[str]:
"""
The association assigned code.
"""
return pulumi.get(self, "association_assigned_code")
@property
@pulumi.getter(name="controllingAgencyCode")
def controlling_agency_code(self) -> Optional[str]:
"""
The controlling agency code.
"""
return pulumi.get(self, "controlling_agency_code")
@property
@pulumi.getter(name="functionalGroupId")
def functional_group_id(self) -> Optional[str]:
"""
The functional group id.
"""
return pulumi.get(self, "functional_group_id")
@property
@pulumi.getter(name="groupHeaderMessageRelease")
def group_header_message_release(self) -> Optional[str]:
"""
The group header message release.
"""
return pulumi.get(self, "group_header_message_release")
@property
@pulumi.getter(name="groupHeaderMessageVersion")
def group_header_message_version(self) -> Optional[str]:
"""
The group header message version.
"""
return pulumi.get(self, "group_header_message_version")
@property
@pulumi.getter(name="messageAssociationAssignedCode")
def message_association_assigned_code(self) -> Optional[str]:
"""
The message association assigned code.
"""
return pulumi.get(self, "message_association_assigned_code")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
        The message id on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> Optional[str]:
"""
        The message release version on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_release")
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> Optional[str]:
"""
        The message version on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_version")
@property
@pulumi.getter(name="receiverApplicationId")
def receiver_application_id(self) -> Optional[str]:
"""
The receiver application id.
"""
return pulumi.get(self, "receiver_application_id")
@property
@pulumi.getter(name="receiverApplicationQualifier")
def receiver_application_qualifier(self) -> Optional[str]:
"""
The receiver application qualifier.
"""
return pulumi.get(self, "receiver_application_qualifier")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="senderApplicationQualifier")
def sender_application_qualifier(self) -> Optional[str]:
"""
The sender application qualifier.
"""
return pulumi.get(self, "sender_application_qualifier")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[str]:
"""
        The target namespace on which these envelope settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
@pulumi.output_type
class EdifactEnvelopeSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "applicationReferenceId":
suggest = "application_reference_id"
elif key == "applyDelimiterStringAdvice":
suggest = "apply_delimiter_string_advice"
elif key == "communicationAgreementId":
suggest = "communication_agreement_id"
elif key == "createGroupingSegments":
suggest = "create_grouping_segments"
elif key == "enableDefaultGroupHeaders":
suggest = "enable_default_group_headers"
elif key == "functionalGroupId":
suggest = "functional_group_id"
elif key == "groupApplicationPassword":
suggest = "group_application_password"
elif key == "groupApplicationReceiverId":
suggest = "group_application_receiver_id"
elif key == "groupApplicationReceiverQualifier":
suggest = "group_application_receiver_qualifier"
elif key == "groupApplicationSenderId":
suggest = "group_application_sender_id"
elif key == "groupApplicationSenderQualifier":
suggest = "group_application_sender_qualifier"
elif key == "groupAssociationAssignedCode":
suggest = "group_association_assigned_code"
elif key == "groupControlNumberLowerBound":
suggest = "group_control_number_lower_bound"
elif key == "groupControlNumberPrefix":
suggest = "group_control_number_prefix"
elif key == "groupControlNumberSuffix":
suggest = "group_control_number_suffix"
elif key == "groupControlNumberUpperBound":
suggest = "group_control_number_upper_bound"
elif key == "groupControllingAgencyCode":
suggest = "group_controlling_agency_code"
elif key == "groupMessageRelease":
suggest = "group_message_release"
elif key == "groupMessageVersion":
suggest = "group_message_version"
elif key == "interchangeControlNumberLowerBound":
suggest = "interchange_control_number_lower_bound"
elif key == "interchangeControlNumberPrefix":
suggest = "interchange_control_number_prefix"
elif key == "interchangeControlNumberSuffix":
suggest = "interchange_control_number_suffix"
elif key == "interchangeControlNumberUpperBound":
suggest = "interchange_control_number_upper_bound"
elif key == "isTestInterchange":
suggest = "is_test_interchange"
elif key == "overwriteExistingTransactionSetControlNumber":
suggest = "overwrite_existing_transaction_set_control_number"
elif key == "processingPriorityCode":
suggest = "processing_priority_code"
elif key == "receiverInternalIdentification":
suggest = "receiver_internal_identification"
elif key == "receiverInternalSubIdentification":
suggest = "receiver_internal_sub_identification"
elif key == "receiverReverseRoutingAddress":
suggest = "receiver_reverse_routing_address"
elif key == "recipientReferencePasswordQualifier":
suggest = "recipient_reference_password_qualifier"
elif key == "recipientReferencePasswordValue":
suggest = "recipient_reference_password_value"
elif key == "rolloverGroupControlNumber":
suggest = "rollover_group_control_number"
elif key == "rolloverInterchangeControlNumber":
suggest = "rollover_interchange_control_number"
elif key == "rolloverTransactionSetControlNumber":
suggest = "rollover_transaction_set_control_number"
elif key == "senderInternalIdentification":
suggest = "sender_internal_identification"
elif key == "senderInternalSubIdentification":
suggest = "sender_internal_sub_identification"
elif key == "senderReverseRoutingAddress":
suggest = "sender_reverse_routing_address"
elif key == "transactionSetControlNumberLowerBound":
suggest = "transaction_set_control_number_lower_bound"
elif key == "transactionSetControlNumberPrefix":
suggest = "transaction_set_control_number_prefix"
elif key == "transactionSetControlNumberSuffix":
suggest = "transaction_set_control_number_suffix"
elif key == "transactionSetControlNumberUpperBound":
suggest = "transaction_set_control_number_upper_bound"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactEnvelopeSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactEnvelopeSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactEnvelopeSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
application_reference_id: Optional[str] = None,
apply_delimiter_string_advice: Optional[bool] = None,
communication_agreement_id: Optional[str] = None,
create_grouping_segments: Optional[bool] = None,
enable_default_group_headers: Optional[bool] = None,
functional_group_id: Optional[str] = None,
group_application_password: Optional[str] = None,
group_application_receiver_id: Optional[str] = None,
group_application_receiver_qualifier: Optional[str] = None,
group_application_sender_id: Optional[str] = None,
group_application_sender_qualifier: Optional[str] = None,
group_association_assigned_code: Optional[str] = None,
group_control_number_lower_bound: Optional[float] = None,
group_control_number_prefix: Optional[str] = None,
group_control_number_suffix: Optional[str] = None,
group_control_number_upper_bound: Optional[float] = None,
group_controlling_agency_code: Optional[str] = None,
group_message_release: Optional[str] = None,
group_message_version: Optional[str] = None,
interchange_control_number_lower_bound: Optional[float] = None,
interchange_control_number_prefix: Optional[str] = None,
interchange_control_number_suffix: Optional[str] = None,
interchange_control_number_upper_bound: Optional[float] = None,
is_test_interchange: Optional[bool] = None,
overwrite_existing_transaction_set_control_number: Optional[bool] = None,
processing_priority_code: Optional[str] = None,
receiver_internal_identification: Optional[str] = None,
receiver_internal_sub_identification: Optional[str] = None,
receiver_reverse_routing_address: Optional[str] = None,
recipient_reference_password_qualifier: Optional[str] = None,
recipient_reference_password_value: Optional[str] = None,
rollover_group_control_number: Optional[bool] = None,
rollover_interchange_control_number: Optional[bool] = None,
rollover_transaction_set_control_number: Optional[bool] = None,
sender_internal_identification: Optional[str] = None,
sender_internal_sub_identification: Optional[str] = None,
sender_reverse_routing_address: Optional[str] = None,
transaction_set_control_number_lower_bound: Optional[float] = None,
transaction_set_control_number_prefix: Optional[str] = None,
transaction_set_control_number_suffix: Optional[str] = None,
transaction_set_control_number_upper_bound: Optional[float] = None):
"""
:param str application_reference_id: The application reference id.
:param bool apply_delimiter_string_advice: The value indicating whether to apply delimiter string advice.
:param str communication_agreement_id: The communication agreement id.
:param bool create_grouping_segments: The value indicating whether to create grouping segments.
:param bool enable_default_group_headers: The value indicating whether to enable default group headers.
:param str functional_group_id: The functional group id.
:param str group_application_password: The group application password.
:param str group_application_receiver_id: The group application receiver id.
:param str group_application_receiver_qualifier: The group application receiver qualifier.
:param str group_application_sender_id: The group application sender id.
:param str group_application_sender_qualifier: The group application sender qualifier.
:param str group_association_assigned_code: The group association assigned code.
:param float group_control_number_lower_bound: The group control number lower bound.
:param str group_control_number_prefix: The group control number prefix.
:param str group_control_number_suffix: The group control number suffix.
:param float group_control_number_upper_bound: The group control number upper bound.
:param str group_controlling_agency_code: The group controlling agency code.
:param str group_message_release: The group message release.
:param str group_message_version: The group message version.
:param float interchange_control_number_lower_bound: The interchange control number lower bound.
:param str interchange_control_number_prefix: The interchange control number prefix.
:param str interchange_control_number_suffix: The interchange control number suffix.
:param float interchange_control_number_upper_bound: The interchange control number upper bound.
:param bool is_test_interchange: The value indicating whether the message is a test interchange.
:param bool overwrite_existing_transaction_set_control_number: The value indicating whether to overwrite existing transaction set control number.
:param str processing_priority_code: The processing priority code.
:param str receiver_internal_identification: The receiver internal identification.
:param str receiver_internal_sub_identification: The receiver internal sub identification.
:param str receiver_reverse_routing_address: The receiver reverse routing address.
:param str recipient_reference_password_qualifier: The recipient reference password qualifier.
:param str recipient_reference_password_value: The recipient reference password value.
        :param bool rollover_group_control_number: The value indicating whether to roll over the group control number.
        :param bool rollover_interchange_control_number: The value indicating whether to roll over the interchange control number.
        :param bool rollover_transaction_set_control_number: The value indicating whether to roll over the transaction set control number.
:param str sender_internal_identification: The sender internal identification.
:param str sender_internal_sub_identification: The sender internal sub identification.
:param str sender_reverse_routing_address: The sender reverse routing address.
:param float transaction_set_control_number_lower_bound: The transaction set control number lower bound.
:param str transaction_set_control_number_prefix: The transaction set control number prefix.
:param str transaction_set_control_number_suffix: The transaction set control number suffix.
:param float transaction_set_control_number_upper_bound: The transaction set control number upper bound.
"""
if application_reference_id is not None:
pulumi.set(__self__, "application_reference_id", application_reference_id)
if apply_delimiter_string_advice is not None:
pulumi.set(__self__, "apply_delimiter_string_advice", apply_delimiter_string_advice)
if communication_agreement_id is not None:
pulumi.set(__self__, "communication_agreement_id", communication_agreement_id)
if create_grouping_segments is not None:
pulumi.set(__self__, "create_grouping_segments", create_grouping_segments)
if enable_default_group_headers is not None:
pulumi.set(__self__, "enable_default_group_headers", enable_default_group_headers)
if functional_group_id is not None:
pulumi.set(__self__, "functional_group_id", functional_group_id)
if group_application_password is not None:
pulumi.set(__self__, "group_application_password", group_application_password)
if group_application_receiver_id is not None:
pulumi.set(__self__, "group_application_receiver_id", group_application_receiver_id)
if group_application_receiver_qualifier is not None:
pulumi.set(__self__, "group_application_receiver_qualifier", group_application_receiver_qualifier)
if group_application_sender_id is not None:
pulumi.set(__self__, "group_application_sender_id", group_application_sender_id)
if group_application_sender_qualifier is not None:
pulumi.set(__self__, "group_application_sender_qualifier", group_application_sender_qualifier)
if group_association_assigned_code is not None:
pulumi.set(__self__, "group_association_assigned_code", group_association_assigned_code)
if group_control_number_lower_bound is not None:
pulumi.set(__self__, "group_control_number_lower_bound", group_control_number_lower_bound)
if group_control_number_prefix is not None:
pulumi.set(__self__, "group_control_number_prefix", group_control_number_prefix)
if group_control_number_suffix is not None:
pulumi.set(__self__, "group_control_number_suffix", group_control_number_suffix)
if group_control_number_upper_bound is not None:
pulumi.set(__self__, "group_control_number_upper_bound", group_control_number_upper_bound)
if group_controlling_agency_code is not None:
pulumi.set(__self__, "group_controlling_agency_code", group_controlling_agency_code)
if group_message_release is not None:
pulumi.set(__self__, "group_message_release", group_message_release)
if group_message_version is not None:
pulumi.set(__self__, "group_message_version", group_message_version)
if interchange_control_number_lower_bound is not None:
pulumi.set(__self__, "interchange_control_number_lower_bound", interchange_control_number_lower_bound)
if interchange_control_number_prefix is not None:
pulumi.set(__self__, "interchange_control_number_prefix", interchange_control_number_prefix)
if interchange_control_number_suffix is not None:
pulumi.set(__self__, "interchange_control_number_suffix", interchange_control_number_suffix)
if interchange_control_number_upper_bound is not None:
pulumi.set(__self__, "interchange_control_number_upper_bound", interchange_control_number_upper_bound)
if is_test_interchange is not None:
pulumi.set(__self__, "is_test_interchange", is_test_interchange)
if overwrite_existing_transaction_set_control_number is not None:
pulumi.set(__self__, "overwrite_existing_transaction_set_control_number", overwrite_existing_transaction_set_control_number)
if processing_priority_code is not None:
pulumi.set(__self__, "processing_priority_code", processing_priority_code)
if receiver_internal_identification is not None:
pulumi.set(__self__, "receiver_internal_identification", receiver_internal_identification)
if receiver_internal_sub_identification is not None:
pulumi.set(__self__, "receiver_internal_sub_identification", receiver_internal_sub_identification)
if receiver_reverse_routing_address is not None:
pulumi.set(__self__, "receiver_reverse_routing_address", receiver_reverse_routing_address)
if recipient_reference_password_qualifier is not None:
pulumi.set(__self__, "recipient_reference_password_qualifier", recipient_reference_password_qualifier)
if recipient_reference_password_value is not None:
pulumi.set(__self__, "recipient_reference_password_value", recipient_reference_password_value)
if rollover_group_control_number is not None:
pulumi.set(__self__, "rollover_group_control_number", rollover_group_control_number)
if rollover_interchange_control_number is not None:
pulumi.set(__self__, "rollover_interchange_control_number", rollover_interchange_control_number)
if rollover_transaction_set_control_number is not None:
pulumi.set(__self__, "rollover_transaction_set_control_number", rollover_transaction_set_control_number)
if sender_internal_identification is not None:
pulumi.set(__self__, "sender_internal_identification", sender_internal_identification)
if sender_internal_sub_identification is not None:
pulumi.set(__self__, "sender_internal_sub_identification", sender_internal_sub_identification)
if sender_reverse_routing_address is not None:
pulumi.set(__self__, "sender_reverse_routing_address", sender_reverse_routing_address)
if transaction_set_control_number_lower_bound is not None:
pulumi.set(__self__, "transaction_set_control_number_lower_bound", transaction_set_control_number_lower_bound)
if transaction_set_control_number_prefix is not None:
pulumi.set(__self__, "transaction_set_control_number_prefix", transaction_set_control_number_prefix)
if transaction_set_control_number_suffix is not None:
pulumi.set(__self__, "transaction_set_control_number_suffix", transaction_set_control_number_suffix)
if transaction_set_control_number_upper_bound is not None:
pulumi.set(__self__, "transaction_set_control_number_upper_bound", transaction_set_control_number_upper_bound)
@property
@pulumi.getter(name="applicationReferenceId")
def application_reference_id(self) -> Optional[str]:
"""
The application reference id.
"""
return pulumi.get(self, "application_reference_id")
@property
@pulumi.getter(name="applyDelimiterStringAdvice")
def apply_delimiter_string_advice(self) -> Optional[bool]:
"""
The value indicating whether to apply delimiter string advice.
"""
return pulumi.get(self, "apply_delimiter_string_advice")
@property
@pulumi.getter(name="communicationAgreementId")
def communication_agreement_id(self) -> Optional[str]:
"""
The communication agreement id.
"""
return pulumi.get(self, "communication_agreement_id")
@property
@pulumi.getter(name="createGroupingSegments")
def create_grouping_segments(self) -> Optional[bool]:
"""
The value indicating whether to create grouping segments.
"""
return pulumi.get(self, "create_grouping_segments")
@property
@pulumi.getter(name="enableDefaultGroupHeaders")
def enable_default_group_headers(self) -> Optional[bool]:
"""
The value indicating whether to enable default group headers.
"""
return pulumi.get(self, "enable_default_group_headers")
@property
@pulumi.getter(name="functionalGroupId")
def functional_group_id(self) -> Optional[str]:
"""
The functional group id.
"""
return pulumi.get(self, "functional_group_id")
@property
@pulumi.getter(name="groupApplicationPassword")
def group_application_password(self) -> Optional[str]:
"""
The group application password.
"""
return pulumi.get(self, "group_application_password")
@property
@pulumi.getter(name="groupApplicationReceiverId")
def group_application_receiver_id(self) -> Optional[str]:
"""
The group application receiver id.
"""
return pulumi.get(self, "group_application_receiver_id")
@property
@pulumi.getter(name="groupApplicationReceiverQualifier")
def group_application_receiver_qualifier(self) -> Optional[str]:
"""
The group application receiver qualifier.
"""
return pulumi.get(self, "group_application_receiver_qualifier")
@property
@pulumi.getter(name="groupApplicationSenderId")
def group_application_sender_id(self) -> Optional[str]:
"""
The group application sender id.
"""
return pulumi.get(self, "group_application_sender_id")
@property
@pulumi.getter(name="groupApplicationSenderQualifier")
def group_application_sender_qualifier(self) -> Optional[str]:
"""
The group application sender qualifier.
"""
return pulumi.get(self, "group_application_sender_qualifier")
@property
@pulumi.getter(name="groupAssociationAssignedCode")
def group_association_assigned_code(self) -> Optional[str]:
"""
The group association assigned code.
"""
return pulumi.get(self, "group_association_assigned_code")
@property
@pulumi.getter(name="groupControlNumberLowerBound")
def group_control_number_lower_bound(self) -> Optional[float]:
"""
The group control number lower bound.
"""
return pulumi.get(self, "group_control_number_lower_bound")
@property
@pulumi.getter(name="groupControlNumberPrefix")
def group_control_number_prefix(self) -> Optional[str]:
"""
The group control number prefix.
"""
return pulumi.get(self, "group_control_number_prefix")
@property
@pulumi.getter(name="groupControlNumberSuffix")
def group_control_number_suffix(self) -> Optional[str]:
"""
The group control number suffix.
"""
return pulumi.get(self, "group_control_number_suffix")
@property
@pulumi.getter(name="groupControlNumberUpperBound")
def group_control_number_upper_bound(self) -> Optional[float]:
"""
The group control number upper bound.
"""
return pulumi.get(self, "group_control_number_upper_bound")
@property
@pulumi.getter(name="groupControllingAgencyCode")
def group_controlling_agency_code(self) -> Optional[str]:
"""
The group controlling agency code.
"""
return pulumi.get(self, "group_controlling_agency_code")
@property
@pulumi.getter(name="groupMessageRelease")
def group_message_release(self) -> Optional[str]:
"""
The group message release.
"""
return pulumi.get(self, "group_message_release")
@property
@pulumi.getter(name="groupMessageVersion")
def group_message_version(self) -> Optional[str]:
"""
The group message version.
"""
return pulumi.get(self, "group_message_version")
@property
@pulumi.getter(name="interchangeControlNumberLowerBound")
def interchange_control_number_lower_bound(self) -> Optional[float]:
"""
The interchange control number lower bound.
"""
return pulumi.get(self, "interchange_control_number_lower_bound")
@property
@pulumi.getter(name="interchangeControlNumberPrefix")
def interchange_control_number_prefix(self) -> Optional[str]:
"""
The interchange control number prefix.
"""
return pulumi.get(self, "interchange_control_number_prefix")
@property
@pulumi.getter(name="interchangeControlNumberSuffix")
def interchange_control_number_suffix(self) -> Optional[str]:
"""
The interchange control number suffix.
"""
return pulumi.get(self, "interchange_control_number_suffix")
@property
@pulumi.getter(name="interchangeControlNumberUpperBound")
def interchange_control_number_upper_bound(self) -> Optional[float]:
"""
The interchange control number upper bound.
"""
return pulumi.get(self, "interchange_control_number_upper_bound")
@property
@pulumi.getter(name="isTestInterchange")
def is_test_interchange(self) -> Optional[bool]:
"""
The value indicating whether the message is a test interchange.
"""
return pulumi.get(self, "is_test_interchange")
@property
@pulumi.getter(name="overwriteExistingTransactionSetControlNumber")
def overwrite_existing_transaction_set_control_number(self) -> Optional[bool]:
"""
The value indicating whether to overwrite existing transaction set control number.
"""
return pulumi.get(self, "overwrite_existing_transaction_set_control_number")
@property
@pulumi.getter(name="processingPriorityCode")
def processing_priority_code(self) -> Optional[str]:
"""
The processing priority code.
"""
return pulumi.get(self, "processing_priority_code")
@property
@pulumi.getter(name="receiverInternalIdentification")
def receiver_internal_identification(self) -> Optional[str]:
"""
The receiver internal identification.
"""
return pulumi.get(self, "receiver_internal_identification")
@property
@pulumi.getter(name="receiverInternalSubIdentification")
def receiver_internal_sub_identification(self) -> Optional[str]:
"""
The receiver internal sub identification.
"""
return pulumi.get(self, "receiver_internal_sub_identification")
@property
@pulumi.getter(name="receiverReverseRoutingAddress")
def receiver_reverse_routing_address(self) -> Optional[str]:
"""
The receiver reverse routing address.
"""
return pulumi.get(self, "receiver_reverse_routing_address")
@property
@pulumi.getter(name="recipientReferencePasswordQualifier")
def recipient_reference_password_qualifier(self) -> Optional[str]:
"""
The recipient reference password qualifier.
"""
return pulumi.get(self, "recipient_reference_password_qualifier")
@property
@pulumi.getter(name="recipientReferencePasswordValue")
def recipient_reference_password_value(self) -> Optional[str]:
"""
The recipient reference password value.
"""
return pulumi.get(self, "recipient_reference_password_value")
@property
@pulumi.getter(name="rolloverGroupControlNumber")
def rollover_group_control_number(self) -> Optional[bool]:
"""
        The value indicating whether to roll over the group control number.
"""
return pulumi.get(self, "rollover_group_control_number")
@property
@pulumi.getter(name="rolloverInterchangeControlNumber")
def rollover_interchange_control_number(self) -> Optional[bool]:
"""
        The value indicating whether to roll over the interchange control number.
"""
return pulumi.get(self, "rollover_interchange_control_number")
@property
@pulumi.getter(name="rolloverTransactionSetControlNumber")
def rollover_transaction_set_control_number(self) -> Optional[bool]:
"""
        The value indicating whether to roll over the transaction set control number.
"""
return pulumi.get(self, "rollover_transaction_set_control_number")
@property
@pulumi.getter(name="senderInternalIdentification")
def sender_internal_identification(self) -> Optional[str]:
"""
The sender internal identification.
"""
return pulumi.get(self, "sender_internal_identification")
@property
@pulumi.getter(name="senderInternalSubIdentification")
def sender_internal_sub_identification(self) -> Optional[str]:
"""
The sender internal sub identification.
"""
return pulumi.get(self, "sender_internal_sub_identification")
@property
@pulumi.getter(name="senderReverseRoutingAddress")
def sender_reverse_routing_address(self) -> Optional[str]:
"""
The sender reverse routing address.
"""
return pulumi.get(self, "sender_reverse_routing_address")
@property
@pulumi.getter(name="transactionSetControlNumberLowerBound")
def transaction_set_control_number_lower_bound(self) -> Optional[float]:
"""
The transaction set control number lower bound.
"""
return pulumi.get(self, "transaction_set_control_number_lower_bound")
@property
@pulumi.getter(name="transactionSetControlNumberPrefix")
def transaction_set_control_number_prefix(self) -> Optional[str]:
"""
The transaction set control number prefix.
"""
return pulumi.get(self, "transaction_set_control_number_prefix")
@property
@pulumi.getter(name="transactionSetControlNumberSuffix")
def transaction_set_control_number_suffix(self) -> Optional[str]:
"""
The transaction set control number suffix.
"""
return pulumi.get(self, "transaction_set_control_number_suffix")
@property
@pulumi.getter(name="transactionSetControlNumberUpperBound")
def transaction_set_control_number_upper_bound(self) -> Optional[float]:
"""
The transaction set control number upper bound.
"""
return pulumi.get(self, "transaction_set_control_number_upper_bound")
@pulumi.output_type
class EdifactFramingSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "characterEncoding":
suggest = "character_encoding"
elif key == "characterSet":
suggest = "character_set"
elif key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "decimalPointIndicator":
suggest = "decimal_point_indicator"
elif key == "protocolVersion":
suggest = "protocol_version"
elif key == "releaseIndicator":
suggest = "release_indicator"
elif key == "repetitionSeparator":
suggest = "repetition_separator"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
elif key == "serviceCodeListDirectoryVersion":
suggest = "service_code_list_directory_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactFramingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactFramingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactFramingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
character_encoding: Optional[str] = None,
character_set: Optional[str] = None,
component_separator: Optional[int] = None,
data_element_separator: Optional[int] = None,
decimal_point_indicator: Optional[str] = None,
protocol_version: Optional[int] = None,
release_indicator: Optional[int] = None,
repetition_separator: Optional[int] = None,
segment_terminator: Optional[int] = None,
segment_terminator_suffix: Optional[str] = None,
service_code_list_directory_version: Optional[str] = None):
"""
:param str character_encoding: The character encoding.
        :param str character_set: The EDIFACT framing character set.
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
        :param str decimal_point_indicator: The EDIFACT framing decimal point indicator.
:param int protocol_version: The protocol version.
:param int release_indicator: The release indicator.
:param int repetition_separator: The repetition separator.
:param int segment_terminator: The segment terminator.
        :param str segment_terminator_suffix: The EDIFACT framing segment terminator suffix.
:param str service_code_list_directory_version: The service code list directory version.
"""
if character_encoding is not None:
pulumi.set(__self__, "character_encoding", character_encoding)
if character_set is not None:
pulumi.set(__self__, "character_set", character_set)
if component_separator is not None:
pulumi.set(__self__, "component_separator", component_separator)
if data_element_separator is not None:
pulumi.set(__self__, "data_element_separator", data_element_separator)
if decimal_point_indicator is not None:
pulumi.set(__self__, "decimal_point_indicator", decimal_point_indicator)
if protocol_version is not None:
pulumi.set(__self__, "protocol_version", protocol_version)
if release_indicator is not None:
pulumi.set(__self__, "release_indicator", release_indicator)
if repetition_separator is not None:
pulumi.set(__self__, "repetition_separator", repetition_separator)
if segment_terminator is not None:
pulumi.set(__self__, "segment_terminator", segment_terminator)
if segment_terminator_suffix is not None:
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
if service_code_list_directory_version is not None:
pulumi.set(__self__, "service_code_list_directory_version", service_code_list_directory_version)
@property
@pulumi.getter(name="characterEncoding")
def character_encoding(self) -> Optional[str]:
"""
The character encoding.
"""
return pulumi.get(self, "character_encoding")
@property
@pulumi.getter(name="characterSet")
def character_set(self) -> Optional[str]:
"""
        The EDIFACT framing character set.
"""
return pulumi.get(self, "character_set")
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> Optional[int]:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> Optional[int]:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="decimalPointIndicator")
def decimal_point_indicator(self) -> Optional[str]:
"""
        The EDIFACT framing decimal point indicator.
"""
return pulumi.get(self, "decimal_point_indicator")
@property
@pulumi.getter(name="protocolVersion")
def protocol_version(self) -> Optional[int]:
"""
The protocol version.
"""
return pulumi.get(self, "protocol_version")
@property
@pulumi.getter(name="releaseIndicator")
def release_indicator(self) -> Optional[int]:
"""
The release indicator.
"""
return pulumi.get(self, "release_indicator")
@property
@pulumi.getter(name="repetitionSeparator")
def repetition_separator(self) -> Optional[int]:
"""
The repetition separator.
"""
return pulumi.get(self, "repetition_separator")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> Optional[int]:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> Optional[str]:
"""
        The EDIFACT framing segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@property
@pulumi.getter(name="serviceCodeListDirectoryVersion")
def service_code_list_directory_version(self) -> Optional[str]:
"""
The service code list directory version.
"""
return pulumi.get(self, "service_code_list_directory_version")
@pulumi.output_type
class EdifactMessageFilterResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageFilterType":
suggest = "message_filter_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactMessageFilterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactMessageFilterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactMessageFilterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_filter_type: Optional[str] = None):
"""
:param str message_filter_type: The message filter type.
"""
if message_filter_type is not None:
pulumi.set(__self__, "message_filter_type", message_filter_type)
@property
@pulumi.getter(name="messageFilterType")
def message_filter_type(self) -> Optional[str]:
"""
The message filter type.
"""
return pulumi.get(self, "message_filter_type")
@pulumi.output_type
class EdifactMessageIdentifierResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageId":
suggest = "message_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactMessageIdentifierResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactMessageIdentifierResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactMessageIdentifierResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_id: Optional[str] = None):
"""
        :param str message_id: The message id to which these envelope settings apply.
"""
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
        The message id to which these envelope settings apply.
"""
return pulumi.get(self, "message_id")
@pulumi.output_type
class EdifactOneWayAgreementResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "protocolSettings":
suggest = "protocol_settings"
elif key == "receiverBusinessIdentity":
suggest = "receiver_business_identity"
elif key == "senderBusinessIdentity":
suggest = "sender_business_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactOneWayAgreementResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactOneWayAgreementResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactOneWayAgreementResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
protocol_settings: Optional['outputs.EdifactProtocolSettingsResponse'] = None,
receiver_business_identity: Optional['outputs.BusinessIdentityResponse'] = None,
sender_business_identity: Optional['outputs.BusinessIdentityResponse'] = None):
"""
:param 'EdifactProtocolSettingsResponse' protocol_settings: The EDIFACT protocol settings.
        :param 'BusinessIdentityResponse' receiver_business_identity: The receiver business identity.
        :param 'BusinessIdentityResponse' sender_business_identity: The sender business identity.
"""
if protocol_settings is not None:
pulumi.set(__self__, "protocol_settings", protocol_settings)
if receiver_business_identity is not None:
pulumi.set(__self__, "receiver_business_identity", receiver_business_identity)
if sender_business_identity is not None:
pulumi.set(__self__, "sender_business_identity", sender_business_identity)
@property
@pulumi.getter(name="protocolSettings")
def protocol_settings(self) -> Optional['outputs.EdifactProtocolSettingsResponse']:
"""
The EDIFACT protocol settings.
"""
return pulumi.get(self, "protocol_settings")
@property
@pulumi.getter(name="receiverBusinessIdentity")
def receiver_business_identity(self) -> Optional['outputs.BusinessIdentityResponse']:
"""
        The receiver business identity.
"""
return pulumi.get(self, "receiver_business_identity")
@property
@pulumi.getter(name="senderBusinessIdentity")
def sender_business_identity(self) -> Optional['outputs.BusinessIdentityResponse']:
"""
        The sender business identity.
"""
return pulumi.get(self, "sender_business_identity")
@pulumi.output_type
class EdifactProcessingSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createEmptyXmlTagsForTrailingSeparators":
suggest = "create_empty_xml_tags_for_trailing_separators"
elif key == "maskSecurityInfo":
suggest = "mask_security_info"
elif key == "preserveInterchange":
suggest = "preserve_interchange"
elif key == "suspendInterchangeOnError":
suggest = "suspend_interchange_on_error"
elif key == "useDotAsDecimalSeparator":
suggest = "use_dot_as_decimal_separator"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactProcessingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactProcessingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactProcessingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
create_empty_xml_tags_for_trailing_separators: Optional[bool] = None,
mask_security_info: Optional[bool] = None,
preserve_interchange: Optional[bool] = None,
suspend_interchange_on_error: Optional[bool] = None,
use_dot_as_decimal_separator: Optional[bool] = None):
"""
        :param bool create_empty_xml_tags_for_trailing_separators: The value indicating whether to create empty XML tags for trailing separators.
:param bool mask_security_info: The value indicating whether to mask security information.
:param bool preserve_interchange: The value indicating whether to preserve interchange.
:param bool suspend_interchange_on_error: The value indicating whether to suspend interchange on error.
:param bool use_dot_as_decimal_separator: The value indicating whether to use dot as decimal separator.
"""
if create_empty_xml_tags_for_trailing_separators is not None:
pulumi.set(__self__, "create_empty_xml_tags_for_trailing_separators", create_empty_xml_tags_for_trailing_separators)
if mask_security_info is not None:
pulumi.set(__self__, "mask_security_info", mask_security_info)
if preserve_interchange is not None:
pulumi.set(__self__, "preserve_interchange", preserve_interchange)
if suspend_interchange_on_error is not None:
pulumi.set(__self__, "suspend_interchange_on_error", suspend_interchange_on_error)
if use_dot_as_decimal_separator is not None:
pulumi.set(__self__, "use_dot_as_decimal_separator", use_dot_as_decimal_separator)
@property
@pulumi.getter(name="createEmptyXmlTagsForTrailingSeparators")
def create_empty_xml_tags_for_trailing_separators(self) -> Optional[bool]:
"""
        The value indicating whether to create empty XML tags for trailing separators.
"""
return pulumi.get(self, "create_empty_xml_tags_for_trailing_separators")
@property
@pulumi.getter(name="maskSecurityInfo")
def mask_security_info(self) -> Optional[bool]:
"""
The value indicating whether to mask security information.
"""
return pulumi.get(self, "mask_security_info")
@property
@pulumi.getter(name="preserveInterchange")
def preserve_interchange(self) -> Optional[bool]:
"""
The value indicating whether to preserve interchange.
"""
return pulumi.get(self, "preserve_interchange")
@property
@pulumi.getter(name="suspendInterchangeOnError")
def suspend_interchange_on_error(self) -> Optional[bool]:
"""
The value indicating whether to suspend interchange on error.
"""
return pulumi.get(self, "suspend_interchange_on_error")
@property
@pulumi.getter(name="useDotAsDecimalSeparator")
def use_dot_as_decimal_separator(self) -> Optional[bool]:
"""
The value indicating whether to use dot as decimal separator.
"""
return pulumi.get(self, "use_dot_as_decimal_separator")
@pulumi.output_type
class EdifactProtocolSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementSettings":
suggest = "acknowledgement_settings"
elif key == "edifactDelimiterOverrides":
suggest = "edifact_delimiter_overrides"
elif key == "envelopeOverrides":
suggest = "envelope_overrides"
elif key == "envelopeSettings":
suggest = "envelope_settings"
elif key == "framingSettings":
suggest = "framing_settings"
elif key == "messageFilter":
suggest = "message_filter"
elif key == "messageFilterList":
suggest = "message_filter_list"
elif key == "processingSettings":
suggest = "processing_settings"
elif key == "schemaReferences":
suggest = "schema_references"
elif key == "validationOverrides":
suggest = "validation_overrides"
elif key == "validationSettings":
suggest = "validation_settings"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactProtocolSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactProtocolSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactProtocolSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_settings: Optional['outputs.EdifactAcknowledgementSettingsResponse'] = None,
edifact_delimiter_overrides: Optional[Sequence['outputs.EdifactDelimiterOverrideResponse']] = None,
envelope_overrides: Optional[Sequence['outputs.EdifactEnvelopeOverrideResponse']] = None,
envelope_settings: Optional['outputs.EdifactEnvelopeSettingsResponse'] = None,
framing_settings: Optional['outputs.EdifactFramingSettingsResponse'] = None,
message_filter: Optional['outputs.EdifactMessageFilterResponse'] = None,
message_filter_list: Optional[Sequence['outputs.EdifactMessageIdentifierResponse']] = None,
processing_settings: Optional['outputs.EdifactProcessingSettingsResponse'] = None,
schema_references: Optional[Sequence['outputs.EdifactSchemaReferenceResponse']] = None,
validation_overrides: Optional[Sequence['outputs.EdifactValidationOverrideResponse']] = None,
validation_settings: Optional['outputs.EdifactValidationSettingsResponse'] = None):
"""
:param 'EdifactAcknowledgementSettingsResponse' acknowledgement_settings: The EDIFACT acknowledgement settings.
:param Sequence['EdifactDelimiterOverrideResponse'] edifact_delimiter_overrides: The EDIFACT delimiter override settings.
:param Sequence['EdifactEnvelopeOverrideResponse'] envelope_overrides: The EDIFACT envelope override settings.
:param 'EdifactEnvelopeSettingsResponse' envelope_settings: The EDIFACT envelope settings.
:param 'EdifactFramingSettingsResponse' framing_settings: The EDIFACT framing settings.
:param 'EdifactMessageFilterResponse' message_filter: The EDIFACT message filter.
:param Sequence['EdifactMessageIdentifierResponse'] message_filter_list: The EDIFACT message filter list.
        :param 'EdifactProcessingSettingsResponse' processing_settings: The EDIFACT processing settings.
:param Sequence['EdifactSchemaReferenceResponse'] schema_references: The EDIFACT schema references.
:param Sequence['EdifactValidationOverrideResponse'] validation_overrides: The EDIFACT validation override settings.
:param 'EdifactValidationSettingsResponse' validation_settings: The EDIFACT validation settings.
"""
if acknowledgement_settings is not None:
pulumi.set(__self__, "acknowledgement_settings", acknowledgement_settings)
if edifact_delimiter_overrides is not None:
pulumi.set(__self__, "edifact_delimiter_overrides", edifact_delimiter_overrides)
if envelope_overrides is not None:
pulumi.set(__self__, "envelope_overrides", envelope_overrides)
if envelope_settings is not None:
pulumi.set(__self__, "envelope_settings", envelope_settings)
if framing_settings is not None:
pulumi.set(__self__, "framing_settings", framing_settings)
if message_filter is not None:
pulumi.set(__self__, "message_filter", message_filter)
if message_filter_list is not None:
pulumi.set(__self__, "message_filter_list", message_filter_list)
if processing_settings is not None:
pulumi.set(__self__, "processing_settings", processing_settings)
if schema_references is not None:
pulumi.set(__self__, "schema_references", schema_references)
if validation_overrides is not None:
pulumi.set(__self__, "validation_overrides", validation_overrides)
if validation_settings is not None:
pulumi.set(__self__, "validation_settings", validation_settings)
@property
@pulumi.getter(name="acknowledgementSettings")
def acknowledgement_settings(self) -> Optional['outputs.EdifactAcknowledgementSettingsResponse']:
"""
The EDIFACT acknowledgement settings.
"""
return pulumi.get(self, "acknowledgement_settings")
@property
@pulumi.getter(name="edifactDelimiterOverrides")
def edifact_delimiter_overrides(self) -> Optional[Sequence['outputs.EdifactDelimiterOverrideResponse']]:
"""
The EDIFACT delimiter override settings.
"""
return pulumi.get(self, "edifact_delimiter_overrides")
@property
@pulumi.getter(name="envelopeOverrides")
def envelope_overrides(self) -> Optional[Sequence['outputs.EdifactEnvelopeOverrideResponse']]:
"""
The EDIFACT envelope override settings.
"""
return pulumi.get(self, "envelope_overrides")
@property
@pulumi.getter(name="envelopeSettings")
def envelope_settings(self) -> Optional['outputs.EdifactEnvelopeSettingsResponse']:
"""
The EDIFACT envelope settings.
"""
return pulumi.get(self, "envelope_settings")
@property
@pulumi.getter(name="framingSettings")
def framing_settings(self) -> Optional['outputs.EdifactFramingSettingsResponse']:
"""
The EDIFACT framing settings.
"""
return pulumi.get(self, "framing_settings")
@property
@pulumi.getter(name="messageFilter")
def message_filter(self) -> Optional['outputs.EdifactMessageFilterResponse']:
"""
The EDIFACT message filter.
"""
return pulumi.get(self, "message_filter")
@property
@pulumi.getter(name="messageFilterList")
def message_filter_list(self) -> Optional[Sequence['outputs.EdifactMessageIdentifierResponse']]:
"""
The EDIFACT message filter list.
"""
return pulumi.get(self, "message_filter_list")
@property
@pulumi.getter(name="processingSettings")
def processing_settings(self) -> Optional['outputs.EdifactProcessingSettingsResponse']:
"""
        The EDIFACT processing settings.
"""
return pulumi.get(self, "processing_settings")
@property
@pulumi.getter(name="schemaReferences")
def schema_references(self) -> Optional[Sequence['outputs.EdifactSchemaReferenceResponse']]:
"""
The EDIFACT schema references.
"""
return pulumi.get(self, "schema_references")
@property
@pulumi.getter(name="validationOverrides")
def validation_overrides(self) -> Optional[Sequence['outputs.EdifactValidationOverrideResponse']]:
"""
The EDIFACT validation override settings.
"""
return pulumi.get(self, "validation_overrides")
@property
@pulumi.getter(name="validationSettings")
def validation_settings(self) -> Optional['outputs.EdifactValidationSettingsResponse']:
"""
The EDIFACT validation settings.
"""
return pulumi.get(self, "validation_settings")
@pulumi.output_type
class EdifactSchemaReferenceResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "associationAssignedCode":
suggest = "association_assigned_code"
elif key == "messageId":
suggest = "message_id"
elif key == "messageRelease":
suggest = "message_release"
elif key == "messageVersion":
suggest = "message_version"
elif key == "schemaName":
suggest = "schema_name"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "senderApplicationQualifier":
suggest = "sender_application_qualifier"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactSchemaReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactSchemaReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactSchemaReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
association_assigned_code: Optional[str] = None,
message_id: Optional[str] = None,
message_release: Optional[str] = None,
message_version: Optional[str] = None,
schema_name: Optional[str] = None,
sender_application_id: Optional[str] = None,
sender_application_qualifier: Optional[str] = None):
"""
:param str association_assigned_code: The association assigned code.
:param str message_id: The message id.
:param str message_release: The message release version.
:param str message_version: The message version.
:param str schema_name: The schema name.
:param str sender_application_id: The sender application id.
:param str sender_application_qualifier: The sender application qualifier.
"""
if association_assigned_code is not None:
pulumi.set(__self__, "association_assigned_code", association_assigned_code)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if message_release is not None:
pulumi.set(__self__, "message_release", message_release)
if message_version is not None:
pulumi.set(__self__, "message_version", message_version)
if schema_name is not None:
pulumi.set(__self__, "schema_name", schema_name)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
if sender_application_qualifier is not None:
pulumi.set(__self__, "sender_application_qualifier", sender_application_qualifier)
@property
@pulumi.getter(name="associationAssignedCode")
def association_assigned_code(self) -> Optional[str]:
"""
The association assigned code.
"""
return pulumi.get(self, "association_assigned_code")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> Optional[str]:
"""
The message release version.
"""
return pulumi.get(self, "message_release")
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> Optional[str]:
"""
The message version.
"""
return pulumi.get(self, "message_version")
@property
@pulumi.getter(name="schemaName")
def schema_name(self) -> Optional[str]:
"""
The schema name.
"""
return pulumi.get(self, "schema_name")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="senderApplicationQualifier")
def sender_application_qualifier(self) -> Optional[str]:
"""
The sender application qualifier.
"""
return pulumi.get(self, "sender_application_qualifier")
@pulumi.output_type
class EdifactValidationOverrideResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "enforceCharacterSet":
suggest = "enforce_character_set"
elif key == "messageId":
suggest = "message_id"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactValidationOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactValidationOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactValidationOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
enforce_character_set: Optional[bool] = None,
message_id: Optional[str] = None,
trailing_separator_policy: Optional[str] = None,
trim_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
validate_edi_types: Optional[bool] = None,
validate_xsd_types: Optional[bool] = None):
"""
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
        :param bool enforce_character_set: The value indicating whether to validate the character set.
        :param str message_id: The message id to which the validation settings apply.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
:param bool validate_edi_types: The value indicating whether to validate EDI types.
:param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
if allow_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
if enforce_character_set is not None:
pulumi.set(__self__, "enforce_character_set", enforce_character_set)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if trailing_separator_policy is not None:
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
if trim_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
if validate_edi_types is not None:
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
if validate_xsd_types is not None:
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="enforceCharacterSet")
def enforce_character_set(self) -> Optional[bool]:
"""
        The value indicating whether to validate the character set.
"""
return pulumi.get(self, "enforce_character_set")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
        The message id to which the validation settings apply.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> Optional[str]:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> Optional[bool]:
"""
The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> Optional[bool]:
"""
The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
@pulumi.output_type
class EdifactValidationSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "checkDuplicateGroupControlNumber":
suggest = "check_duplicate_group_control_number"
elif key == "checkDuplicateInterchangeControlNumber":
suggest = "check_duplicate_interchange_control_number"
elif key == "checkDuplicateTransactionSetControlNumber":
suggest = "check_duplicate_transaction_set_control_number"
elif key == "interchangeControlNumberValidityDays":
suggest = "interchange_control_number_validity_days"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateCharacterSet":
suggest = "validate_character_set"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactValidationSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactValidationSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactValidationSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
check_duplicate_group_control_number: Optional[bool] = None,
check_duplicate_interchange_control_number: Optional[bool] = None,
check_duplicate_transaction_set_control_number: Optional[bool] = None,
interchange_control_number_validity_days: Optional[int] = None,
trailing_separator_policy: Optional[str] = None,
trim_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
validate_character_set: Optional[bool] = None,
validate_edi_types: Optional[bool] = None,
validate_xsd_types: Optional[bool] = None):
"""
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
:param bool check_duplicate_group_control_number: The value indicating whether to check for duplicate group control number.
:param bool check_duplicate_interchange_control_number: The value indicating whether to check for duplicate interchange control number.
:param bool check_duplicate_transaction_set_control_number: The value indicating whether to check for duplicate transaction set control number.
        :param int interchange_control_number_validity_days: The validity period of the interchange control number, in days.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
:param bool validate_character_set: The value indicating whether to validate character set in the message.
        :param bool validate_edi_types: The value indicating whether to validate EDI types.
        :param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
if allow_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
if check_duplicate_group_control_number is not None:
pulumi.set(__self__, "check_duplicate_group_control_number", check_duplicate_group_control_number)
if check_duplicate_interchange_control_number is not None:
pulumi.set(__self__, "check_duplicate_interchange_control_number", check_duplicate_interchange_control_number)
if check_duplicate_transaction_set_control_number is not None:
pulumi.set(__self__, "check_duplicate_transaction_set_control_number", check_duplicate_transaction_set_control_number)
if interchange_control_number_validity_days is not None:
pulumi.set(__self__, "interchange_control_number_validity_days", interchange_control_number_validity_days)
if trailing_separator_policy is not None:
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
if trim_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
if validate_character_set is not None:
pulumi.set(__self__, "validate_character_set", validate_character_set)
if validate_edi_types is not None:
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
if validate_xsd_types is not None:
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="checkDuplicateGroupControlNumber")
def check_duplicate_group_control_number(self) -> Optional[bool]:
"""
The value indicating whether to check for duplicate group control number.
"""
return pulumi.get(self, "check_duplicate_group_control_number")
@property
@pulumi.getter(name="checkDuplicateInterchangeControlNumber")
def check_duplicate_interchange_control_number(self) -> Optional[bool]:
"""
The value indicating whether to check for duplicate interchange control number.
"""
return pulumi.get(self, "check_duplicate_interchange_control_number")
@property
@pulumi.getter(name="checkDuplicateTransactionSetControlNumber")
def check_duplicate_transaction_set_control_number(self) -> Optional[bool]:
"""
The value indicating whether to check for duplicate transaction set control number.
"""
return pulumi.get(self, "check_duplicate_transaction_set_control_number")
@property
@pulumi.getter(name="interchangeControlNumberValidityDays")
def interchange_control_number_validity_days(self) -> Optional[int]:
"""
        The validity period of the interchange control number, in days.
"""
return pulumi.get(self, "interchange_control_number_validity_days")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> Optional[str]:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateCharacterSet")
def validate_character_set(self) -> Optional[bool]:
"""
The value indicating whether to validate character set in the message.
"""
return pulumi.get(self, "validate_character_set")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> Optional[bool]:
"""
        The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> Optional[bool]:
"""
        The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
@pulumi.output_type
class IntegrationAccountContentHashResponse(dict):
def __init__(__self__, *,
algorithm: Optional[str] = None,
value: Optional[str] = None):
"""
:param str algorithm: The content hash algorithm.
:param str value: The content hash value.
"""
if algorithm is not None:
pulumi.set(__self__, "algorithm", algorithm)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def algorithm(self) -> Optional[str]:
"""
The content hash algorithm.
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
The content hash value.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class IntegrationAccountContentLinkResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "contentHash":
suggest = "content_hash"
elif key == "contentSize":
suggest = "content_size"
elif key == "contentVersion":
suggest = "content_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IntegrationAccountContentLinkResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IntegrationAccountContentLinkResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IntegrationAccountContentLinkResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
content_hash: Optional['outputs.IntegrationAccountContentHashResponse'] = None,
content_size: Optional[float] = None,
content_version: Optional[str] = None,
metadata: Optional[Any] = None,
uri: Optional[str] = None):
"""
:param 'IntegrationAccountContentHashResponse' content_hash: The content hash.
:param float content_size: The content size.
:param str content_version: The content version.
:param Any metadata: The metadata.
:param str uri: The content link URI.
"""
if content_hash is not None:
pulumi.set(__self__, "content_hash", content_hash)
if content_size is not None:
pulumi.set(__self__, "content_size", content_size)
if content_version is not None:
pulumi.set(__self__, "content_version", content_version)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="contentHash")
def content_hash(self) -> Optional['outputs.IntegrationAccountContentHashResponse']:
"""
The content hash.
"""
return pulumi.get(self, "content_hash")
@property
@pulumi.getter(name="contentSize")
def content_size(self) -> Optional[float]:
"""
The content size.
"""
return pulumi.get(self, "content_size")
@property
@pulumi.getter(name="contentVersion")
def content_version(self) -> Optional[str]:
"""
The content version.
"""
return pulumi.get(self, "content_version")
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The content link URI.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class IntegrationAccountSkuResponse(dict):
def __init__(__self__, *,
name: Optional[str] = None):
"""
:param str name: The sku name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class KeyVaultKeyReferenceResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "keyName":
suggest = "key_name"
elif key == "keyVault":
suggest = "key_vault"
elif key == "keyVersion":
suggest = "key_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KeyVaultKeyReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KeyVaultKeyReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KeyVaultKeyReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
key_name: Optional[str] = None,
key_vault: Optional['outputs.KeyVaultKeyReferenceResponseKeyVault'] = None,
key_version: Optional[str] = None):
"""
:param str key_name: The private key name in key vault.
:param 'KeyVaultKeyReferenceResponseKeyVault' key_vault: The key vault reference.
:param str key_version: The private key version in key vault.
"""
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if key_vault is not None:
pulumi.set(__self__, "key_vault", key_vault)
if key_version is not None:
pulumi.set(__self__, "key_version", key_version)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[str]:
"""
The private key name in key vault.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="keyVault")
def key_vault(self) -> Optional['outputs.KeyVaultKeyReferenceResponseKeyVault']:
"""
The key vault reference.
"""
return pulumi.get(self, "key_vault")
@property
@pulumi.getter(name="keyVersion")
def key_version(self) -> Optional[str]:
"""
The private key version in key vault.
"""
return pulumi.get(self, "key_version")
@pulumi.output_type
class KeyVaultKeyReferenceResponseKeyVault(dict):
"""
The key vault reference.
"""
def __init__(__self__, *,
name: str,
type: str,
id: Optional[str] = None):
"""
The key vault reference.
:param str name: The resource name.
:param str type: The resource type.
:param str id: The resource id.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The resource id.
"""
return pulumi.get(self, "id")
@pulumi.output_type
class PartnerContentResponse(dict):
def __init__(__self__, *,
b2b: Optional['outputs.B2BPartnerContentResponse'] = None):
"""
:param 'B2BPartnerContentResponse' b2b: The B2B partner content.
"""
if b2b is not None:
pulumi.set(__self__, "b2b", b2b)
@property
@pulumi.getter
def b2b(self) -> Optional['outputs.B2BPartnerContentResponse']:
"""
The B2B partner content.
"""
return pulumi.get(self, "b2b")
@pulumi.output_type
class X12AcknowledgementSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementControlNumberLowerBound":
suggest = "acknowledgement_control_number_lower_bound"
elif key == "acknowledgementControlNumberPrefix":
suggest = "acknowledgement_control_number_prefix"
elif key == "acknowledgementControlNumberSuffix":
suggest = "acknowledgement_control_number_suffix"
elif key == "acknowledgementControlNumberUpperBound":
suggest = "acknowledgement_control_number_upper_bound"
elif key == "batchFunctionalAcknowledgements":
suggest = "batch_functional_acknowledgements"
elif key == "batchImplementationAcknowledgements":
suggest = "batch_implementation_acknowledgements"
elif key == "batchTechnicalAcknowledgements":
suggest = "batch_technical_acknowledgements"
elif key == "functionalAcknowledgementVersion":
suggest = "functional_acknowledgement_version"
elif key == "implementationAcknowledgementVersion":
suggest = "implementation_acknowledgement_version"
elif key == "needFunctionalAcknowledgement":
suggest = "need_functional_acknowledgement"
elif key == "needImplementationAcknowledgement":
suggest = "need_implementation_acknowledgement"
elif key == "needLoopForValidMessages":
suggest = "need_loop_for_valid_messages"
elif key == "needTechnicalAcknowledgement":
suggest = "need_technical_acknowledgement"
elif key == "rolloverAcknowledgementControlNumber":
suggest = "rollover_acknowledgement_control_number"
elif key == "sendSynchronousAcknowledgement":
suggest = "send_synchronous_acknowledgement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12AcknowledgementSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12AcknowledgementSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12AcknowledgementSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_control_number_lower_bound: Optional[int] = None,
acknowledgement_control_number_prefix: Optional[str] = None,
acknowledgement_control_number_suffix: Optional[str] = None,
acknowledgement_control_number_upper_bound: Optional[int] = None,
batch_functional_acknowledgements: Optional[bool] = None,
batch_implementation_acknowledgements: Optional[bool] = None,
batch_technical_acknowledgements: Optional[bool] = None,
functional_acknowledgement_version: Optional[str] = None,
implementation_acknowledgement_version: Optional[str] = None,
need_functional_acknowledgement: Optional[bool] = None,
need_implementation_acknowledgement: Optional[bool] = None,
need_loop_for_valid_messages: Optional[bool] = None,
need_technical_acknowledgement: Optional[bool] = None,
rollover_acknowledgement_control_number: Optional[bool] = None,
send_synchronous_acknowledgement: Optional[bool] = None):
"""
:param int acknowledgement_control_number_lower_bound: The acknowledgement control number lower bound.
:param str acknowledgement_control_number_prefix: The acknowledgement control number prefix.
:param str acknowledgement_control_number_suffix: The acknowledgement control number suffix.
:param int acknowledgement_control_number_upper_bound: The acknowledgement control number upper bound.
:param bool batch_functional_acknowledgements: The value indicating whether to batch functional acknowledgements.
:param bool batch_implementation_acknowledgements: The value indicating whether to batch implementation acknowledgements.
:param bool batch_technical_acknowledgements: The value indicating whether to batch the technical acknowledgements.
:param str functional_acknowledgement_version: The functional acknowledgement version.
:param str implementation_acknowledgement_version: The implementation acknowledgement version.
:param bool need_functional_acknowledgement: The value indicating whether functional acknowledgement is needed.
:param bool need_implementation_acknowledgement: The value indicating whether implementation acknowledgement is needed.
:param bool need_loop_for_valid_messages: The value indicating whether a loop is needed for valid messages.
:param bool need_technical_acknowledgement: The value indicating whether technical acknowledgement is needed.
:param bool rollover_acknowledgement_control_number: The value indicating whether to rollover acknowledgement control number.
:param bool send_synchronous_acknowledgement: The value indicating whether to send synchronous acknowledgement.
"""
if acknowledgement_control_number_lower_bound is not None:
pulumi.set(__self__, "acknowledgement_control_number_lower_bound", acknowledgement_control_number_lower_bound)
if acknowledgement_control_number_prefix is not None:
pulumi.set(__self__, "acknowledgement_control_number_prefix", acknowledgement_control_number_prefix)
if acknowledgement_control_number_suffix is not None:
pulumi.set(__self__, "acknowledgement_control_number_suffix", acknowledgement_control_number_suffix)
if acknowledgement_control_number_upper_bound is not None:
pulumi.set(__self__, "acknowledgement_control_number_upper_bound", acknowledgement_control_number_upper_bound)
if batch_functional_acknowledgements is not None:
pulumi.set(__self__, "batch_functional_acknowledgements", batch_functional_acknowledgements)
if batch_implementation_acknowledgements is not None:
pulumi.set(__self__, "batch_implementation_acknowledgements", batch_implementation_acknowledgements)
if batch_technical_acknowledgements is not None:
pulumi.set(__self__, "batch_technical_acknowledgements", batch_technical_acknowledgements)
if functional_acknowledgement_version is not None:
pulumi.set(__self__, "functional_acknowledgement_version", functional_acknowledgement_version)
if implementation_acknowledgement_version is not None:
pulumi.set(__self__, "implementation_acknowledgement_version", implementation_acknowledgement_version)
if need_functional_acknowledgement is not None:
pulumi.set(__self__, "need_functional_acknowledgement", need_functional_acknowledgement)
if need_implementation_acknowledgement is not None:
pulumi.set(__self__, "need_implementation_acknowledgement", need_implementation_acknowledgement)
if need_loop_for_valid_messages is not None:
pulumi.set(__self__, "need_loop_for_valid_messages", need_loop_for_valid_messages)
if need_technical_acknowledgement is not None:
pulumi.set(__self__, "need_technical_acknowledgement", need_technical_acknowledgement)
if rollover_acknowledgement_control_number is not None:
pulumi.set(__self__, "rollover_acknowledgement_control_number", rollover_acknowledgement_control_number)
if send_synchronous_acknowledgement is not None:
pulumi.set(__self__, "send_synchronous_acknowledgement", send_synchronous_acknowledgement)
@property
@pulumi.getter(name="acknowledgementControlNumberLowerBound")
def acknowledgement_control_number_lower_bound(self) -> Optional[int]:
"""
The acknowledgement control number lower bound.
"""
return pulumi.get(self, "acknowledgement_control_number_lower_bound")
@property
@pulumi.getter(name="acknowledgementControlNumberPrefix")
def acknowledgement_control_number_prefix(self) -> Optional[str]:
"""
The acknowledgement control number prefix.
"""
return pulumi.get(self, "acknowledgement_control_number_prefix")
@property
@pulumi.getter(name="acknowledgementControlNumberSuffix")
def acknowledgement_control_number_suffix(self) -> Optional[str]:
"""
The acknowledgement control number suffix.
"""
return pulumi.get(self, "acknowledgement_control_number_suffix")
@property
@pulumi.getter(name="acknowledgementControlNumberUpperBound")
def acknowledgement_control_number_upper_bound(self) -> Optional[int]:
"""
The acknowledgement control number upper bound.
"""
return pulumi.get(self, "acknowledgement_control_number_upper_bound")
@property
@pulumi.getter(name="batchFunctionalAcknowledgements")
def batch_functional_acknowledgements(self) -> Optional[bool]:
"""
The value indicating whether to batch functional acknowledgements.
"""
return pulumi.get(self, "batch_functional_acknowledgements")
@property
@pulumi.getter(name="batchImplementationAcknowledgements")
def batch_implementation_acknowledgements(self) -> Optional[bool]:
"""
The value indicating whether to batch implementation acknowledgements.
"""
return pulumi.get(self, "batch_implementation_acknowledgements")
@property
@pulumi.getter(name="batchTechnicalAcknowledgements")
def batch_technical_acknowledgements(self) -> Optional[bool]:
"""
The value indicating whether to batch the technical acknowledgements.
"""
return pulumi.get(self, "batch_technical_acknowledgements")
@property
@pulumi.getter(name="functionalAcknowledgementVersion")
def functional_acknowledgement_version(self) -> Optional[str]:
"""
The functional acknowledgement version.
"""
return pulumi.get(self, "functional_acknowledgement_version")
@property
@pulumi.getter(name="implementationAcknowledgementVersion")
def implementation_acknowledgement_version(self) -> Optional[str]:
"""
The implementation acknowledgement version.
"""
return pulumi.get(self, "implementation_acknowledgement_version")
@property
@pulumi.getter(name="needFunctionalAcknowledgement")
def need_functional_acknowledgement(self) -> Optional[bool]:
"""
The value indicating whether functional acknowledgement is needed.
"""
return pulumi.get(self, "need_functional_acknowledgement")
@property
@pulumi.getter(name="needImplementationAcknowledgement")
def need_implementation_acknowledgement(self) -> Optional[bool]:
"""
The value indicating whether implementation acknowledgement is needed.
"""
return pulumi.get(self, "need_implementation_acknowledgement")
@property
@pulumi.getter(name="needLoopForValidMessages")
def need_loop_for_valid_messages(self) -> Optional[bool]:
"""
The value indicating whether a loop is needed for valid messages.
"""
return pulumi.get(self, "need_loop_for_valid_messages")
@property
@pulumi.getter(name="needTechnicalAcknowledgement")
def need_technical_acknowledgement(self) -> Optional[bool]:
"""
The value indicating whether technical acknowledgement is needed.
"""
return pulumi.get(self, "need_technical_acknowledgement")
@property
@pulumi.getter(name="rolloverAcknowledgementControlNumber")
def rollover_acknowledgement_control_number(self) -> Optional[bool]:
"""
The value indicating whether to rollover acknowledgement control number.
"""
return pulumi.get(self, "rollover_acknowledgement_control_number")
@property
@pulumi.getter(name="sendSynchronousAcknowledgement")
def send_synchronous_acknowledgement(self) -> Optional[bool]:
"""
The value indicating whether to send synchronous acknowledgement.
"""
return pulumi.get(self, "send_synchronous_acknowledgement")
@pulumi.output_type
class X12AgreementContentResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "receiveAgreement":
suggest = "receive_agreement"
elif key == "sendAgreement":
suggest = "send_agreement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12AgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12AgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12AgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
receive_agreement: Optional['outputs.X12OneWayAgreementResponse'] = None,
send_agreement: Optional['outputs.X12OneWayAgreementResponse'] = None):
"""
:param 'X12OneWayAgreementResponse' receive_agreement: The X12 one-way receive agreement.
:param 'X12OneWayAgreementResponse' send_agreement: The X12 one-way send agreement.
"""
if receive_agreement is not None:
pulumi.set(__self__, "receive_agreement", receive_agreement)
if send_agreement is not None:
pulumi.set(__self__, "send_agreement", send_agreement)
@property
@pulumi.getter(name="receiveAgreement")
def receive_agreement(self) -> Optional['outputs.X12OneWayAgreementResponse']:
"""
The X12 one-way receive agreement.
"""
return pulumi.get(self, "receive_agreement")
@property
@pulumi.getter(name="sendAgreement")
def send_agreement(self) -> Optional['outputs.X12OneWayAgreementResponse']:
"""
The X12 one-way send agreement.
"""
return pulumi.get(self, "send_agreement")
@pulumi.output_type
class X12DelimiterOverridesResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "messageId":
suggest = "message_id"
elif key == "protocolVersion":
suggest = "protocol_version"
elif key == "replaceCharacter":
suggest = "replace_character"
elif key == "replaceSeparatorsInPayload":
suggest = "replace_separators_in_payload"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
elif key == "targetNamespace":
suggest = "target_namespace"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12DelimiterOverridesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12DelimiterOverridesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12DelimiterOverridesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
component_separator: Optional[int] = None,
data_element_separator: Optional[int] = None,
message_id: Optional[str] = None,
protocol_version: Optional[str] = None,
replace_character: Optional[int] = None,
replace_separators_in_payload: Optional[bool] = None,
segment_terminator: Optional[int] = None,
segment_terminator_suffix: Optional[str] = None,
target_namespace: Optional[str] = None):
"""
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
:param str message_id: The message id.
:param str protocol_version: The protocol version.
:param int replace_character: The replacement character.
:param bool replace_separators_in_payload: The value indicating whether to replace separators in payload.
:param int segment_terminator: The segment terminator.
:param str segment_terminator_suffix: The segment terminator suffix.
        :param str target_namespace: The target namespace on which these delimiter settings have to be applied.
"""
if component_separator is not None:
pulumi.set(__self__, "component_separator", component_separator)
if data_element_separator is not None:
pulumi.set(__self__, "data_element_separator", data_element_separator)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if protocol_version is not None:
pulumi.set(__self__, "protocol_version", protocol_version)
if replace_character is not None:
pulumi.set(__self__, "replace_character", replace_character)
if replace_separators_in_payload is not None:
pulumi.set(__self__, "replace_separators_in_payload", replace_separators_in_payload)
if segment_terminator is not None:
pulumi.set(__self__, "segment_terminator", segment_terminator)
if segment_terminator_suffix is not None:
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> Optional[int]:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> Optional[int]:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="protocolVersion")
def protocol_version(self) -> Optional[str]:
"""
The protocol version.
"""
return pulumi.get(self, "protocol_version")
@property
@pulumi.getter(name="replaceCharacter")
def replace_character(self) -> Optional[int]:
"""
The replacement character.
"""
return pulumi.get(self, "replace_character")
@property
@pulumi.getter(name="replaceSeparatorsInPayload")
def replace_separators_in_payload(self) -> Optional[bool]:
"""
The value indicating whether to replace separators in payload.
"""
return pulumi.get(self, "replace_separators_in_payload")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> Optional[int]:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> Optional[str]:
"""
The segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[str]:
"""
        The target namespace on which these delimiter settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
@pulumi.output_type
class X12EnvelopeOverrideResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dateFormat":
suggest = "date_format"
elif key == "functionalIdentifierCode":
suggest = "functional_identifier_code"
elif key == "headerVersion":
suggest = "header_version"
elif key == "messageId":
suggest = "message_id"
elif key == "protocolVersion":
suggest = "protocol_version"
elif key == "receiverApplicationId":
suggest = "receiver_application_id"
elif key == "responsibleAgencyCode":
suggest = "responsible_agency_code"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "targetNamespace":
suggest = "target_namespace"
elif key == "timeFormat":
suggest = "time_format"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12EnvelopeOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12EnvelopeOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12EnvelopeOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
date_format: Optional[str] = None,
functional_identifier_code: Optional[str] = None,
header_version: Optional[str] = None,
message_id: Optional[str] = None,
protocol_version: Optional[str] = None,
receiver_application_id: Optional[str] = None,
responsible_agency_code: Optional[int] = None,
sender_application_id: Optional[str] = None,
target_namespace: Optional[str] = None,
time_format: Optional[str] = None):
"""
:param str date_format: The date format.
:param str functional_identifier_code: The functional identifier code.
:param str header_version: The header version.
        :param str message_id: The message id on which these envelope settings have to be applied.
        :param str protocol_version: The protocol version on which these envelope settings have to be applied.
:param str receiver_application_id: The receiver application id.
:param int responsible_agency_code: The responsible agency code.
:param str sender_application_id: The sender application id.
        :param str target_namespace: The target namespace on which these envelope settings have to be applied.
:param str time_format: The time format.
"""
if date_format is not None:
pulumi.set(__self__, "date_format", date_format)
if functional_identifier_code is not None:
pulumi.set(__self__, "functional_identifier_code", functional_identifier_code)
if header_version is not None:
pulumi.set(__self__, "header_version", header_version)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if protocol_version is not None:
pulumi.set(__self__, "protocol_version", protocol_version)
if receiver_application_id is not None:
pulumi.set(__self__, "receiver_application_id", receiver_application_id)
if responsible_agency_code is not None:
pulumi.set(__self__, "responsible_agency_code", responsible_agency_code)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
if time_format is not None:
pulumi.set(__self__, "time_format", time_format)
@property
@pulumi.getter(name="dateFormat")
def date_format(self) -> Optional[str]:
"""
The date format.
"""
return pulumi.get(self, "date_format")
@property
@pulumi.getter(name="functionalIdentifierCode")
def functional_identifier_code(self) -> Optional[str]:
"""
The functional identifier code.
"""
return pulumi.get(self, "functional_identifier_code")
@property
@pulumi.getter(name="headerVersion")
def header_version(self) -> Optional[str]:
"""
The header version.
"""
return pulumi.get(self, "header_version")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
        The message id on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="protocolVersion")
def protocol_version(self) -> Optional[str]:
"""
        The protocol version on which these envelope settings have to be applied.
"""
return pulumi.get(self, "protocol_version")
@property
@pulumi.getter(name="receiverApplicationId")
def receiver_application_id(self) -> Optional[str]:
"""
The receiver application id.
"""
return pulumi.get(self, "receiver_application_id")
@property
@pulumi.getter(name="responsibleAgencyCode")
def responsible_agency_code(self) -> Optional[int]:
"""
The responsible agency code.
"""
return pulumi.get(self, "responsible_agency_code")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[str]:
"""
        The target namespace on which these envelope settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
@property
@pulumi.getter(name="timeFormat")
def time_format(self) -> Optional[str]:
"""
The time format.
"""
return pulumi.get(self, "time_format")
@pulumi.output_type
class X12EnvelopeSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "controlStandardsId":
suggest = "control_standards_id"
elif key == "controlVersionNumber":
suggest = "control_version_number"
elif key == "enableDefaultGroupHeaders":
suggest = "enable_default_group_headers"
elif key == "functionalGroupId":
suggest = "functional_group_id"
elif key == "groupControlNumberLowerBound":
suggest = "group_control_number_lower_bound"
elif key == "groupControlNumberUpperBound":
suggest = "group_control_number_upper_bound"
elif key == "groupHeaderAgencyCode":
suggest = "group_header_agency_code"
elif key == "groupHeaderDateFormat":
suggest = "group_header_date_format"
elif key == "groupHeaderTimeFormat":
suggest = "group_header_time_format"
elif key == "groupHeaderVersion":
suggest = "group_header_version"
elif key == "interchangeControlNumberLowerBound":
suggest = "interchange_control_number_lower_bound"
elif key == "interchangeControlNumberUpperBound":
suggest = "interchange_control_number_upper_bound"
elif key == "overwriteExistingTransactionSetControlNumber":
suggest = "overwrite_existing_transaction_set_control_number"
elif key == "receiverApplicationId":
suggest = "receiver_application_id"
elif key == "rolloverGroupControlNumber":
suggest = "rollover_group_control_number"
elif key == "rolloverInterchangeControlNumber":
suggest = "rollover_interchange_control_number"
elif key == "rolloverTransactionSetControlNumber":
suggest = "rollover_transaction_set_control_number"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "transactionSetControlNumberLowerBound":
suggest = "transaction_set_control_number_lower_bound"
elif key == "transactionSetControlNumberPrefix":
suggest = "transaction_set_control_number_prefix"
elif key == "transactionSetControlNumberSuffix":
suggest = "transaction_set_control_number_suffix"
elif key == "transactionSetControlNumberUpperBound":
suggest = "transaction_set_control_number_upper_bound"
elif key == "usageIndicator":
suggest = "usage_indicator"
elif key == "useControlStandardsIdAsRepetitionCharacter":
suggest = "use_control_standards_id_as_repetition_character"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12EnvelopeSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12EnvelopeSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12EnvelopeSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
control_standards_id: Optional[int] = None,
control_version_number: Optional[str] = None,
enable_default_group_headers: Optional[bool] = None,
functional_group_id: Optional[str] = None,
group_control_number_lower_bound: Optional[int] = None,
group_control_number_upper_bound: Optional[int] = None,
group_header_agency_code: Optional[str] = None,
group_header_date_format: Optional[str] = None,
group_header_time_format: Optional[str] = None,
group_header_version: Optional[str] = None,
interchange_control_number_lower_bound: Optional[int] = None,
interchange_control_number_upper_bound: Optional[int] = None,
overwrite_existing_transaction_set_control_number: Optional[bool] = None,
receiver_application_id: Optional[str] = None,
rollover_group_control_number: Optional[bool] = None,
rollover_interchange_control_number: Optional[bool] = None,
rollover_transaction_set_control_number: Optional[bool] = None,
sender_application_id: Optional[str] = None,
transaction_set_control_number_lower_bound: Optional[int] = None,
transaction_set_control_number_prefix: Optional[str] = None,
transaction_set_control_number_suffix: Optional[str] = None,
transaction_set_control_number_upper_bound: Optional[int] = None,
usage_indicator: Optional[str] = None,
use_control_standards_id_as_repetition_character: Optional[bool] = None):
"""
        :param int control_standards_id: The control standards id.
:param str control_version_number: The control version number.
:param bool enable_default_group_headers: The value indicating whether to enable default group headers.
:param str functional_group_id: The functional group id.
:param int group_control_number_lower_bound: The group control number lower bound.
:param int group_control_number_upper_bound: The group control number upper bound.
:param str group_header_agency_code: The group header agency code.
:param str group_header_date_format: The group header date format.
:param str group_header_time_format: The group header time format.
:param str group_header_version: The group header version.
:param int interchange_control_number_lower_bound: The interchange control number lower bound.
:param int interchange_control_number_upper_bound: The interchange control number upper bound.
:param bool overwrite_existing_transaction_set_control_number: The value indicating whether to overwrite existing transaction set control number.
:param str receiver_application_id: The receiver application id.
:param bool rollover_group_control_number: The value indicating whether to rollover group control number.
:param bool rollover_interchange_control_number: The value indicating whether to rollover interchange control number.
:param bool rollover_transaction_set_control_number: The value indicating whether to rollover transaction set control number.
:param str sender_application_id: The sender application id.
:param int transaction_set_control_number_lower_bound: The transaction set control number lower bound.
:param str transaction_set_control_number_prefix: The transaction set control number prefix.
:param str transaction_set_control_number_suffix: The transaction set control number suffix.
:param int transaction_set_control_number_upper_bound: The transaction set control number upper bound.
:param str usage_indicator: The usage indicator.
:param bool use_control_standards_id_as_repetition_character: The value indicating whether to use control standards id as repetition character.
"""
if control_standards_id is not None:
pulumi.set(__self__, "control_standards_id", control_standards_id)
if control_version_number is not None:
pulumi.set(__self__, "control_version_number", control_version_number)
if enable_default_group_headers is not None:
pulumi.set(__self__, "enable_default_group_headers", enable_default_group_headers)
if functional_group_id is not None:
pulumi.set(__self__, "functional_group_id", functional_group_id)
if group_control_number_lower_bound is not None:
pulumi.set(__self__, "group_control_number_lower_bound", group_control_number_lower_bound)
if group_control_number_upper_bound is not None:
pulumi.set(__self__, "group_control_number_upper_bound", group_control_number_upper_bound)
if group_header_agency_code is not None:
pulumi.set(__self__, "group_header_agency_code", group_header_agency_code)
if group_header_date_format is not None:
pulumi.set(__self__, "group_header_date_format", group_header_date_format)
if group_header_time_format is not None:
pulumi.set(__self__, "group_header_time_format", group_header_time_format)
if group_header_version is not None:
pulumi.set(__self__, "group_header_version", group_header_version)
if interchange_control_number_lower_bound is not None:
pulumi.set(__self__, "interchange_control_number_lower_bound", interchange_control_number_lower_bound)
if interchange_control_number_upper_bound is not None:
pulumi.set(__self__, "interchange_control_number_upper_bound", interchange_control_number_upper_bound)
if overwrite_existing_transaction_set_control_number is not None:
pulumi.set(__self__, "overwrite_existing_transaction_set_control_number", overwrite_existing_transaction_set_control_number)
if receiver_application_id is not None:
pulumi.set(__self__, "receiver_application_id", receiver_application_id)
if rollover_group_control_number is not None:
pulumi.set(__self__, "rollover_group_control_number", rollover_group_control_number)
if rollover_interchange_control_number is not None:
pulumi.set(__self__, "rollover_interchange_control_number", rollover_interchange_control_number)
if rollover_transaction_set_control_number is not None:
pulumi.set(__self__, "rollover_transaction_set_control_number", rollover_transaction_set_control_number)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
if transaction_set_control_number_lower_bound is not None:
pulumi.set(__self__, "transaction_set_control_number_lower_bound", transaction_set_control_number_lower_bound)
if transaction_set_control_number_prefix is not None:
pulumi.set(__self__, "transaction_set_control_number_prefix", transaction_set_control_number_prefix)
if transaction_set_control_number_suffix is not None:
pulumi.set(__self__, "transaction_set_control_number_suffix", transaction_set_control_number_suffix)
if transaction_set_control_number_upper_bound is not None:
pulumi.set(__self__, "transaction_set_control_number_upper_bound", transaction_set_control_number_upper_bound)
if usage_indicator is not None:
pulumi.set(__self__, "usage_indicator", usage_indicator)
if use_control_standards_id_as_repetition_character is not None:
pulumi.set(__self__, "use_control_standards_id_as_repetition_character", use_control_standards_id_as_repetition_character)
@property
@pulumi.getter(name="controlStandardsId")
def control_standards_id(self) -> Optional[int]:
"""
        The control standards id.
"""
return pulumi.get(self, "control_standards_id")
@property
@pulumi.getter(name="controlVersionNumber")
def control_version_number(self) -> Optional[str]:
"""
The control version number.
"""
return pulumi.get(self, "control_version_number")
@property
@pulumi.getter(name="enableDefaultGroupHeaders")
def enable_default_group_headers(self) -> Optional[bool]:
"""
The value indicating whether to enable default group headers.
"""
return pulumi.get(self, "enable_default_group_headers")
@property
@pulumi.getter(name="functionalGroupId")
def functional_group_id(self) -> Optional[str]:
"""
The functional group id.
"""
return pulumi.get(self, "functional_group_id")
@property
@pulumi.getter(name="groupControlNumberLowerBound")
def group_control_number_lower_bound(self) -> Optional[int]:
"""
The group control number lower bound.
"""
return pulumi.get(self, "group_control_number_lower_bound")
@property
@pulumi.getter(name="groupControlNumberUpperBound")
def group_control_number_upper_bound(self) -> Optional[int]:
"""
The group control number upper bound.
"""
return pulumi.get(self, "group_control_number_upper_bound")
@property
@pulumi.getter(name="groupHeaderAgencyCode")
def group_header_agency_code(self) -> Optional[str]:
"""
The group header agency code.
"""
return pulumi.get(self, "group_header_agency_code")
@property
@pulumi.getter(name="groupHeaderDateFormat")
def group_header_date_format(self) -> Optional[str]:
"""
The group header date format.
"""
return pulumi.get(self, "group_header_date_format")
@property
@pulumi.getter(name="groupHeaderTimeFormat")
def group_header_time_format(self) -> Optional[str]:
"""
The group header time format.
"""
return pulumi.get(self, "group_header_time_format")
@property
@pulumi.getter(name="groupHeaderVersion")
def group_header_version(self) -> Optional[str]:
"""
The group header version.
"""
return pulumi.get(self, "group_header_version")
@property
@pulumi.getter(name="interchangeControlNumberLowerBound")
def interchange_control_number_lower_bound(self) -> Optional[int]:
"""
The interchange control number lower bound.
"""
return pulumi.get(self, "interchange_control_number_lower_bound")
@property
@pulumi.getter(name="interchangeControlNumberUpperBound")
def interchange_control_number_upper_bound(self) -> Optional[int]:
"""
The interchange control number upper bound.
"""
return pulumi.get(self, "interchange_control_number_upper_bound")
@property
@pulumi.getter(name="overwriteExistingTransactionSetControlNumber")
def overwrite_existing_transaction_set_control_number(self) -> Optional[bool]:
"""
The value indicating whether to overwrite existing transaction set control number.
"""
return pulumi.get(self, "overwrite_existing_transaction_set_control_number")
@property
@pulumi.getter(name="receiverApplicationId")
def receiver_application_id(self) -> Optional[str]:
"""
The receiver application id.
"""
return pulumi.get(self, "receiver_application_id")
@property
@pulumi.getter(name="rolloverGroupControlNumber")
def rollover_group_control_number(self) -> Optional[bool]:
"""
The value indicating whether to rollover group control number.
"""
return pulumi.get(self, "rollover_group_control_number")
@property
@pulumi.getter(name="rolloverInterchangeControlNumber")
def rollover_interchange_control_number(self) -> Optional[bool]:
"""
The value indicating whether to rollover interchange control number.
"""
return pulumi.get(self, "rollover_interchange_control_number")
@property
@pulumi.getter(name="rolloverTransactionSetControlNumber")
def rollover_transaction_set_control_number(self) -> Optional[bool]:
"""
The value indicating whether to rollover transaction set control number.
"""
return pulumi.get(self, "rollover_transaction_set_control_number")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="transactionSetControlNumberLowerBound")
def transaction_set_control_number_lower_bound(self) -> Optional[int]:
"""
The transaction set control number lower bound.
"""
return pulumi.get(self, "transaction_set_control_number_lower_bound")
@property
@pulumi.getter(name="transactionSetControlNumberPrefix")
def transaction_set_control_number_prefix(self) -> Optional[str]:
"""
The transaction set control number prefix.
"""
return pulumi.get(self, "transaction_set_control_number_prefix")
@property
@pulumi.getter(name="transactionSetControlNumberSuffix")
def transaction_set_control_number_suffix(self) -> Optional[str]:
"""
The transaction set control number suffix.
"""
return pulumi.get(self, "transaction_set_control_number_suffix")
@property
@pulumi.getter(name="transactionSetControlNumberUpperBound")
def transaction_set_control_number_upper_bound(self) -> Optional[int]:
"""
The transaction set control number upper bound.
"""
return pulumi.get(self, "transaction_set_control_number_upper_bound")
@property
@pulumi.getter(name="usageIndicator")
def usage_indicator(self) -> Optional[str]:
"""
The usage indicator.
"""
return pulumi.get(self, "usage_indicator")
@property
@pulumi.getter(name="useControlStandardsIdAsRepetitionCharacter")
def use_control_standards_id_as_repetition_character(self) -> Optional[bool]:
"""
The value indicating whether to use control standards id as repetition character.
"""
return pulumi.get(self, "use_control_standards_id_as_repetition_character")
@pulumi.output_type
class X12FramingSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "characterSet":
suggest = "character_set"
elif key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "replaceCharacter":
suggest = "replace_character"
elif key == "replaceSeparatorsInPayload":
suggest = "replace_separators_in_payload"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12FramingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12FramingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12FramingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
character_set: Optional[str] = None,
component_separator: Optional[int] = None,
data_element_separator: Optional[int] = None,
replace_character: Optional[int] = None,
replace_separators_in_payload: Optional[bool] = None,
segment_terminator: Optional[int] = None,
segment_terminator_suffix: Optional[str] = None):
"""
:param str character_set: The X12 character set.
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
:param int replace_character: The replacement character.
:param bool replace_separators_in_payload: The value indicating whether to replace separators in payload.
:param int segment_terminator: The segment terminator.
:param str segment_terminator_suffix: The segment terminator suffix.
"""
if character_set is not None:
pulumi.set(__self__, "character_set", character_set)
if component_separator is not None:
pulumi.set(__self__, "component_separator", component_separator)
if data_element_separator is not None:
pulumi.set(__self__, "data_element_separator", data_element_separator)
if replace_character is not None:
pulumi.set(__self__, "replace_character", replace_character)
if replace_separators_in_payload is not None:
pulumi.set(__self__, "replace_separators_in_payload", replace_separators_in_payload)
if segment_terminator is not None:
pulumi.set(__self__, "segment_terminator", segment_terminator)
if segment_terminator_suffix is not None:
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
@property
@pulumi.getter(name="characterSet")
def character_set(self) -> Optional[str]:
"""
The X12 character set.
"""
return pulumi.get(self, "character_set")
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> Optional[int]:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> Optional[int]:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="replaceCharacter")
def replace_character(self) -> Optional[int]:
"""
The replacement character.
"""
return pulumi.get(self, "replace_character")
@property
@pulumi.getter(name="replaceSeparatorsInPayload")
def replace_separators_in_payload(self) -> Optional[bool]:
"""
The value indicating whether to replace separators in payload.
"""
return pulumi.get(self, "replace_separators_in_payload")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> Optional[int]:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> Optional[str]:
"""
The segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@pulumi.output_type
class X12MessageFilterResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageFilterType":
suggest = "message_filter_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12MessageFilterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12MessageFilterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12MessageFilterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_filter_type: Optional[str] = None):
"""
:param str message_filter_type: The message filter type.
"""
if message_filter_type is not None:
pulumi.set(__self__, "message_filter_type", message_filter_type)
@property
@pulumi.getter(name="messageFilterType")
def message_filter_type(self) -> Optional[str]:
"""
The message filter type.
"""
return pulumi.get(self, "message_filter_type")
@pulumi.output_type
class X12MessageIdentifierResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageId":
suggest = "message_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12MessageIdentifierResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12MessageIdentifierResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12MessageIdentifierResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_id: Optional[str] = None):
"""
:param str message_id: The message id.
"""
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@pulumi.output_type
class X12OneWayAgreementResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "protocolSettings":
suggest = "protocol_settings"
elif key == "receiverBusinessIdentity":
suggest = "receiver_business_identity"
elif key == "senderBusinessIdentity":
suggest = "sender_business_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12OneWayAgreementResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12OneWayAgreementResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12OneWayAgreementResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
protocol_settings: Optional['outputs.X12ProtocolSettingsResponse'] = None,
receiver_business_identity: Optional['outputs.BusinessIdentityResponse'] = None,
sender_business_identity: Optional['outputs.BusinessIdentityResponse'] = None):
"""
:param 'X12ProtocolSettingsResponse' protocol_settings: The X12 protocol settings.
        :param 'BusinessIdentityResponse' receiver_business_identity: The receiver business identity.
        :param 'BusinessIdentityResponse' sender_business_identity: The sender business identity.
"""
if protocol_settings is not None:
pulumi.set(__self__, "protocol_settings", protocol_settings)
if receiver_business_identity is not None:
pulumi.set(__self__, "receiver_business_identity", receiver_business_identity)
if sender_business_identity is not None:
pulumi.set(__self__, "sender_business_identity", sender_business_identity)
@property
@pulumi.getter(name="protocolSettings")
def protocol_settings(self) -> Optional['outputs.X12ProtocolSettingsResponse']:
"""
The X12 protocol settings.
"""
return pulumi.get(self, "protocol_settings")
@property
@pulumi.getter(name="receiverBusinessIdentity")
def receiver_business_identity(self) -> Optional['outputs.BusinessIdentityResponse']:
"""
        The receiver business identity.
"""
return pulumi.get(self, "receiver_business_identity")
@property
@pulumi.getter(name="senderBusinessIdentity")
def sender_business_identity(self) -> Optional['outputs.BusinessIdentityResponse']:
"""
        The sender business identity.
"""
return pulumi.get(self, "sender_business_identity")
@pulumi.output_type
class X12ProcessingSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "convertImpliedDecimal":
suggest = "convert_implied_decimal"
elif key == "createEmptyXmlTagsForTrailingSeparators":
suggest = "create_empty_xml_tags_for_trailing_separators"
elif key == "maskSecurityInfo":
suggest = "mask_security_info"
elif key == "preserveInterchange":
suggest = "preserve_interchange"
elif key == "suspendInterchangeOnError":
suggest = "suspend_interchange_on_error"
elif key == "useDotAsDecimalSeparator":
suggest = "use_dot_as_decimal_separator"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ProcessingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ProcessingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ProcessingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
convert_implied_decimal: Optional[bool] = None,
create_empty_xml_tags_for_trailing_separators: Optional[bool] = None,
mask_security_info: Optional[bool] = None,
preserve_interchange: Optional[bool] = None,
suspend_interchange_on_error: Optional[bool] = None,
use_dot_as_decimal_separator: Optional[bool] = None):
"""
:param bool convert_implied_decimal: The value indicating whether to convert numerical type to implied decimal.
:param bool create_empty_xml_tags_for_trailing_separators: The value indicating whether to create empty xml tags for trailing separators.
:param bool mask_security_info: The value indicating whether to mask security information.
:param bool preserve_interchange: The value indicating whether to preserve interchange.
:param bool suspend_interchange_on_error: The value indicating whether to suspend interchange on error.
:param bool use_dot_as_decimal_separator: The value indicating whether to use dot as decimal separator.
"""
if convert_implied_decimal is not None:
pulumi.set(__self__, "convert_implied_decimal", convert_implied_decimal)
if create_empty_xml_tags_for_trailing_separators is not None:
pulumi.set(__self__, "create_empty_xml_tags_for_trailing_separators", create_empty_xml_tags_for_trailing_separators)
if mask_security_info is not None:
pulumi.set(__self__, "mask_security_info", mask_security_info)
if preserve_interchange is not None:
pulumi.set(__self__, "preserve_interchange", preserve_interchange)
if suspend_interchange_on_error is not None:
pulumi.set(__self__, "suspend_interchange_on_error", suspend_interchange_on_error)
if use_dot_as_decimal_separator is not None:
pulumi.set(__self__, "use_dot_as_decimal_separator", use_dot_as_decimal_separator)
@property
@pulumi.getter(name="convertImpliedDecimal")
def convert_implied_decimal(self) -> Optional[bool]:
"""
The value indicating whether to convert numerical type to implied decimal.
"""
return pulumi.get(self, "convert_implied_decimal")
@property
@pulumi.getter(name="createEmptyXmlTagsForTrailingSeparators")
def create_empty_xml_tags_for_trailing_separators(self) -> Optional[bool]:
"""
The value indicating whether to create empty xml tags for trailing separators.
"""
return pulumi.get(self, "create_empty_xml_tags_for_trailing_separators")
@property
@pulumi.getter(name="maskSecurityInfo")
def mask_security_info(self) -> Optional[bool]:
"""
The value indicating whether to mask security information.
"""
return pulumi.get(self, "mask_security_info")
@property
@pulumi.getter(name="preserveInterchange")
def preserve_interchange(self) -> Optional[bool]:
"""
The value indicating whether to preserve interchange.
"""
return pulumi.get(self, "preserve_interchange")
@property
@pulumi.getter(name="suspendInterchangeOnError")
def suspend_interchange_on_error(self) -> Optional[bool]:
"""
The value indicating whether to suspend interchange on error.
"""
return pulumi.get(self, "suspend_interchange_on_error")
@property
@pulumi.getter(name="useDotAsDecimalSeparator")
def use_dot_as_decimal_separator(self) -> Optional[bool]:
"""
The value indicating whether to use dot as decimal separator.
"""
return pulumi.get(self, "use_dot_as_decimal_separator")
@pulumi.output_type
class X12ProtocolSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementSettings":
suggest = "acknowledgement_settings"
elif key == "envelopeOverrides":
suggest = "envelope_overrides"
elif key == "envelopeSettings":
suggest = "envelope_settings"
elif key == "framingSettings":
suggest = "framing_settings"
elif key == "messageFilter":
suggest = "message_filter"
elif key == "messageFilterList":
suggest = "message_filter_list"
elif key == "processingSettings":
suggest = "processing_settings"
elif key == "schemaReferences":
suggest = "schema_references"
elif key == "securitySettings":
suggest = "security_settings"
elif key == "validationOverrides":
suggest = "validation_overrides"
elif key == "validationSettings":
suggest = "validation_settings"
elif key == "x12DelimiterOverrides":
suggest = "x12_delimiter_overrides"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ProtocolSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ProtocolSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ProtocolSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_settings: Optional['outputs.X12AcknowledgementSettingsResponse'] = None,
envelope_overrides: Optional[Sequence['outputs.X12EnvelopeOverrideResponse']] = None,
envelope_settings: Optional['outputs.X12EnvelopeSettingsResponse'] = None,
framing_settings: Optional['outputs.X12FramingSettingsResponse'] = None,
message_filter: Optional['outputs.X12MessageFilterResponse'] = None,
message_filter_list: Optional[Sequence['outputs.X12MessageIdentifierResponse']] = None,
processing_settings: Optional['outputs.X12ProcessingSettingsResponse'] = None,
schema_references: Optional[Sequence['outputs.X12SchemaReferenceResponse']] = None,
security_settings: Optional['outputs.X12SecuritySettingsResponse'] = None,
validation_overrides: Optional[Sequence['outputs.X12ValidationOverrideResponse']] = None,
validation_settings: Optional['outputs.X12ValidationSettingsResponse'] = None,
x12_delimiter_overrides: Optional[Sequence['outputs.X12DelimiterOverridesResponse']] = None):
"""
        :param 'X12AcknowledgementSettingsResponse' acknowledgement_settings: The X12 acknowledgement settings.
:param Sequence['X12EnvelopeOverrideResponse'] envelope_overrides: The X12 envelope override settings.
:param 'X12EnvelopeSettingsResponse' envelope_settings: The X12 envelope settings.
:param 'X12FramingSettingsResponse' framing_settings: The X12 framing settings.
:param 'X12MessageFilterResponse' message_filter: The X12 message filter.
:param Sequence['X12MessageIdentifierResponse'] message_filter_list: The X12 message filter list.
:param 'X12ProcessingSettingsResponse' processing_settings: The X12 processing settings.
:param Sequence['X12SchemaReferenceResponse'] schema_references: The X12 schema references.
:param 'X12SecuritySettingsResponse' security_settings: The X12 security settings.
:param Sequence['X12ValidationOverrideResponse'] validation_overrides: The X12 validation override settings.
:param 'X12ValidationSettingsResponse' validation_settings: The X12 validation settings.
:param Sequence['X12DelimiterOverridesResponse'] x12_delimiter_overrides: The X12 delimiter override settings.
"""
if acknowledgement_settings is not None:
pulumi.set(__self__, "acknowledgement_settings", acknowledgement_settings)
if envelope_overrides is not None:
pulumi.set(__self__, "envelope_overrides", envelope_overrides)
if envelope_settings is not None:
pulumi.set(__self__, "envelope_settings", envelope_settings)
if framing_settings is not None:
pulumi.set(__self__, "framing_settings", framing_settings)
if message_filter is not None:
pulumi.set(__self__, "message_filter", message_filter)
if message_filter_list is not None:
pulumi.set(__self__, "message_filter_list", message_filter_list)
if processing_settings is not None:
pulumi.set(__self__, "processing_settings", processing_settings)
if schema_references is not None:
pulumi.set(__self__, "schema_references", schema_references)
if security_settings is not None:
pulumi.set(__self__, "security_settings", security_settings)
if validation_overrides is not None:
pulumi.set(__self__, "validation_overrides", validation_overrides)
if validation_settings is not None:
pulumi.set(__self__, "validation_settings", validation_settings)
if x12_delimiter_overrides is not None:
pulumi.set(__self__, "x12_delimiter_overrides", x12_delimiter_overrides)
@property
@pulumi.getter(name="acknowledgementSettings")
def acknowledgement_settings(self) -> Optional['outputs.X12AcknowledgementSettingsResponse']:
"""
        The X12 acknowledgement settings.
"""
return pulumi.get(self, "acknowledgement_settings")
@property
@pulumi.getter(name="envelopeOverrides")
def envelope_overrides(self) -> Optional[Sequence['outputs.X12EnvelopeOverrideResponse']]:
"""
The X12 envelope override settings.
"""
return pulumi.get(self, "envelope_overrides")
@property
@pulumi.getter(name="envelopeSettings")
def envelope_settings(self) -> Optional['outputs.X12EnvelopeSettingsResponse']:
"""
The X12 envelope settings.
"""
return pulumi.get(self, "envelope_settings")
@property
@pulumi.getter(name="framingSettings")
def framing_settings(self) -> Optional['outputs.X12FramingSettingsResponse']:
"""
The X12 framing settings.
"""
return pulumi.get(self, "framing_settings")
@property
@pulumi.getter(name="messageFilter")
def message_filter(self) -> Optional['outputs.X12MessageFilterResponse']:
"""
The X12 message filter.
"""
return pulumi.get(self, "message_filter")
@property
@pulumi.getter(name="messageFilterList")
def message_filter_list(self) -> Optional[Sequence['outputs.X12MessageIdentifierResponse']]:
"""
The X12 message filter list.
"""
return pulumi.get(self, "message_filter_list")
@property
@pulumi.getter(name="processingSettings")
def processing_settings(self) -> Optional['outputs.X12ProcessingSettingsResponse']:
"""
The X12 processing settings.
"""
return pulumi.get(self, "processing_settings")
@property
@pulumi.getter(name="schemaReferences")
def schema_references(self) -> Optional[Sequence['outputs.X12SchemaReferenceResponse']]:
"""
The X12 schema references.
"""
return pulumi.get(self, "schema_references")
@property
@pulumi.getter(name="securitySettings")
def security_settings(self) -> Optional['outputs.X12SecuritySettingsResponse']:
"""
The X12 security settings.
"""
return pulumi.get(self, "security_settings")
@property
@pulumi.getter(name="validationOverrides")
def validation_overrides(self) -> Optional[Sequence['outputs.X12ValidationOverrideResponse']]:
"""
The X12 validation override settings.
"""
return pulumi.get(self, "validation_overrides")
@property
@pulumi.getter(name="validationSettings")
def validation_settings(self) -> Optional['outputs.X12ValidationSettingsResponse']:
"""
The X12 validation settings.
"""
return pulumi.get(self, "validation_settings")
@property
@pulumi.getter(name="x12DelimiterOverrides")
def x12_delimiter_overrides(self) -> Optional[Sequence['outputs.X12DelimiterOverridesResponse']]:
"""
The X12 delimiter override settings.
"""
return pulumi.get(self, "x12_delimiter_overrides")
@pulumi.output_type
class X12SchemaReferenceResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageId":
suggest = "message_id"
elif key == "schemaName":
suggest = "schema_name"
elif key == "schemaVersion":
suggest = "schema_version"
elif key == "senderApplicationId":
suggest = "sender_application_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12SchemaReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12SchemaReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12SchemaReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_id: Optional[str] = None,
schema_name: Optional[str] = None,
schema_version: Optional[str] = None,
sender_application_id: Optional[str] = None):
"""
:param str message_id: The message id.
:param str schema_name: The schema name.
:param str schema_version: The schema version.
:param str sender_application_id: The sender application id.
"""
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if schema_name is not None:
pulumi.set(__self__, "schema_name", schema_name)
if schema_version is not None:
pulumi.set(__self__, "schema_version", schema_version)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="schemaName")
def schema_name(self) -> Optional[str]:
"""
The schema name.
"""
return pulumi.get(self, "schema_name")
@property
@pulumi.getter(name="schemaVersion")
def schema_version(self) -> Optional[str]:
"""
The schema version.
"""
return pulumi.get(self, "schema_version")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@pulumi.output_type
class X12SecuritySettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authorizationQualifier":
suggest = "authorization_qualifier"
elif key == "authorizationValue":
suggest = "authorization_value"
elif key == "passwordValue":
suggest = "password_value"
elif key == "securityQualifier":
suggest = "security_qualifier"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12SecuritySettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12SecuritySettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12SecuritySettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
authorization_qualifier: Optional[str] = None,
authorization_value: Optional[str] = None,
password_value: Optional[str] = None,
security_qualifier: Optional[str] = None):
"""
:param str authorization_qualifier: The authorization qualifier.
:param str authorization_value: The authorization value.
:param str password_value: The password value.
:param str security_qualifier: The security qualifier.
"""
if authorization_qualifier is not None:
pulumi.set(__self__, "authorization_qualifier", authorization_qualifier)
if authorization_value is not None:
pulumi.set(__self__, "authorization_value", authorization_value)
if password_value is not None:
pulumi.set(__self__, "password_value", password_value)
if security_qualifier is not None:
pulumi.set(__self__, "security_qualifier", security_qualifier)
@property
@pulumi.getter(name="authorizationQualifier")
def authorization_qualifier(self) -> Optional[str]:
"""
The authorization qualifier.
"""
return pulumi.get(self, "authorization_qualifier")
@property
@pulumi.getter(name="authorizationValue")
def authorization_value(self) -> Optional[str]:
"""
The authorization value.
"""
return pulumi.get(self, "authorization_value")
@property
@pulumi.getter(name="passwordValue")
def password_value(self) -> Optional[str]:
"""
The password value.
"""
return pulumi.get(self, "password_value")
@property
@pulumi.getter(name="securityQualifier")
def security_qualifier(self) -> Optional[str]:
"""
The security qualifier.
"""
return pulumi.get(self, "security_qualifier")
@pulumi.output_type
class X12ValidationOverrideResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "messageId":
suggest = "message_id"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateCharacterSet":
suggest = "validate_character_set"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ValidationOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ValidationOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ValidationOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
message_id: Optional[str] = None,
trailing_separator_policy: Optional[str] = None,
trim_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
validate_character_set: Optional[bool] = None,
validate_edi_types: Optional[bool] = None,
validate_xsd_types: Optional[bool] = None):
"""
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
        :param str message_id: The message id on which the validation settings have to be applied.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
        :param bool validate_character_set: The value indicating whether to validate character set.
:param bool validate_edi_types: The value indicating whether to validate EDI types.
:param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
if allow_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if trailing_separator_policy is not None:
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
if trim_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
if validate_character_set is not None:
pulumi.set(__self__, "validate_character_set", validate_character_set)
if validate_edi_types is not None:
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
if validate_xsd_types is not None:
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
        The message id on which the validation settings have to be applied.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> Optional[str]:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateCharacterSet")
def validate_character_set(self) -> Optional[bool]:
"""
        The value indicating whether to validate character set.
"""
return pulumi.get(self, "validate_character_set")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> Optional[bool]:
"""
The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> Optional[bool]:
"""
The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
@pulumi.output_type
class X12ValidationSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "checkDuplicateGroupControlNumber":
suggest = "check_duplicate_group_control_number"
elif key == "checkDuplicateInterchangeControlNumber":
suggest = "check_duplicate_interchange_control_number"
elif key == "checkDuplicateTransactionSetControlNumber":
suggest = "check_duplicate_transaction_set_control_number"
elif key == "interchangeControlNumberValidityDays":
suggest = "interchange_control_number_validity_days"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateCharacterSet":
suggest = "validate_character_set"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ValidationSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ValidationSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ValidationSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
check_duplicate_group_control_number: Optional[bool] = None,
check_duplicate_interchange_control_number: Optional[bool] = None,
check_duplicate_transaction_set_control_number: Optional[bool] = None,
interchange_control_number_validity_days: Optional[int] = None,
trailing_separator_policy: Optional[str] = None,
trim_leading_and_trailing_spaces_and_zeroes: Optional[bool] = None,
validate_character_set: Optional[bool] = None,
validate_edi_types: Optional[bool] = None,
validate_xsd_types: Optional[bool] = None):
"""
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
:param bool check_duplicate_group_control_number: The value indicating whether to check for duplicate group control number.
:param bool check_duplicate_interchange_control_number: The value indicating whether to check for duplicate interchange control number.
:param bool check_duplicate_transaction_set_control_number: The value indicating whether to check for duplicate transaction set control number.
:param int interchange_control_number_validity_days: The validity period of interchange control number.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
:param bool validate_character_set: The value indicating whether to validate character set in the message.
        :param bool validate_edi_types: The value indicating whether to validate EDI types.
        :param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
if allow_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
if check_duplicate_group_control_number is not None:
pulumi.set(__self__, "check_duplicate_group_control_number", check_duplicate_group_control_number)
if check_duplicate_interchange_control_number is not None:
pulumi.set(__self__, "check_duplicate_interchange_control_number", check_duplicate_interchange_control_number)
if check_duplicate_transaction_set_control_number is not None:
pulumi.set(__self__, "check_duplicate_transaction_set_control_number", check_duplicate_transaction_set_control_number)
if interchange_control_number_validity_days is not None:
pulumi.set(__self__, "interchange_control_number_validity_days", interchange_control_number_validity_days)
if trailing_separator_policy is not None:
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
if trim_leading_and_trailing_spaces_and_zeroes is not None:
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
if validate_character_set is not None:
pulumi.set(__self__, "validate_character_set", validate_character_set)
if validate_edi_types is not None:
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
if validate_xsd_types is not None:
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="checkDuplicateGroupControlNumber")
def check_duplicate_group_control_number(self) -> Optional[bool]:
"""
The value indicating whether to check for duplicate group control number.
"""
return pulumi.get(self, "check_duplicate_group_control_number")
@property
@pulumi.getter(name="checkDuplicateInterchangeControlNumber")
def check_duplicate_interchange_control_number(self) -> Optional[bool]:
"""
The value indicating whether to check for duplicate interchange control number.
"""
return pulumi.get(self, "check_duplicate_interchange_control_number")
@property
@pulumi.getter(name="checkDuplicateTransactionSetControlNumber")
def check_duplicate_transaction_set_control_number(self) -> Optional[bool]:
"""
The value indicating whether to check for duplicate transaction set control number.
"""
return pulumi.get(self, "check_duplicate_transaction_set_control_number")
@property
@pulumi.getter(name="interchangeControlNumberValidityDays")
def interchange_control_number_validity_days(self) -> Optional[int]:
"""
The validity period of interchange control number.
"""
return pulumi.get(self, "interchange_control_number_validity_days")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> Optional[str]:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> Optional[bool]:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateCharacterSet")
def validate_character_set(self) -> Optional[bool]:
"""
The value indicating whether to validate character set in the message.
"""
return pulumi.get(self, "validate_character_set")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> Optional[bool]:
"""
        The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> Optional[bool]:
"""
        The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
|
py | 7df6d625bd5271b148fde1042196fbd0f3e3dd21 | from neurotorch.augmentations.augmentation import Augmentation
from neurotorch.datasets.dataset import Data
import random
import numpy as np
from scipy.ndimage.filters import convolve
class Duplicate(Augmentation):
def __init__(self, volume, max_slices=20, **kwargs):
self.setMaxSlices(max_slices)
super().__init__(volume, **kwargs)
def augment(self, bounding_box):
slices = self.getSlices()
end = bounding_box.getSize().getComponents()[0]
location = random.randrange(end-slices)
raw_data, label_data = self.getParent().get(bounding_box)
augmented_raw, augmented_label = self.duplication(raw_data, label_data,
location=location,
slices=slices)
return (augmented_raw, augmented_label)
def setMaxSlices(self, max_slices):
self.max_slices = max_slices
def getMaxSlices(self):
return self.max_slices
def getSlices(self):
return random.randrange(2, self.getMaxSlices())
def duplication(self, raw_data, label_data, location=20, slices=3,
axis=0):
raw = raw_data.getArray()
distorted_raw = raw.copy()
noise = raw[:, :, location:location+slices]
noise = noise - convolve(noise, weights=np.full((3, 3, 3), 1.0/27))
duplicate_slices = np.repeat(raw[:, :, location].reshape(raw.shape[0],
raw.shape[1],
1),
slices, axis=2)
duplicate_slices += noise
distorted_raw[:, :, location:location+slices] = duplicate_slices
augmented_raw_data = Data(distorted_raw, raw_data.getBoundingBox())
return augmented_raw_data, label_data
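
# --- Illustrative sketch (not part of the original module): the same slice
# duplication applied to a plain NumPy array, with made-up shapes and values.
#
#   import numpy as np
#   from scipy.ndimage.filters import convolve
#
#   raw = np.random.rand(64, 64, 16)                # fake 3-D volume
#   location, slices = 4, 3
#   noise = raw[:, :, location:location + slices]
#   noise = noise - convolve(noise, weights=np.full((3, 3, 3), 1.0 / 27))
#   dup = np.repeat(raw[:, :, location][:, :, None], slices, axis=2) + noise
#   raw[:, :, location:location + slices] = dup     # duplicated slices + noise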
|
py | 7df6d732115ee27ea6a8206008a747799f9e1850 | import sys
(F_LINK,
F_LINKABLE,
F_OPTIONAL) = [ 1<<x for x in range(3) ]
def AddVersion(version, ns, versionId='', isLegacy=0, serviceNs=''):
isLegacy = "true" if isLegacy else "false"
builder = []
builder.append("add_version(")
ParseStrings(builder, [version, ns, versionId])
builder.append(", %s, " % isLegacy)
ParseString(builder, serviceNs)
builder.append(")")
print "".join(builder)
def AddVersionParent(version, parent):
builder = []
builder.append("add_version_parent(")
ParseStrings(builder, [version, parent])
builder.append(")")
print "".join(builder)
def CreateManagedType(vmodlName, wsdlName, parent, version, props, methods):
builder = []
builder.append("create_managed_type(")
ParseStrings(builder, [vmodlName, wsdlName, parent, version])
builder.append(", ")
ParseProps(builder, props)
builder.append(", ")
ParseMethods(builder, methods)
builder.append(")")
print "".join(builder)
def CreateDataType(vmodlName, wsdlName, parent, version, props):
builder = []
builder.append("create_data_type(")
ParseStrings(builder, [vmodlName, wsdlName, parent, version])
builder.append(", ")
ParseProps(builder, props)
builder.append(")")
print "".join(builder)
def CreateEnumType(vmodlName, wsdlName, version, values):
builder = []
builder.append("create_enum_type(")
ParseStrings(builder, [vmodlName, wsdlName, version])
builder.append(", [")
ParseStrings(builder, values)
builder.append("])")
print "".join(builder)
def ParseFlags(builder, flags):
builder.append("{")
flags_builder = []
if flags & F_LINK:
flags_builder.append(":link => true")
if flags & F_LINKABLE:
flags_builder.append(":linkable => true")
if flags & F_OPTIONAL:
flags_builder.append(":optional => true")
builder.append(", ".join(flags_builder))
builder.append("}")
def ParseProps(builder, props):
if props is None:
builder.append("nil")
else:
entries = []
for p in props:
name, typeName, propVersion, flags = p[:4]
privId = len(p) == 5
entry_builder = []
entry_builder.append("[")
ParseString(entry_builder, name)
entry_builder.append(", ")
ParseString(entry_builder, typeName)
entry_builder.append(", ")
ParseString(entry_builder, propVersion)
entry_builder.append(", ")
ParseFlags(entry_builder, flags)
if privId:
entry_builder.append(", ")
ParseString(entry_builder, p[4])
entry_builder.append("]")
entries.append("".join(entry_builder))
builder.append("[")
builder.append(", ".join(entries))
builder.append("]")
def ParseString(builder, string):
    if string is None:
builder.append("nil")
else:
builder.append("\"%s\"" % string)
def ParseStrings(builder, strings):
entries = []
for string in strings:
ParseString(entries, string)
builder.append(", ".join(entries))
def ParseMethods(builder, methods):
if methods is None:
builder.append("nil")
else:
entries = []
for (mVmodl, mWsdl, mVersion, mParams, mResult, mPrivilege, mFaults) in methods:
entry_builder = []
entry_builder.append("[")
ParseStrings(entry_builder, [mVmodl, mWsdl, mVersion])
entry_builder.append(", ")
ParseProps(entry_builder, mParams)
entry_builder.append(", [")
resultFlags, resultName, methodResultName = mResult
ParseFlags(entry_builder, resultFlags)
entry_builder.append(", ")
ParseString(entry_builder, resultName)
entry_builder.append(", ")
ParseString(entry_builder, methodResultName)
entry_builder.append("], ")
ParseString(entry_builder, mPrivilege)
entry_builder.append(", ")
if mFaults is None:
entry_builder.append("nil")
else:
faults_builder = []
for fault in mFaults:
ParseString(faults_builder, fault)
entry_builder.append("[")
entry_builder.append(", ".join(faults_builder))
entry_builder.append("]")
entry_builder.append("]")
entries.append("".join(entry_builder))
builder.append("[")
builder.append(", ".join(entries))
builder.append("]")
|
py | 7df6d7c42167ad549680ffd89d2078cdb3e97aa7 |
from . import TagContainer
from .text.TextNode import Line
from .text.CommentNode import CommentNode
from .text.TagsubCommentNode import TagsubCommentNode
from ..exceptions import TagsubTemplateSyntaxError
class TagAlternateChoice(TagContainer.TagContainer):
def __init__(self, tagchar, template):
super().__init__(tagchar, template)
# Create alternate choice structure.
self._alternateChoices = []
def addAlternate(self, alternateChoice):
if self.tagchar != alternateChoice.tagchar:
raise TagsubTemplateSyntaxError("Mismatched tagchar for %s tag" % alternateChoice.tag, tag=alternateChoice)
self._alternateChoices.append(alternateChoice)
# FIXME Verify this is actually workable and that IfTagContainer and CaseTag do not need different code to do tracebacks correctly
alternateChoice.parent = self
def addChild(self, node):
if not self._alternateChoices:
# I think this can only happen with a case tag between it and the
# first option tag.
            if isinstance(node, (Line, TagsubCommentNode, CommentNode)):
return
else:
raise TagsubTemplateSyntaxError("Misplaced tag", tag=node)
else:
self._alternateChoices[-1].addChild(node)
# These methods may be implemented entirely differently for the two
# subclasses: IfTagContainer and CaseTag
def chooseAlternate(self, outputFormatter):
raise NotImplementedError()
def format(self, outputFormatter):
activeAlternate = self.chooseAlternate(outputFormatter)
if activeAlternate:
activeAlternate.format(outputFormatter)
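
# --- Illustrative sketch (class name made up): a concrete subclass only has
# to supply the choice rule; IfTagContainer and CaseTag are the real ones.
#
#   class FirstChoiceTag(TagAlternateChoice):
#       def chooseAlternate(self, outputFormatter):
#           return self._alternateChoices[0] if self._alternateChoices else None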
|
py | 7df6d86152764a351308da5b7a810b6313dd1252 | import os
import random
import string
import azure.mgmt.compute as mgmt_compute
import azure.mgmt.network as mgmt_network
import azure.mgmt.resource as mgmt_resource
from azure.mgmt.compute import models as compute_models
from azure.mgmt.network import models as network_models
from azure.mgmt.resource.resources import models as resource_models
from azure.identity import DefaultAzureCredential
YOUR_PASSWORD = 'A1_' + ''.join(random.choice(string.ascii_lowercase) for i in range(21))
class createVMSample(object):
def __init__(self, group_name, location):
self.location = location
self.subscription_id = os.environ.get("SUBSCRIPTION_ID", None)
# Use 2019-07-01 api version to test create VM
self.compute_client = mgmt_compute.ComputeManagementClient(
credential=DefaultAzureCredential(),
subscription_id=self.subscription_id,
api_version="2019-07-01"
)
self.network_client = mgmt_network.NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id=self.subscription_id
)
self.resource_client = mgmt_resource.ResourceManagementClient(
credential=DefaultAzureCredential(),
subscription_id=self.subscription_id
)
self.group = self.resource_client.resource_groups.create_or_update(
group_name,
# model style
resource_models.ResourceGroup(
location=self.location
)
# json style
# {'location': self.location}
)
def create_virtual_network(self, group_name, location, network_name, subnet_name):
result = self.network_client.virtual_networks.begin_create_or_update(
group_name,
network_name,
# model style
network_models.VirtualNetwork(
location=location,
address_space=network_models.AddressSpace(
address_prefixes=['10.0.0.0/16']
)
)
# json style
# {
# 'location': location,
# 'address_space': {
# 'address_prefixes': ['10.0.0.0/16']
# }
# }
)
vnet = result.result()
async_subnet_creation = self.network_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name,
# model style
network_models.Subnet(
address_prefix='10.0.0.0/24'
)
# json style
# {'address_prefix': '10.0.0.0/24'}
)
subnet = async_subnet_creation.result()
return (vnet, subnet)
def create_network_interface(self, group_name, location, nic_name, subnet):
async_nic_creation = self.network_client.network_interfaces.begin_create_or_update(
group_name,
nic_name,
# model style
network_models.NetworkInterface(
location=location,
ip_configurations=[
network_models.NetworkInterfaceIPConfiguration(
name="MyIpConfig",
subnet=network_models.Subnet(
id=subnet.id
)
)
]
)
# json style
# {
# 'location': location,
# 'ip_configurations': [{
# 'name': 'MyIpConfig',
# 'subnet': {
# 'id': subnet.id
# }
# }]
# }
)
nic = async_nic_creation.result()
return nic
def create_vm(self, vm_name, network_name, subnet_name, interface_name):
group_name = self.group.name
location = self.location
# create network
vnet, subnet = self.create_virtual_network(group_name, location, network_name, subnet_name)
nic = self.create_network_interface(group_name, location, interface_name, subnet)
# Create a vm with empty data disks.
# model style
model_style_vm = compute_models.VirtualMachine(
location=location,
hardware_profile=compute_models.HardwareProfile(
vm_size="Standard_D2_v2"
),
storage_profile=compute_models.StorageProfile(
image_reference=compute_models.ImageReference(
sku="2016-Datacenter",
publisher="MicrosoftWindowsServer",
version="latest",
offer="WindowsServer"
),
os_disk=compute_models.OSDisk(
caching=compute_models.CachingTypes.read_write,
managed_disk=compute_models.ManagedDiskParameters(
storage_account_type="Standard_LRS"
),
name="myVMosdisk",
create_option="FromImage"
),
data_disks=[
compute_models.DataDisk(
disk_size_gb=1023,
create_option="Empty",
lun=0
),
compute_models.DataDisk(
disk_size_gb=1023,
create_option="Empty",
lun=1
)
]
),
os_profile=compute_models.OSProfile(
admin_username="testuser",
computer_name="myVM",
admin_password=YOUR_PASSWORD,
windows_configuration=compute_models.WindowsConfiguration(
enable_automatic_updates=True
)
),
network_profile=compute_models.NetworkProfile(
network_interfaces=[
compute_models.NetworkInterfaceReference(
id=nic.id,
primary=True
)
]
)
)
# json style
json_style_vm = {
"location": location,
"hardware_profile": {
"vm_size": "Standard_D2_v2"
},
"storage_profile": {
"image_reference": {
"sku": "2016-Datacenter",
"publisher": "MicrosoftWindowsServer",
"version": "latest",
"offer": "WindowsServer"
},
"os_disk": {
"caching": "ReadWrite",
"managed_disk": {
"storage_account_type": "Standard_LRS"
},
"name": "myVMosdisk",
"create_option": "FromImage"
},
"data_disks": [
{
"disk_size_gb": "1023",
"create_option": "Empty",
"lun": "0"
},
{
"disk_size_gb": "1023",
"create_option": "Empty",
"lun": "1"
}
]
},
"os_profile": {
"admin_username": "testuser",
"computer_name": "myVM",
"admin_password": YOUR_PASSWORD,
"windows_configuration": {
"enable_automatic_updates": True # need automatic update for reimage
}
},
"network_profile": {
"network_interfaces": [
{
"id": nic.id,
"primary": True
}
]
}
}
result = self.compute_client.virtual_machines.begin_create_or_update(
group_name,
vm_name,
model_style_vm
)
vm = result.result()
print("Create VM successfully\nVM:\n{}".format(vm))
def main():
print("init sample.")
sample = createVMSample('testvmmultiapi', 'eastus')
print("create vm ...")
sample.create_vm('testvm', 'testnetwork', 'testsubnet', 'testinterface')
print("finish.")
if __name__ == '__main__':
main()
|
py | 7df6d8dc7dcfc1e3b771098851dafae0845e7446 | """
Class for Independent Component Analysis
"""
from thunder.factorization.svd import SVD
from thunder.rdds.series import Series
from thunder.rdds.matrices import RowMatrix
class ICA(object):
"""
Independent component analysis on a distributed matrix.
Initial dimensionality reduction performed via SVD
Parameters
----------
k : int
Number of principal components to use
c : int
Number of independent components to estimate
svdMethod : string, optional, default = "direct"
Which SVD method to use
    maxIter : int, optional, default = 10
        Maximum number of iterations
    tol : float, optional, default = 0.000001
        Tolerance for convergence
    seed : int, optional, default = 0
        Seed for the random initialization; 0 leaves the global RNG state unchanged
Attributes
----------
`w` : array, shape (c, ncols)
Recovered unmixing matrix
    `a` : array, shape (ncols, c)
Recovered mixing matrix
`sigs` : RowMatrix, nrows, each array of shape (c,)
Estimated independent components
"""
def __init__(self, c, k=None, svdMethod="direct", maxIter=10, tol=0.000001, seed=0):
self.k = k
self.c = c
self.svdMethod = svdMethod
self.maxIter = maxIter
self.tol = tol
self.seed = seed
self.w = None
self.a = None
self.sigs = None
def fit(self, data):
"""
Fit independent components using an iterative fixed-point algorithm
Parameters
----------
data : Series or a subclass (e.g. RowMatrix)
Data to estimate independent components from, must be a collection of
key-value pairs where the keys are identifiers and the values are
one-dimensional arrays
Returns
----------
self : returns an instance of self.
"""
from numpy import random, sqrt, zeros, real, dot, outer, diag, transpose
from scipy.linalg import sqrtm, inv, orth
if not (isinstance(data, Series)):
raise Exception('Input must be Series or a subclass (e.g. RowMatrix)')
if not isinstance(data, RowMatrix):
data = data.toRowMatrix()
d = data.ncols
if self.k is None:
self.k = d
if self.c > self.k:
raise Exception("number of independent comps " + str(self.c) +
" must be less than the number of principal comps " + str(self.k))
if self.k > d:
raise Exception("number of principal comps " + str(self.k) +
" must be less than the data dimensionality " + str(d))
# reduce dimensionality
svd = SVD(k=self.k, method=self.svdMethod).calc(data)
# whiten data
whtMat = real(dot(inv(diag(svd.s/sqrt(data.nrows))), svd.v))
unWhtMat = real(dot(transpose(svd.v), diag(svd.s/sqrt(data.nrows))))
wht = data.times(whtMat.T)
# do multiple independent component extraction
if self.seed != 0:
random.seed(self.seed)
b = orth(random.randn(self.k, self.c))
bOld = zeros((self.k, self.c))
niter = 0
minAbsCos = 0
errVec = zeros(self.maxIter)
        while (niter < self.maxIter) and ((1 - minAbsCos) > self.tol):
niter += 1
# update rule for pow3 non-linearity (TODO: add others)
b = wht.rows().map(lambda x: outer(x, dot(x, b) ** 3)).sum() / wht.nrows - 3 * b
# make orthogonal
b = dot(b, real(sqrtm(inv(dot(transpose(b), b)))))
# evaluate error
minAbsCos = min(abs(diag(dot(transpose(b), bOld))))
# store results
bOld = b
errVec[niter-1] = (1 - minAbsCos)
# get un-mixing matrix
w = dot(b.T, whtMat)
# get mixing matrix
a = dot(unWhtMat, b)
# get components
sigs = data.times(w.T)
self.w = w
self.a = a
self.sigs = sigs
return self
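
# --- Usage sketch (illustrative; `data` is assumed to be a Series/RowMatrix
# loaded elsewhere, e.g. via a ThunderContext) ---
#
#   ica = ICA(c=3, k=10, maxIter=20, seed=42)
#   ica.fit(data)
#   unmixing = ica.w      # array, shape (c, ncols)
#   mixing = ica.a        # array, shape (ncols, c)
#   sources = ica.sigs    # RowMatrix of estimated independent components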
|
py | 7df6d9173c049ec12a846259d480c74e8f4c98a6 | """
The default experiment with default model Initial State, System Parameters, and Simulation Configuration.
The defaults are defined in their respective modules:
* Initial State in `model/state_variables.py`
* System Parameters in `model/system_parameters.py`
* Simulation Configuration in `experiments/simulation_configuration.py`
"""
from radcad import Simulation, Experiment, Backend
from model import model
from experiments.simulation_configuration import TIMESTEPS, MONTE_CARLO_RUNS
# Create Model Simulation
simulation = Simulation(
model=model,
timesteps=TIMESTEPS,
runs=MONTE_CARLO_RUNS
)
# Create Experiment of single Simulation
experiment = Experiment([simulation])
# Configure Simulation & Experiment engine
simulation.engine = experiment.engine
experiment.engine.backend = Backend.SINGLE_PROCESS
experiment.engine.deepcopy = False
experiment.engine.drop_substeps = True
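
# --- Usage sketch (illustrative): running the experiment and collecting the
# raw results into a DataFrame; the pandas import is an assumption.
#
#   import pandas as pd
#   result = experiment.run()
#   df = pd.DataFrame(result)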
|
py | 7df6d9a29c012a8e2b9870bccdfd157af8133f76 | # coding=utf-8
import collections
import pandas as pd
import tensorflow as tf
import _pickle as pickle
import numpy as np
from absl import logging
vocab_tokens = ['[PAD]', '[UKN]']
vocab_chars = ['[PAD]', '[UKN]']
def _load_dataset(name):
label_list = []
dataset = {"text": [], "labels": []}
logging.info(name + ": " + str(tf.io.gfile.exists(name)))
with tf.io.gfile.GFile(name) as f:
words = []
labels = []
for line in f:
contents = line.strip()
tokens = contents.split(u' ')
if contents.startswith("-DOCSTART-"):
continue
if len(tokens) > 1:
if tokens[0] not in vocab_tokens:
vocab_tokens.append(tokens[0])
for char in tokens[0]:
if char not in vocab_chars:
vocab_chars.append(char)
words.append(tokens[0])
labels.append(tokens[-1])
else:
if len(contents) == 0 and len(words) > 0:
for l in labels:
if l not in label_list:
label_list.append(l)
dataset["text"].append(' '.join(words))
dataset["labels"].append(' '.join(labels))
words = []
labels = []
return pd.DataFrame.from_dict(dataset), label_list
class InputExample(object):
def __init__(self, text=None, labels=None):
self.text = text
self.labels = labels
class InputFeatures(object):
def __init__(self, chars, tokens, labels):
self.chars = chars
self.tokens = tokens
self.labels = labels
def load_examples(conll_file):
dataset, label_list = _load_dataset(conll_file)
dataset_df = pd.concat([dataset]).sample(frac=1).reset_index(drop=True)
dataset_examples = dataset_df.apply(
lambda x: InputExample(text=x["text"], labels=x["labels"]), axis=1)
return dataset_examples, label_list
def convert_tokens_to_ids(tokens):
tokens_id = []
for token in tokens:
if token in vocab_tokens:
tokens_id.append(vocab_tokens.index(token))
else:
tokens_id.append(vocab_tokens.index('[UKN]'))
return tokens_id
def convert_chars_to_ids(chars):
chars_id = []
for char in chars:
if char in vocab_chars:
chars_id.append(vocab_chars.index(char))
else:
chars_id.append(vocab_chars.index('[UKN]'))
return chars_id
def get_embedding_map(embeddings_path):
embeddings = {}
embedding_size = 0
logging.info(embeddings_path + ": " + str(tf.io.gfile.exists(embeddings_path)))
with tf.io.gfile.GFile(embeddings_path) as fp:
for idx, line in enumerate(fp):
if len(line) < 30 and idx == 0:
# In fasttext, the first line is the # of vocab words.
continue
tokens = line.strip().split(u' ')
word = tokens[0]
embedding_size = len(tokens[1:])
vec = list(map(float, tokens[1:]))
if word not in vocab_tokens:
continue
embeddings[word] = vec
embeddings['[PAD]'] = np.zeros(embedding_size).tolist()
boundary = np.sqrt(3.0 / embedding_size)
embeddings['[UKN]'] = np.random.uniform(-boundary, boundary, embedding_size).tolist()
return list(embeddings.keys()), list(embeddings.values())
def convert_single_example(ex_index, example, label_list, max_token_seq_length,
max_sentence_seq_length):
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
ntokens = example.text.strip().split(u' ')
nlabels = example.labels.strip().split(u' ')
tokens = [w.encode('utf-8') if w in vocab_tokens else b'[UKN]' for w in ntokens]
labels = [l.encode('utf-8') for l in nlabels]
chars = [[c.encode('utf-8') if c in vocab_chars else b'[UKN]' for c in w] for w in
example.text.strip().split(u' ')]
if len(tokens) > max_sentence_seq_length:
tokens = tokens[0:max_sentence_seq_length]
labels = labels[0:max_sentence_seq_length]
ntokens = ntokens[0:max_sentence_seq_length]
nlabels = nlabels[0:max_sentence_seq_length]
chars = chars[0:max_sentence_seq_length]
for i, _ in enumerate(chars):
if len(chars[i]) > max_token_seq_length:
chars[i] = chars[i][0:max_token_seq_length]
if len(tokens) < max_sentence_seq_length:
tokens.extend([b'[PAD]'] * (max_sentence_seq_length - len(tokens)))
labels.extend([b'O'] * (max_sentence_seq_length - len(labels)))
ntokens.extend(['[PAD]'] * (max_sentence_seq_length - len(ntokens)))
nlabels.extend(['O'] * (max_sentence_seq_length - len(nlabels)))
lengths = [len(c) for c in chars]
chars = [c + [b'[PAD]'] * (max_token_seq_length - l) for c, l in zip(chars, lengths)]
while len(chars) < max_sentence_seq_length:
chars.append([b'[PAD]'] * max_token_seq_length)
assert len(chars) == len(tokens) == len(labels) == len(ntokens) == len(nlabels) == \
max_sentence_seq_length
for tmp_chars in chars:
assert len(tmp_chars) == max_token_seq_length
    chars = np.reshape(chars, -1).tolist()
assert len(tokens) == max_sentence_seq_length
assert len(chars) == max_sentence_seq_length * max_token_seq_length
assert len(labels) == max_sentence_seq_length
if ex_index < 5:
logging.info("*** Example ***")
logging.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logging.info("chars: %s" % " ".join([str(x) for x in chars]))
logging.info("labels: %s" % " ".join([str(x) for x in labels]))
feature = InputFeatures(
chars=chars,
tokens=tokens,
labels=labels,
)
return feature, ntokens, nlabels
def file_based_convert_examples_to_features(examples, label_list, max_token_seq_length,
max_sentence_seq_length, output_file):
writer = tf.io.TFRecordWriter(output_file)
batch_tokens = []
batch_labels = []
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature, tokens, labels = convert_single_example(ex_index, example, label_list,
max_token_seq_length,
max_sentence_seq_length)
batch_tokens.extend(tokens)
batch_labels.extend(labels)
def create_bytes_feature(values):
f = tf.train.Feature(bytes_list=tf.train.BytesList(value=list(values)))
return f
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=values))
return f
features = collections.OrderedDict()
features["tokens"] = create_bytes_feature(feature.tokens)
features["chars"] = create_bytes_feature(feature.chars)
features["labels"] = create_bytes_feature(feature.labels)
features["size_tokens"] = create_int_feature(np.full(max_sentence_seq_length,
max_token_seq_length))
features["size_sentence"] = create_int_feature([max_sentence_seq_length])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
return batch_tokens, batch_labels
def create_features(max_token_seq_length, max_sentence_seq_length, train_conll, test_conll,
train_tfrecord_file, eval_tfrecord_file, metadata_file, embedding_path):
global vocab_tokens
if tf.io.gfile.exists(train_tfrecord_file):
tf.io.gfile.remove(train_tfrecord_file)
if tf.io.gfile.exists(eval_tfrecord_file):
tf.io.gfile.remove(eval_tfrecord_file)
if tf.io.gfile.exists(metadata_file):
tf.io.gfile.remove(metadata_file)
train_input_examples, label_list = load_examples(train_conll)
eval_input_examples, _ = load_examples(test_conll)
vocab_tokens, embeddings = get_embedding_map(embedding_path)
_, _ = file_based_convert_examples_to_features(train_input_examples, label_list,
max_token_seq_length, max_sentence_seq_length,
train_tfrecord_file)
batch_tokens, batch_labels = file_based_convert_examples_to_features(eval_input_examples,
label_list,
max_token_seq_length,
max_sentence_seq_length,
eval_tfrecord_file)
metadata = {"max_token_seq_length": max_token_seq_length,
"max_sentence_seq_length": max_sentence_seq_length, "labels": label_list,
"train_number_examples": len(train_input_examples),
"eval_number_examples": len(eval_input_examples), "embeddings": embeddings,
"vocab_tokens": vocab_tokens, "vocab_chars": vocab_chars,
"batch_tokens": batch_tokens, "batch_labels": batch_labels}
with tf.io.gfile.GFile(metadata_file, "w") as f:
pickle.dump(metadata, f)
def main():
logging.set_verbosity(logging.INFO)
create_features(45, 128, "../train.conll", "../test.conll", "../train.tfrecord",
"../eval.tfrecord", "../metadata.pkl", "../cc.fr.300.short.vec")
if __name__ == "__main__":
main()
|
py | 7df6d9fcab17361a7f1c8af22add2b96efe2dd29 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
AUTHOR = 'Hang Xu'
SITENAME = 'Hang Xu'
SITEURL = ''
PATH = 'content'
OUTPUT_PATH = '.'
STATIC_PATHS = ['images', 'extra', ]
EXTRA_PATH_METADATA = {
'extra/favicon.ico': {'path': 'favicon.ico'},
}
TIMEZONE = 'Asia/Hong_Kong'
DEFAULT_LANG = 'en'
LOCALE = ('usa', # On Windows
'en_US' # On Unix/Linux
)
THEME = "simpleplus"
PLUGIN_PATHS = ['plugins']
PLUGINS = ['i18n_subsites']
I18N_SUBSITES = {
'zh': {
'SITENAME': '徐航',
'THEME': 'simpleplus_zh',
'AUTHOR' : '徐航',
}
}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'https://getpelican.com/'),
('Python.org', 'https://www.python.org/'),
('Jinja2', 'https://palletsprojects.com/p/jinja/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True |
py | 7df6dabb4a2800ac6f8878c7220b3c023b3d5671 | from io import BytesIO
from PIL import Image
from .scrap import get_data_from_url
def get_img_from_url(url: str) -> Image:
"""Returns a gray image given an url
Parameters
----------
url : str
Url where the image is
Returns
-------
Image
Gray image
"""
ans = get_data_from_url(url)
# Open as grayscale
img = Image.open(BytesIO(ans.content)).convert("L")
return img
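
# --- Usage sketch (illustrative; the URL is hypothetical) ---
#
#   img = get_img_from_url("https://example.com/photo.jpg")
#   print(img.size, img.mode)   # mode is "L" (8-bit grayscale)
#   img.save("photo_gray.png")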
|
py | 7df6dae4da2e03afa21cd2bf1fee892530d7f7f1 | from abc import ABC
from typing import Tuple
from ..model import Board, JailhouseConfig
from ..utils.logging import getLogger
class BasePass(ABC):
def __init__(self) -> None:
self.logger = getLogger()
def __call__(
self, board: Board, config: JailhouseConfig
) -> Tuple[Board, JailhouseConfig]:
pass
@property
def name(self) -> str:
return self.__class__.__name__
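
# --- Illustrative sketch (class name made up): a concrete pass implements
# __call__ and returns the (possibly modified) board and config.
#
#   class NoopPass(BasePass):
#       def __call__(self, board, config):
#           self.logger.info("running %s", self.name)
#           return board, config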
|
py | 7df6db974630cb326c803e5c363e2cab61be0c5b | # coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.join(os.getcwd(), "DeepLearningExamples", "TensorFlow", "LanguageModeling", "BERT"))
sys.path.insert(0, os.getcwd())
from transformers import BertTokenizer
from utils.create_squad_data import read_squad_examples, convert_examples_to_features
import mlperf_loadgen as lg
max_seq_length = 384
max_query_length = 64
doc_stride = 128
class SQuAD_v1_QSL():
def __init__(self, perf_count=None):
print("Creating tokenizer...")
tokenizer = BertTokenizer("build/data/bert_tf_v1_1_large_fp32_384_v2/vocab.txt")
print("Reading examples...")
eval_examples = read_squad_examples(input_file="build/data/dev-v1.1.json",
is_training=False, version_2_with_negative=False)
print("Converting examples to features...")
eval_features = []
def append_feature(feature):
eval_features.append(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=False,
output_fn=append_feature,
verbose_logging=False)
print("Constructing QSL...")
self.eval_features = eval_features
self.count = len(self.eval_features)
self.perf_count = perf_count if perf_count is not None else self.count
self.qsl = lg.ConstructQSL(self.count, self.perf_count, self.load_query_samples, self.unload_query_samples)
print("Finished constructing QSL.")
def load_query_samples(self, sample_list):
pass
def unload_query_samples(self, sample_list):
pass
def get_features(self, sample_id):
return self.eval_features[sample_id]
def __del__(self):
lg.DestroyQSL(self.qsl)
print("Finished destroying QSL.")
def get_squad_QSL():
return SQuAD_v1_QSL()
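
# --- Usage sketch (illustrative; assumes the SQuAD data and vocab files
# exist under build/ as expected by the constructor) ---
#
#   qsl = get_squad_QSL()
#   feature = qsl.get_features(0)    # one preprocessed SQuAD feature
#   print(qsl.count, qsl.perf_count)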
|
py | 7df6dd25be930e3e82d5f5ffe081b9f1f4d8e3ae | # -*- coding: utf-8 -*-
"""
collection
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class HttpResponse(object):
"""Information about an HTTP Response including its status code, returned
headers, and raw body
Attributes:
status_code (int): The status code response from the server that
corresponds to this response.
headers (dict): A dictionary of headers (key : value) that were
returned with the response
raw_body (string): The Raw body of the HTTP Response as a string
"""
def __init__(self,
status_code,
headers,
raw_body):
"""Constructor for the HttpResponse class
Args:
status_code (int): The response status code.
headers (dict): The response headers.
raw_body (string): The raw body from the server.
"""
self.status_code = status_code
self.headers = headers
self.raw_body = raw_body
|
py | 7df6de257bea9fda49a09ee8a5798d0dd7d25539 | import sys
import graphene
from dagster import check
from dagster.core.instance import DagsterInstance, is_dagit_telemetry_enabled
from dagster.core.launcher.base import RunLauncher
from dagster.daemon.controller import get_daemon_status
from dagster.daemon.types import DaemonStatus
from .errors import GraphenePythonError
from .util import non_null_list
class GrapheneRunLauncher(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
class Meta:
name = "RunLauncher"
def __init__(self, run_launcher):
super().__init__()
self._run_launcher = check.inst_param(run_launcher, "run_launcher", RunLauncher)
def resolve_name(self, _graphene_info):
return self._run_launcher.__class__.__name__
class GrapheneDaemonStatus(graphene.ObjectType):
daemonType = graphene.NonNull(graphene.String)
id = graphene.NonNull(graphene.ID)
required = graphene.NonNull(graphene.Boolean)
healthy = graphene.Boolean()
lastHeartbeatTime = graphene.Float()
lastHeartbeatErrors = non_null_list(GraphenePythonError)
class Meta:
name = "DaemonStatus"
def __init__(self, daemon_status):
check.inst_param(daemon_status, "daemon_status", DaemonStatus)
super().__init__(
daemonType=daemon_status.daemon_type,
required=daemon_status.required,
healthy=daemon_status.healthy,
lastHeartbeatTime=daemon_status.last_heartbeat.timestamp
if daemon_status.last_heartbeat
else None,
lastHeartbeatErrors=[
GraphenePythonError(error) for error in daemon_status.last_heartbeat.errors
]
if daemon_status.last_heartbeat and daemon_status.last_heartbeat.errors
else [],
)
def resolve_id(self, _graphene_info):
return self.daemonType
class GrapheneDaemonHealth(graphene.ObjectType):
id = graphene.NonNull(graphene.String)
daemonStatus = graphene.Field(
graphene.NonNull(GrapheneDaemonStatus), daemon_type=graphene.Argument(graphene.String)
)
allDaemonStatuses = non_null_list(GrapheneDaemonStatus)
class Meta:
name = "DaemonHealth"
def __init__(self, instance):
super().__init__()
self._instance = check.inst_param(instance, "instance", DagsterInstance)
def resolve_id(self, _graphene_info):
return "daemonHealth"
def resolve_daemonStatus(self, _graphene_info, daemon_type):
check.str_param(daemon_type, "daemon_type")
return GrapheneDaemonStatus(
get_daemon_status(self._instance, daemon_type, ignore_errors=True)
)
def resolve_allDaemonStatuses(self, _graphene_info):
return [
GrapheneDaemonStatus(get_daemon_status(self._instance, daemon_type, ignore_errors=True))
for daemon_type in self._instance.get_required_daemon_types()
]
class GrapheneInstance(graphene.ObjectType):
info = graphene.Field(graphene.String)
runLauncher = graphene.Field(GrapheneRunLauncher)
runQueuingSupported = graphene.NonNull(graphene.Boolean)
executablePath = graphene.NonNull(graphene.String)
daemonHealth = graphene.NonNull(GrapheneDaemonHealth)
hasInfo = graphene.NonNull(graphene.Boolean)
dagitTelemetryEnabled = graphene.NonNull(graphene.Boolean)
class Meta:
name = "Instance"
def __init__(self, instance):
super().__init__()
self._instance = check.inst_param(instance, "instance", DagsterInstance)
def resolve_hasInfo(self, graphene_info) -> bool:
return graphene_info.context.show_instance_config
def resolve_info(self, graphene_info):
return self._instance.info_str() if graphene_info.context.show_instance_config else None
def resolve_runLauncher(self, _graphene_info):
return (
GrapheneRunLauncher(self._instance.run_launcher)
if self._instance.run_launcher
else None
)
def resolve_runQueuingSupported(self, _graphene_info):
from dagster.core.run_coordinator import QueuedRunCoordinator
return isinstance(self._instance.run_coordinator, QueuedRunCoordinator)
def resolve_executablePath(self, _graphene_info):
return sys.executable
def resolve_daemonHealth(self, _graphene_info):
return GrapheneDaemonHealth(instance=self._instance)
def resolve_dagitTelemetryEnabled(self, _graphene_info):
return is_dagit_telemetry_enabled(self._instance)
|
py | 7df6debe13628cdb0dcd8839263f5df48f75257d | """
WSGI config for dmp_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dmp_project.settings")
application = get_wsgi_application()
|
py | 7df6dfb3f88c9ed232f597535871512fc6b2d7de | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecognizeBusinessCardRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'body': 'BusinessCardRequestBody'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None):
"""RecognizeBusinessCardRequest - a model defined in huaweicloud sdk"""
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this RecognizeBusinessCardRequest.
:return: The body of this RecognizeBusinessCardRequest.
:rtype: BusinessCardRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this RecognizeBusinessCardRequest.
:param body: The body of this RecognizeBusinessCardRequest.
:type: BusinessCardRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognizeBusinessCardRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 7df6dfb8699e4c44f0dac335e2370466122d6a9a | from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["Accuracy"]
class _BaseClassification(Metric):
def __init__(
self,
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
self._is_multilabel = is_multilabel
self._type = None # type: Optional[str]
self._num_classes = None # type: Optional[int]
super(_BaseClassification, self).__init__(output_transform=output_transform, device=device)
def reset(self) -> None:
self._type = None
self._num_classes = None
def _check_shape(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if not (y.ndimension() == y_pred.ndimension() or y.ndimension() + 1 == y_pred.ndimension()):
raise ValueError(
"y must have shape of (batch_size, ...) and y_pred must have "
"shape of (batch_size, num_categories, ...) or (batch_size, ...), "
f"but given {y.shape} vs {y_pred.shape}."
)
y_shape = y.shape
y_pred_shape = y_pred.shape # type: Tuple[int, ...]
if y.ndimension() + 1 == y_pred.ndimension():
y_pred_shape = (y_pred_shape[0],) + y_pred_shape[2:]
if not (y_shape == y_pred_shape):
raise ValueError("y and y_pred must have compatible shapes.")
if self._is_multilabel and not (y.shape == y_pred.shape and y.ndimension() > 1 and y.shape[1] > 1):
raise ValueError(
"y and y_pred must have same shape of (batch_size, num_categories, ...) and num_categories > 1."
)
def _check_binary_multilabel_cases(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if not torch.equal(y, y ** 2):
raise ValueError("For binary cases, y must be comprised of 0's and 1's.")
if not torch.equal(y_pred, y_pred ** 2):
raise ValueError("For binary cases, y_pred must be comprised of 0's and 1's.")
def _check_type(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if y.ndimension() + 1 == y_pred.ndimension():
num_classes = y_pred.shape[1]
if num_classes == 1:
update_type = "binary"
self._check_binary_multilabel_cases((y_pred, y))
else:
update_type = "multiclass"
elif y.ndimension() == y_pred.ndimension():
self._check_binary_multilabel_cases((y_pred, y))
if self._is_multilabel:
update_type = "multilabel"
num_classes = y_pred.shape[1]
else:
update_type = "binary"
num_classes = 1
else:
raise RuntimeError(
f"Invalid shapes of y (shape={y.shape}) and y_pred (shape={y_pred.shape}), check documentation."
" for expected shapes of y and y_pred."
)
if self._type is None:
self._type = update_type
self._num_classes = num_classes
else:
if self._type != update_type:
raise RuntimeError(f"Input data type has changed from {self._type} to {update_type}.")
if self._num_classes != num_classes:
raise ValueError(f"Input data number of classes has changed from {self._num_classes} to {num_classes}")
class Accuracy(_BaseClassification):
r"""Calculates the accuracy for binary, multiclass and multilabel data.
.. math:: \text{Accuracy} = \frac{ TP + TN }{ TP + TN + FP + FN }
where :math:`\text{TP}` is true positives, :math:`\text{TN}` is true negatives,
:math:`\text{FP}` is false positives and :math:`\text{FN}` is false negatives.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
- `y` and `y_pred` must be in the following shape of (batch_size, num_categories, ...) and
num_categories must be greater than 1 for multilabel cases.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
is_multilabel: flag to use in multilabel case. By default, False.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
Binary case
.. testcode:: 1
metric = Accuracy()
metric.attach(default_evaluator, "accuracy")
y_true = torch.Tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.Tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 1
0.6666...
Multiclass case
.. testcode:: 2
metric = Accuracy()
metric.attach(default_evaluator, "accuracy")
y_true = torch.Tensor([2, 0, 2, 1, 0, 1]).long()
y_pred = torch.Tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 2
0.5
Multilabel case
.. testcode:: 3
metric = Accuracy(is_multilabel=True)
metric.attach(default_evaluator, "accuracy")
y_true = torch.Tensor([
[0, 0, 1, 0, 1],
[1, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 1, 0, 1],
])
y_pred = torch.Tensor([
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 1, 1, 1],
[1, 1, 0, 0, 1],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 3
0.2
In binary and multilabel cases, the elements of `y` and `y_pred` should have 0 or 1 values. Thresholding of
predictions can be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
metric = Accuracy(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "accuracy")
y_true = torch.Tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.Tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 4
0.6666...
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(Accuracy, self).__init__(output_transform=output_transform, is_multilabel=is_multilabel, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._num_correct = torch.tensor(0, device=self._device)
self._num_examples = 0
super(Accuracy, self).reset()
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
self._check_type(output)
y_pred, y = output[0].detach(), output[1].detach()
if self._type == "binary":
correct = torch.eq(y_pred.view(-1).to(y), y.view(-1))
elif self._type == "multiclass":
indices = torch.argmax(y_pred, dim=1)
correct = torch.eq(indices, y).view(-1)
elif self._type == "multilabel":
# if y, y_pred shape is (N, C, ...) -> (N x ..., C)
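            # e.g. (illustrative shapes) y_pred of shape (8, 5, 32) is transposed to
            # (8, 32, 5) and reshaped to (256, 5): one row of 5 labels per sample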
num_classes = y_pred.size(1)
last_dim = y_pred.ndimension()
y_pred = torch.transpose(y_pred, 1, last_dim - 1).reshape(-1, num_classes)
y = torch.transpose(y, 1, last_dim - 1).reshape(-1, num_classes)
correct = torch.all(y == y_pred.type_as(y), dim=-1)
self._num_correct += torch.sum(correct).to(self._device)
self._num_examples += correct.shape[0]
@sync_all_reduce("_num_examples", "_num_correct")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("Accuracy must have at least one example before it can be computed.")
return self._num_correct.item() / self._num_examples
|
py | 7df6e260ec2277b70396bab508cd5fd9fa843201 | # -*- encoding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from wagtail.core.blocks import BooleanBlock
from wagtail.images.blocks import ImageChooserBlock
from .base import MaterializeComponentMixin
from .base import MaterializeComponentBase
from .base import MaterializeStreamBase
class ParallaxImage(MaterializeComponentMixin, ImageChooserBlock):
class Meta:
label = _("Paralax image")
icon = 'image'
classname = 'full'
class Parallax(MaterializeComponentBase):
contents = 'Paralax Content'
image = ParallaxImage()
full_screen = BooleanBlock(
label=_("Full screen"),
required=False,
)
middle_align = BooleanBlock(
label=_("Middle align"),
required=False,
)
center_align = BooleanBlock(
label=_("Center align"),
required=False,
)
materialize_tag = 'div'
materialize_class = 'parallax-container'
class Meta:
template = 'wagtail/materialize/components/parallax.html'
label = _("Parallax")
class ParallaxStream(MaterializeStreamBase):
parallax = Parallax()
class Meta:
label = _("Parallaxes")
|
py | 7df6e29c3178835e388fce774aee984927369d27 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Execution import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Risk import *
from QuantConnect.Algorithm.Framework.Selection import *
import decimal
class Test_PythonExceptionInterpreter(QCAlgorithm):
def Initialize(self):
pass
    def key_error(self):
        x = dict()['SPY']  # raises KeyError: 'SPY'
    def no_method_match(self):
        self.SetCash('SPY')  # raises: no overload of SetCash accepts a string
    def unsupported_operand(self):
        x = decimal.Decimal(1) * 1.1  # raises TypeError: unsupported operand type(s)
    def zero_division_error(self):
        x = 1 / 0  # raises ZeroDivisionError
|
py | 7df6e3cd99f2a56a3bee86f176df26377bbd0a87 | from __future__ import absolute_import
import logging
from django.core.urlresolvers import reverse
from sentry.exceptions import InvalidIdentity, PluginError
from sentry.models import Deploy, Release, ReleaseHeadCommit, Repository, User
from sentry.plugins import bindings
from sentry.tasks.base import instrumented_task, retry
from sentry.utils.email import MessageBuilder
from sentry.utils.http import absolute_uri
logger = logging.getLogger(__name__)
def generate_invalid_identity_email(identity, commit_failure=False):
new_context = {
'identity': identity,
'auth_url': absolute_uri(reverse('socialauth_associate', args=[identity.provider])),
'commit_failure': commit_failure,
}
return MessageBuilder(
subject='Unable to Fetch Commits' if commit_failure else 'Action Required',
context=new_context,
template='sentry/emails/identity-invalid.txt',
html_template='sentry/emails/identity-invalid.html',
)
def generate_fetch_commits_error_email(release, error_message):
new_context = {
'release': release,
'error_message': error_message,
}
return MessageBuilder(
subject='Unable to Fetch Commits',
context=new_context,
template='sentry/emails/unable-to-fetch-commits.txt',
html_template='sentry/emails/unable-to-fetch-commits.html',
)
# we're future proofing this function a bit so it could be used with other code
def handle_invalid_identity(identity, commit_failure=False):
# email the user
msg = generate_invalid_identity_email(identity, commit_failure)
msg.send_async(to=[identity.user.email])
    # now remove the identity, as it's invalid
identity.delete()
@instrumented_task(
name='sentry.tasks.commits.fetch_commits',
queue='commits',
default_retry_delay=60 * 5,
max_retries=5
)
@retry(exclude=(Release.DoesNotExist, User.DoesNotExist, ))
def fetch_commits(release_id, user_id, refs, prev_release_id=None, **kwargs):
commit_list = []
release = Release.objects.get(id=release_id)
user = User.objects.get(id=user_id)
prev_release = None
if prev_release_id is not None:
try:
prev_release = Release.objects.get(id=prev_release_id)
except Release.DoesNotExist:
pass
for ref in refs:
try:
repo = Repository.objects.get(
organization_id=release.organization_id,
name=ref['repository'],
)
except Repository.DoesNotExist:
logger.info(
'repository.missing',
extra={
'organization_id': release.organization_id,
'user_id': user_id,
'repository': ref['repository'],
}
)
continue
try:
provider_cls = bindings.get('repository.provider').get(repo.provider)
except KeyError:
continue
        # If a previous commit isn't provided, try to get it from the
        # previous release; otherwise, fall back to recent commits from
        # the provider API.
start_sha = None
if ref.get('previousCommit'):
start_sha = ref['previousCommit']
elif prev_release:
try:
start_sha = ReleaseHeadCommit.objects.filter(
organization_id=release.organization_id,
release=prev_release,
repository_id=repo.id,
).values_list(
'commit__key', flat=True
)[0]
except IndexError:
pass
end_sha = ref['commit']
provider = provider_cls(id=repo.provider)
try:
repo_commits = provider.compare_commits(repo, start_sha, end_sha, actor=user)
except NotImplementedError:
pass
except Exception as exc:
logger.exception(
'fetch_commits.error',
exc_info=True,
extra={
'organization_id': repo.organization_id,
'user_id': user_id,
'repository': repo.name,
'end_sha': end_sha,
'start_sha': start_sha,
}
)
if isinstance(exc, InvalidIdentity) and getattr(exc, 'identity', None):
handle_invalid_identity(identity=exc.identity, commit_failure=True)
elif isinstance(exc, (PluginError, InvalidIdentity)):
msg = generate_fetch_commits_error_email(release, exc.message)
msg.send_async(to=[user.email])
else:
msg = generate_fetch_commits_error_email(
release, 'An internal system error occurred.')
msg.send_async(to=[user.email])
else:
logger.info(
'fetch_commits.complete',
extra={
'organization_id': repo.organization_id,
'user_id': user_id,
'repository': repo.name,
'end_sha': end_sha,
'start_sha': start_sha,
'num_commits': len(repo_commits or []),
}
)
commit_list.extend(repo_commits)
if commit_list:
release.set_commits(commit_list)
deploys = Deploy.objects.filter(
organization_id=release.organization_id,
release=release,
notified=False,
).values_list(
'id', flat=True
)
for d_id in deploys:
Deploy.notify_if_ready(d_id, fetch_complete=True)
|
py | 7df6e68ad3c77be3b285c936d3aefb722aa9afda | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import array
import datetime
from decimal import Decimal
from pyflink.table import DataTypes, Row
from pyflink.table.tests.test_types import ExamplePoint, PythonOnlyPoint, ExamplePointUDT, \
PythonOnlyUDT
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class StreamTableCalcTests(PyFlinkStreamTableTestCase):
def test_select(self):
t = self.t_env.from_elements([(1, 'hi', 'hello')], ['a', 'b', 'c'])
result = t.select("a + 1, b, c")
query_operation = result._j_table.getQueryOperation()
self.assertEqual('[`default_catalog`.`default_database`.`plus`(a, 1), b, c]',
query_operation.getProjectList().toString())
def test_alias(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.alias("d, e, f").select("d, e, f")
table_schema = result._j_table.getQueryOperation().getTableSchema()
self.assertEqual(['d', 'e', 'f'], list(table_schema.getFieldNames()))
def test_where(self):
t_env = self.t_env
t = t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.where("a > 1 && b = 'Hello'")
query_operation = result._j_table.getQueryOperation()
self.assertEqual("`default_catalog`.`default_database`.`and`("
"`default_catalog`.`default_database`.`greaterThan`(a, 1), "
"`default_catalog`.`default_database`.`equals`(b, 'Hello'))",
query_operation.getCondition().toString())
def test_filter(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.filter("a > 1 && b = 'Hello'")
query_operation = result._j_table.getQueryOperation()
self.assertEqual("`default_catalog`.`default_database`.`and`("
"`default_catalog`.`default_database`.`greaterThan`(a, 1), "
"`default_catalog`.`default_database`.`equals`(b, 'Hello'))",
query_operation.getCondition().toString())
def test_from_element(self):
t_env = self.t_env
field_names = ["a", "b", "c", "d", "e", "f", "g", "h",
"i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s"]
field_types = [DataTypes.BIGINT(), DataTypes.DOUBLE(), DataTypes.STRING(),
DataTypes.STRING(), DataTypes.DATE(),
DataTypes.TIME(),
DataTypes.TIMESTAMP(),
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(),
DataTypes.INTERVAL(DataTypes.DAY(), DataTypes.SECOND()),
DataTypes.ARRAY(DataTypes.DOUBLE()),
DataTypes.ARRAY(DataTypes.DOUBLE(False)),
DataTypes.ARRAY(DataTypes.STRING()),
DataTypes.ARRAY(DataTypes.DATE()),
DataTypes.DECIMAL(10, 0),
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.DOUBLE())]),
DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE()),
DataTypes.BYTES(), ExamplePointUDT(),
PythonOnlyUDT()]
schema = DataTypes.ROW(
list(map(lambda field_name, field_type: DataTypes.FIELD(field_name, field_type),
field_names,
field_types)))
table_sink = source_sink_utils.TestAppendSink(field_names, field_types)
t_env.register_table_sink("Results", table_sink)
t = t_env.from_elements(
[(1, 1.0, "hi", "hello", datetime.date(1970, 1, 2), datetime.time(1, 0, 0),
datetime.datetime(1970, 1, 2, 0, 0), datetime.datetime(1970, 1, 2, 0, 0),
datetime.timedelta(days=1, microseconds=10),
[1.0, None], array.array("d", [1.0, 2.0]),
["abc"], [datetime.date(1970, 1, 2)], Decimal(1), Row("a", "b")(1, 2.0),
{"key": 1.0}, bytearray(b'ABCD'), ExamplePoint(1.0, 2.0),
PythonOnlyPoint(3.0, 4.0))],
schema)
t.insert_into("Results")
self.env.execute()
actual = source_sink_utils.results()
expected = ['1,1.0,hi,hello,1970-01-02,01:00:00,1970-01-02 00:00:00.0,'
'1970-01-02 00:00:00.0,86400000010,[1.0, null],[1.0, 2.0],[abc],[1970-01-02],'
'1,1,2.0,{key=1.0},[65, 66, 67, 68],[1.0, 2.0],[3.0, 4.0]']
self.assert_equals(actual, expected)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
py | 7df6e6d5bf301c05e45546cd6804a59bca7f10ff | '''
========================================================================================================================
Author: Alan Camilo
www.alancamilo.com
Requirements: aTools Package
------------------------------------------------------------------------------------------------------------------------
To install aTools, please follow the instructions in the file how_to_install.txt
------------------------------------------------------------------------------------------------------------------------
To uninstall aTools, go to menu (the last button on the right), Uninstall
========================================================================================================================
'''
from maya import cmds
from maya import mel
from aTools.generalTools.aToolsGlobals import aToolsGlobals as G
from aTools.commonMods import uiMod
from aTools.commonMods import utilMod
from aTools.commonMods import animMod
from aTools.commonMods import aToolsMod
import maya.OpenMaya as om
#============================================================================================================
class TempCustomPivot(object):
def __init__(self):
self.STORE_NODE = "tempCustomPivot"
self.CONSTRAINTS = "constraintObjects"
self.LOCATORS = "locatorObjects"
self.CTRLS = "ctrlsObjects"
self.CURRENTFRAME = "currentFrame"
self.sel = []
self.deniedCtx = ["dragAttrContext", "manipMoveContext", "manipRotateContext", "manipScaleContext"]
self.clear()
def popupMenu(self, *args):
cmds.popupMenu()
cmds.menuItem(label="Clear temporary custom pivots", command=self.clear)
def create(self, *args):
img = cmds.iconTextButton("TempCustomPivotBtn", query=True, image=True)
onOff = (img[-10:-4] == "active")
if onOff:
self.clear()
cmds.select(self.sel)
return
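        # note (assumption): the repeated empty open/close pairs below appear to flush
        # Maya's pending undo state so that the real chunk opened last captures only
        # this tool's edits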
cmds.undoInfo(openChunk=True)
cmds.undoInfo(closeChunk=True)
cmds.undoInfo(openChunk=True)
cmds.undoInfo(closeChunk=True)
cmds.undoInfo(openChunk=True)
cmds.undoInfo(closeChunk=True)
cmds.undoInfo(openChunk=True)
self.clear()
getCurves = animMod.getAnimCurves()
animCurves = getCurves[0]
getFrom = getCurves[1]
if animCurves:
keyTimes = animMod.getTarget("keyTimes", animCurves, getFrom)
self.sel = cmds.ls(selection=True)
if not self.sel: return
cmds.iconTextButton("TempCustomPivotBtn", edit=True, image= uiMod.getImagePath("specialTools_create_temp_custom_pivot_active"), highlightImage= uiMod.getImagePath("specialTools_create_temp_custom_pivot_active"))
targetObj = self.sel[-1]
aToolsMod.saveInfoWithScene(self.STORE_NODE, self.CTRLS, self.sel)
currentFrame = cmds.currentTime(query=True)
aToolsMod.saveInfoWithScene(self.STORE_NODE, self.CURRENTFRAME, currentFrame)
locators = []
for loopSel in self.sel:
nameSpace = utilMod.getNameSpace([loopSel])
loopSelName = "%s_%s"%(nameSpace[0][0], nameSpace[1][0])
locatorName = "tempCustomPivot_%s"%loopSelName
locator = animMod.createNull(locatorName)
locators.append(locator)
G.aToolsBar.align.align([locator], loopSel)
locatorGroup = "tempCustomPivot_group"
animMod.group(name=locatorGroup)
G.aToolsBar.align.align([locatorGroup], targetObj)
with G.aToolsBar.createAToolsNode: cmds.parent(locators, locatorGroup)
cmds.select(locatorGroup, replace=True)
locators.append(locatorGroup)
aToolsMod.saveInfoWithScene(self.STORE_NODE, self.LOCATORS, locators)
        # parent each ctrl to its locator
constraints = ["%s_tempCustomPivot_constraint"%loopConstraint for loopConstraint in self.sel]
aToolsMod.saveInfoWithScene(self.STORE_NODE, self.CONSTRAINTS, constraints)
for n, loopSel in enumerate(self.sel):
with G.aToolsBar.createAToolsNode: cmds.parentConstraint(locators[n], loopSel, name=constraints[n], maintainOffset=True)
constraintNode = "%s.blendParent1"%loopSel
if not cmds.objExists(constraintNode): continue
cmds.setKeyframe(constraintNode)
if keyTimes:
for loopTime in keyTimes[0]:
cmds.setKeyframe("%s.tx"%locatorGroup, time=(loopTime,loopTime))
if loopTime != currentFrame:
cmds.setKeyframe(constraintNode, time=(loopTime,loopTime), value=0)
#enter edit mode
cmds.setToolTo(cmds.currentCtx())
cmds.ctxEditMode()
#scriptjob
cmds.scriptJob(runOnce = True, killWithScene = True, event =('SelectionChanged', self.scriptJob_SelectionChanged))
def scriptJob_SelectionChanged(self):
self.clear()
cmds.undoInfo(closeChunk=True)
def clear(self, *args):
if cmds.iconTextButton("TempCustomPivotBtn", query=True, exists=True):
cmds.iconTextButton("TempCustomPivotBtn", edit=True, image= uiMod.getImagePath("specialTools_create_temp_custom_pivot"), highlightImage= uiMod.getImagePath("specialTools_create_temp_custom_pivot copy"))
cmds.refresh(suspend=True)
currFrame = cmds.currentTime(query=True)
loadConstraints = aToolsMod.loadInfoWithScene(self.STORE_NODE, self.CONSTRAINTS)
loadLocators = aToolsMod.loadInfoWithScene(self.STORE_NODE, self.LOCATORS)
loadCtrls = aToolsMod.loadInfoWithScene(self.STORE_NODE, self.CTRLS)
currentFrame = aToolsMod.loadInfoWithScene(self.STORE_NODE, self.CURRENTFRAME)
#exit edit mode
if cmds.currentCtx() not in self.deniedCtx: cmds.setToolTo(cmds.currentCtx())
if currentFrame:
cmds.currentTime(eval(currentFrame))
#get values
"""
translation = []
rotation = []
if loadCtrls:
ctrlObjs = eval(loadCtrls)
for loopCtrl in ctrlObjs:
translation.append(cmds.xform(loopCtrl, query=True, ws=True, rotatePivot=True))
rotation.append(cmds.xform(loopCtrl, query=True, ws=True, rotation=True))
"""
if loadConstraints:
constraintObjs = eval(loadConstraints)
for loopConstraint in constraintObjs:
if cmds.objExists(loopConstraint): cmds.delete(loopConstraint)
if loadCtrls and loadLocators:
locatorObjs = eval(loadLocators)
ctrlObjs = eval(loadCtrls)
for n, loopCtrl in enumerate(ctrlObjs):
if cmds.objExists(loopCtrl) and cmds.objExists(locatorObjs[n]):
G.aToolsBar.align.align([loopCtrl], locatorObjs[n])
for loopLocator in locatorObjs:
if cmds.objExists(loopLocator): cmds.delete(loopLocator)
cmds.currentTime(currFrame)
cmds.refresh(suspend=False)
|
py | 7df6e6e440bcecff28c9e42f6e33aa4fa068d048 | from flask import Flask
from flask_cors import CORS
import json
import sys
filename=sys.argv[1]
app = Flask(__name__)
CORS(app)
flatten = lambda lists: [item for sublist in lists for item in sublist]
node2json = lambda node: {"id": node}
pair2json = lambda pair: {"source": pair[0], "target": pair[1]}
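# expected input: a tab-separated edge list, one pair per line, e.g.
#   alice<TAB>bob
#   bob<TAB>carol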
def load_data():
with open(filename,"r") as f:
data_raw = f.readlines()
    # Format data: one tab-separated pair per line
    data = [x.strip().split("\t") for x in data_raw]
    # Obtain the unique set of nodes
    nodes = set(flatten(data))
    # Convert to the node/link structures expected by the client
    nodesjson = [node2json(x) for x in nodes]
    linksjson = [pair2json(x) for x in data]
    # Serialize to a JSON string
    json_out = {"nodes": nodesjson, "links": linksjson}
    return json.dumps(json_out)
@app.route('/')
def show_json():
#return '{ "nodes": [ { "id": " node 1" }, { "id": "node 2" } ], "links": [ { "source": " node 1", "target": "node 2" } ] } '
out = load_data()
return out
if __name__ == '__main__':
    app.run()
|
py | 7df6e838d6eac0f8b27bce74ba39bd5237d4d4e6 | # pylint: disable=protected-access,redefined-outer-name
"""Test to verify that Beward library works."""
from datetime import datetime
from unittest import TestCase
import requests_mock
from requests import ConnectTimeout
from beward import BewardCamera
from beward.const import ALARM_MOTION
from . import function_url, load_binary, load_fixture
from .const import MOCK_HOST, MOCK_PASS, MOCK_USER
class TestBewardCamera(TestCase):
"""Test case for BewardCamera class."""
def test___init__(self):
"""Initialize test."""
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS)
self.assertEqual(None, bwd.rtsp_port)
self.assertEqual(0, bwd.stream)
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS, rtsp_port=123, stream=2)
self.assertEqual(123, bwd.rtsp_port)
self.assertEqual(2, bwd.stream)
@requests_mock.Mocker()
def test_obtain_uris(self, mock):
"""Test that obtain urls from device."""
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS)
self.assertIsNone(bwd._live_image_url)
self.assertIsNone(bwd._rtsp_live_video_url)
mock.register_uri("get", function_url("rtsp"), exc=ConnectTimeout)
bwd.obtain_uris()
expect = (
"rtsp://" + MOCK_USER + ":" + MOCK_PASS + "@" + MOCK_HOST + ":554/av0_0"
)
self.assertEqual(expect, bwd._rtsp_live_video_url)
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS, stream=1)
mock.register_uri("get", function_url("rtsp"))
bwd.obtain_uris()
expect = (
function_url("images", user=MOCK_USER, password=MOCK_PASS) + "?channel=0"
)
self.assertEqual(expect, bwd._live_image_url)
expect = (
"rtsp://" + MOCK_USER + ":" + MOCK_PASS + "@" + MOCK_HOST + ":554/av0_1"
)
self.assertEqual(expect, bwd._rtsp_live_video_url)
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS)
mock.register_uri("get", function_url("rtsp"), text=load_fixture("rtsp.txt"))
bwd.obtain_uris()
expect = (
"rtsp://" + MOCK_USER + ":" + MOCK_PASS + "@" + MOCK_HOST + ":47456/av0_0"
)
self.assertEqual(expect, bwd._rtsp_live_video_url)
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS, rtsp_port=123, stream=2)
mock.register_uri("get", function_url("rtsp"), text=load_fixture("rtsp.txt"))
bwd.obtain_uris()
expect = (
"rtsp://" + MOCK_USER + ":" + MOCK_PASS + "@" + MOCK_HOST + ":123/av0_2"
)
self.assertEqual(expect, bwd._rtsp_live_video_url)
@requests_mock.Mocker()
def test_live_image_url(self, mock):
"""Test that obtain live image url from device."""
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS)
mock.register_uri("get", function_url("rtsp"))
self.assertIsNone(bwd._live_image_url)
expect = (
function_url("images", user=MOCK_USER, password=MOCK_PASS) + "?channel=0"
)
self.assertEqual(expect, bwd.live_image_url)
@requests_mock.Mocker()
def test_rtsp_live_video_url(self, mock):
"""Test that obtain RTSP live video url from device."""
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS)
mock.register_uri("get", function_url("rtsp"), text=load_fixture("rtsp.txt"))
self.assertIsNone(bwd._rtsp_live_video_url)
expect = (
"rtsp://" + MOCK_USER + ":" + MOCK_PASS + "@" + MOCK_HOST + ":47456/av0_0"
)
self.assertEqual(expect, bwd.rtsp_live_video_url)
@requests_mock.Mocker()
def test_live_image(self, mock):
"""Test that receive live image from device."""
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS)
image = load_binary("image.jpg")
mock.register_uri("get", function_url("images"), content=image)
res = bwd.live_image
self.assertIsNone(res)
mock.register_uri(
"get",
function_url("images"),
content=image,
headers={"Content-Type": "image/jpeg"},
)
res = bwd.live_image
self.assertEqual(image, res)
@requests_mock.Mocker()
def test__handle_alarm(self, mock):
"""Test that handle alarms."""
bwd = BewardCamera(MOCK_HOST, MOCK_USER, MOCK_PASS)
image = load_binary("image.jpg")
# Check initial state
self.assertIsNone(bwd.last_motion_timestamp)
self.assertIsNone(bwd.last_motion_image)
ts1 = datetime.now()
mock.register_uri(
"get",
function_url("images"),
content=image,
headers={"Content-Type": "image/jpeg"},
)
bwd._handle_alarm(ts1, ALARM_MOTION, True)
self.assertEqual(ts1, bwd.last_motion_timestamp)
self.assertEqual(image, bwd.last_motion_image)
|
py | 7df6e83abe44e9d3beb5fbf4177cc51539f1a8b3 | # -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.docstring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Classes for docstring parsing and formatting.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import collections
import inspect
import re
from six import string_types, u
from six.moves import range
from sphinx.ext.napoleon.iterators import modify_iter
from sphinx.util.pycompat import UnicodeMixin
if False:
# For type annotation
from typing import Any, Callable, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config as SphinxConfig # NOQA
_directive_regex = re.compile(r'\.\. \S+::')
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
_google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)')
_numpy_section_regex = re.compile(r'^[=\-`:\'"~^_*+#<>]{2,}\s*$')
_single_colon_regex = re.compile(r'(?<!:):(?!:)')
_xref_regex = re.compile(r'(:\w+:\S+:`.+?`|:\S+:`.+?`|`.+?`)')
_bullet_list_regex = re.compile(r'^(\*|\+|\-)(\s+\S|\s*$)')
_enumerated_list_regex = re.compile(
r'^(?P<paren>\()?'
r'(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])'
r'(?(paren)\)|\.)(\s+\S|\s*$)')
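# illustrative matches (not exhaustive): _bullet_list_regex matches "* item",
# "- item", "+ item"; _enumerated_list_regex matches "1. item", "(a) item",
# "iv. item", "#. item"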
class GoogleDocstring(UnicodeMixin):
"""Convert Google style docstrings to reStructuredText.
Parameters
----------
docstring : :obj:`str` or :obj:`list` of :obj:`str`
The docstring to parse, given either as a string or split into
individual lines.
config: :obj:`sphinx.ext.napoleon.Config` or :obj:`sphinx.config.Config`
The configuration settings to use. If not given, defaults to the
config object on `app`; or if `app` is not given defaults to the
a new :class:`sphinx.ext.napoleon.Config` object.
Other Parameters
----------------
app : :class:`sphinx.application.Sphinx`, optional
Application object representing the Sphinx process.
what : :obj:`str`, optional
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : :obj:`str`, optional
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : :class:`sphinx.ext.autodoc.Options`, optional
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Example
-------
>>> from sphinx.ext.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Args:
... arg1(int): Description of `arg1`
... arg2(str): Description of `arg2`
... Returns:
... str: Description of return value.
... '''
>>> print(GoogleDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
# type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
self._config = config
self._app = app
if not self._config:
from sphinx.ext.napoleon import Config
self._config = self._app and self._app.config or Config() # type: ignore
if not what:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable): # type: ignore
what = 'function'
else:
what = 'object'
self._what = what
self._name = name
self._obj = obj
self._opt = options
if isinstance(docstring, string_types):
docstring = docstring.splitlines() # type: ignore
self._lines = docstring
self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
self._parsed_lines = [] # type: List[unicode]
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
self._directive_sections = [] # type: List[unicode]
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
'arguments': self._parse_parameters_section,
'attributes': self._parse_attributes_section,
'example': self._parse_examples_section,
'examples': self._parse_examples_section,
'keyword args': self._parse_keyword_arguments_section,
'keyword arguments': self._parse_keyword_arguments_section,
'methods': self._parse_methods_section,
'note': self._parse_note_section,
'notes': self._parse_notes_section,
'other parameters': self._parse_other_parameters_section,
'parameters': self._parse_parameters_section,
'return': self._parse_returns_section,
'returns': self._parse_returns_section,
'raises': self._parse_raises_section,
'references': self._parse_references_section,
'see also': self._parse_see_also_section,
'todo': self._parse_todo_section,
'warning': self._parse_warning_section,
'warnings': self._parse_warning_section,
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
} # type: Dict[unicode, Callable]
self._parse()
def __unicode__(self):
# type: () -> unicode
"""Return the parsed docstring in reStructuredText format.
Returns
-------
unicode
Unicode version of the docstring.
"""
return u('\n').join(self.lines())
def lines(self):
# type: () -> List[unicode]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
return self._parsed_lines
def _consume_indented_block(self, indent=1):
# type: (int) -> List[unicode]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
(not line or self._is_indented(line, indent))):
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_contiguous(self):
# type: () -> List[unicode]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
not self._is_section_header()):
lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_empty(self):
# type: () -> List[unicode]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
line = next(self._line_iter) # type: ignore
before, colon, after = self._partition_field_on_colon(line)
_name, _type, _desc = before, '', after # type: unicode, unicode, unicode
if parse_type:
match = _google_typed_arg_regex.match(before) # type: ignore
if match:
_name = match.group(1)
_type = match.group(2)
_name = self._escape_args_and_kwargs(_name)
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
_desc = [_desc] + self._dedent(self._consume_indented_block(indent)) # type: ignore
_desc = self.__class__(_desc, self._config).lines()
return _name, _type, _desc # type: ignore
def _consume_fields(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
self._consume_empty()
fields = []
while not self._is_section_break():
_name, _type, _desc = self._consume_field(parse_type, prefer_type)
if _name or _type or _desc:
fields.append((_name, _type, _desc,))
return fields
def _consume_inline_attribute(self):
# type: () -> Tuple[unicode, List[unicode]]
line = next(self._line_iter) # type: ignore
_type, colon, _desc = self._partition_field_on_colon(line)
if not colon:
_type, _desc = _desc, _type
_desc = [_desc] + self._dedent(self._consume_to_end()) # type: ignore
_desc = self.__class__(_desc, self._config).lines()
return _type, _desc # type: ignore
def _consume_returns_section(self):
# type: () -> List[Tuple[unicode, unicode, List[unicode]]]
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
_name, _type, _desc = '', '', lines # type: unicode, unicode, List[unicode]
if colon:
if after:
_desc = [after] + lines[1:]
else:
_desc = lines[1:]
_type = before
_desc = self.__class__(_desc, self._config).lines()
return [(_name, _type, _desc,)]
else:
return []
def _consume_usage_section(self):
# type: () -> List[unicode]
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
# type: () -> unicode
section = next(self._line_iter) # type: ignore
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
section = stripped_section
return section
def _consume_to_end(self):
# type: () -> List[unicode]
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_to_next_section(self):
# type: () -> List[unicode]
self._consume_empty()
lines = []
while not self._is_section_break():
lines.append(next(self._line_iter)) # type: ignore
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
# type: (List[unicode], bool) -> List[unicode]
if full:
return [line.lstrip() for line in lines]
else:
min_indent = self._get_min_indent(lines)
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
# type: (unicode) -> unicode
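        # e.g. '**kwargs' -> '\*\*kwargs' and '*args' -> '\*args', so the asterisks
        # survive reST emphasis parsing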
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
return r'\*' + name[1:]
else:
return name
def _fix_field_desc(self, desc):
# type: (List[unicode]) -> List[unicode]
if self._is_list(desc):
desc = [''] + desc # type: ignore
elif desc[0].endswith('::'):
desc_block = desc[1:]
indent = self._get_indent(desc[0])
block_indent = self._get_initial_indent(desc_block)
if block_indent > indent:
desc = [''] + desc # type: ignore
else:
desc = ['', desc[0]] + self._indent(desc_block, 4)
return desc
def _format_admonition(self, admonition, lines):
# type: (unicode, List[unicode]) -> List[unicode]
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
elif lines:
lines = self._indent(self._dedent(lines), 3)
return ['.. %s::' % admonition, ''] + lines + [''] # type: ignore
else:
return ['.. %s::' % admonition, '']
def _format_block(self, prefix, lines, padding=None):
# type: (unicode, List[unicode], unicode) -> List[unicode]
if lines:
if padding is None:
padding = ' ' * len(prefix)
result_lines = []
for i, line in enumerate(lines):
if i == 0:
result_lines.append((prefix + line).rstrip())
elif line:
result_lines.append(padding + line)
else:
result_lines.append('')
return result_lines
else:
return [prefix]
def _format_docutils_params(self, fields, field_role='param',
type_role='type'):
# type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
if any(_desc):
_desc = self._fix_field_desc(_desc)
field = ':%s %s: ' % (field_role, _name)
lines.extend(self._format_block(field, _desc))
else:
lines.append(':%s %s:' % (field_role, _name))
if _type:
lines.append(':%s %s: %s' % (type_role, _name, _type))
return lines + ['']
def _format_field(self, _name, _type, _desc):
# type: (unicode, unicode, List[unicode]) -> List[unicode]
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
field = '**%s** (%s)%s' % (_name, _type, separator) # type: unicode
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
field = '**%s**%s' % (_name, separator)
elif _type:
if '`' in _type:
field = '%s%s' % (_type, separator)
else:
field = '*%s*%s' % (_type, separator)
else:
field = ''
if has_desc:
_desc = self._fix_field_desc(_desc)
if _desc[0]:
return [field + _desc[0]] + _desc[1:]
else:
return [field] + _desc
else:
return [field]
def _format_fields(self, field_type, fields):
# type: (unicode, List[Tuple[unicode, unicode, List[unicode]]]) -> List[unicode]
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
lines = [] # type: List[unicode]
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
if lines:
lines.extend(self._format_block(padding + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' ', field))
if lines and lines[-1]:
lines.append('')
return lines
def _get_current_indent(self, peek_ahead=0):
# type: (int) -> int
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
while line != self._line_iter.sentinel:
if line:
return self._get_indent(line)
peek_ahead += 1
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
return 0
def _get_indent(self, line):
# type: (unicode) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
# type: (List[unicode]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
# type: (List[unicode]) -> int
min_indent = None
for line in lines:
if line:
indent = self._get_indent(line)
if min_indent is None:
min_indent = indent
elif indent < min_indent:
min_indent = indent
return min_indent or 0
def _indent(self, lines, n=4):
# type: (List[unicode], int) -> List[unicode]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
# type: (unicode, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
elif not s.isspace():
return False
return False
def _is_list(self, lines):
# type: (List[unicode]) -> bool
if not lines:
return False
if _bullet_list_regex.match(lines[0]): # type: ignore
return True
if _enumerated_list_regex.match(lines[0]): # type: ignore
return True
if len(lines) < 2 or lines[0].endswith('::'):
return False
indent = self._get_indent(lines[0])
next_indent = indent
for line in lines[1:]:
if line:
next_indent = self._get_indent(line)
break
return next_indent > indent
def _is_section_header(self):
# type: () -> bool
section = self._line_iter.peek().lower()
match = _google_section_regex.match(section)
if match and section.strip(':') in self._sections:
header_indent = self._get_indent(section)
section_indent = self._get_current_indent(peek_ahead=1)
return section_indent > header_indent
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
def _is_section_break(self):
# type: () -> bool
line = self._line_iter.peek()
return (not self._line_iter.has_next() or
self._is_section_header() or
(self._is_in_section and
line and
not self._is_indented(line, self._section_indent)))
def _parse(self):
# type: () -> None
self._parsed_lines = self._consume_empty()
if self._name and (self._what == 'attribute' or self._what == 'data'):
self._parsed_lines.extend(self._parse_attribute_docstring())
return
while self._line_iter.has_next():
if self._is_section_header():
try:
section = self._consume_section_header()
self._is_in_section = True
self._section_indent = self._get_current_indent()
if _directive_regex.match(section): # type: ignore
lines = [section] + self._consume_to_next_section()
else:
lines = self._sections[section.lower()](section)
finally:
self._is_in_section = False
self._section_indent = 0
else:
if not self._parsed_lines:
lines = self._consume_contiguous() + self._consume_empty()
else:
lines = self._consume_to_next_section()
self._parsed_lines.extend(lines)
def _parse_attribute_docstring(self):
# type: () -> List[unicode]
_type, _desc = self._consume_inline_attribute()
return self._format_field('', _type, _desc)
def _parse_attributes_section(self, section):
# type: (unicode) -> List[unicode]
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
field = ':ivar %s: ' % _name # type: unicode
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
else:
lines.extend(['.. attribute:: ' + _name, ''])
field = self._format_field('', _type, _desc) # type: ignore
lines.extend(self._indent(field, 3)) # type: ignore
lines.append('')
if self._config.napoleon_use_ivar:
lines.append('')
return lines
def _parse_examples_section(self, section):
# type: (unicode) -> List[unicode]
use_admonition = self._config.napoleon_use_admonition_for_examples
return self._parse_generic_section(section, use_admonition)
def _parse_usage_section(self, section):
# type: (unicode) -> List[unicode]
header = ['.. rubric:: Usage:', ''] # type: List[unicode]
block = ['.. code-block:: python', ''] # type: List[unicode]
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
# type: (unicode, bool) -> List[unicode]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
header = '.. admonition:: %s' % section # type: unicode
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
if lines:
return [header, ''] + lines + ['']
else:
return [header, '']
def _parse_keyword_arguments_section(self, section):
# type: (unicode) -> List[unicode]
fields = self._consume_fields()
if self._config.napoleon_use_keyword:
return self._format_docutils_params(
fields,
field_role="keyword",
type_role="kwtype")
else:
return self._format_fields('Keyword Arguments', fields)
def _parse_methods_section(self, section):
# type: (unicode) -> List[unicode]
lines = [] # type: List[unicode]
for _name, _, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
lines.extend([''] + self._indent(_desc, 3)) # type: ignore
lines.append('')
return lines
def _parse_note_section(self, section):
# type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('note', lines)
def _parse_notes_section(self, section):
# type: (unicode) -> List[unicode]
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section('Notes', use_admonition)
def _parse_other_parameters_section(self, section):
# type: (unicode) -> List[unicode]
return self._format_fields('Other Parameters', self._consume_fields())
def _parse_parameters_section(self, section):
# type: (unicode) -> List[unicode]
fields = self._consume_fields()
if self._config.napoleon_use_param:
return self._format_docutils_params(fields)
else:
return self._format_fields('Parameters', fields)
def _parse_raises_section(self, section):
# type: (unicode) -> List[unicode]
fields = self._consume_fields(parse_type=False, prefer_type=True)
field_type = ':raises:'
padding = ' ' * len(field_type)
multi = len(fields) > 1
lines = [] # type: List[unicode]
for _, _type, _desc in fields:
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _type:
has_refs = '`' in _type or ':' in _type
has_space = any(c in ' \t\n\v\f ' for c in _type)
if not has_refs and not has_space:
_type = ':exc:`%s`%s' % (_type, separator)
elif has_desc and has_space:
_type = '*%s*%s' % (_type, separator)
else:
_type = '%s%s' % (_type, separator)
if has_desc:
field = [_type + _desc[0]] + _desc[1:]
else:
field = [_type]
else:
field = _desc
if multi:
if lines:
lines.extend(self._format_block(padding + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' ', field))
if lines and lines[-1]:
lines.append('')
return lines
def _parse_references_section(self, section):
# type: (unicode) -> List[unicode]
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section('References', use_admonition)
def _parse_returns_section(self, section):
# type: (unicode) -> List[unicode]
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
use_rtype = False
else:
use_rtype = self._config.napoleon_use_rtype
lines = [] # type: List[unicode]
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
else:
field = self._format_field(_name, _type, _desc)
if multi:
if lines:
lines.extend(self._format_block(' * ', field))
else:
lines.extend(self._format_block(':returns: * ', field))
else:
lines.extend(self._format_block(':returns: ', field))
if _type and use_rtype:
lines.extend([':rtype: %s' % _type, ''])
if lines and lines[-1]:
lines.append('')
return lines
def _parse_see_also_section(self, section):
# type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('seealso', lines)
def _parse_todo_section(self, section):
# type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('todo', lines)
def _parse_warning_section(self, section):
# type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('warning', lines)
def _parse_warns_section(self, section):
# type: (unicode) -> List[unicode]
return self._format_fields('Warns', self._consume_fields())
def _parse_yields_section(self, section):
# type: (unicode) -> List[unicode]
fields = self._consume_returns_section()
return self._format_fields('Yields', fields)
def _partition_field_on_colon(self, line):
# type: (unicode) -> Tuple[unicode, unicode, unicode]
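        # splits on the first single colon outside any xref, e.g.
        # 'arg1 (int): description' -> ('arg1 (int)', ':', 'description');
        # colons inside markup like :class:`Foo` (and '::') are skipped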
before_colon = []
after_colon = []
colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)): # type: ignore
if found_colon:
after_colon.append(source)
else:
m = _single_colon_regex.search(source)
if (i % 2) == 0 and m:
found_colon = True
colon = source[m.start(): m.end()]
before_colon.append(source[:m.start()])
after_colon.append(source[m.end():])
else:
before_colon.append(source)
return ("".join(before_colon).strip(),
colon,
"".join(after_colon).strip())
def _strip_empty(self, lines):
# type: (List[unicode]) -> List[unicode]
if lines:
start = -1
for i, line in enumerate(lines):
if line:
start = i
break
if start == -1:
lines = []
end = -1
for i in reversed(range(len(lines))):
line = lines[i]
if line:
end = i
break
if start > 0 or end + 1 < len(lines):
lines = lines[start:end + 1]
return lines
class NumpyDocstring(GoogleDocstring):
"""Convert NumPy style docstrings to reStructuredText.
Parameters
----------
docstring : :obj:`str` or :obj:`list` of :obj:`str`
The docstring to parse, given either as a string or split into
individual lines.
config: :obj:`sphinx.ext.napoleon.Config` or :obj:`sphinx.config.Config`
The configuration settings to use. If not given, defaults to the
config object on `app`; or if `app` is not given defaults to the
a new :class:`sphinx.ext.napoleon.Config` object.
Other Parameters
----------------
app : :class:`sphinx.application.Sphinx`, optional
Application object representing the Sphinx process.
what : :obj:`str`, optional
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : :obj:`str`, optional
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : :class:`sphinx.ext.autodoc.Options`, optional
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Example
-------
>>> from sphinx.ext.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Parameters
... ----------
... arg1 : int
... Description of `arg1`
... arg2 : str
... Description of `arg2`
... Returns
... -------
... str
... Description of return value.
... '''
>>> print(NumpyDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
Methods
-------
__str__()
Return the parsed docstring in reStructuredText format.
Returns
-------
str
UTF-8 encoded version of the docstring.
__unicode__()
Return the parsed docstring in reStructuredText format.
Returns
-------
unicode
Unicode version of the docstring.
lines()
Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
# type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
self._directive_sections = ['.. index::']
super(NumpyDocstring, self).__init__(docstring, config, app, what,
name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
line = next(self._line_iter) # type: ignore
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
else:
_name, _type = line, ''
_name, _type = _name.strip(), _type.strip()
_name = self._escape_args_and_kwargs(_name)
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
_desc = self._dedent(self._consume_indented_block(indent))
_desc = self.__class__(_desc, self._config).lines()
return _name, _type, _desc
def _consume_returns_section(self):
# type: () -> List[Tuple[unicode, unicode, List[unicode]]]
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
# type: () -> unicode
section = next(self._line_iter) # type: ignore
if not _directive_regex.match(section):
# Consume the header underline
next(self._line_iter) # type: ignore
return section
def _is_section_break(self):
# type: () -> bool
line1, line2 = self._line_iter.peek(2)
return (not self._line_iter.has_next() or
self._is_section_header() or
['', ''] == [line1, line2] or
(self._is_in_section and
line1 and
not self._is_indented(line1, self._section_indent)))
def _is_section_header(self):
# type: () -> bool
section, underline = self._line_iter.peek(2)
section = section.lower()
if section in self._sections and isinstance(underline, string_types):
return bool(_numpy_section_regex.match(underline)) # type: ignore
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also_section(self, section):
# type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
except ValueError:
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
# type: (List[unicode]) -> List[unicode]
"""
Derived from the NumpyDoc implementation of _parse_see_also.
See Also
--------
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = [] # type: List[unicode]
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line) # type: ignore
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
if not items:
return []
roles = {
'method': 'meth',
'meth': 'meth',
'function': 'func',
'func': 'func',
'class': 'class',
'exception': 'exc',
'exc': 'exc',
'object': 'obj',
'obj': 'obj',
'module': 'mod',
'mod': 'mod',
'data': 'data',
'constant': 'const',
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
} # type: Dict[unicode, unicode]
if self._what is None:
func_role = 'obj' # type: unicode
else:
func_role = roles.get(self._what, '')
lines = [] # type: List[unicode]
last_had_desc = True
for func, desc, role in items:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
lines += ['']
lines += [link]
else:
lines[-1] += ", %s" % link
if desc:
lines += self._indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
lines += ['']
return self._format_admonition('seealso', lines)
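# Illustration (hypothetical docstring input, added for clarity): a numpydoc
# section such as
#
#     See Also
#     --------
#     numpy.dot : Dot product of two arrays.
#
# is reparsed by the method above into a reST ``.. seealso::`` admonition
# whose body links the name (via the computed role, 'obj' by default) and
# indents the description beneath it.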
|
py | 7df6e85cfa47270e8ebfc8b7c80e98f730d922d3 | import urllib.request, json, os, sys
oid = sys.argv[1]
url = "http://app.ecwid.com/api/v1/" + oid + "/products"
data = json.loads(urllib.request.urlopen(url).read())
images = dict((x['id'], x.get('originalImageUrl', None)) for x in data)
if not os.path.exists(oid):
    os.makedirs(oid)
for k in images.keys():
    if images[k] is not None:
        print("Start downloading", images[k])
        urllib.request.urlretrieve(images[k], oid + "/" + str(k) + ".jpg")
        print("Finish downloading", images[k])
|
py | 7df6ebc0ff48c911270e0a9f95dc5c4d70eec4b0 | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
    path('basics/', include('basics.urls')),
]
|
py | 7df6ec0891501241627787828ccab52b61d03875 | from logging import getLogger
import reversion
from datahub.company.models import Company
from datahub.dbmaintenance.management.base import CSVBaseCommand
from datahub.dbmaintenance.utils import parse_bool, parse_uuid
from datahub.search.signals import disable_search_signal_receivers
logger = getLogger(__name__)
class Command(CSVBaseCommand):
"""Command to update Company.great_profile_status."""
@disable_search_signal_receivers(Company)
def _handle(self, *args, **options):
"""
Disables search signal receivers for companies.
        Avoids queuing a huge number of Celery tasks for syncing companies to Elasticsearch.
(Syncing can be manually performed afterwards using sync_es if required.)
"""
return super()._handle(*args, **options)
def _process_row(self, row, simulate=False, **options):
"""
Process one single row.
"""
pk = parse_uuid(row['datahub_company_id'])
company = Company.objects.get(pk=pk)
has_profile = parse_bool(row['has_find_a_supplier_profile'])
is_published = parse_bool(row['is_published_find_a_supplier'])
profile_status = None
if has_profile and is_published:
profile_status = Company.GREAT_PROFILE_STATUSES.published
elif has_profile:
profile_status = Company.GREAT_PROFILE_STATUSES.unpublished
if company.great_profile_status == profile_status:
return
company.great_profile_status = profile_status
if simulate:
return
with reversion.create_revision():
company.save(update_fields=('great_profile_status',))
reversion.set_comment('GREAT profile status updated.')
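# Usage sketch (the management command name comes from this module's filename,
# which is not shown here -- treat "update_great_profile_status" as a
# hypothetical placeholder):
#     python manage.py update_great_profile_status companies.csv --simulate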
|
py | 7df6ec664461b950840bdaa95e19e391adf7da63 | import functools
import itertools
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
import math
import numpy as np
import ohio.ext.pandas
import pandas as pd
import statistics
import typing
from collections import defaultdict
from sqlalchemy.orm import sessionmaker
from aequitas.bias import Bias
from aequitas.fairness import Fairness
from aequitas.group import Group
from aequitas.preprocessing import preprocess_input_df
from . import metrics
# Imported directly as well: inside _flatten_metric_threshold the name
# `metrics` is shadowed by a parameter, so the module attribute would not be
# reachable there.
from .metrics import UnknownMetricError
from .utils import (
db_retry,
sort_predictions_and_labels,
get_subset_table_name,
filename_friendly_hash
)
from triage.util.db import scoped_session
from triage.util.random import generate_python_random_seed
from triage.component.catwalk.storage import MatrixStore
RELATIVE_TOLERANCE = 0.01
SORT_TRIALS = 30
def subset_labels_and_predictions(
subset_df,
labels,
predictions_proba,
protected_df=None,
):
"""Reduce the labels and predictions to only those relevant to the current
subset.
Args:
subset_df (pandas.DataFrame) A dataframe whose index is the entity_ids
and as_of_dates in a subset
labels (pandas.Series) A series of labels with entity_id and as_of_date
as the index
predictions_proba (np.array) An array of predictions for the same
entity_date pairs as the labels and in the same order
protected_df (pandas.DataFrame) A dataframe of protected group attributes
Returns: (pandas.Series, np.array, pandas.DataFrame) The labels, predictions, and protected
group attributes that refer to entity-date pairs in the subset
"""
indexed_predictions = pd.Series(predictions_proba, index=labels.index)
if protected_df is None:
protected_df = pd.DataFrame()
# The subset isn't specific to the cohort, so inner join to the labels/predictions
labels_subset = labels.align(subset_df, join="inner")[0]
predictions_subset = indexed_predictions.align(subset_df, join="inner")[0].values
protected_df_subset = protected_df if protected_df.empty else protected_df.align(subset_df, join="inner")[0]
logger.spam(
f"{len(labels_subset)} entities in subset out of {len(labels)} in matrix.",
)
return (labels_subset, predictions_subset, protected_df_subset)
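# Illustrative note (hypothetical data, not from the original module): if
# `labels` is indexed by entity-date pairs (1, d1), (2, d1), (3, d1) and
# `subset_df` contains rows only for (1, d1) and (3, d1), the inner joins
# above keep labels and predictions for entities 1 and 3, in matching order.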
def query_subset_table(db_engine, as_of_dates, subset_table_name):
"""Queries the subset table to find the entities active at the given
as_of_dates
Args:
db_engine (sqlalchemy.engine) a database engine
as_of_dates (list) the as_of_Dates to query
subset_table_name (str) the name of the table to query
Returns: (pandas.DataFrame) a dataframe indexed by the entity-date pairs
active in the subset
"""
as_of_dates_sql = "[{}]".format(
", ".join("'{}'".format(date.strftime("%Y-%m-%d %H:%M:%S.%f")) for date in as_of_dates)
)
query_string = f"""
with dates as (
select unnest(array{as_of_dates_sql}::timestamp[]) as as_of_date
)
select entity_id, as_of_date, active
from {subset_table_name}
join dates using(as_of_date)
"""
df = pd.DataFrame.pg_copy_from(query_string, connectable=db_engine, parse_dates=["as_of_date"],
index_col=MatrixStore.indices)
return df
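# Illustration (hypothetical date): for as_of_dates = [datetime(2020, 1, 1)],
# as_of_dates_sql becomes "['2020-01-01 00:00:00.000000']", so the CTE reads
#     select unnest(array['2020-01-01 00:00:00.000000']::timestamp[]) as as_of_date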
def generate_binary_at_x(test_predictions, x_value, unit="top_n"):
"""Assign predicted classes based based on top% or absolute rank of score
Args:
test_predictions (np.array) A predictions, sorted by risk score descending
x_value (int) The percentile or absolute value desired
unit (string, default 'top_n') The thresholding method desired,
either percentile or top_n
Returns: (np.array) The predicted classes
"""
len_predictions = len(test_predictions)
if len_predictions == 0:
return np.array([])
if unit == "percentile":
cutoff_index = int(len_predictions * (x_value / 100.00))
else:
cutoff_index = int(x_value)
num_ones = cutoff_index if cutoff_index <= len_predictions else len_predictions
num_zeroes = len_predictions - cutoff_index if cutoff_index <= len_predictions else 0
test_predictions_binary = np.concatenate(
(np.ones(num_ones, np.int8), np.zeros(num_zeroes, np.int8))
)
return test_predictions_binary
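# Hedged usage sketch (values are hypothetical):
#
#     >>> generate_binary_at_x(np.array([0.9, 0.7, 0.4, 0.1]), 2, unit="top_n")
#     array([1, 1, 0, 0], dtype=int8)
#     >>> generate_binary_at_x(np.array([0.9, 0.7, 0.4, 0.1]), 50, unit="percentile")
#     array([1, 1, 0, 0], dtype=int8)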
class MetricDefinition(typing.NamedTuple):
"""A single metric, bound to a particular threshold and parameter combination"""
metric: str
threshold_unit: str
threshold_value: int
parameter_combination: dict
parameter_string: str
class MetricEvaluationResult(typing.NamedTuple):
"""A metric and parameter combination alongside preliminary results.
The 'value' could represent the worst, best, or a random version of tiebreaking.
"""
metric: str
parameter: str
value: float
num_labeled_examples: int
num_labeled_above_threshold: int
num_positive_labels: int
class ModelEvaluator:
"""An object that can score models based on its known metrics"""
# Available metric calculation functions
# Each value is expected to be a function that takes in the following params
# (predictions_proba, predictions_binary, labels, parameters)
# and return a numeric score
available_metrics = {
"precision@": metrics.precision,
"recall@": metrics.recall,
"fbeta@": metrics.fbeta,
"f1": metrics.f1,
"accuracy": metrics.accuracy,
"roc_auc": metrics.roc_auc,
"average precision score": metrics.avg_precision,
"true positives@": metrics.true_positives,
"true negatives@": metrics.true_negatives,
"false positives@": metrics.false_positives,
"false negatives@": metrics.false_negatives,
"fpr@": metrics.fpr,
}
def __init__(
self,
testing_metric_groups,
training_metric_groups,
db_engine,
custom_metrics=None,
bias_config=None,
):
"""
Args:
testing_metric_groups (list) A list of groups of metric/configurations
to use for evaluating all given models
Each entry is a dict, with a list of metrics, and potentially
thresholds and parameter lists. Each metric is expected to
be a key in self.available_metrics
Examples:
testing_metric_groups = [{
'metrics': ['precision@', 'recall@'],
'thresholds': {
'percentiles': [5.0, 10.0],
'top_n': [5, 10]
}
}, {
'metrics': ['f1'],
}, {
'metrics': ['fbeta@'],
'parameters': [{'beta': 0.75}, {'beta': 1.25}]
}]
training_metric_groups (list) metrics to be calculated on training set,
in the same form as testing_metric_groups
db_engine (sqlalchemy.engine)
custom_metrics (dict) Functions to generate metrics
not available by default
Each function is expected take in the following params:
(predictions_proba, predictions_binary, labels, parameters)
and return a numeric score
"""
self.testing_metric_groups = testing_metric_groups
self.training_metric_groups = training_metric_groups
self.db_engine = db_engine
self.bias_config = bias_config
if custom_metrics:
self._validate_metrics(custom_metrics)
self.available_metrics.update(custom_metrics)
@property
def sessionmaker(self):
return sessionmaker(bind=self.db_engine)
def _validate_metrics(self, custom_metrics):
for name, met in custom_metrics.items():
if not hasattr(met, "greater_is_better"):
raise ValueError(
f"Custom metric {name} missing greater_is_better "
f"attribute"
)
elif met.greater_is_better not in (True, False):
raise ValueError(
"For custom metric {name} greater_is_better must be "
"boolean True or False"
)
def _build_parameter_string(
self,
threshold_unit,
threshold_value,
parameter_combination,
threshold_specified_by_user,
):
"""Encode the metric parameters and threshold into a short, human-parseable string
Examples are: '100_abs', '5_pct'
Args:
threshold_unit (string) the type of threshold, either 'percentile' or 'top_n'
threshold_value (int) the numeric threshold,
parameter_combination (dict) The non-threshold parameter keys and values used
Usually this will be empty, but an example would be {'beta': 0.25}
Returns: (string) A short, human-parseable string
"""
full_params = parameter_combination.copy()
if threshold_specified_by_user:
short_threshold_unit = "pct" if threshold_unit == "percentile" else "abs"
full_params[short_threshold_unit] = threshold_value
parameter_string = "/".join(
["{}_{}".format(val, key) for key, val in full_params.items()]
)
return parameter_string
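    # Illustration (hypothetical inputs): threshold_unit='percentile',
    # threshold_value=5 and parameter_combination={'beta': 0.25} yield
    # '0.25_beta/5_pct'; a bare top_n threshold of 100 yields '100_abs'.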
def _filter_nan_labels(self, predicted_classes: np.array, labels: np.array):
"""Filter missing labels and their corresponding predictions
Args:
predicted_classes (list) Predicted binary classes, of same length as labels
labels (list) Labels, maybe containing NaNs
Returns: (tuple) Copies of the input lists, with NaN labels removed
"""
nan_mask = np.isfinite(labels)
return (predicted_classes[nan_mask], labels[nan_mask])
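    # Sketch of the filtering above (hypothetical arrays):
    #     >>> preds, labels = np.array([1, 0, 1]), np.array([1.0, np.nan, 0.0])
    #     >>> np.isfinite(labels)
    #     array([ True, False,  True])
    # so the middle prediction/label pair is dropped.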
def _flatten_metric_threshold(
self,
metrics,
parameters,
threshold_unit,
threshold_value,
threshold_specified_by_user=True,
):
"""Flatten lists of metrics and parameters for an individual threshold
into individual metric definitions.
Args:
metrics (list) names of metric to compute
parameters (list) dicts holding parameters to pass to metrics
threshold_unit (string) the type of threshold, either 'percentile' or 'top_n'
threshold_value (int) the numeric threshold,
threshold_specified_by_user (bool) Whether or not there was any threshold
specified by the user. Defaults to True
Returns: (list) MetricDefinition objects
Raises: UnknownMetricError if a given metric is not present in
self.available_metrics
"""
metric_definitions = []
for metric in metrics:
if metric not in self.available_metrics:
                raise UnknownMetricError()
for parameter_combination in parameters:
# convert the thresholds/parameters into something
# more readable
parameter_string = self._build_parameter_string(
threshold_unit=threshold_unit,
threshold_value=threshold_value,
parameter_combination=parameter_combination,
threshold_specified_by_user=threshold_specified_by_user,
)
result = MetricDefinition(
metric=metric,
parameter_string=parameter_string,
parameter_combination=parameter_combination,
threshold_unit=threshold_unit,
threshold_value=threshold_value
)
metric_definitions.append(result)
return metric_definitions
def _flatten_metric_config_group(self, group):
"""Flatten lists of metrics, parameters, and thresholds into individual metric definitions
Args:
group (dict) A configuration dictionary for the group.
Should contain the key 'metrics', and optionally 'parameters' or 'thresholds'
Returns: (list) MetricDefinition objects
"""
logger.debug(f"Creating evaluations for metric group {group}")
parameters = group.get("parameters", [{}])
generate_metrics = functools.partial(
self._flatten_metric_threshold,
metrics=group["metrics"],
parameters=parameters,
)
metrics = []
if "thresholds" not in group:
logger.notice(
"Not a thresholded group, generating evaluation based on all predictions"
)
metrics = metrics + generate_metrics(
threshold_unit="percentile",
threshold_value=100,
threshold_specified_by_user=False,
)
for pct_thresh in group.get("thresholds", {}).get("percentiles", []):
logger.debug(f"Processing percent threshold {pct_thresh}")
metrics = metrics + generate_metrics(
threshold_unit="percentile", threshold_value=pct_thresh
)
for abs_thresh in group.get("thresholds", {}).get("top_n", []):
logger.debug(f"Processing absolute threshold {abs_thresh}")
metrics = metrics + generate_metrics(
threshold_unit="top_n", threshold_value=abs_thresh
)
return metrics
def _flatten_metric_config_groups(self, metric_config_groups):
"""Flatten lists of metrics, parameters, and thresholds into individual metric definitions
Args:
metric_config_groups (list) A list of metric group configuration dictionaries
Each dict should contain the key 'metrics', and optionally 'parameters' or 'thresholds'
Returns:
(list) MetricDefinition objects
"""
return [
item
for group in metric_config_groups
for item in self._flatten_metric_config_group(group)
]
def metric_definitions_from_matrix_type(self, matrix_type):
"""Retrieve the correct metric config groups for the matrix type and flatten them into metric definitions
Args:
matrix_type (catwalk.storage.MatrixType) A matrix type definition
Returns:
(list) MetricDefinition objects
"""
if matrix_type.is_test:
return self._flatten_metric_config_groups(self.testing_metric_groups)
else:
return self._flatten_metric_config_groups(self.training_metric_groups)
def needs_evaluations(self, matrix_store, model_id, subset_hash=''):
"""Returns whether or not all the configured metrics are present in the
database for the given matrix and model.
Args:
matrix_store (triage.component.catwalk.storage.MatrixStore)
model_id (int) A model id
subset_hash (str) An identifier for the subset to be evaluated
Returns:
(bool) whether or not this matrix and model are missing any evaluations in the db
"""
# assemble a list of evaluation objects from the config
# by running the evaluation code with an empty list of predictions and labels
eval_obj = matrix_store.matrix_type.evaluation_obj
matrix_type = matrix_store.matrix_type
metric_definitions = self.metric_definitions_from_matrix_type(matrix_type)
# assemble a list of evaluation objects from the database
# by querying the unique metrics and parameters relevant to the passed-in matrix
session = self.sessionmaker()
evaluation_objects_in_db = session.query(eval_obj).filter_by(
model_id=model_id,
evaluation_start_time=matrix_store.as_of_dates[0],
evaluation_end_time=matrix_store.as_of_dates[-1],
as_of_date_frequency=matrix_store.metadata["as_of_date_frequency"],
subset_hash=subset_hash,
).distinct(eval_obj.metric, eval_obj.parameter).all()
# The list of needed metrics and parameters are all the unique metric/params from the config
# not present in the unique metric/params from the db
evals_needed = bool(
{(met.metric, met.parameter_string) for met in metric_definitions} -
{(obj.metric, obj.parameter) for obj in evaluation_objects_in_db}
)
session.close()
if evals_needed:
logger.notice(f"Needed evaluations for model {model_id} on matrix {matrix_store.uuid} are missing")
return True
# now check bias config if there
# if no bias config, no aequitas audits needed, so just return False at this point
if not self.bias_config:
logger.notice(f"No aequitas audit configured, so no evaluation needed")
return False
# if we do have bias config, return True. Too complicated with aequitas' visibility
# at present to check whether all the needed records are needed.
return True
def _compute_evaluations(self, predictions_proba, labels, metric_definitions):
"""Compute evaluations for a set of predictions and labels
Args:
predictions_proba (np.array) predictions, sorted by score descending
labels (np.array) labels, sorted however the caller wishes to break ties
metric_definitions (list of MetricDefinition objects) metrics to compute
Returns: (list of MetricEvaluationResult objects) One result for each metric definition
"""
evals = []
        for (threshold_unit, threshold_value), metrics_for_threshold in \
                itertools.groupby(metric_definitions, lambda m: (m.threshold_unit, m.threshold_value)):
predicted_classes = generate_binary_at_x(
predictions_proba, threshold_value, unit=threshold_unit
)
# filter out null labels
predicted_classes_with_labels, present_labels = self._filter_nan_labels(
predicted_classes, labels
)
num_labeled_examples = len(present_labels)
num_labeled_above_threshold = np.count_nonzero(predicted_classes_with_labels)
num_positive_labels = np.count_nonzero(present_labels)
for metric_def in metrics_for_threshold:
# using threshold configuration, convert probabilities to predicted classes
if len(predictions_proba) == 0:
logger.warning(
f"{metric_def.metric} not defined for parameter {metric_def.parameter_combination} because no entities "
"are in the subset for this matrix. Inserting NULL for value."
)
value = None
else:
try:
value = self.available_metrics[metric_def.metric](
predictions_proba,
predicted_classes_with_labels,
present_labels,
metric_def.parameter_combination,
)
except ValueError:
logger.warning(
f"{metric_def.metric} not defined for parameter {metric_def.parameter_combination} because all labels "
"are the same. Inserting NULL for value."
)
value = None
result = MetricEvaluationResult(
metric=metric_def.metric,
parameter=metric_def.parameter_string,
value=value,
num_labeled_examples=num_labeled_examples,
num_labeled_above_threshold=num_labeled_above_threshold,
num_positive_labels=num_positive_labels,
)
evals.append(result)
return evals
def evaluate(self, predictions_proba, matrix_store, model_id, protected_df=None, subset=None):
"""Evaluate a model based on predictions, and save the results
Args:
predictions_proba (np.array) List of prediction probabilities
matrix_store (catwalk.storage.MatrixStore) a wrapper for the
prediction matrix and metadata
model_id (int) The database identifier of the model
subset (dict) A dictionary containing a query and a
name for the subset to evaluate on, if any
protected_df (pandas.DataFrame) A dataframe with protected group attributes
"""
if (protected_df is not None) and (not protected_df.empty):
protected_df = protected_df.align(matrix_store.labels, join="inner", axis=0)[0]
# If we are evaluating on a subset, we want to get just the labels and
# predictions for the included entity-date pairs
if subset:
logger.verbose(f"Subsetting labels and predictions of model {model_id} on matrix {matrix_store.uuid}")
labels, predictions_proba, protected_df = subset_labels_and_predictions(
subset_df=query_subset_table(
self.db_engine,
matrix_store.as_of_dates,
get_subset_table_name(subset),
),
predictions_proba=predictions_proba,
labels=matrix_store.labels,
protected_df=protected_df
)
subset_hash = filename_friendly_hash(subset)
else:
logger.debug(f"Using all the predictions of model {model_id} on matrix {matrix_store.uuid} for evaluation (i.e. no subset)")
labels = matrix_store.labels
subset_hash = ""
labels = np.array(labels)
matrix_type = matrix_store.matrix_type
metric_defs = self.metric_definitions_from_matrix_type(matrix_type)
logger.spam(f"Found {len(metric_defs)} metric definitions total")
# 1. get worst sorting
predictions_proba_worst, labels_worst = sort_predictions_and_labels(
predictions_proba=predictions_proba,
labels=labels,
tiebreaker='worst',
)
worst_lookup = {
(eval.metric, eval.parameter): eval
for eval in
self._compute_evaluations(predictions_proba_worst, labels_worst, metric_defs)
}
logger.debug(f'Predictions from {model_id} sorted by worst case scenario, i.e. all negative and NULL labels first')
# 2. get best sorting
predictions_proba_best, labels_best = sort_predictions_and_labels(
predictions_proba=predictions_proba_worst,
labels=labels_worst,
tiebreaker='best',
)
best_lookup = {
(eval.metric, eval.parameter): eval
for eval in
self._compute_evaluations(predictions_proba_best, labels_best, metric_defs)
}
logger.debug(f'Predictions from {model_id} sorted by best case scenario, i.e. all positive labels first, NULL labels at the end')
evals_without_trials = dict()
# 3. figure out which metrics have too far of a distance between best and worst
# and need random trials
metric_defs_to_trial = []
for metric_def in metric_defs:
worst_eval = worst_lookup[(metric_def.metric, metric_def.parameter_string)]
best_eval = best_lookup[(metric_def.metric, metric_def.parameter_string)]
if worst_eval.value is None or best_eval.value is None or math.isclose(worst_eval.value, best_eval.value, rel_tol=RELATIVE_TOLERANCE):
evals_without_trials[(worst_eval.metric, worst_eval.parameter)] = worst_eval.value
else:
metric_defs_to_trial.append(metric_def)
# 4. get average of n random trials
logger.debug(
f"For model {model_id}, {len(metric_defs_to_trial)} metric definitions need {SORT_TRIALS} random trials each as best/worst evals were different"
)
random_eval_accumulator = defaultdict(list)
for _ in range(0, SORT_TRIALS):
sort_seed = generate_python_random_seed()
predictions_proba_random, labels_random = sort_predictions_and_labels(
predictions_proba=predictions_proba_worst,
labels=labels_worst,
tiebreaker='random',
sort_seed=sort_seed
)
for random_eval in self._compute_evaluations(
predictions_proba_random,
labels_random,
metric_defs_to_trial
):
random_eval_accumulator[(random_eval.metric, random_eval.parameter)].append(random_eval.value)
# 5. flatten best, worst, stochastic results for each metric definition
# into database records
evaluation_start_time = matrix_store.as_of_dates[0]
evaluation_end_time = matrix_store.as_of_dates[-1]
as_of_date_frequency = matrix_store.metadata["as_of_date_frequency"]
matrix_uuid = matrix_store.uuid
evaluations = []
for metric_def in metric_defs:
metric_key = (metric_def.metric, metric_def.parameter_string)
if metric_key in evals_without_trials:
stochastic_value = evals_without_trials[metric_key]
standard_deviation = 0
num_sort_trials = 0
else:
trial_results = [value for value in random_eval_accumulator[metric_key] if value is not None]
stochastic_value = statistics.mean(trial_results)
standard_deviation = statistics.stdev(trial_results)
num_sort_trials = len(trial_results)
evaluation = matrix_type.evaluation_obj(
metric=metric_def.metric,
parameter=metric_def.parameter_string,
num_labeled_examples=worst_lookup[metric_key].num_labeled_examples,
num_labeled_above_threshold=worst_lookup[metric_key].num_labeled_above_threshold,
num_positive_labels=worst_lookup[metric_key].num_positive_labels,
worst_value=worst_lookup[metric_key].value,
best_value=best_lookup[metric_key].value,
stochastic_value=stochastic_value,
num_sort_trials=num_sort_trials,
standard_deviation=standard_deviation,
)
evaluations.append(evaluation)
self._write_to_db(
model_id,
subset_hash,
evaluation_start_time,
evaluation_end_time,
as_of_date_frequency,
matrix_store.uuid,
evaluations,
matrix_type.evaluation_obj,
)
if protected_df is not None:
self._write_audit_to_db(
model_id=model_id,
protected_df=protected_df,
predictions_proba=predictions_proba_worst,
labels=labels_worst,
tie_breaker='worst',
subset_hash=subset_hash,
matrix_type=matrix_type,
evaluation_start_time=evaluation_start_time,
evaluation_end_time=evaluation_end_time,
matrix_uuid=matrix_store.uuid)
self._write_audit_to_db(
model_id=model_id,
protected_df=protected_df,
predictions_proba=predictions_proba_best,
labels=labels_best,
tie_breaker='best',
subset_hash=subset_hash,
matrix_type=matrix_type,
evaluation_start_time=evaluation_start_time,
evaluation_end_time=evaluation_end_time,
matrix_uuid=matrix_store.uuid)
def _write_audit_to_db(
self,
model_id,
protected_df,
predictions_proba,
labels,
tie_breaker,
subset_hash,
matrix_type,
evaluation_start_time,
evaluation_end_time,
matrix_uuid
):
"""
Runs the bias audit and saves the result in the bias table.
Args:
model_id (int) primary key of the model
protected_df (pandas.DataFrame) A dataframe with protected group attributes:
predictions_proba (np.array) List of prediction probabilities
labels (pandas.Series): List of labels
tie_breaker: 'best' or 'worst' case tiebreaking rule that the predictions and labels were sorted by
subset_hash (str) the hash of the subset, if any, that the
evaluation is made on
matrix_type (triage.component.catwalk.storage.MatrixType)
The type of matrix used
evaluation_start_time (pandas._libs.tslibs.timestamps.Timestamp)
first as_of_date included in the evaluation period
evaluation_end_time (pandas._libs.tslibs.timestamps.Timestamp) last
as_of_date included in the evaluation period
matrix_uuid: the uuid of the matrix
Returns:
"""
if protected_df.empty:
return
# to preprocess aequitas requires the following columns:
# score, label value, model_id, protected attributes
# fill out the protected_df, which just has protected attributes at this point
protected_df = protected_df.copy()
protected_df['model_id'] = model_id
protected_df['score'] = predictions_proba
protected_df['label_value'] = labels
aequitas_df, attr_cols_input = preprocess_input_df(protected_df)
# create group crosstabs
g = Group()
score_thresholds = {}
score_thresholds['rank_abs'] = self.bias_config['thresholds'].get('top_n', [])
# convert 0-100 percentile to 0-1 that Aequitas expects
score_thresholds['rank_pct'] = [value / 100.0 for value in self.bias_config['thresholds'].get('percentiles', [])]
groups_model, attr_cols = g.get_crosstabs(aequitas_df,
score_thresholds=score_thresholds,
attr_cols=attr_cols_input)
# analyze bias from reference groups
bias = Bias()
ref_groups_method = self.bias_config.get('ref_groups_method', None)
if ref_groups_method == 'predefined' and self.bias_config['ref_groups']:
bias_df = bias.get_disparity_predefined_groups(groups_model, aequitas_df, self.bias_config['ref_groups'])
elif ref_groups_method == 'majority':
bias_df = bias.get_disparity_major_group(groups_model, aequitas_df)
else:
bias_df = bias.get_disparity_min_metric(groups_model, aequitas_df)
# analyze fairness for each group
f = Fairness(tau=0.8) # the default fairness threshold is 0.8
group_value_df = f.get_group_value_fairness(bias_df)
group_value_df['subset_hash'] = subset_hash
group_value_df['tie_breaker'] = tie_breaker
group_value_df['evaluation_start_time'] = evaluation_start_time
group_value_df['evaluation_end_time'] = evaluation_end_time
group_value_df['matrix_uuid'] = matrix_uuid
group_value_df = group_value_df.rename(index=str, columns={"score_threshold": "parameter", "for": "for_"})
if group_value_df.empty:
raise ValueError(f"""
Bias audit: aequitas_audit() failed.
Returned empty dataframe for model_id = {model_id}, and subset_hash = {subset_hash}
and matrix_type = {matrix_type}""")
with scoped_session(self.db_engine) as session:
for index, row in group_value_df.iterrows():
session.query(matrix_type.aequitas_obj).filter_by(
model_id=row['model_id'],
evaluation_start_time=row['evaluation_start_time'],
evaluation_end_time=row['evaluation_end_time'],
subset_hash=row['subset_hash'],
parameter=row['parameter'],
tie_breaker=row['tie_breaker'],
matrix_uuid=row['matrix_uuid'],
attribute_name=row['attribute_name'],
attribute_value=row['attribute_value']
).delete()
session.bulk_insert_mappings(matrix_type.aequitas_obj, group_value_df.to_dict(orient="records"))
@db_retry
def _write_to_db(
self,
model_id,
subset_hash,
evaluation_start_time,
evaluation_end_time,
as_of_date_frequency,
matrix_uuid,
evaluations,
evaluation_table_obj,
):
"""Write evaluation objects to the database
Binds the model_id as as_of_date to the given ORM objects
and writes them to the database
Args:
model_id (int) primary key of the model
subset_hash (str) the hash of the subset, if any, that the
evaluation is made on
evaluation_start_time (pandas._libs.tslibs.timestamps.Timestamp)
first as_of_date included in the evaluation period
evaluation_end_time (pandas._libs.tslibs.timestamps.Timestamp) last
as_of_date included in the evaluation period
as_of_date_frequency (str) the frequency with which as_of_dates
occur between the evaluation_start_time and evaluation_end_time
evaluations (list) results_schema.TestEvaluation or TrainEvaluation
objects
evaluation_table_obj (schema.TestEvaluation or TrainEvaluation)
specifies to which table to add the evaluations
"""
with scoped_session(self.db_engine) as session:
session.query(evaluation_table_obj).filter_by(
model_id=model_id,
evaluation_start_time=evaluation_start_time,
evaluation_end_time=evaluation_end_time,
as_of_date_frequency=as_of_date_frequency,
subset_hash=subset_hash
).delete()
for evaluation in evaluations:
evaluation.model_id = model_id
evaluation.as_of_date_frequency = as_of_date_frequency
evaluation.subset_hash = subset_hash
evaluation.evaluation_start_time = evaluation_start_time
evaluation.evaluation_end_time = evaluation_end_time
                evaluation.matrix_uuid = matrix_uuid
session.add(evaluation)
|
py | 7df6edd5a4b0680c0698c638689ad627fadb0fb2 | # Copyright (C) 2018 Pierre Jean Fichet
# <pierrejean dot fichet at posteo dot net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import re
def error(player, text):
"Message detailing a login error."
player.client.send("<p class='error'>{}</p>".format(text))
def info(player, text):
"Game information sent to a player."
player.client.send("<p class='info'>{}</p>".format(text))
def fmt(character, title, text):
character.player.client.send("<p><b>{}</b>. — {}</p>".format(title, text))
def _has_name(name, text):
"Check if a player is inserting his character name in his pose."
    name = name.lower()
    if re.search("/il|/elle|/{}".format(re.escape(name)), text, re.IGNORECASE):
return True
else:
return False
def pose(from_char, text):
"Print a short pose or what some games call an action."
if not _has_name(from_char.data['name'], text):
info(from_char.player,
"""<i>/{}</i>, <i>/il</il> ou <il>/elle</il>,
doit apparaître dans la pose."""
.format(from_char.data['name']))
return
if text[-1] in ('.', '!', '?'):
text = text[:-1]
for to_char in from_char.room.characters:
newtext = expose_format(from_char, to_char, text)
to_char.player.client.send("<p><b>{}</b>.</p>".format(newtext))
from_char.data['pose'] = text
def expose(from_char, text):
"Print a long expose, ie an emote."
if not _has_name(from_char.data['name'], text):
info(from_char.player,
"""<i>/{}</i>, <i>/il</il> ou <il>/elle</il>,
doit apparaître dans l'exposition."""
.format(from_char.data['name']))
return
for to_char in from_char.room.characters:
newtext = expose_format(from_char, to_char, text)
to_char.player.client.send("<p><b>{}</b>. — {}</p>".format(
to_char.remember.get_remember(from_char), newtext))
# keep track of last emote time
from_char.room.rp()
def off_topic(from_char, text):
"Off topic, or out of character, communication."
for to_char in from_char.room.characters:
newtext = expose_format(from_char, to_char, text)
to_char.player.client.send(
"<p class='off_topic'>{}</p>".format(newtext))
def expose_format(from_char, to_char, text):
"""Format an expose.
We want to subsitute /keyword with a character name.
The difficulty is that "keyword" is only valid from the
sender point of view, and the character name depends on
the recipient point of view.
"""
# First, we define a backend function for re.sub()
# we have to place it here, because the backend only accepts
# one argument, and we need to pass it from_char and to_char.
def find_name(matchobj):
keyword = matchobj.group(0)
keyword = keyword[1:] # removes the '/'
# /Il /Elle /il /elle refers to the sender's character
if keyword in ('Il', 'Elle', 'il', 'elle'):
if keyword[0] in ('I', 'E'):
return to_char.remember.get_remember(from_char).capitalize()
return to_char.remember.get_remember(from_char)
# Otherwise, check who that keyword may refer to.
for char in from_char.room.characters:
# Search who that keyword refers to from the sender
# point of view.
if keyword.lower() in from_char.remember.get_remember(char).lower():
# Search how that character is known from the
# recipient point of view
return to_char.remember.get_remember(char)
# If nothing, returns the keyword itself.
return keyword
# Now, we substitute, and call the find_name function
text = re.sub("/\w+", find_name, text)
return text
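# Illustration (hypothetical characters, added for clarity): if Bob sends
# "/il salue /marc" and the recipient remembers Bob as "un inconnu" and Marc
# as "Marc", the text renders for that recipient as "un inconnu salue Marc":
# each /keyword is resolved from the sender's memory, then re-expressed from
# the recipient's point of view.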
|
py | 7df6ee2db07e1dd4c765de4b6f600ee89932c6a3 | import databaseInfo as db
import pymysql
import re |
py | 7df6efbb8c2c2b2bb8f1b3aae567f0f7a2d07039 | import pyttsx3
import os
import speech_recognition as sr
import webbrowser
import subprocess
import datetime
import smtplib
#defining the engine
engine=pyttsx3.init()
voices=engine.getProperty('voices')
#voice ids
voice_id = "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0"
engine.setProperty('voice',voice_id)
engine.setProperty('rate', 180)
engine.runAndWait()
#speech funtion
def speak(text):
engine.say(text)
engine.runAndWait()
def command():
r=sr.Recognizer()
with sr.Microphone() as source:
print("listening...")
audio = r.listen(source)
    try:
        query = r.recognize_google(audio, language='en-in')
    except Exception:
        speak("Can you say that again? Please")
        query = None
    return query
""" def setup():
speak("What would you like me to call you?")
mastername =
return mastername
"""
MASTER = "Shaan"
def Wakeup():
hour = int(datetime.datetime.now().hour)
    if hour >= 0 and hour < 12:
        speak("Good morning " + MASTER)
    elif hour >= 12 and hour < 15:
        speak("Good afternoon " + MASTER)
    else:
        speak("Good evening " + MASTER)
#ADDITIONAL STUFF
#system programs to open - enter your online url links and application path.
url_youtube = 'https://www.youtube.com/'
url_netflix = 'https://www.netflix.com/browse'
url_whatsapp = 'https://web.whatsapp.com/'  # more destinations can be added here
if __name__ == "__main__":
Wakeup()
    while True:
        query = command()
        if not query:
            continue
        query = query.lower()
if 'tell me about' in query.lower():
query=query.replace("tell me about", "")
results=wikipedia.summary(query, sentences = 2)
speak(results)
elif 'open youtube' in query.lower():
webbrowser.open_new(url_youtube)
|
py | 7df6f040321310b7773154fbaea2f2f2ac5edd00 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval Xception."""
import os
import numpy as np
from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export
from src.Xception import xception
from src.model_utils.config import config as args, config_gpu, config_ascend
from src.model_utils.moxing_adapter import moxing_wrapper
def modelarts_pre_process():
'''modelarts pre process function.'''
args.ckpt_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.ckpt_file)
args.file_name = os.path.join(args.output_path, args.file_name)
@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
'''export function'''
if args.device_target == "Ascend":
config = config_ascend
elif args.device_target == "GPU":
config = config_gpu
else:
raise ValueError("Unsupported device_target.")
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
context.set_context(device_id=args.device_id)
net = xception(class_num=config.class_num)
# load checkpoint
param_dict = load_checkpoint(args.ckpt_file)
load_param_into_net(net, param_dict)
net.set_train(False)
image = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))
export(net, image, file_name=args.file_name, file_format=args.file_format)
if __name__ == "__main__":
run_export()
|
py | 7df6f05d1361f4982b63d59526249ac2f0537db9 | ficha = list()
while True:
nome = str(input('Nome do aluno: '))
nota1 = float(input('Digite nota 1: '))
nota2 = float(input('Digite nota 2: '))
media = (nota2 + nota1) / 2
ficha.append([nome, [nota1, nota2], media])
resp = str(input('Quer continuar? [S/N] '))
if resp in 'Nn':
break
print('-=' * 40)
print('{:<4}{:<10}{:>8}'.format("No.", "NOME", "MEDIA"))
print('-=' * 40)
for i, a in enumerate(ficha):
print(f'{i:<4}{a[0]:<10}{a[2]:>8.1f}')
while True:
print('_'* 35)
    opc = int(input('Mostrar nota individual? (999 para finalizar): '))
if opc == 999:
print('FINALIZADO...')
break
if opc <= len(ficha) - 1:
print(f'Notas de {ficha[opc][0]} são {ficha[opc][1]}')
print('<<< VOLTE SEMPRE >>>') |
py | 7df6f14898c517300cfb6cd326a37301c5db6885 | import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import concurrent.futures
import asyncio
from functools import partial
import math
class Part:
def __init__(self, **kwargs):
self.name = kwargs.get("name")
self.url = kwargs.get("url")
self.type = kwargs.get("type")
self.price = kwargs.get("price")
self.image = kwargs.get("image")
class PCPPList:
def __init__(self, **kwargs):
self.parts = kwargs.get("parts")
self.wattage = kwargs.get("wattage")
self.total = kwargs.get("total")
self.url = kwargs.get("url")
self.compatibility = kwargs.get("compatibility")
class Product(Part):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.specs = kwargs.get("specs")
self.price_list = kwargs.get("price_list")
self.rating = kwargs.get("rating")
self.reviews = kwargs.get("reviews")
self.compatible_parts = kwargs.get("compatible_parts")
class Price:
def __init__(self, **kwargs):
self.value = kwargs.get("value")
self.seller = kwargs.get("seller")
self.seller_icon = kwargs.get("seller_icon")
self.url = kwargs.get("url")
self.base_value = kwargs.get("base_value")
self.in_stock = kwargs.get("in_stock")
class Review:
def __init__(self, **kwargs):
self.author = kwargs.get("author")
self.author_url = kwargs.get("author_url")
self.author_icon = kwargs.get("author_icon")
self.points = kwargs.get("points")
self.created_at = kwargs.get("created_at")
self.rating = kwargs.get("rating")
self.content = kwargs.get("content")
class Verification(Exception):
pass
class Scraper:
def __init__(self, **kwargs):
headers_dict = kwargs.get("headers", {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36 Edg/88.0.705.63', 'cookie': 'xsessionid=8gxgh7l25gwr276aregdd4qnj7zhmxwb; xcsrftoken=o7wXfkFMvIMf5SKShBaC4E0KJ7anQ1mdzHuZ4G6sWwMH2gSbcsZn5YdneEKo8fiv; xgdpr-consent=allow; __cfduid=d8b6350b0033bccdde51da015aaf07f381611344324; cf_clearance=d8b834d4bd7bf761c45622d38c027bc6e0d93f24-1612772344-0-150'})
if not isinstance(headers_dict, dict):
raise ValueError("Headers kwarg has to be a dict!")
self.headers = headers_dict
def make_soup(self, url) -> BeautifulSoup:
# sends a request to the URL
page = requests.get(url, headers=self.headers)
# gets the HTML code for the website and parses it using Python's built in HTML parser
soup = BeautifulSoup(page.content, 'html.parser')
if "Verification" in soup.find(class_="pageTitle").get_text():
raise Verification(f"You are being rate limited by PCPartPicker! Slow down your rate of requests, and complete the captcha at this URL: {url}")
# returns the HTML
return soup
def fetch_list(self, list_url) -> PCPPList:
        # checks if it's a PCPartPicker list and raises an exception if it's not, or if the list is empty
if not "pcpartpicker.com/list/" in list_url or list_url.endswith("/list/"):
raise Exception(f"'{list_url}' is an invalid PCPartPicker list!")
# fetches the HTML code for the website
try:
soup = self.make_soup(list_url)
except requests.exceptions.ConnectionError:
raise ValueError("Invalid list URL! Max retries exceeded with URL.")
# gets the code with the table containing all the parts
table = soup.find_all("table", {"class": "xs-col-12"}, limit=1)[0]
# creates an empty list to put the Part objects inside
parts = []
# iterates through every part in the table
for item in table.find_all('tr', class_="tr__product"):
# creates a new part object using values obtained from the tables' rows
part_name = item.find(class_="td__name").get_text().strip('\n').replace('\n', '')
if "Note:" in part_name:
part_name = part_name.split("Note:")[0]
if "From parametric filter:" in part_name:
part_name = part_name.split("From parametric filter:")[0]
if "From parametric selection:" in part_name:
part_name = part_name.split("From parametric selection:")[0]
part_object = Part(
name=part_name,
price=item.find(class_="td__price").get_text().strip('\n').replace("No Prices Available", "None").replace("Price", "").strip('\n'),
type=item.find(class_="td__component").get_text().strip('\n').strip(),
image=("https://" + item.find("img", class_="")["src"]).replace("https://https://", "https://")
)
# converts string representation of 'None' to NoneType
if part_object.price == 'None':
part_object.price = None
# checks if the product row has a product URL inside
if 'href' in str(item.find(class_="td__name")):
# adds the product URL to the Part object
part_object.url = "https://" + urlparse(list_url).netloc + item.find(class_="td__name").find("a")[
"href"].replace("/placeholder-", "")
# adds the part object to the list
parts.append(part_object)
# gets the estimated wattage for the list
wattage = soup.find(class_="partlist__keyMetric").get_text().replace("Estimated Wattage:", "").strip('\n')
# gets the total cost for the list
total_cost = table.find("tr", class_="tr__total tr__total--final").find(class_="td__price").get_text()
# gets the compatibility notes for the list
compatibilitynotes = [a.get_text().strip('\n').replace("Note:", "").replace("Warning!", "") for a in
soup.find_all("li", class_=["info-message", "warning-message"])]
# returns a PCPPList object containing all the information
return PCPPList(parts=parts, wattage=wattage, total=total_cost, url=list_url, compatibility=compatibilitynotes)
def part_search(self, search_term, **kwargs) -> Part:
search_term = search_term.replace(' ', '+')
limit = kwargs.get("limit", 20)
# makes sure limit is an integer, raises ValueError if it's not
if not isinstance(limit, int):
raise ValueError("Product limit must be an integer!")
# checks if the region given is a string, and checks if it is a country code
if not isinstance(kwargs.get("region", "us"), str) or len(kwargs.get("region", "us")) != 2:
raise ValueError("Invalid region!")
if limit < 0:
raise ValueError("Limit out of range.")
# constructs the search URL
if kwargs.get("region") in ("us", None):
search_link = f"https://pcpartpicker.com/search/?q={search_term}"
else:
search_link = f"https://{kwargs.get('region', '')}.pcpartpicker.com/search/?q={search_term}"
iterations = math.ceil(limit/20)
# creates an empty list for the part objects to be stored in
parts = []
for i in range(iterations):
try:
soup = self.make_soup(f"{search_link}&page={i + 1}")
except requests.exceptions.ConnectionError:
raise ValueError("Invalid region! Max retries exceeded with URL.")
# checks if the page redirects to a product page
if soup.find(class_="pageTitle").get_text() != "Product Search":
# creates a part object with the information from the product page
part_object = Part(
name = soup.find(class_="pageTitle").get_text(),
url = search_link,
price = None
)
# searches for the pricing table
table = soup.find("table", class_="xs-col-12")
# loops through every row in the table
for row in table.find_all("tr"):
# first conditional statement makes sure its not the top row with the table parameters, second checks if the product is out of stock
if not "td__availability" in str(row) or "Out of stock" in row.find(class_="td__availability").get_text():
# skips this iteration
continue
# sets the price of the price object to the price
part_object.price = row.find(class_="td__finalPrice").get_text().strip('\n').strip("+")
break
# returns the part object
return [part_object]
# gets the section of the website's code with the search results
section = soup.find("section", class_="search-results__pageContent")
if "No results" in section.get_text():
break
# iterates through all the HTML elements that match the given the criteria
for product in section.find_all("ul", class_="list-unstyled"):
# extracts the product data from the HTML code and creates a part object with that information
part_object = Part(
name = product.find("p", class_="search_results--link").get_text().strip(),
url = "https://" + urlparse(search_link).netloc + product.find("p", class_="search_results--link").find("a", href=True)["href"],
image = ("https://" + product.find("img")["src"].strip('/')).replace("https://https://", "https://")
)
try:
part_object.price = product.find(class_="product__link product__link--price").get_text()
except AttributeError:
part_object.price = None
# adds the part object to the list
parts.append(part_object)
# returns the part objects
return parts[:kwargs.get("limit", 20)]
def fetch_product(self, part_url) -> Product:
# checks if the URL is invalid
if not "pcpartpicker.com" in part_url and "/product/" in part_url:
raise ValueError("Invalid product URL!")
try:
soup = self.make_soup(part_url)
except requests.exceptions.ConnectionError:
raise ValueError("Invalid product URL! Max retries exceeded with URL.")
specs_block = soup.find(class_="block xs-hide md-block specs")
specs = {}
prices = []
price = None
# finds the table with the pricing information
table = soup.find("table", class_="xs-col-12")
section = table.find("tbody")
for row in section.find_all("tr"):
# skip over empty row
if "tr--noBorder" in str(row):
continue
# creates a Price object with all the information
price_object = Price(
value=row.find(class_="td__finalPrice").get_text().strip('\n'),
seller=row.find(class_="td__logo").find("img")["alt"],
seller_icon=("https://" + row.find(class_="td__logo").find("img")["src"][1:]).replace(
"https://https://", "https://"),
base_value=row.find(class_="td__base priority--2").get_text(),
url="https://" + urlparse(part_url).netloc + row.find(class_="td__finalPrice").find("a")["href"],
in_stock=True if "In stock" in row.find(class_="td__availability").get_text() else False
)
            # checks if it's the cheapest in-stock price
if price is None and "In stock" in row.find(class_="td__availability").get_text():
price = row.find(class_="td__finalPrice").get_text().strip('\n')
prices.append(price_object)
# adds spec keys and values to the specs dictionary
for spec in specs_block.find_all("div", class_="group group--spec"):
specs[spec.find("h3", class_="group__title").get_text()] = spec.find("div",
class_="group__content").get_text().strip().strip(
'\n').replace("\u00b3", '').replace('\"', '').split('\n')
reviews = None
# gets the HTML code for the box containing reviews
review_box = soup.find(class_="block partReviews")
# skips over this process if the review box does not exist
if review_box != None:
reviews = []
# counts stars in reviews
for review in review_box.find_all(class_="partReviews__review"):
stars = 0
for star in review.find(class_="product--rating list-unstyled").find_all("li"):
if ' '.join(star.find("svg")["class"]) == "icon shape-star-full":
stars += 1
# gets the upvotes and timestamp
iterations = 0
for info in review.find(class_="userDetails__userData list-unstyled").find_all("li"):
if iterations == 0:
points = info.get_text().replace(" points", '').replace(" point", '')
elif iterations == 1:
created_at = info.get_text().replace(" ago", '')
else:
break
iterations += 1
# creates review object with all the information
review_object = Review(
author=review.find(class_="userDetails__userName").get_text(),
author_url="https://" + urlparse(part_url).netloc + review.find(class_="userDetails__userName")[
"href"],
author_icon="https://" + urlparse(part_url).netloc +
review.find(class_="userAvatar userAvatar--entry").find("img")["src"],
content=review.find(class_="partReviews__writeup markdown").get_text(),
rating=stars,
points=points,
created_at=created_at
)
reviews.append(review_object)
compatible_parts = None
# fetches section with compatible parts hyperlinks
compatible_parts_list = soup.find(class_="compatibleParts__list list-unstyled")
if compatible_parts_list != None:
compatible_parts = []
# finds every list item in the section
for item in compatible_parts_list.find_all("li"):
compatible_parts.append((
item.find("a").get_text(), "https://" + urlparse(part_url).netloc + item.find("a")["href"]
))
# creates the product object to return
product_object = Product(
name=soup.find(class_="pageTitle").get_text(),
url=part_url,
image=None,
specs=specs,
price_list=prices,
price=price,
rating=soup.find(class_="actionBox actionBox__ratings").find(
class_="product--rating list-unstyled").get_text().strip('\n').strip().strip("()"),
reviews=reviews,
compatible_parts=compatible_parts,
type=soup.find(class_="breadcrumb").find(class_="list-unstyled").find("li").get_text()
)
image_box = soup.find(class_="single_image_gallery_box")
if image_box != None:
# adds image to object if it finds one
product_object.image = image_box.find("img")["src"].replace("https://https://", "https://")
return product_object
    async def aio_part_search(self, search_term, **kwargs):
with concurrent.futures.ThreadPoolExecutor() as pool:
result = await asyncio.get_event_loop().run_in_executor(pool, partial(self.part_search, search_term, **kwargs))
return result
    async def aio_fetch_list(self, list_url):
with concurrent.futures.ThreadPoolExecutor() as pool:
result = await asyncio.get_event_loop().run_in_executor(pool, self.fetch_list, list_url)
return result
    async def aio_fetch_product(self, part_url):
with concurrent.futures.ThreadPoolExecutor() as pool:
result = await asyncio.get_event_loop().run_in_executor(pool, self.fetch_product, part_url)
return result
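# Hedged usage sketch (network access required; results depend on live page
# markup, so treat the output as illustrative):
#
#     scraper = Scraper()
#     for part in scraper.part_search("ryzen 5 3600", limit=5):
#         print(part.name, part.price)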
|
py | 7df6f1a28bfb521d534c1105d46e7a26195a0a92 | # -*- coding: utf-8 -*-
import torch
from ..init import reinit_layer_
from ..utils import _nonlinearity2activation
from .attention import MultiheadAttention
class FeedForwardBlock(torch.nn.Module):
def __init__(self, in_dim: int, out_dim: int, drop_rate: float=0.5, nonlinearity: str='relu'):
super().__init__()
self.proj_layer = torch.nn.Linear(in_dim, out_dim)
self.activation = _nonlinearity2activation(nonlinearity)
reinit_layer_(self.proj_layer, nonlinearity)
self.dropout = torch.nn.Dropout(drop_rate)
def forward(self, x: torch.Tensor):
return self.activation(self.proj_layer(self.dropout(x)))
class ConvBlock(torch.nn.Module):
def __init__(self, in_dim: int, out_dim: int, kernel_size: int, padding_mode: str='both', drop_rate: float=0.5, nonlinearity: str='relu'):
super().__init__()
padding_size = 0 if padding_mode.lower() == 'none' else kernel_size-1
if nonlinearity.lower() == 'glu':
self.conv = torch.nn.Conv1d(in_dim, out_dim*2, kernel_size=kernel_size, padding=padding_size)
self.activation = torch.nn.GLU(dim=1)
else:
self.conv = torch.nn.Conv1d(in_dim, out_dim, kernel_size=kernel_size, padding=padding_size)
self.activation = _nonlinearity2activation(nonlinearity)
reinit_layer_(self.conv, nonlinearity)
self.dropout = torch.nn.Dropout(drop_rate)
assert padding_mode.lower() in ('both', 'pre', 'post', 'none')
self.padding_mode = padding_mode
@property
def kernel_size(self):
return self.conv.kernel_size[0]
@property
def _pre_trim_size(self):
if self.padding_mode.lower() == 'both':
return (self.kernel_size - 1) // 2
elif self.padding_mode.lower() == 'pre':
# If paddings are in front, do not trim the front tensors; vice versa.
return 0
else:
return self.kernel_size - 1
@property
def _post_trim_size(self):
return self.kernel_size - 1 - self._pre_trim_size
def _trim(self, x: torch.Tensor):
assert self.padding_mode.lower() in ('both', 'pre', 'post'), f"Illegal to trim with `padding_mode` {self.padding_mode}"
assert x.dim() == 3
# x: (batch, channels, step)
return x[:, :, self._pre_trim_size:x.size(-1)-self._post_trim_size]
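    # Illustration: with kernel_size=3 the conv layer pads k-1 = 2 positions
    # per side, leaving 2 surplus output frames. With padding_mode='both',
    # _trim drops 1 frame from each end (restoring the input length); with
    # 'pre' it drops both from the end, with 'post' both from the front.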
def forward(self, x: torch.Tensor, mask: torch.Tensor=None):
# NOTE: It would be better to ensure the input (rather than the output) of convolutional
# layers to be zeros in padding positions, since only convolutional layers have such
# a property: Its output values in non-padding positions are sensitive to the input
# values in padding positions.
# x: (batch, in_dim=channels, step)
# mask: (batch, step)
if mask is not None:
x.masked_fill_(mask.unsqueeze(1), 0)
# conved: (batch, out_dim=channels, step)
conved = self.activation(self.conv(self.dropout(x)))
if self.padding_mode.lower() == 'none':
return conved
else:
return self._trim(conved)
class TransformerEncoderBlock(torch.nn.Module):
def __init__(self, hid_dim: int, ff_dim: int, num_heads: int=8, scoring: str='scaled_dot', drop_rate: float=0.1, nonlinearity: str='relu'):
super().__init__()
self.self_attention = MultiheadAttention(hid_dim, num_heads=num_heads, scoring=scoring, drop_rate=drop_rate)
self.self_norm = torch.nn.LayerNorm(hid_dim)
self.ff1 = torch.nn.Linear(hid_dim, ff_dim)
# reinit_layer_(self.ff1, nonlinearity)
reinit_layer_(self.ff1, 'linear')
self.activation = _nonlinearity2activation(nonlinearity)
self.ff2 = torch.nn.Linear(ff_dim, hid_dim)
reinit_layer_(self.ff2, 'linear')
self.ff_norm = torch.nn.LayerNorm(hid_dim)
self.dropout = torch.nn.Dropout(drop_rate)
def forward(self, x: torch.Tensor, mask: torch.Tensor=None, return_atten_weight: bool=False):
attened, atten_weight = self.self_attention(self.dropout(x), self.dropout(x), self.dropout(x), mask=mask, return_atten_weight=True)
attened_x = self.self_norm(self.dropout(x) + self.dropout(attened))
ffed = self.ff2(self.dropout(self.activation(self.ff1(attened_x))))
ffed_attened_x = self.ff_norm(attened_x + self.dropout(ffed))
if return_atten_weight:
return ffed_attened_x, atten_weight
else:
return ffed_attened_x
class TransformerDecoderBlock(torch.nn.Module):
def __init__(self, hid_dim: int, ff_dim: int, ctx_dim: int=None, num_heads: int=8, scoring: str='scaled_dot', drop_rate: float=0.1, nonlinearity: str='relu'):
super().__init__()
self.self_attention = MultiheadAttention(hid_dim, num_heads=num_heads, scoring=scoring, drop_rate=drop_rate)
self.self_norm = torch.nn.LayerNorm(hid_dim)
self.cross_attention = MultiheadAttention(hid_dim, key_dim=ctx_dim, value_dim=ctx_dim, num_heads=num_heads, scoring=scoring, drop_rate=drop_rate)
self.cross_norm = torch.nn.LayerNorm(hid_dim)
self.ff1 = torch.nn.Linear(hid_dim, ff_dim)
# reinit_layer_(self.ff1, nonlinearity)
reinit_layer_(self.ff1, 'linear')
self.activation = _nonlinearity2activation(nonlinearity)
self.ff2 = torch.nn.Linear(ff_dim, hid_dim)
reinit_layer_(self.ff2, 'linear')
self.ff_norm = torch.nn.LayerNorm(hid_dim)
self.dropout = torch.nn.Dropout(drop_rate)
# `trg_mask` masks subsequent/future tokens, which is a matrix where
# rows represent queries, and columns represent keys/values.
# Note the last query token can observe all tokens (last row is all False)
# | F T T T ... T |
# | F F T T ... T |
# | F F F T ... T |
# | ... ... ... . |
# | F F F F ... F |
self.register_buffer('_trg_mask', torch.ones(100, 100, dtype=torch.bool).triu(diagonal=1))
def _get_trg_mask(self, seq_len: int):
if self._trg_mask.size(0) < seq_len:
self.register_buffer('_trg_mask', torch.ones(seq_len*2, seq_len*2, dtype=torch.bool, device=self._trg_mask.device).triu(diagonal=1))
return self._trg_mask[:seq_len, :seq_len]
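    # e.g. _get_trg_mask(3) ->
    #   tensor([[False,  True,  True],
    #           [False, False,  True],
    #           [False, False, False]])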
def forward(self, x: torch.Tensor, src_x: torch.Tensor, src_mask: torch.Tensor=None, last_step: bool=False, return_atten_weight: bool=False):
# x: (batch, trg_step, hid_dim)
# Targets as queries/keys/values in self-attention.
# src_x: (batch, src_step, hid_dim)
# Sources as keys/values in cross-attention.
# src_mask: (batch, src_step)
if last_step:
# Use the last step of `x` only as the query
# xq: (batch, trg_step=1, hid_dim)
xq = x[:, -1:]
trg_mask = None
else:
xq = x
# trg_mask: (batch, trg_step, trg_step)
trg_mask = self._get_trg_mask(x.size(1)).expand(x.size(0), -1, -1)
attened, atten_weight = self.self_attention(self.dropout(xq), self.dropout(x), self.dropout(x), mask=trg_mask, return_atten_weight=True)
attened_xq = self.self_norm(self.dropout(xq) + self.dropout(attened))
crossed, cross_atten_weight = self.cross_attention(attened_xq, self.dropout(src_x), self.dropout(src_x), mask=src_mask, return_atten_weight=True)
crossed_attened_xq = self.cross_norm(attened_xq + self.dropout(crossed))
ffed = self.ff2(self.dropout(self.activation(self.ff1(crossed_attened_xq))))
ffed_crossed_attened_xq = self.ff_norm(crossed_attened_xq + self.dropout(ffed))
if return_atten_weight:
return ffed_crossed_attened_xq, atten_weight, cross_atten_weight
else:
return ffed_crossed_attened_xq
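# Minimal usage sketch for the decoder block (shapes follow the forward() contract;
# values are illustrative):
#   dec = TransformerDecoderBlock(hid_dim=128, ff_dim=256, num_heads=8)
#   trg = torch.randn(4, 15, 128)                    # (batch, trg_step, hid_dim)
#   src = torch.randn(4, 20, 128)                    # (batch, src_step, hid_dim)
#   out = dec(trg, src)                              # full teacher-forced pass -> (4, 15, 128)
#   step = dec(trg, src, last_step=True)             # incremental decoding -> (4, 1, 128)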
|
py | 7df6f28d40a4f85b32afd1293163c028e92c865e | from django.core.management.base import BaseCommand, CommandError
# Management commands live in <app>/management/commands/, so `.models` would look
# for a module inside the commands package; climb three levels to the app package.
from ...models import Role, Permission
class Command(BaseCommand):
help = 'Initial Role and Permission tables seeder'
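    # Usage (assuming this file is saved as <app>/management/commands/seed_roles.py;
    # the command name comes from the file name):
    #   python manage.py seed_roles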
def handle(self, *args, **options):
# Roles
Role.objects.bulk_create(
[
Role(name="root"),
Role(name="admin"),
Role(name="owner"),
Role(name="user"),
]
)
self.stdout.write(self.style.SUCCESS('Roles seeded'))
# Permissions
Permission.objects.bulk_create(
[
Permission(name="create"),
Permission(name="read"),
Permission(name="update"),
Permission(name="delete")
]
)
self.stdout.write(self.style.SUCCESS('Permissions seeded'))
# Role_permissions
ThroughModel = Role.permissions.through
        # .all() has no guaranteed ordering; order by pk so the indices below
        # match the creation order of the bulk_create calls above.
        roles = Role.objects.all().order_by('pk')
        permissions = Permission.objects.all().order_by('pk')
ThroughModel.objects.bulk_create(
[
ThroughModel(role_id=roles[0].pk, permission_id=permissions[0].pk),
ThroughModel(role_id=roles[0].pk, permission_id=permissions[1].pk),
ThroughModel(role_id=roles[0].pk, permission_id=permissions[2].pk),
ThroughModel(role_id=roles[0].pk, permission_id=permissions[3].pk),
ThroughModel(role_id=roles[1].pk, permission_id=permissions[0].pk),
ThroughModel(role_id=roles[1].pk, permission_id=permissions[1].pk),
ThroughModel(role_id=roles[1].pk, permission_id=permissions[2].pk),
ThroughModel(role_id=roles[1].pk, permission_id=permissions[3].pk),
ThroughModel(role_id=roles[2].pk, permission_id=permissions[1].pk),
ThroughModel(role_id=roles[2].pk, permission_id=permissions[2].pk),
ThroughModel(role_id=roles[3].pk, permission_id=permissions[1].pk),
]
)
self.stdout.write(self.style.SUCCESS('Role_permission seeded')) |
py | 7df6f297b059fba6e1a1a4808b3dc34e80eea9eb | ## Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## ALL RIGHTS RESERVED.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Neither the name of the SONATA-NFV, 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## nor the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## This work has been performed in the framework of the SONATA project,
## funded by the European Commission under Grant number 671517 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the SONATA
## partner consortium (www.sonata-nfv.eu).
##
## This work has been performed in the framework of the 5GTANGO project,
## funded by the European Commission under Grant number 761493 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the 5GTANGO
## partner consortium (www.5gtango.eu).
# encoding: utf-8
import json
import urllib2
import base64
import datetime
import time
import logging
import os
from configure import configuration
from servers import server
from DtFiltering import valdt
controller_ip = "192.168.1.231"
def init():
global prometh_server
global odl_server
global node_name
global user
global logger
#read configuration
conf = configuration("odc.conf")
#odl_server = conf.ConfigSectionMap("ODL_server")['odl_url']
#prometh_server = conf.ConfigSectionMap("Prometheus")['server_url']
#node_name = conf.ConfigSectionMap("ODL_server")['node_name']
#user = json.loads(conf.ConfigSectionMap("ODL_server")['user'])
odl_server = os.getenv('ODL_SRV', conf.ConfigSectionMap("vm_node")['odl_url'])
prometh_server = os.getenv('PROM_SRV', conf.ConfigSectionMap("Prometheus")['server_url'])
node_name = os.getenv('NODE_NAME', conf.ConfigSectionMap("Prometheus")['node_name'])
    # getNodes() indexes creds[0]['user_name'], so the credential string must be
    # parsed here (as the commented-out json.loads line above did).
    user = json.loads(os.getenv('USR_CRED', conf.ConfigSectionMap("Prometheus")['user']))
logger = logging.getLogger('dataCollector')
hdlr = logging.FileHandler('dataCollector.log', mode='w')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)
#logger.error('We have a problem')
logger.info('OpenDayLight Data Collector')
logger.info('ODL Server '+odl_server)
logger.info('Promth Server '+prometh_server)
logger.info('Monitoring Node '+node_name)
def getNodes(creds):
try:
url = odl_server+"/restconf/operational/opendaylight-inventory:nodes"
req = urllib2.Request(url)
base64string = base64.encodestring('%s:%s' % (creds[0]['user_name'], creds[0]['password'])).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
req.add_header('Content-Type','application/json')
response=urllib2.urlopen(req)
code = response.code
logger.info('Response code from ODL: '+str(code))
data = json.loads(response.read())
return data
except urllib2.HTTPError, e:
logger.warning('Error: '+str(e))
except urllib2.URLError, e:
logger.warning('Error: '+str(e))
except ValueError, e:
logger.warning('Error: '+str(e))
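# Expected response shape, as consumed by the __main__ loop below:
#   {"nodes": {"node": [{"id": ..., "node-connector": [...], ...}]}}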
def postNode(node):
nodeId = "id=\""+node['id']+"\", serial_number=\""+node['flow-node-inventory:serial-number']+"\", hardware=\""+node['flow-node-inventory:hardware']+"\", manufacturer=\""+node['flow-node-inventory:manufacturer']+"\", software=\""+node['flow-node-inventory:software']+"\""
    # NOTE: strftime("%s") (used originally) is platform-dependent; time.time() is portable.
    timestamp = " " + str(int(time.time() * 1000))
#ports
port_state_live="# TYPE port_state_live gauge" + '\n'
port_state_blocked="# TYPE port_state_blocked gauge" + '\n'
port_state_link_down="# TYPE port_state_link_down gauge" + '\n'
port_maximum_speed="# TYPE port_maximum_speed gauge" + '\n'
port_current_speed="# TYPE port_current_speed gauge" + '\n'
port_receive_frame_error="# TYPE port_receive_frame_error gauge" + '\n'
port_packets_transmitted="# TYPE port_packets_transmitted gauge" + '\n'
port_packets_received="# TYPE port_packets_received gauge" + '\n'
port_collision_count="# TYPE port_collision_count gauge" + '\n'
port_receive_over_run_error="# TYPE port_receive_over_run_error gauge" + '\n'
port_receive_crc_error="# TYPE port_receive_crc_error gauge" + '\n'
port_transmit_errors="# TYPE port_transmit_errors gauge" + '\n'
port_receive_drops="# TYPE port_receive_drops gauge" + '\n'
port_transmit_drops="# TYPE port_transmit_drops gauge" + '\n'
port_receive_errors="# TYPE port_receive_errors gauge" + '\n'
for port in node['node-connector']:
portId = ",port=\""+port['flow-node-inventory:port-number']+"\", mac=\""+port['flow-node-inventory:hardware-address']+"\""
port_state_live+="port_state_live{"+nodeId+portId+"}"+str(boolean2int(port['flow-node-inventory:state']['live'])) + timestamp + '\n'
port_state_blocked+="port_state_blocked{"+nodeId+portId+"}"+str(boolean2int(port['flow-node-inventory:state']['blocked'])) + timestamp + '\n'
port_state_link_down+="port_state_link_down{"+nodeId+portId+"}"+str(boolean2int(port['flow-node-inventory:state']['link-down'])) + timestamp + '\n'
port_maximum_speed+="port_maximum_speed{"+nodeId+portId+"}"+str(port['flow-node-inventory:maximum-speed']) + timestamp + '\n'
port_current_speed+="port_current_speed{"+nodeId+portId+"}"+str(port['flow-node-inventory:current-speed']) + timestamp + '\n'
port_receive_frame_error+="port_receive_frame_error{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['receive-frame-error']) + timestamp + '\n'
port_packets_transmitted+="port_packets_transmitted{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['packets']['transmitted']) + timestamp + '\n'
port_packets_received+="port_packets_received{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['packets']['received']) + timestamp + '\n'
port_collision_count+="port_collision_count{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['collision-count']) + timestamp + '\n'
port_receive_over_run_error+="port_receive_over_run_error{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['receive-over-run-error']) + timestamp + '\n'
port_receive_crc_error+="port_receive_crc_error{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['receive-crc-error']) + timestamp + '\n'
port_transmit_errors+="port_transmit_errors{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['transmit-errors']) + timestamp + '\n'
port_receive_drops+="port_receive_drops{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['receive-drops']) + timestamp + '\n'
port_transmit_drops+="port_transmit_drops{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['transmit-drops']) + timestamp + '\n'
port_receive_errors+="port_receive_errors{"+nodeId+portId+"}"+str(port['opendaylight-port-statistics:flow-capable-node-connector-statistics']['receive-errors']) + timestamp + '\n'
data = port_state_live+port_state_blocked+port_state_link_down+port_maximum_speed+port_current_speed+port_receive_frame_error+port_packets_transmitted+port_packets_received+port_collision_count+port_receive_over_run_error+port_receive_crc_error+port_transmit_errors+port_receive_drops+port_transmit_drops+port_receive_errors
#print data
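    # Each metric line follows the Prometheus text exposition format
    # (`name{labels} value timestamp_ms`); values below are illustrative:
    #   port_packets_received{id="openflow:1", ...} 1234 1589000000000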
url = prometh_server+"/job/ports/instance/"+node_name
logger.info('Post on: \n'+url)
logger.info('Post ports metrics: \n'+data)
try:
req = urllib2.Request(url)
req.add_header('Content-Type','text/html')
req.get_method = lambda: 'PUT'
response=urllib2.urlopen(req,data)
code = response.code
logger.info('Response Code: '+str(code))
except urllib2.HTTPError, e:
logger.warning('Error: '+str(e))
except urllib2.URLError, e:
logger.warning('Error: '+str(e))
def boolean2int(value):
    # `value` was originally named `bool`, which shadowed the built-in.
    if value:
        return 1
    return 0
def date2int(str_date):
date = datetime.datetime.strptime(str_date,"%Y-%m-%dT%H:%M:%SZ")
return time.mktime(date.timetuple())
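# NOTE: time.mktime() interprets the parsed struct as *local* time, although the
# trailing 'Z' in the expected format suggests UTC; calendar.timegm() would be the
# UTC-correct alternative.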
def getStates(vms):
    states = {}
for vm in vms:
if vm['status'] in states:
states[vm['status']] += 1
else:
states[vm['status']] = 1
return states
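# e.g. getStates([{'status': 'ACTIVE'}, {'status': 'ACTIVE'}, {'status': 'ERROR'}])
#   -> {'ACTIVE': 2, 'ERROR': 1}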
if __name__ == "__main__":
#print "OpenDayLight Data Collector"
init()
nodes = getNodes(user)
for node in nodes['nodes']['node']:
postNode(node)
|
py | 7df6f29d99f7ec00359fe467fbb32b68e22d81a3 | """
WSGI config for dogs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dogs.settings')
application = get_wsgi_application()
|