ext | sha | content |
---|---|---|
py | 1a47fbcae1d671991105d61535b7e4b227dd9c41 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class P2PMempoolTests(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
#connect a mininode
aTestNode = NodeConnCB()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
#request mempool
aTestNode.send_message(msg_mempool())
aTestNode.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
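# This test is typically launched through Bitcoin Core's functional test runner
# (hypothetical invocation from a source checkout): test/functional/test_runner.py p2p-mempool.py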
|
py | 1a47fde47fa582c2105ae88607b3fb0929c2f16a | from setuptools import setup, find_packages
install_requirements = ['splinter', 'docopt']
version = '0.2.0'
try:
import importlib
except ImportError:
install_requirements.append('importlib')
setup(
name='ticketmachine',
version=version,
description='The universal travel ticket machine',
#long_description=open('README.md').read(),
author='Tomas Babej',
author_email='[email protected]',
license='MIT',
url='https://github.com/tbabej/ticketmachine',
download_url='https://github.com/tbabej/ticketmachine/downloads',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=install_requirements,
classifiers=[
'Development Status :: 4 - Beta',
],
entry_points={
'console_scripts': [
'ticketmachine = ticketmachine.main:main',
]
},
)
|
py | 1a47fedc4b6b6ffc6549e5de1ccd04787be6ba2c | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import vtpl_api
from vtpl_api.models.vehicle_congestion_events_response import VehicleCongestionEventsResponse # noqa: E501
from vtpl_api.rest import ApiException
class TestVehicleCongestionEventsResponse(unittest.TestCase):
"""VehicleCongestionEventsResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVehicleCongestionEventsResponse(self):
"""Test VehicleCongestionEventsResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = vtpl_api.models.vehicle_congestion_events_response.VehicleCongestionEventsResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a48002cf122cc65fa9f5e95827e4a2cdb28168d | import os
from spirl.models.closed_loop_spirl_mdl import ClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
from spirl.models.bc_atomic import BCMdl
current_dir = os.path.dirname(os.path.realpath(__file__))
fewshot_dataset = KitchenStateSeqDataset(
data_path='data/kitchen/kitchen-demo-microwave_kettle_topknob_switch.hdf5',
subseq_len=10,
)
env = AttrDict(
task_list = ['microwave', 'kettle', 'top burner', 'light switch']
)
contra_model_cf = AttrDict(
state_dimension=data_spec.state_dim,
hidden_size=128,
feature_size=32,
)
configuration = {
'model': ClSPiRLMdl,
'logger': Logger,
'data_dir': '.',
'epoch_cycles_train': 1,
'evaluator': TopOfNSequenceEvaluator,
'top_of_n_eval': 100,
'top_comp_metric': 'mse',
'batch_size': 128,
'num_epochs': 50,
'fewshot_data': fewshot_dataset,
'fewshot_batch_size': 128,
'offline_data': False,
'contra_config': contra_model_cf,
'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
'bc_model': BCMdl,
}
configuration = AttrDict(configuration)
model_config = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
n_rollout_steps=10,
kl_div_weight=5e-4,
nz_enc=128,
nz_mid=128,
n_processing_layers=5,
cond_decode=True,
)
bc_model = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
nz_mid=128,
n_processing_layers=5,
# checkpt_path=f'{os.environ["EXP_DIR"]}/bc_atomic/kitchen/offline_data/no-topknob',
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-no-topknob.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1 # flat last action from seq gets cropped
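# SPiRL-style config modules like this one are consumed by the trainer, which imports the
# module by path and reads `configuration`, `model_config`, and `data_config`. A hypothetical
# invocation (exact script and flags depend on the SPiRL checkout):
#   python3 spirl/train.py --path=spirl/configs/<dir_containing_this_file> --val_data_size=160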
|
py | 1a4800a383e403cfdcbc61a927966bf485cf590c | #!/usr/bin/env python
# spongemock __main__.py
# author: Noah Krim
# email: [email protected]
from __future__ import print_function
import argparse
import re
from pyperclip import copy
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
parser = init_parser()
args = parser.parse_args()
try:
out = mock(' '.join(args.text), args.bias, args.seed or args.strseed or None)
except Exception as e:
eprint('Error: '+sys.argv[0]+': '+str(e))
return 1
if args.copy:
try:
copy(out)
except Exception:
eprint('Warning: '+sys.argv[0]+': could not copy the output to the clipboard because of an unexpected error. '
+'If using Linux, please make sure you have all the proper modules installed for pyperclip '
+'(more info: https://tkinter.unpythonic.net/wiki/How_to_install_Tkinter).')
print(out)
return 0
def init_parser():
parser = argparse.ArgumentParser(description='Mock some text like spongebob would. mOCk SoMe TexT lIKe SpONGebOb wOuLd.')
parser.add_argument('text', nargs='+', help='the text to mock. ThE tExT tO mOCk.')
parser.add_argument('-c', '--copy', action='store_true', help='Mocked text will be copied to the clipboard.')
parser.add_argument('-b', '--bias', type=float, default=0.5,
help='This bias is used to successively increase the chance of swapping from the previously-mocked case. '
+'A value of `0` will ensure the chance is always 50/50, '
+'and a value of `1` will ensure that after the first random choice the capitalization perfectly oscillates. '
+'Default is `0.5`.')
seed_group = parser.add_mutually_exclusive_group()
seed_group.add_argument('-s', '--seed', type=parsable_seed, help='Seed for random number generator. Can be any number or string (numbers are parsed).')
seed_group.add_argument('-S', '--strseed', help='Seed for random number generator. Does not attempt to parse the string to a number.')
return parser
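# A minimal sketch of how `bias` can drive the case swapping described in the --bias help text
# (illustrative only; the real implementation lives in spongemock.mock and may differ):
def _mock_sketch(text, bias=0.5, seed=None):
    import random
    rng = random.Random(seed)
    out = []
    swap_chance = 0.5
    last_upper = False
    for ch in text:
        if ch.isalpha():
            if rng.random() < swap_chance:
                last_upper = not last_upper
                swap_chance = 0.5  # reset after a swap
            else:
                # every miss pushes the swap chance toward 1 at a rate set by `bias`
                swap_chance += (1 - swap_chance) * bias
            ch = ch.upper() if last_upper else ch.lower()
        out.append(ch)
    return ''.join(out)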
def parsable_seed(str_seed):
# Try int parse
if re.match(r'^-?\d+$', str_seed):
return int(float(str_seed))
# Try float parse
try:
return float(str_seed)
except Exception:
pass
return str_seed
if __name__ == '__main__':
if __package__ is None:
from os import path
sys.path.append( path.dirname(path.abspath(__file__) ) )
from spongemock import mock
else:
from .spongemock import mock
main()
else:
from .spongemock import mock |
py | 1a4800f54943f5909fd5e5d6a455f2a458c9017d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import netCDF4 as nc
from netCDF4 import Dataset
import os
import rasterio
from scipy.interpolate import griddata
from scipy import interpolate
Path_save = '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/'
#------------------------------------------------------------------------------
# Code motivation -------------------------------------------------------------
"""
Code for interpolating the latitudes and longitudes, to be used as a last-resort
measure when the latitude and longitude arrays become corrupted.
"""
################################################################################
##------------------READING THE GOES CH2 DATA---------------------------------##
################################################################################
"Las lats y lons de GOES serán la malla de referencia."
lat_GOES = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lat_CH2_2018_2019.npy')
lon_GOES = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lon_CH2_2018_2019.npy')
################################################################################
##----------------------INTERPOLATING THE DATA--------------------------------##
################################################################################
x = np.arange(0, lat_GOES.shape[1], 1)
y = np.arange(0, lat_GOES.shape[0], 1)
f = interpolate.interp2d(x, y, lat_GOES[:,:])
xnew = np.arange(0, lat_GOES.shape[1], 3.9)
ynew = np.arange(0, lat_GOES.shape[0], 4)
Lat_new = f(xnew, ynew)
Lat_new=np.array(Lat_new)
del x, y, f, xnew, ynew
x = np.arange(0, lon_GOES.shape[1], 1)
y = np.arange(0, lon_GOES.shape[0], 1)
f = interpolate.interp2d(x, y, lon_GOES[:,:])
xnew = np.arange(0, lon_GOES.shape[1], 3.9)
ynew = np.arange(0, lon_GOES.shape[0], 4)
Lon_new = f(xnew, ynew)
Lon_new=np.array(Lon_new)
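# Optional sanity check on the resampled grids before saving:
# print(Lat_new.shape, Lon_new.shape)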
############################################################################################
##----------------------------SAVING THE INTERPOLATED ARRAY ------------------------------##
############################################################################################
np.save(Path_save+'Array_Lat_COD_Junio', Lat_new)
np.save(Path_save+'Array_Lon_COD_Junio', Lon_new)
|
py | 1a480143094363d629d626ac0267798c1c45b8a0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # used to manage dates and times
import os.path # used to manage paths
import sys # used to find the script name (argv[0])
# Import the Backtrader platform
import backtrader as bt
# Create a strategy
class TestStrategy(bt.Strategy):
params = (
('deep', -0.3),
('printlog', False),
('profit',0.3),
('isA', False),
('onlyprintgood',True)
)
def log(self, txt, dt=None, doprint=False):
'''Logging function for this strategy'''
if self.params.printlog or doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the latest data of the close price line
self.dataclose = self.datas[0].close
# Track pending orders and the buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
self.init_cash = self.broker.getvalue()
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/sell order submitted/accepted to/by broker - nothing to do
return
# Check whether the order has been completed
# Note: the broker could reject the order if there is not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
# Log the closing price of the referenced series
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending... if so, we cannot send a second one
if self.order:
return
# Check whether we are in the market
if not self.position:
if len(self) > 10 and (self.dataclose[0] - max(self.dataclose)) / max(self.dataclose) <= self.params.deep:
# Max drawdown has reached deep: BUY, BUY, BUY!!! (with all possible default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a second order
if self.params.isA:
size = int(self.broker.getvalue()*0.9 / self.dataclose[0]/100)*100
else:
size =self.broker.getvalue()*0.9/self.dataclose[0]-0.1
self.order = self.buy(size=size)
else:
# Already in the market... we might want to sell
if (self.dataclose[0] - self.position.price) / self.position.price >= self.params.profit:
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a second order
self.order = self.sell(size=self.position.size)
def stop(self):
if self.params.isA:
self.basesize = int(self.init_cash / self.dataclose[-(len(self)-1)] / 100) * 100
self.rest = self.init_cash- self.basesize * self.dataclose[-(len(self)-1)]
else:
self.basesize = self.init_cash / self.dataclose[-(len(self)-1)] - 0.1
self.rest = self.init_cash - self.basesize * self.dataclose[-(len(self)-1)]
self.basline = self.basesize * self.dataclose[0] + self.rest
if self.params.onlyprintgood:
if self.broker.getvalue()/self.init_cash - 1 > self.basline/self.init_cash - 1:
self.log('(MA deep %f, P %f, profit %f, baseline %f) Ending Value %.2f,baseline Value %.2f' %
(self.params.deep, self.params.profit, self.broker.getvalue()/self.init_cash - 1,self.basline/self.init_cash - 1, self.broker.getvalue(),self.basline), doprint=True)
else:
self.log('(MA deep %f, P %f, profit %f, baseline %f) Ending Value %.2f,baseline Value %.2f' %
(self.params.deep, self.params.profit, self.broker.getvalue() / self.init_cash - 1,
self.basline / self.init_cash - 1, self.broker.getvalue(), self.basline), doprint=True)
if __name__ == '__main__':
# Create a Cerebro instance
cerebro = bt.Cerebro()
# Add a strategy (buy when the max drawdown exceeds deep; sell when the post-buy profit exceeds profit)
#cerebro.addstrategy(TestStrategy)
# Buy once the max drawdown reaches deep; sell once the profit reaches profit
cerebro.optstrategy(
TestStrategy,
deep=[-i/20 for i in range(1, 20)], profit=[i/20 for i in range(1, 20)], isA=True, printlog=False, onlyprintgood=True)
# The data is in a subfolder of the samples. We need to find where the script is
modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
datapath = os.path.join(modpath, '../../datas/orcl-1995-2014.txt')
# Create a data feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
# Do not pass values after this date
todate=datetime.datetime(2000, 12, 31),
reverse=False)
import pandas as pd
df = pd.read_csv('./samples/candlestick1')
df.index = pd.to_datetime(df.pop('id'), unit='s')
df.columns = ['high','low','open','close','volume','Adjusted_Close']
df.pop('Adjusted_Close')
df = df.sort_index()
df1 = df[-345:]
import tushare as ts
ts.set_token('1eda71057295b5ba834d31d24b572521d24689463e7328ca84fed1d6')
pro = ts.pro_api()
#df = pro.query('daily', ts_code='600519.SH', start_date='20150123',end_date='20210619')
#df = pro.query('daily', ts_code='601318.SH', start_date='20150123', end_date='20210530')
df = ts.pro_bar(ts_code='600519.SH', adj='qfq', start_date='20150123', end_date='20210619')
df = df.set_index(["trade_date"])
df = df.sort_index(ascending=True)
features_considered = ['open', 'close', 'high', 'low', "vol"]
features = df[features_considered]
features.columns = ['open', 'close', 'high', 'low','volume']
df = features
print(df.loc['20150123'])
df.index = pd.to_datetime(df.index, format='%Y%m%d')
data = bt.feeds.PandasData(dataname=df)
# Add the data feed to Cerebro
cerebro.adddata(data)
# Set our desired starting cash
cerebro.broker.setcash(20000000.0)
# Add a fixed order size based on the stake
#cerebro.addsizer(bt.sizers.FixedSize, stake=1)
# Set the commission to 0.1% (divide by 100 to remove the percent sign)
cerebro.broker.setcommission(commission=0.001)
# Run everything
cerebro.run(maxcpus=1)
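    # Optionally plot the results with Backtrader's built-in plotting (requires matplotlib);
    # of limited use here since optstrategy produces many runs:
    # cerebro.plot()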
|
py | 1a4801e2da6d4553fed12a83ba5c8a73bf87e4d4 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app_33967.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a4802997abb7ce22549000d56996bab7e333eaa | """Intersection reach analysis.
Author: Chen Feng
Last updated on Nov. 21, 2018
"""
from __future__ import division
from dijkstra import Dijkstra
import numpy as np
import pandas as pd
import os
import time
from pyx import *
# __________Reformat data__________
def get_EV_id_matrix(na_x1, na_y1, na_x2, na_y2):
"""Produce the edge-vertex-ID matrix for the undirected graph generated from the input map.
:param na_x1: a 1-d numpy array listing the x-coordinate of the start point of the undirected edge/segment
:param na_y1: a 1-d numpy array listing the y-coordinate of the start point of the undirected edge/segment
:param na_x2: a 1-d numpy array listing the x-coordinate of the end point of the undirected edge/segment
:param na_y2: a 1-d numpy array listing the y-coordinate of the end point of the undirected edge/segment
:return: a 2-d numpy array listing the start and end point IDs of each undirected edge, a 1-d numpy array
listing x-y coordinate tuple of the start point of each edge, and a 1-d numpy array listing
x-y coordinate tuple of the end point of each edge
"""
# Produce the coordinates of the start points of segments and
# convert to a numpy array of tuples in the form of [(x1, y1), ...]
na_pts_start = (np.array(zip(na_x1, na_y1), dtype=[('x1', float), ('y1', float)])).astype(tuple)
list_pts_start = na_pts_start.tolist()
# Produce the coordinates of the end points of segments and
# convert to a numpy array of tuples in the form of [(x2, y2), ...]
na_pts_end = (np.array(zip(na_x2, na_y2), dtype=[('x2', float), ('y2', float)])).astype(tuple)
list_pts_end = na_pts_end.tolist()
# Extract distinct vertices from the point list
list_pts = na_pts_start.tolist() + na_pts_end.tolist()
set_pts = list(set(list_pts))
list_pt_id = range(len(set_pts))
# Create an edge-vertex matrix with the column 'na_edge_pts_1' indicating the start vertices, and
# the column 'na_edge_pts_2' indicating the end vertices.
na_edge_pts_1 = np.zeros((len(na_x1)), dtype=int) - 1 # create a numpy array filled with -1
for i in range(len(list_pt_id)):
indices = [ndx for ndx, x in enumerate(list_pts_start) if x == set_pts[i]]
for j in indices:
na_edge_pts_1[j] = i
na_edge_pts_2 = np.zeros((len(na_x1)), dtype=int) - 1 # create a numpy array filled with -1
for i in range(len(list_pt_id)):
indices = [ndx for ndx, x in enumerate(list_pts_end) if x == set_pts[i]]
for j in indices:
na_edge_pts_2[j] = i
na_edge_pts = np.column_stack((na_edge_pts_1, na_edge_pts_2))
return na_edge_pts, na_pts_start, na_pts_end
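# Illustrative call: two collinear segments sharing the point (1, 0). Vertex IDs come from
# Python's set ordering, so treat the returned IDs as arbitrary labels:
#   na_edges, na_starts, na_ends = get_EV_id_matrix(
#       np.array([0., 1.]), np.array([0., 0.]), np.array([1., 2.]), np.array([0., 0.]))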
def create_x_adj_list(na_edge_pts, deg_threshold):
"""Create an adjacency list for the undirected graph generated from the initial map.
:param na_edge_pts: a 2-d numpy array listing the start and end point IDs of each edge
:param deg_threshold: the minimum degree of a vertex (i.e., an endpoint) to be considered as a street intersection
:return: an adjacency list, e.g., G = [{12: 0, 1: 1, 24: 1}, {0: 1, 24:1, 2: 1, 16:1}, ...]
indicates that the edge 0 is adjacent with edges 12, 1, and 24, and the cross distances from
those edges are G[0][12] = 0, G[0][1] = 1, and G[0][24] = 1.
"""
adj_list = []
for i in range(len(na_edge_pts)):
adj_list.append({})
# extract the start point of the current edge
v1 = na_edge_pts[i, 0]
# find the incident edges of the start point of the current edge
t = np.where(na_edge_pts == v1)
v1_deg = len(t[0])
for j in range(len(t[0])):
if t[0][j] != i:
if v1_deg >= deg_threshold:
adj_list[i][int(t[0][j])] = 1
else:
adj_list[i][int(t[0][j])] = 0
# extract the end point of the current edge
v2 = na_edge_pts[i, 1]
# find the incident edges of the end point of the current edge
t = np.where(na_edge_pts == v2)
v2_deg = len(t[0])
for j in range(len(t[0])):
if t[0][j] != i:
if v2_deg >= deg_threshold:
adj_list[i][int(t[0][j])] = 1
else:
adj_list[i][int(t[0][j])] = 0
return adj_list
# __________Run intersection-reach analysis__________
def x_reach(edge_id, max_cross, adj_list, na_seg_len):
"""Conduct intersection reach analysis.
:param edge_id: the ID of the source edge
:param max_cross: the maximum number of intersections allowed to cross
:param adj_list: an adjacency list, e.g., G = [{12: 0, 1: 1, 24: 1}, {0: 1, 24:1, 2: 1, 16:1}, ...]
indicates that the edge 0 is adjacent with edges 12, 1, and 24, and the cross distances from
those edges are G[0][12] = 0, G[0][1] = 1, and G[0][24] = 1.
:param na_seg_len: a list listing the length of each line segment
:return: the total street length accessible within max_cross intersections and
the list of lines that can be reached
"""
D, P = Dijkstra(adj_list, edge_id)
# find all the edges that are no more than max_cross intersections away from the source edge
list_reached_edges = [k for k, v in D.items() if v <= max_cross]
# compute the total length of the reached edges
total_len = 0
for i in list_reached_edges:
total_len += na_seg_len[i]
return total_len, list_reached_edges
def x_reach_all_pairs(max_cross, adj_list, na_seg_len):
"""Conduct intersection reach analysis for all lines.
:param max_cross: the maximum number of intersections allowed to cross
:param adj_list: an adjacency list, e.g., G = [{12: 0, 1: 1, 24: 1}, {0: 1, 24:1, 2: 1, 16:1}, ...]
indicates that the edge 0 is adjacent with edges 12, 1, and 24, and the cross distances from
those edges are G[0][12] = 0, G[0][1] = 1, and G[0][24] = 1.
:param na_seg_len: a list listing the length of each line segment
:return: a list listing the intersection reach for each line segment
"""
list_reach = []
num_lines = len(na_seg_len)
for e in range(num_lines):
reach, list_edges = x_reach(e, max_cross, adj_list, na_seg_len)
list_reach.append(reach)
return list_reach
def draw_x_reach_edges(eid, list_edge_id, na_e_pt_start, na_e_pt_end, file_dir, file_name="x_reach_snapshot",
stroke_width=0.02, scale_factor=1 / 50):
"""Visualize the result of intersection reach analysis.
:param eid: the ID of the edge from which the reach analysis starts
:param list_edge_id: a list listing the IDs of the line segments that have been reached
:param na_e_pt_start: a 1-d numpy array listing x-y coordinate tuple of the start point of each line
:param na_e_pt_end: a 1-d numpy array listing x-y coordinate tuple of the end point of each line
:param file_dir: a string representing the file directory in which to store the exported drawing
:param file_name: the name of the PDF file to be exported to
:param stroke_width: the width of the stroke for drawing lines
:param scale_factor: the scale factor used to scale the input coordinates
"""
os.chdir(file_dir)
# Set canvas for drawing
c = canvas.canvas()
num_edges = len(na_e_pt_start)
# Draw the whole grid (i.e., the initial map) first
for i in range(num_edges):
line = path.line(na_e_pt_start[i][0] * scale_factor, na_e_pt_start[i][1] * scale_factor,
na_e_pt_end[i][0] * scale_factor, na_e_pt_end[i][1] * scale_factor)
c.stroke(line, [style.linewidth(stroke_width), color.rgb.black])
# Draw the reached edges in list_edge_id
for e in list_edge_id:
line = path.line(na_e_pt_start[e][0] * scale_factor, na_e_pt_start[e][1] * scale_factor,
na_e_pt_end[e][0] * scale_factor, na_e_pt_end[e][1] * scale_factor)
c.stroke(line, [style.linewidth(4 * stroke_width), color.rgb.red])
# Draw a red circle at the midpoint of the edge from which to start
circle_center_x = (na_e_pt_start[eid][0] + na_e_pt_end[eid][0]) / 2
circle_center_y = (na_e_pt_start[eid][1] + na_e_pt_end[eid][1]) / 2
circle = path.circle(circle_center_x * scale_factor, circle_center_y * scale_factor, 8 * stroke_width)
c.stroke(circle, [deco.filled([color.rgb.black])])
c.writePDFfile(file_name)
if __name__ == '__main__':
# Document the time at which the script starts running
localtime = time.asctime(time.localtime(time.time()))
print "Start Time :", localtime + "\n"
# Change working directory
directory = r"C:\_SoftwareDevelopment\Grasshopper\GhPython_PatternGeneration\data_RealExamples\Apt"
os.chdir(directory)
csv_file = "test_Apt.csv"
# Set the maximum number of intersections allowed to travel
max_crossings = 3
# Set the vertex degree threshold
degree_threshold = 3
# Set the start edge ID
start_edge = 125
# Read in the data and store them as a pandas DataFrame
df = pd.read_csv(csv_file)
x1, y1, x2, y2 = df['x1'].values, df['y1'].values, df['x2'].values, df['y2'].values
na_segment_length = df['seg_len'].values
na_edge_points, na_points_start, na_points_end = get_EV_id_matrix(x1, y1, x2, y2)
adjacency_list = create_x_adj_list(na_edge_points, degree_threshold)
xr, reached_edgs = x_reach(start_edge, max_crossings, adjacency_list, na_segment_length)
print xr
# print x_reach_all_pairs(max_crossings, adjacency_list, na_segment_length)
# Document the time at which the script finishes running
localtime = time.asctime(time.localtime(time.time()))
print "\nEnd Time :", localtime
# Change the directory to save the PDF file
directory = "C:/Users/cfeng/Desktop/Outbox"
# Highlight the reached edges on the initial map and export the drawing to a PDF file
draw_x_reach_edges(start_edge, reached_edgs, na_points_start, na_points_end, directory, stroke_width=0.02,
scale_factor=1 / 50)
|
py | 1a4802d7b2cabe2c07a8100ccbab88b635cb3f2b | from data import warehouse, word_frequencies
from puzzle.heuristics import acrostic
from puzzle.puzzlepedia import prod_config
from spec.mamba import *
BA_PREFIX_TRIE = word_frequencies.load(
zip(('bad', 'bag', 'ban', 'bar', 'bat'), [1]*5))
with description('acrostic'):
with it('uses a mock trie'):
a = acrostic.Acrostic(list('bag'), BA_PREFIX_TRIE)
expect(len(a._trie)).to(be_below(100))
with it('yields multi-character solutions'):
a = acrostic.Acrostic(list('bag'), BA_PREFIX_TRIE)
expect(list(a)).to(contain('bag'))
with it('is observable'):
a = acrostic.Acrostic(list('bag'), BA_PREFIX_TRIE)
subs = mock.Mock()
a.subscribe(subs)
expect(subs.on_next.call_args).to(equal(mock.call('bag')))
with it('yields unique solutions'):
a = acrostic.Acrostic(list('ba') + ['ggg'], BA_PREFIX_TRIE)
expect(list(a)).to(have_len(1))
with it('yields multiple multi-character solutions'):
a = acrostic.Acrostic(list('ba') + ['dgnrt'], BA_PREFIX_TRIE)
expect(list(a)).to(contain('bad', 'bag', 'ban', 'bar', 'bat'))
with _description('real data'):
with before.all:
warehouse.save()
prod_config.init()
with after.all:
prod_config.reset()
warehouse.restore()
with it('finds simple words'):
a = acrostic.Acrostic('cab')
expected = [
'cab',
'ca b',
'c ab',
]
for i, (answer, weight) in enumerate(a.items()):
expect('#%s = %s @ %s' % (i, answer, weight)).to(equal(
'#%s = %s @ %s' % (i, expected[i], weight)
))
expect(a.items()).to(have_len(len(expected)))
with it('finds important words'):
a = acrostic.Acrostic('binary')
expect(next(a.items())).to(equal(('binary', 1)))
with _it('modestly expensive'):
words = [
'larch', 'simple', 'foray', 'doyen', 'eerily', 'soup', 'must',
]
a = acrostic.Acrostic(words)
limit = 1000000
for i, (answer, weight) in enumerate(a.items()):
if answer.startswith('answer') or i % 1000 == 0:
print(answer, weight)
if i > limit:
print('tried %s' % i)
break
with _it('crazy expensive'):
words = [
'champion', 'nitpick', 'conspiracy', 'windpipe', 'epinephrine',
'philanthropic', 'sierpinski', 'mississippi', 'pilaf', 'vulpine',
'spinach', 'pinochet', 'porcupine', 'megapixels', 'australopithecus',
'sharpie', 'intrepid', 'insipid', 'robespierre'
]
a = acrostic.Acrostic(words)
limit = 1000000
for i, (answer, weight) in enumerate(a.items()):
if answer.startswith('answer') or i % (limit / 10) == 0:
print(answer, weight)
if i > limit:
print('tried %s' % i)
break
""" 4/24
a to incipient each rss 120548796
a to incipient opps eii 153396
a to incipient eipe rni 59329
a to incipient ipps epe 174519
a to incipient cmss ede 290375
a to incipient csts rsr 175192
a to incipient opca dsr 752124
a to incipient cisr tnp 87249
a to incipient ilos dps 1290835
a to pntemplates cs tio 770193
a to perempuan usps tio 770193
4/25 + early break in walk when scores are low
a to incipient each rss 120548796
a to incipient iste eie 57198
a to incipient cmss dss 1995347
a to incipient imia rsi 697477
a to incipient osrs eip 398559
a to perempuan peas tpe 275152
a to perempuan imcs nss 990710
a to perempuan caar ens 717319
a to perempuan usea tns 523866
a to perempuan epra pii 512601
a to dicipline imps psi 6101411
9/15 38 seconds; 35 seconds
a to incipient usui ipi 1.699863585947228e-07
a in incipient isps psr 3.399727171894456e-07
a in incipient rire dns 5.7795361922205745e-06
a i applesauce isls pdo 1.699863585947228e-07
a i applesauce pirs inr 6.799454343788912e-07
a i renaisance csus iss 2.209822661731396e-06
a i renaisance cmaa nsp 3.399727171894456e-07
a i renassance imes nss 5.099590757841683e-07
a can eliminate aisi ds 3.399727171894456e-07
a can eliminate phr dio 1.699863585947228e-07
"""
|
py | 1a48034d649e716b44247082686281b11589c891 | from unicorn.arm_const import *
from ..fuzz import get_fuzz
import sys
def puts(uc):
ptr = uc.reg_read(UC_ARM_REG_R0)
assert(ptr != 0)
msg = uc.mem_read(ptr, 256)
#ptr += 1
#while msg[-1] != b"\0":
# msg += uc.mem_read(ptr, 1)
# ptr += 1
if b'\0' in msg:
msg = msg[:msg.find(b'\0')]
print(msg)
def putchar(uc):
c = uc.reg_read(UC_ARM_REG_R0)
assert (c < 256)
sys.stdout.write(chr(c))
sys.stdout.flush()
def printf(uc):
# for now just print out the fmt string
ptr = uc.reg_read(UC_ARM_REG_R0)
assert(ptr != 0)
msg = uc.mem_read(ptr, 256)
# ptr += 1
# while msg[-1] != b"\0":
# msg += uc.mem_read(ptr, 1)
# ptr += 1
if b'\0' in msg:
msg = msg[:msg.find(b'\0')]
sys.stdout.write(msg.decode('latin1'))
sys.stdout.flush()
def readline(uc):
ptr = uc.reg_read(UC_ARM_REG_R0)
l = uc.reg_read(UC_ARM_REG_R1)
assert(ptr != 0)
data = b''
while len(data) < l:
data += get_fuzz(1)
if data.endswith(b'\n'):
break
uc.mem_write(ptr, data)
uc.reg_write(UC_ARM_REG_R0, 0)
# echo
sys.stdout.write(data.decode('latin1'))
sys.stdout.flush() |
py | 1a4803cf6ffe9a64a36ee2294f12aa33de8b8be0 | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
def autostring(num, prec=0, zero=False, set_printoptions=False, pp=False, join=False, joinall=False, sep=' '):
"""
Format number (array) with given decimal precision.
Definition
----------
def autostring(num, prec=0, zero=False, set_printoptions=False, pp=False, join=False, joinall=False, sep=' '):
There is a wrapper function for convenience with the short name 'astr' that calls autostring
def astr(num, prec=0, zero=False, set_printoptions=False, pp=False, join=False, joinall=False, sep=' '):
Input
-----
num number array
Optional Input
--------------
prec number of decimal places of formatted values
minimum field width for integers (default: 0)
zero if True, pad values with zeros rather than blanks (default: False)
set_printoptions if True, sets linewidth to the format times size of 1st dimension (default: False)
pp shortcut for set_printoptions (default: False)
it will be checked for (pp | set_printoptions)
join if True, joins all individual strings of last (fastest) dimension into one string (default: False)
joinall if True, joins all individual strings into single string,
i.e. first flattens the array and then joins it (default: False, overwrites join)
sep separator used when joining (default: space=' ')
Output
------
string (array) of formatted numbers
Restrictions
------------
None
Examples
--------
>>> print(autostring(3.5967, 3))
3.597
>>> print(autostring(3.5967))
4
>>> print(autostring(3, 3))
3
>>> print(autostring(np.array([3.5967, 3.5964]), 3))
['3.597' '3.596']
>>> print(autostring(np.array([3.59, 1.123456e12]), 3))
['3.590e+00' '1.123e+12']
>>> print(autostring(np.array([3.59, 11.1234]), 3, zero=True))
['03.590' '11.123']
>>> print(autostring(np.array([3, 11])))
[' 3' '11']
>>> print(autostring(np.array([3, 11]), 3))
[' 3' ' 11']
>>> print(autostring(np.zeros((2,2), dtype=np.float), 1))
[['0.0' '0.0']
['0.0' '0.0']]
>>> np.set_printoptions(threshold=10)
>>> print(autostring(np.zeros((2,10), dtype=np.float)[0:2,0:2], 1))
[['0.0' '0.0']
['0.0' '0.0']]
>>> print(autostring(np.zeros((2,10), dtype=np.float), 1, set_printoptions=True))
[['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
>>> print(autostring(np.zeros((2,10), dtype=np.float), 1, pp=True))
[['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
>>> print(autostring(np.zeros((2,10), dtype=np.float), 1, set_printoptions=False, pp=True))
[['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
>>> print(autostring(np.array([3.5967, 3.5964]), 3, join=True))
3.597 3.596
>>> print(autostring(np.zeros((2,10), dtype=np.float), 1, join=True, sep=';'))
['0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0'
'0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0']
>>> print(autostring(np.reshape(np.arange(20,dtype=np.float),(2,10)), 1, joinall=True, sep=';'))
0.0; 1.0; 2.0; 3.0; 4.0; 5.0; 6.0; 7.0; 8.0; 9.0;10.0;11.0;12.0;13.0;14.0;15.0;16.0;17.0;18.0;19.0
>>> print(autostring(np.reshape(np.arange(20,dtype=np.float),(2,10)), 1, joinall=True, sep=';'))
0.0; 1.0; 2.0; 3.0; 4.0; 5.0; 6.0; 7.0; 8.0; 9.0;10.0;11.0;12.0;13.0;14.0;15.0;16.0;17.0;18.0;19.0
>>> print(autostring(np.array([3, 11, np.inf])))
[' 3' ' 11' 'inf']
>>> print(autostring(np.array([3, 11, np.nan])))
[' 3' ' 11' 'nan']
>>> print(autostring(np.ma.array([3, 11, np.nan], mask=[False,True,False])))
[' 3' '-- ' 'nan']
>>> print(autostring(np.ma.array([3, 11, np.nan], mask=[False,False,True])))
[' 3' '11' '--']
License
-------
This file is part of the JAMS Python package, distributed under the MIT License.
Copyright (c) 2011-2014 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Nov 2011 - from autostring.pro
Modified, MC, May 2012 - pp
MC, Dec 2012 - special treatment of -0.0 on output
MC, Feb 2013 - nan, inf and masked arrays
MC, Feb 2013 - ported to Python 3
MC, Oct 2014 - isinstance
MC, Dec 2014 - tuple input
"""
#
# Check input
if isinstance(num, (list, tuple)): num = np.array(num)
isarr = np.ndim(num)
if (isarr > 2):
print("AUTOSTRING WARNING: autostring only works with scalars, 1D- and 2D arrays: return original array.")
return num
# Only treat int and float
if (isarr==0):
try:
typ = num.dtype
except AttributeError:
if (type(num) == float):
typ = np.float64
elif (type(num) == int):
typ = np.int32
else:
typ = type(num)
else:
typ = num.dtype
try:
lfloat = np.float128 # Mac/*nix
except AttributeError:
try:
lfloat = np.float96 # Windows
except AttributeError:
lfloat = np.float64
if np.__version__ >= "1.6":
if (typ in [np.float16, np.float32, np.float64, lfloat]):
isfloat = True
elif (typ in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]):
isfloat = False
else:
print("AUTOSTRING WARNING: autostring cannot work with input type: return original array.")
return num
else:
if (typ in [np.float32, np.float64, lfloat]):
isfloat = True
elif (typ in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]):
isfloat = False
else:
print("AUTOSTRING WARNING: autostring cannot work with input type: return original array.")
return num
# Scalar to array if necessary; Special treatment of -0.0
if (isarr==0):
if (num == 0):
num = np.abs(num)
else:
if isinstance(num, np.ma.masked_array):
num = np.ma.where(num == 0, 0, num)
else:
num = np.where(num == 0, 0, num)
# Zero padding
if zero:
nix = '0'
else:
nix = ''
#
# If we deal with an array of numbers we take the largest for the format
# deal with inf and nan
hasmask = False
hasnan = False
if (isarr==0):
if np.isnan(num): return 'nan'
if np.isinf(num): return 'inf'
abs_num = np.ma.abs(num)
# leave room for the decimal point and the negative sign, if any
if (num < 0.):
num_sign_chars = 1
else:
num_sign_chars = 0
else:
if isinstance(num, np.ma.masked_array):
if np.sum(num.mask) > 0: hasmask = True
if num.count() > np.ma.sum(np.isfinite(num)): hasnan = True
else:
if num.size > np.sum(np.isfinite(num)): hasnan = True
inum = np.ma.array(num, mask=~np.isfinite(num), keep_mask=True)
abs_num = np.ma.max(np.ma.abs(inum))
# leave room for the decimal point and the negative sign, if any
if (np.ma.min(inum) < 0.):
num_sign_chars = 1
else:
num_sign_chars = 0
#
# Floating point
if isfloat: # number is a float, more or less
if abs_num >= 1.e6:
num_prefix_chars = 1
num_sci_not_chars = 4
format_type = 'e'
elif ((abs_num < 1.e6) & (abs_num >= 1.)):
nprefix = np.int_(np.log10(np.int32(abs_num)))+1
# special treatment: the output prefix digits could
# be one digit longer than the input prefix digits: e.g. 99.99 => 100.0
val = np.around(abs_num*(10.**prec))/(10.**prec)
nprefixval = np.int_(np.log10(val))+1
nprefix = np.amax(np.array([nprefix,nprefixval], dtype=np.int))
num_prefix_chars = nprefix
num_sci_not_chars = 0
format_type = 'f'
elif ((abs_num < 1.) & (abs_num >= 1.e-3)):
num_prefix_chars = 1
num_sci_not_chars = 0
format_type = 'f'
elif (abs_num == 0):
num_prefix_chars = 1
num_sci_not_chars = 0
format_type = 'f'
else:
num_prefix_chars = 1
num_sci_not_chars = 4
format_type = 'e'
#
num_postfix_chars = prec
num_total_chars = num_sign_chars + num_prefix_chars + 1 + num_postfix_chars + num_sci_not_chars
if (prec == 0): # no dot if prec=0
num_total_chars -= 1
if hasmask: # need space for --
if num_total_chars < 2: num_total_chars = 2
if hasnan: # need space for nan or inf
if num_total_chars < 3: num_total_chars = 3
format_string = ("{0:s}{1:s}{2:d}{3:s}{4:d}{5:s}{6:s}".format('{0:', nix, num_total_chars,
'.', num_postfix_chars, format_type, '}'))
else: # number is an integer
format_type = 'd'
if abs_num != 0:
num_digits = np.int_(np.log10(abs_num))+1
else:
num_digits = 1
num_total_chars = np.maximum(num_digits + num_sign_chars, prec)
if hasmask: # need space for --
if num_total_chars < 2: num_total_chars = 2
if hasnan: # need space for nan or inf
if num_total_chars < 3: num_total_chars = 3
format_string = ("{0:s}{1:s}{2:d}{3:s}{4:s}".format('{0:', nix, num_total_chars, format_type, '}'))
#
if (isarr == 0):
out = format_string.format(num)
# Special treatment of -0.0
if np.float(out) == 0:
out = format_string.format(0)
else:
fnum = num.flatten()
nnum = fnum.size
import sys
if sys.hexversion > int('0x3000000',base=16):
styp = 'U{0:d}'.format(num_total_chars)
else:
styp = 'S{0:d}'.format(num_total_chars)
out = np.empty(nnum, dtype=styp)
for i in range(nnum):
if str(fnum[i]) == '--':
sformat_string = ("{0:s}{1:d}s{2:s}".format('{0:', num_total_chars, '}'))
out[i] = sformat_string.format('--')
else:
out[i] = format_string.format(fnum[i])
if np.float(out[i]) == 0:
out[i] = format_string.format(0)
out = np.reshape(out, num.shape)
if (set_printoptions | pp):
# num_total_chars+3 for '' and space, +isarr for []
np.set_printoptions(linewidth=num.shape[-1]*(num_total_chars+3)+isarr, threshold=nnum+1)
if (join | joinall): # There should be reduction routines in numpy
if ((isarr == 1) | ((isarr==2) & joinall)):
if (isarr == 2):
out = out.flatten()
for i in range(out.size):
if (i==0):
outc = out[i]
else:
outc = outc+sep+out[i]
else:
if sys.hexversion > int('0x3000000',base=16):
sform = 'U{0:d}'.format((len(out[0,0])+len(sep))*out.shape[1])
else:
sform = 'S{0:d}'.format((len(out[0,0])+len(sep))*out.shape[1])
outc = np.zeros(out.shape[0], dtype=sform)
for j in range(out.shape[0]):
for i in range(out.shape[1]):
if (i==0):
outc[j] = out[j,i]
else:
outc[j] = outc[j]+sep+out[j,i]
out = outc
# return formatted string
return out
def astr(num, prec=0, zero=False, set_printoptions=False, pp=True, join=False, joinall=False, sep=' '):
"""
Wrapper function for autostring with pp=True by default.
def autostring(num, prec=0, zero=False, set_printoptions=False, pp=False, join=False, joinall=False, sep=' '):
Examples
--------
>>> print(astr(3.5967, 3))
3.597
>>> print(astr(3.5967))
4
>>> print(astr(3, 3))
3
>>> print(astr(np.array([3.5967, 3.5964]), 3))
['3.597' '3.596']
>>> print(astr(np.array([3.59, 1.123456e12]), 3))
['3.590e+00' '1.123e+12']
>>> print(astr(np.array([3.59, 11.1234]), 3, zero=True))
['03.590' '11.123']
>>> print(astr(np.array([3, 11])))
[' 3' '11']
>>> print(astr(np.array([3, 11]), 3))
[' 3' ' 11']
>>> print(astr(np.zeros((2,2), dtype=np.float), 1))
[['0.0' '0.0']
['0.0' '0.0']]
>>> np.set_printoptions(threshold=10)
>>> print(astr(np.zeros((2,10), dtype=np.float), 1))
[['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
>>> print(astr(np.zeros((2,10), dtype=np.float), 1, set_printoptions=True))
[['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
>>> print(astr(np.zeros((2,10), dtype=np.float), 1, pp=True))
[['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
>>> print(astr(np.zeros((2,10), dtype=np.float), 1, set_printoptions=False, pp=True))
[['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
>>> print(astr(np.array([3.5967, 3.5964]), 3, join=True))
3.597 3.596
>>> print(astr(np.zeros((2,10), dtype=np.float), 1, join=True, sep=';'))
['0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0'
'0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0']
>>> print(astr(np.reshape(np.arange(20,dtype=np.float),(2,10)), 1, joinall=True, sep=';'))
0.0; 1.0; 2.0; 3.0; 4.0; 5.0; 6.0; 7.0; 8.0; 9.0;10.0;11.0;12.0;13.0;14.0;15.0;16.0;17.0;18.0;19.0
>>> print(astr(np.array([3, 11, np.inf])))
[' 3' ' 11' 'inf']
>>> print(astr(np.array([3, 11, np.nan])))
[' 3' ' 11' 'nan']
>>> print(astr(np.ma.array([3, 11, np.nan], mask=[False,True,False])))
[' 3' '-- ' 'nan']
>>> print(astr(np.ma.array([3, 11, np.nan], mask=[False,False,True])))
[' 3' '11' '--']
"""
return autostring(num, prec=prec, zero=zero, set_printoptions=set_printoptions,
pp=pp, join=join, joinall=joinall, sep=sep)
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
# print(autostring(np.array([3, 11, np.nan])))
# #[' 3' ' 11' 'nan']
# print(autostring(np.ma.array([3, 11, np.nan], mask=[False,True,False])))
# #[' 3' '-- ' 'nan']
# print(autostring(np.ma.array([3, 11, np.nan], mask=[False,False,True])))
# #[' 3' ' 11' '-- ']
|
py | 1a48045e6e056748a8761ee081a5ed8231cad891 | import os
import sys
import cookielib
import urllib2
import json
import time
import resolver
chemblSite = "https://www.ebi.ac.uk/chembl/api/data/"
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(
urllib2.HTTPRedirectHandler(),
urllib2.HTTPHandler(debuglevel=0),
urllib2.HTTPSHandler(debuglevel=0),
urllib2.HTTPCookieProcessor(cookies))
opener.addheaders = [
('User-agent', ('Mozilla/4.0 (compatible; MSIE 6.0; '
'Windows NT 5.2; .NET CLR 1.1.4322)'))
]
def requestJson(uri):
try:
handle = opener.open(uri)
response = handle.read()
handle.close()
obj = json.loads(response)
return obj
except:
sys.stderr.write("failed: "+uri+"\n")
sys.stderr.flush()
time.sleep(5)
def iterateThruPages(task, func, header):
filename = 'chembl/'+task+'.txt'
skip = 0
if not os.path.exists(filename):
fp = open(filename, 'w')
fp.write('\t'.join(header) + '\tref_id\tref_url\tref_type\n')
fp.close()
else:
skip = len(open(filename).readlines()) - 1
fp = open(filename, 'a')
top = 20
max = 0
while skip < max or max == 0:
uri = chemblSite + task + '?format=json&limit='+str(top)+'&offset='+str(skip)
obj = requestJson(uri)
if max == 0:
max = obj['page_meta']['total_count']
if not obj.has_key(task+'s'):
newobj = dict()
newobj[task+'s'] = []
newobj[task+'s'].append(obj)
obj = newobj
skip = max
if obj is None:
skip = skip
elif len(obj[task+'s']) == 0:
skip = max
else:
for entry in obj[task+'s']:
func(fp, entry, header)
sys.stderr.write(uri+"\n")
sys.stderr.flush()
skip = skip + top
fp.close()
return
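# The paged request URIs built above look like (illustrative):
#   https://www.ebi.ac.uk/chembl/api/data/drug_indication?format=json&limit=20&offset=0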
def safeAdd(string, obj, delim = ""):
if obj is None:
return string + delim
try:
obj = obj.encode('ascii', 'ignore')
except:
obj = str(obj)
obj = obj.replace('\n', ' ')
obj = obj.replace('\t', ' ')
return string + obj + delim
def getRefType(refType):
refTypes = ['DailyMed', 'FDA', 'ATC', 'ClinicalTrials', 'PubMed', 'Wikipedia']
if refType in refTypes:
return refTypes.index(refType)
return len(refTypes)
def getSynType(refType):
refTypes = ['FDA', 'INN', 'USAN', 'ATC', 'BAN', 'JAN', 'MI', 'USP', 'MERCK_INDEX', 'RESEARCH_CODE']
if refType in refTypes:
return refTypes.index(refType)
return len(refTypes)
def drugIndicationParse(fp, entry, header):
if 'mesh_id' not in entry or entry['mesh_id'] == "":
print entry
print "SFDFSD"
sys.exit()
oline = ''
for item in header:
oline = safeAdd(oline, entry[item], '\t')
bestRef = None
for item in entry['indication_refs']:
if bestRef == None or getRefType(item['ref_type']) < getRefType(bestRef['ref_type']):
bestRef = item
if bestRef != None:
oline = safeAdd(oline, bestRef['ref_id'], '\t')
oline = safeAdd(oline, bestRef['ref_url'], '\t')
oline = safeAdd(oline, bestRef['ref_type'])
if bestRef['ref_type'] == 'ClinicalTrials':
oline = oline + ' Phase ' + str(entry['max_phase'])
fp.write(oline)
fp.write('\n')
return
def mechanismParse(fp, entry, header):
oline = ''
for item in header:
oline = safeAdd(oline, entry[item], '\t')
bestRef = None
for item in entry['mechanism_refs']:
if bestRef == None or getRefType(item['ref_type']) < getRefType(bestRef['ref_type']):
bestRef = item
if bestRef != None:
oline = safeAdd(oline, bestRef['ref_id'], '\t')
oline = safeAdd(oline, bestRef['ref_url'], '\t')
oline = safeAdd(oline, bestRef['ref_type'])
if bestRef['ref_type'] == 'ClinicalTrials':
oline = oline + ' Phase ' + str(entry['max_phase'])
fp.write(oline)
fp.write('\n')
return
def metabolismParse(fp, entry, header):
oline = ''
for item in header:
oline = safeAdd(oline, entry[item], '\t')
bestRef = None
for item in entry['metabolism_refs']:
if bestRef == None or getRefType(item['ref_type']) < getRefType(bestRef['ref_type']):
bestRef = item
if bestRef != None:
oline = safeAdd(oline, bestRef['ref_id'], '\t')
oline = safeAdd(oline, bestRef['ref_url'], '\t')
oline = safeAdd(oline, bestRef['ref_type'])
if bestRef['ref_type'] == 'ClinicalTrials':
oline = oline + ' Phase ' + str(entry['max_phase_for_ind'])
fp.write(oline)
fp.write('\n')
return
def drugParse(fp, entry, header):
oline = ''
for item in header:
oline = safeAdd(oline, entry[item], '\t')
bestRef = None
for item in entry['molecule_synonyms']:
if bestRef == None or getSynType(item['syn_type']) < getSynType(bestRef['syn_type']):
bestRef = item
if bestRef != None:
oline = safeAdd(oline, bestRef['molecule_synonym'], '\t')
oline = safeAdd(oline, bestRef['synonyms'], '\t')
oline = safeAdd(oline, bestRef['syn_type'])
fp.write(oline)
fp.write('\n')
return
def molParse(fp, entry, header):
oline = ''
for item in header:
oline = safeAdd(oline, entry[item], '\t')
bestRef = None
for item in entry['molecule_synonyms']:
if bestRef == None or getSynType(item['syn_type']) < getSynType(bestRef['syn_type']):
bestRef = item
if bestRef != None:
oline = safeAdd(oline, bestRef['molecule_synonym'], '\t')
oline = safeAdd(oline, bestRef['synonyms'], '\t')
oline = safeAdd(oline, bestRef['syn_type'])
canSmiles = ''
if 'molecule_structures' in entry:
if entry['molecule_structures'] != None and 'canonical_smiles' in entry['molecule_structures']:
oline = oline + '\t'
oline = safeAdd(oline, entry['molecule_structures']['canonical_smiles'])
canSmiles = entry['molecule_structures']['canonical_smiles']
fp.write(oline)
fp.write('\n')
return [bestRef['molecule_synonym'], canSmiles]
def updateFiles():
header = ['molecule_chembl_id', 'parent_molecule_chembl_id', 'mesh_id', 'mesh_heading', 'efo_term', 'drugind_id']
iterateThruPages('drug_indication', drugIndicationParse, header)
header = ['molecule_chembl_id', 'target_chembl_id', 'action_type', 'mechanism_comment', 'mechanism_of_action']
iterateThruPages('mechanism', mechanismParse, header)
header = ['drug_chembl_id', 'substrate_chembl_id', 'metabolite_chembl_id', 'target_chembl_id', 'organism', 'substrate_name', 'metabolite_name', 'met_conversion']
iterateThruPages('metabolism', metabolismParse, header)
header = ['molecule_chembl_id', 'first_approval', 'first_in_class', 'usan_year', 'prodrug', 'oral', 'parenteral', 'topical', 'withdrawn_country', 'withdrawn_reason', 'withdrawn_year']
iterateThruPages('drug', drugParse, header)
def readFileDict(filename, keyCol, valCols):
adict = dict()
fp = open(filename, 'r')
header = fp.readline().strip().split('\t')
k = header.index(keyCol)
vs = []
for item in valCols:
vs.append(header.index(item))
line = fp.readline()
while line != "":
sline = line[0:-1].split("\t")
entry = []
for i in range(len(vs)):
if len(sline) > vs[i]:
entry.append(sline[vs[i]])
else:
entry.append('')
adict[sline[k]] = entry
line = fp.readline()
fp.close()
return adict
def updateChemblMol():
drugfile = 'chembl/drug.txt'
molfile = 'chembl/chembl-mol.txt'
c2Name = readFileDict(drugfile, 'molecule_chembl_id', ['ref_id'])
header = ['molecule_chembl_id', 'first_approval', 'first_in_class', 'usan_year', 'prodrug', 'oral', 'parenteral', 'topical', 'withdrawn_country', 'withdrawn_reason', 'withdrawn_year']
if not os.path.exists(molfile):
fp = open(molfile, 'w')
fp.write('\t'.join(header) + '\tref_id\tref_url\tref_type\tcan_smiles\n')
fp.close()
else:
c2Name2 = readFileDict(molfile, 'molecule_chembl_id', ['ref_id', 'can_smiles'])
for key in c2Name2:
if key not in c2Name:
c2Name[key] = c2Name2[key]
files = []
files.append(['chembl/drug_indication.txt', 'molecule_chembl_id', ['molecule_chembl_id']])
files.append(['chembl/mechanism.txt', 'molecule_chembl_id', ['molecule_chembl_id']])
files.append(['chembl/metabolism.txt', 'drug_chembl_id', ['drug_chembl_id']])
files.append(['chembl/metabolism.txt', 'substrate_chembl_id', ['substrate_chembl_id']])
files.append(['chembl/metabolism.txt', 'metabolite_chembl_id', ['metabolite_chembl_id']])
fp = open(molfile, 'a')
for item in files:
entries = readFileDict(item[0], item[1], item[2])
for key in entries:
if key not in c2Name.keys():
uri = 'https://www.ebi.ac.uk/chembl/api/data/molecule/'+key+'?format=json'
sys.stderr.write(key+'\n')
obj = requestJson(uri)
entry = molParse(fp, obj, header)
c2Name[key] = entry
fp.close()
return c2Name
if __name__=="__main__":
# first delete files from chembl dir, then run:
#updateFiles()
# get ChEMBL definitions of all of the relevant molecules
c2Name = updateChemblMol()
# map to UNIIs
uniiMap = dict()
uniiMapFile = 'chembl/chembl-unii_map.txt'
if os.path.exists(uniiMapFile):
mapping = open(uniiMapFile, 'r').readlines()
for line in mapping:
sline = line.strip().split('\t')
if sline[1] != "_N/A":
uniiMap[sline[0]] = sline[1]
mappingFP = open(uniiMapFile, 'a')
#c2Name = dict()
#c2Name['CHEMBL2107829'] = ['Emixustat HCl', 'Cl.NCC[C@@H](O)c1cccc(OCC2CCCCC2)c1']
#c2Name['CHEMBL4297511'] = ['Firibastat']
for key in c2Name:
if key not in uniiMap:
unii = resolver.resolveName(c2Name[key])
if len(unii) != 10:
if len(unii) < 1 and len(c2Name[key][0]) > 0 or (len(c2Name[key]) > 1 and len(c2Name[key][1]) > 0):
print key, c2Name[key], unii
#sys.exit()
unii = "_N/A"
else:
uniiMap[key] = unii
mappingFP.write(key+"\t"+unii)
for item in c2Name[key]:
mappingFP.write("\t"+item)
mappingFP.write("\n")
mappingFP.flush()
mappingFP.close()
|
py | 1a4804609d03c788dd4fad63b4e2c01b9ba6ef42 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import math
"""
############# GENERAL GAS CLASS ###########
"""
class Gas():
def __init__(self,T,P,R_u=8.31447):
self.T = T
self.P = P
self.R_u=R_u
self.normalshock=self.Shock(self)
def gas_list(self):
print(" Code\t","Gas","\n",
"----\t","---","\n",
"Air\t","Air" ,"\n",
"Ar\t\t","Argon" ,"\n" ,
"CO2\t","Carbon dioxide" ,"\n",
"CO\t\t","Carbon monoxide" ,"\n",
"N2\t\t","Nitrogen" ,"\n"
)
def area(self, diameter):
return (np.pi*(diameter**2))/4
def critical_area(self,massflowrate):
return massflowrate/(self.P*1000*(self.k**(1/2))*(2/(self.k+1))**((self.k+1)/(2*self.k-2))/((self.R*1000*self.T)**(1/2)))
def critical_m_dot(self, Ma, diameter=1):
return self.critical_density()*self.area(diameter)*self.critical_speed_of_sound(Ma)
def critical_temperature(self, Ma):
return self.stagnation_temp(Ma)*2/(self.k+1)
def critical_pressure(self):
return self.P*(2/(self.k+1))**(self.k/(self.k-1))
def critical_density(self):
return self.rho*(2/(self.k+1))**(1/(self.k-1))
def critical_speed_of_sound(self, Ma):
return np.sqrt(self.k*self.R*self.critical_temperature(Ma)*1000)
def density(self):
return self.P/(self.R*self.T)
def diameter(self, area):
return np.sqrt(4/np.pi*area)
def enthalpy(self):
return self.cp*self.T
def exit_temperature(self,Mach):
return self.T/(1+(self.k-1)/2*Mach**2)
def exit_pressure(self,Mach):
return self.P/(1+(self.k-1)/2*Mach**2)**(self.k/(self.k-1))
def exit_density(self, Mach):
return self.rho/(1+(self.k-1)/2*Mach**2)**(1/(self.k-1))
def exit_speed(self, Mach):
return Mach*np.sqrt(self.k*self.R*self.exit_temperature(Mach)*1000)
def exit_area(self, Throat_Area, Mach):
return Throat_Area*(1/Mach)*((2/(self.k+1))*(1+(self.k-1)/2*Mach**2))**((self.k+1)/(2*self.k-2))
def mach_number(self, velocity):
return velocity/self.speed_of_sound()
def m_dot(self, velocity, diameter=1):
return self.density()*self.area(diameter)*velocity
def mfr(self,velocity, diameter):
return self.critical_pressure()*self.area(diameter)*self.mach_number(velocity)*np.sqrt(self.k/(self.R*self.critical_temperature()))
def mass_flowrate(self, velocity, diameter=1):
return (self.area(diameter)*self.mach_number(velocity)*self.stagnation_pressure(velocity)*np.sqrt(self.k/(self.R*self.stagnation_temp(velocity))))\
/((1+(self.k-1)*(self.mach_number(velocity)**2)/2)**((self.k+1)/(2*(self.k-1))))
def ma_finder(self, section, area_ratio, show_iterations=False, tolerance=10e-6, method="bisection"):
try:
if section !="upward" and section !="downward":
raise NameError("Please specify the flow by using these keywords: \"upward\" or \"downward\"")
def finder(Ma):
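                # Isentropic area-Mach relation, whose root is the Mach number
                # matching the requested area ratio:
                #   A/A* = (1/Ma) * [(2/(k+1)) * (1 + (k-1)/2 * Ma^2)]^((k+1)/(2(k-1)))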
value = (1/Ma*((1+0.5*(self.k-1)*Ma**2)/(0.5*(self.k+1)))**(0.5*(self.k+1)/(self.k-1)))
if method=='golden' or method=='secant':
target = abs(value - area_ratio)
elif method=='bisection':
target = value - area_ratio
return target
# def check_boundaries(Ma_0, Ma_1):
# if section=="upward":
# if Ma_0>1 or Ma_1>1:
# Ma_0 = 1/Ma_0
# Ma_1 = Ma_0+0.001
# # print("ma kucuk 1 den calisti")
# elif section=="downward":
# if Ma_0<1 or Ma_1<1:
# Ma_0 = 1+Ma_0
# Ma_1 = Ma_0+0.1
# # print("ma buyuk 1 den calisti")
if section=="upward":
if method=='bisection':
Ma=bisection_method( finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='secant':
Ma=secant_method( finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='golden':
Ma=golden_section(finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations)
elif section=="downward":
if method=='bisection':
Ma=bisection_method( finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='secant':
Ma=secant_method( finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='golden':
Ma=golden_section(finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations)
return Ma
except NameError:
raise NameError("Please specify the flow by using these keywords: \"upward\" or \"downward\"") from None
except ValueError:
raise ValueError("Given area is smaller than throat area. Program has terminated.\n Hint: You could change the division number.") from None
def plot(self,area_start, area_end, Mach_start, y_axis='T', color_bar='Ma', division=250 ,x_axis='A', method="bisection"):
area_upward = np.linspace(area_start, self.throat_area(area_start,Mach_start), division)
area_downward = np.linspace(self.throat_area(area_start,Mach_start), area_end, division)
area_total = np.concatenate((area_upward,area_downward))
ST = self.stagnation_temp(Mach_start)
temp_upward = []
Ma_upward = []
for i in range(division):
ratio = self.throat_area_ratio(area_upward[i], area_start, Mach_start)
Ma=self.ma_finder("upward",ratio,method=method)
Ma_upward.append(Ma)
temp_upward.append(self.temperature(Ma, ST))
temp_downward = []
Ma_downward = []
for i in range(division):
ratio = self.throat_area_ratio(area_downward[i], area_start, Mach_start)
Ma=self.ma_finder("downward",ratio,method=method)
Ma_downward.append(Ma)
temp_downward.append(self.temperature(Ma, ST))
temp_total = temp_upward +temp_downward
Ma_total = Ma_upward +Ma_downward
fig = plt.figure(figsize=(10,7.5))
ax = fig.add_subplot(111)
xs = np.linspace(0,1,2*division)
if y_axis == 'T':
y_lbl='Temperature (K)'
if color_bar=='Ma':
color = Ma_total
mp = ax.scatter((xs),(temp_total),c=color,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'Mach Number'
elif color_bar=='T':
mp = ax.scatter((xs),(temp_total),c=temp_total,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'T (K)'
elif y_axis == 'Ma':
y_lbl='Mach Number'
if color_bar=='Ma':
color = Ma_total
mp = ax.scatter((xs),(Ma_total),c=color,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'Mach Number'
elif color_bar=='T':
mp = ax.scatter((xs),(Ma_total),c=temp_total,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'T (K)'
cb = plt.colorbar(mp)
cb.set_label(c_lbl)
ax.set(title=r'Converging- Diverging Nozzle',
xlabel='Area $m^2$', ylabel=y_lbl)
tick_labels=[]
for j in np.linspace(0,(2*division),7):
if j==2*division:
tick_labels.append(round(area_total[-1],4))
else:
tick_labels.append(round(area_total[int(j)],4))
plt.xticks(np.linspace(0,1,7),tick_labels)
plt.show()
def pressure(self, Mach, Stagnation_Pressure):
return Stagnation_Pressure/((1+0.5*(self.k-1)*Mach**2)**(self.k/(self.k-1)))
def speed_of_sound(self):
return np.sqrt(self.k*self.R*self.T*1000)
def stagnation_temp(self,Mach):
return self.T*(1+(self.k-1)/2*Mach**2)
def stagnation_pressure(self,Mach):
return self.P*(1+0.5*(self.k-1)*Mach**2)**(self.k/(self.k-1))
def temperature(self, Mach, Stagnation_Temperature):
return Stagnation_Temperature/(1+(self.k-1)/2*Mach**2)
def throat_area(self,known_area,Mach):
return known_area/((1/Mach)*((2/(self.k+1))*(1+(self.k-1)/2*Mach**2))**((self.k+1)/(2*self.k-2)))
def throat_area_ratio(self,wanted_area, known_area,known_Mach):
return wanted_area/self.throat_area(known_area, known_Mach)
class Shock():
def __init__(self, gas):
self.gas = gas
def P2(self, Ma1, P1):
return P1*(1/(self.gas.k+1)*(2*self.gas.k*Ma1**2-(self.gas.k-1)))
def Ma2(self,Ma1):
return np.sqrt(((self.gas.k-1)*Ma1**2+2)/(2*self.gas.k*Ma1**2-(self.gas.k-1)))
def P0_2(self,Stagnation_Pressure, Ma1):
return Stagnation_Pressure*((((self.gas.k+1)*Ma1**2)/(2+(self.gas.k-1)*Ma1**2))**(self.gas.k/(self.gas.k-1))\
*((self.gas.k+1)/(2*self.gas.k*Ma1**2-(self.gas.k-1)))**(1/(self.gas.k-1)))
def area_shock_star(self, area1_star, Ma1):
return area1_star*(self.Ma2(Ma1)/Ma1)*((2+(self.gas.k-1)*Ma1**2)/(2+(self.gas.k-1)*self.Ma2(Ma1)**2))**((self.gas.k+1)/(2*self.gas.k-2))
def Ma_beforeshock(self, P2_P1):
return np.sqrt((P2_P1*(self.gas.k+1)+(self.gas.k-1))/(2*self.gas.k))
def T2(self,T1,Ma1):
return T1*(2+(self.gas.k-1)*Ma1**2)*(2*self.gas.k*Ma1**2-(self.gas.k-1))/(((self.gas.k+1)**2)*(Ma1**2))
def V2(self, T1, V1):
return np.sqrt(2*self.gas.cp*(T1-self.T2(T1, V1/(self.gas.speed_of_sound())))+V1**2)
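# Quick check (sketch): for air (k = 1.4) at Ma1 = 2 the relations above give
# Ma2 = sqrt((0.4*4 + 2)/(2.8*4 - 0.4)) ~ 0.577 and P2/P1 = 10.8/2.4 = 4.5,
# in agreement with standard normal-shock tables.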
class Air(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=28.97
self.k=1.4
self.R=self.R_u/self.M
self.cp=1.9327E-10*self.T**4 - 7.9999E-07*self.T**3 + 1.1407E-03*self.T**2 - 4.4890E-01*self.T + 1.0575E+03
self.rho = self.P/(self.R*self.T)
class CO2(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=44.01
self.k=1.289
self.R=self.R_u/self.M
self.cp=0.849
self.rho = self.P/(self.R*self.T)
class CO(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=28.01
self.k=1.4
self.R=self.R_u/self.M
self.cp=1.039
self.rho = self.P/(self.R*self.T)
class N2(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=28.01
self.k=1.4
self.R=self.R_u/self.M
self.cp=1.040
self.rho = self.P/(self.R*self.T)
class Ar(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=39.95
self.k=1.667
self.R=self.R_u/self.M
self.cp=0.5203
self.rho = self.P/(self.R*self.T)
"""
############# NUMERICAL METHODS ###########
"""
def golden_section(func, starting, ending, show_iterations=False, tolerance=10e-6):
    gr = (np.sqrt(5) + 1)/2 - 1          # golden-ratio conjugate, ~0.618
    dm = tolerance
    a0 = starting + dm
    b0 = ending - dm
    count = 0
    while True:
        count += 1
        d = gr*(b0 - a0)
        a1 = a0 + d                      # upper interior point
        b1 = b0 - d                      # lower interior point
        if abs((a1 - b1)/a1) <= tolerance:
            if ending <= 1:
                print("The Mach number below unity is: ", a1, "\n")
            elif starting >= 1:
                print("The Mach number above unity is: ", a1, "\n")
            break
        else:
            # keep the subinterval that still contains the minimum
            if func(b1) < func(a1):
                b0 = a1
            else:
                a0 = b1
        if show_iterations == True:
            print("Iteration ", count, " :", a1)
    return (a1 + b1)/2
def secant_method(func, lower_bound, upper_bound, show_iterations=False,tolerance=10e-6):
Ma_0 = (upper_bound+lower_bound)/2
dMa = 0.01
Ma_1 = Ma_0+dMa
count=0
while True:
count+=1
Ma_2 = Ma_1 - func(Ma_1)*(Ma_1-Ma_0)/(func(Ma_1)-func(Ma_0))
if show_iterations ==True:
print("Iteration ", count, " :",Ma_2)
        if abs(func(Ma_2)) <= tolerance:
            if show_iterations == True:
                print("The root is: ", Ma_2, "\n")
            break
else:
Ma_0 = Ma_1
Ma_1 = Ma_2
return Ma_2
def bisection_method(func, lower_bound, upper_bound, show_iterations=False,tolerance=10e-6):
if lower_bound==0 :
lower_bound+=tolerance
a=lower_bound
b= upper_bound
count = 0
while True:
count+=1
c = (a+b)/2
if abs(func(c))<=tolerance:
if show_iterations ==True:
print("The root is: ",c,"\n")
break
        else:
            # keep the half-interval whose endpoints still bracket the root
            if func(a) * func(c) > 0:
                a = c
            else:
                b = c
if show_iterations ==True:
print("Iteration ", count, " :",c)
return c
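# Minimal sketch exercising the three solvers above; the quadratic is an
# arbitrary test function, not part of the nozzle model.
def _demo_root_finders():
    f = lambda x: x**2 - 2.0                      # root at sqrt(2) ~ 1.41421
    root_bisect = bisection_method(f, 1, 5)
    root_secant = secant_method(f, 1, 5)
    # golden_section minimizes, so feed it |f| to locate the same root
    root_golden = golden_section(lambda x: abs(f(x)), 0, 5)
    return root_bisect, root_secant, root_golden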
"""
############# ROCKET NOZZLE CLASS ###########
"""
class Nozzle(Gas):
    def __init__(self, class_gas):
        self.T = class_gas.T
        self.P = class_gas.P
        self.k = class_gas.k
        self.M = class_gas.M
        self.R = class_gas.R_u/class_gas.M
        self.cp = class_gas.cp
        self.rho = class_gas.P/(class_gas.R*class_gas.T)
def critical_throat_pressure(self):
return self.P*(2/(self.k+1))**(self.k/(self.k-1))
def exit_mach(self,backflow_pressure):
if self.ischoked(backflow_pressure):
Ma = 1
else:
            # Generalized isentropic exit Mach; with k = 1.4 this reduces to
            # the air-only form sqrt(5*((P0/Pb)**(2/7) - 1)) used previously.
            Ma = np.sqrt((2/(self.k-1))*((self.P/backflow_pressure)**((self.k-1)/self.k)-1))
return Ma
def ischoked(self, backflow_pressure ):
if backflow_pressure < self.critical_pressure():
condition=True
else:
condition = False
return condition
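    # Converging-nozzle mass flow: once the backflow pressure drops below the
    # critical (choked) value the throat Mach number is pinned at 1, so the
    # critical pressure replaces the backflow pressure in the same relation.
    # P is in kPa and R*T in kJ/kg, hence the *1000 conversions below.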
def massflowrate(self, backflow_pressure, area):
if self.ischoked(backflow_pressure):
mdot = (area*self.P*1000)/(np.sqrt(self.R*self.T*1000))*np.sqrt((2*self.k/(self.k-1))*((self.critical_pressure()/self.P)**(2/self.k))*(1-(self.critical_pressure()/self.P)**(1-1/self.k)))
else:
mdot = (area*self.P*1000)/(np.sqrt(self.R*self.T*1000))*np.sqrt((2*self.k/(self.k-1))*((backflow_pressure/self.P)**(2/self.k))*(1-(backflow_pressure/self.P)**(1-1/self.k)))
return mdot
class RocketNozzle(Gas):
    def __init__(self, class_gas):
        self.T = class_gas.T
        self.P = class_gas.P
        self.k = class_gas.k
        self.M = class_gas.M
        self.R = class_gas.R_u/class_gas.M
        self.cp = class_gas.cp
        self.rho = class_gas.P/(class_gas.R*class_gas.T)
        self.normalshock = Shock(self)   # module-level Shock class defined above
    def geometry(self, area_start, area_throat, area_end, division=250, color='black'):
        A_start = area_start
        A1_star = area_throat
        A_exit = area_end
        r1 = int((A_start/A1_star)/(A_start/A1_star + A_exit/A1_star)*division)
        r2 = int((A_exit/A1_star)/(A_start/A1_star + A_exit/A1_star)*division)
area_upward = np.linspace((A_start), (A1_star), r1)
area_downward = np.linspace((A1_star), (A_exit), r2)
area_total = np.concatenate((area_upward,area_downward))
diameter_total = self.diameter(area_total)
# plt.style.use('dark_background')
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
xs = np.linspace(0,1,r1+r2)
tick_labels=[]
for j in np.linspace(0,(r1+r2),11):
if j==r1+r2:
tick_labels.append(round(area_total[-1],4))
else:
tick_labels.append(round(area_total[int(j)],4))
plt.xticks(np.linspace(0,1,11),tick_labels)
plt.plot(xs,diameter_total/2,color=color,linewidth=3)
plt.plot(xs,-diameter_total/2,color=color,linewidth=3)
centerline,=plt.plot(xs, 0*xs,linewidth=1,color=color)
dashes=[30,5,5,5]
centerline.set_dashes(dashes)
plt.xlabel("Area (m2)")
plt.ylabel("Radius (m)")
plt.title("Rocket Nozzle Geometry")
plt.show()
plt.style.use('default')
def shock(self, exit_pressure, throat_area, exit_area, start_area, plot=True,division = 250):
def shock_finder(A_shock):
ratio = A_shock/throat_area
M1 = self.ma_finder('downward', ratio)
P1 = self.pressure(M1, self.P)
T1 = self.temperature(M1, self.T)
M2 = self.normalshock.Ma2(M1)
P2 = self.normalshock.P2(M1,P1)
T2 = self.normalshock.T2(T1, M1)
P02 = self.normalshock.P0_2(self.P, M1)
A2_star = self.normalshock.area_shock_star(throat_area, M1)
ratio2 = exit_area/A2_star
Me = self.ma_finder('upward', ratio2)
Pe = self.pressure(Me,P02)
target = Pe-exit_pressure
return target
if shock_finder(exit_area)>0:
print("There is no shock wave in the rocket nozzle")
A_shock = None
else:
A_shock=bisection_method( shock_finder,throat_area, exit_area, tolerance = 10e-3,show_iterations=True)
def shock_plot(start_area):
A_start = start_area
A1_star = throat_area
A_exit = exit_area
r1=int((A_start/A1_star)/(A_start/A1_star+A_exit/A1_star)*division)
r2=int((A_exit/A1_star)/(A_start/A1_star+A_exit/A1_star)*division)
area_upward = np.linspace((start_area), (throat_area), r1)
area_downward = np.linspace((throat_area), (exit_area), r2)
area_total = np.concatenate((area_upward,area_downward))
def find_closest(A, target):
#A must be sorted
idx = A.searchsorted(target)
idx = np.clip(idx, 1, len(A)-1)
left = A[idx-1]
right = A[idx]
idx -= target - left < right - target
return idx
idx=find_closest(area_total,A_shock)
r=self.diameter(A_shock)/2
plt.style.use('dark_background')
self.geometry(start_area, throat_area, exit_area,color='white')
y=np.linspace(r,-r)
# correction = ((A_shock/throat_area)+(start_area/throat_area))/((exit_area/throat_area)+(start_area/throat_area))
x=A_shock*np.sin(5000*y)+idx/division
plt.plot(x,y,color='gold')
plt.show()
plt.style.use('default')
if plot==True:
shock_plot(start_area)
return A_shock
"""
############# RELATIONS CLASS ###########
"""
class relations:
    @staticmethod
    def change_in_entropy(T2, T1, P2, P1, cp, R):
        return cp*np.log(T2/T1) - R*np.log(P2/P1)
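# Usage sketch (illustrative values; relies on the Gas helpers defined above):
if __name__ == "__main__":
    air = Air(T=288.15, P=101.325)
    Ma = 2.0
    print("T0   =", air.stagnation_temp(Ma), "K")
    print("P0   =", air.stagnation_pressure(Ma), "kPa")
    print("A/A* =", air.exit_area(1.0, Ma))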
|
py | 1a48060acda1bd590cb82373fafb13720526bfae | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
import sys
# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
# system, use it to handle IPAddress ServerAltnames (this was added in
# python-3.5) otherwise only do DNS matching. This allows
# backports.ssl_match_hostname to continue to be used in Python 2.7.
try:
from pip._vendor import ipaddress
except ImportError:
ipaddress = None
__version__ = '3.5.0.1'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def _to_unicode(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding='ascii', errors='strict')
return obj
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence from upstream: ipaddress can't handle byte str
host_ip = ipaddress.ip_address(_to_unicode(hostname))
except ValueError:
# Not an IP address (common case)
host_ip = None
except UnicodeError:
# Divergence from upstream: Have to deal with ipaddress not taking
# byte strings. addresses should be all ascii, so we consider it not
# an ipaddress in this case
host_ip = None
except AttributeError:
# Divergence from upstream: Make ipaddress library optional
if ipaddress is None:
host_ip = None
else:
raise
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
py | 1a48080d5124f2bef4842f33be8703971e0c4ffe | from django.shortcuts import render, redirect
from django.core.exceptions import ValidationError
from lists.models import Item, List
from lists.forms import ItemForm, ExistingListItemForm
# Create your views here.
def home_page(request):
return render(request, 'home.html', { 'form': ItemForm() })
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
form = ExistingListItemForm(for_list=list_)
if request.method == 'POST':
form = ExistingListItemForm(for_list=list_, data=request.POST)
if form.is_valid():
form.save()
return redirect(list_)
return render(request, 'list.html', { 'list': list_, 'form': form })
def new_list(request):
form = ItemForm(data=request.POST)
if form.is_valid():
list_ = List.objects.create()
form.save(for_list=list_)
return redirect(list_)
else:
return render(request, 'home.html', { 'form': form })
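# Sketch of the URL wiring these views assume (pattern names and module paths
# are hypothetical, not taken from this project's actual urls.py):
#
#     from django.conf.urls import url
#     from lists import views
#
#     urlpatterns = [
#         url(r'^$', views.home_page, name='home'),
#         url(r'^lists/new$', views.new_list, name='new_list'),
#         url(r'^lists/(\d+)/$', views.view_list, name='view_list'),
#     ]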
|
py | 1a4808afee97c55fc6cd3956cf58ffbeb295622e | import numpy as np
import matplotlib.pyplot as plt
# Example from the guide: plot f(x) = x**2 together with its tangent lines at x = -2, 0, 2.
a = np.arange(-5, 5, 0.1)
f_x = np.power(a,2)
plt.plot(a, f_x)
plt.xlim(-5,5)
plt.ylim(-5,15)
k = np.array([-2,0,2])
plt.plot(k, k**2, "bo")
for i in k:
plt.plot(a, (2*i)*a-(i**2))
plt.show()
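# Each straight line is the tangent of f(x) = x**2 at x = k:
# y = f(k) + f'(k)*(x - k) = 2*k*x - k**2, exactly the (2*i)*a - i**2 above.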
|
py | 1a4808df5cf603bdb63fe31c5c8d6e02d3a62325 |
from django.test import TestCase
from author.models import *
from Followers.models import *
from django.contrib.auth import get_user_model

User = get_user_model()
# Create your tests here.
class FollowerTestCase(TestCase):
def setUp(self):
response = self.client.get("")
self.request = response.wsgi_request.user
self.usr1 = self.create_author("test_user1", "test_user1",
"test_user1@github", "", "test_password1")
self.usr2 = self.create_author("test_user2", "test_user2",
"test_user2@github", "", "test_password2")
self.usr3 = self.create_author("test_user3", "test_user3",
"test_user3@github", "", "test_password3")
# self.create_author("test_user2", "test_password2")
# self.create_author("test_user3", "test_password3")
def create_author(self, username, display_name, github, profile_image, password):
author = Author.objects.create_superuser(userName=username, displayName=display_name, github=github,
profileImage=profile_image, password=password)
author.save()
return author
def test_get_authors(self):
# Follower.objects.create(sender=self.usr1, receiver=self.usr2)
response = self.client.get("/api/authors")
assert response.status_code == 200
# not working; something is up with the id
def test_get_followers_by_id(self):
Follower.objects.create(sender=self.usr1, receiver=self.usr2)
str_id2 = str(self.usr2.id)
response = self.client.get(f"/api/author/{str_id2}/followers")
# print("RESPONSE: ", response.json()[0]['id'])
assert response.status_code == 200
def test_get_single_follower_by_id(self):
Follower.objects.create(sender=self.usr1, receiver=self.usr2)
str_id1 = str(self.usr1.id)
str_id2 = str(self.usr2.id)
response = self.client.get(
f"/api/author/{str_id2}/followers/{str_id1}")
assert response.status_code == 200
def test_delete_follower_of_id(self):
Follower.objects.create(sender=self.usr1, receiver=self.usr2)
str_id1 = str(self.usr1.id)
str_id2 = str(self.usr2.id)
response = self.client.delete(
f"/api/author/{str_id2}/followers/{str_id1}")
assert response.status_code == 200
def test_follower_duplication(self):
Follower.objects.create(sender=self.usr1, receiver=self.usr2)
str_id1 = str(self.usr1.id)
str_id2 = str(self.usr2.id)
response = self.client.put(
f"/api/author/{str_id2}/followers/{str_id1}")
response2 = self.client.put(
f"/api/author/{str_id2}/followers/{str_id1}")
assert response2.status_code == 400
def test_follower_not_exist(self):
Follower.objects.create(sender=self.usr1, receiver=self.usr2)
str_id1 = str(self.usr1.id)
str_id3 = str(self.usr3.id)
response = self.client.get(
f"/api/author/{str_id3}/followers/{str_id1}")
assert response.status_code == 404
|
py | 1a480917152f5d04abd9b298b7dfb0b5cee68538 | """
Plot one week of events loaded from file (starting from the earliest event).
Examples:
plot_events.py --from events.json
Usage:
plot_events.py [--from=<FILE>]
Options:
-h --help Show this screen.
-f --from=<FILE> File containing a list of event descriptions [default: default]
"""
from os.path import join, dirname, exists
import sys
import random
import matplotlib.pyplot as plt
import numpy as np
import json
import pytz
import matplotlib.ticker as ticker
import re
from datetime import datetime, time, timedelta
from matplotlib.patches import FancyBboxPatch
from PIL import Image
from docopt import docopt
from datetime import time
from math import ceil
from collections import namedtuple
from typing import List, Dict
from schedulingassistant.data import Event
from schedulingassistant.conversion_utils import time_to_float
# TODO:
# * Complete all docstrings for function in this file
# * Add tests for `str_to_datetime`
DAY_COUNT = 7
def main(sys_args: List[str] = []) -> int:
args = parse_args(docopt(__doc__, argv=sys_args))
try:
events = extract_events_from(args['event_file'])
plot_events(events)
return 0
except Exception as e_info:
print(e_info)
return 1
def parse_args(args: Dict[str, str]) -> Dict[str, any]:
"""Parse the arguments passed in, and return a dictionary containing the final input values for the application."""
DEFAULT_EVENT_FILE = "generated_events.json"
event_file_arg = args['--from']
event_file_path = event_file_arg
if event_file_arg == "default":
event_file_path = join(dirname(__file__), DEFAULT_EVENT_FILE)
if exists(event_file_path):
return { 'event_file': event_file_path }
else:
raise ValueError("File '" + event_file_path + "' could not be found.")
def plot_events(events: List[Event]) -> None:
fig = plt.figure(figsize=(10, 16))
fig.tight_layout()
plt.title('Events', y=1, fontsize=14)
ax = fig.add_subplot(1, 1, 1)
# X
ax.set_xlim(0.5, DAY_COUNT + 0.5)
earliest_date = min([e.start_datetime.date() for e in events])
date_labels = [(earliest_date + timedelta(days=i)).strftime("%d/%m/%Y") for i in range(DAY_COUNT + 1)]
ax.set_xticks(range(1, DAY_COUNT + 1))
ax.set_xticklabels(date_labels)
plt.tick_params(bottom=False) # Hide ticks
# Y
start_of_day = 0
end_of_day = 24
ax.set_ylim(end_of_day, start_of_day)
block_times = np.arange(start_of_day, end_of_day, float(5.0/60.0))
ax.set_yticks(block_times)
hour_labels = [("{0}:00".format(int(b)) if b.is_integer() else "") for b in block_times]
ax.set_yticklabels(hour_labels)
# Create the horizontal timeblock grid lines
ax.grid(axis='y', linestyle='-', linewidth=0.3, color="black", alpha=0.05)
grid_lines = ax.yaxis.get_gridlines()
for h in range(end_of_day):
label = "{0}:00".format(h)
label_idx = hour_labels.index(label)
grid_lines[label_idx].set_alpha(1.0)
# Go through and make all hour grid lines bold
# https://stackoverflow.com/questions/53781180/polar-plot-put-one-grid-line-in-bold
# Plot the events
for e in events:
plot_event(e, earliest_date, ax)
# Save this output to an image file and open it
img_name = 'events.png'
plt.savefig(img_name, dpi=400, bbox_inches='tight')
img = Image.open(img_name)
img.show()
def extract_events_from(events_file_path: str) -> List[Event]:
"""todo, also add docstrings to all other functions in this file"""
events = []
with open(events_file_path) as events_file:
json_events = json.load(events_file)
for e in json_events:
name = e['name']
start = str_to_datetime(e['start_datetime'])
end = str_to_datetime(e['end_datetime'])
events.append(Event(name, start, end))
return events
def str_to_datetime(input_str: str) -> datetime:
"""Parse a string `input_str` and return a corresponding `datetime` object."""
microseconds = 0
if '.' in input_str:
        seconds_decimal_component_match = re.match(r"[^.]*\d+[^.]*(\.\d+)", input_str)
if seconds_decimal_component_match:
decimal_component_str = seconds_decimal_component_match.group(1)
input_str = input_str.replace(decimal_component_str, '')
microseconds = int(float("0" + decimal_component_str) * 1000000)
output = datetime.strptime(input_str, "%Y-%m-%dT%H:%M:%S%z").replace(microsecond=microseconds, tzinfo=pytz.utc)
return output
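# Example round-trips for str_to_datetime (a sketch toward the TODO above;
# the sample strings are illustrative):
#
#     str_to_datetime("2021-03-01T09:30:00+0000")
#         -> datetime(2021, 3, 1, 9, 30, tzinfo=pytz.utc)
#     str_to_datetime("2021-03-01T09:30:00.25+0000")
#         -> the same instant with microsecond=250000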
def plot_event(e: Event, earliest_date: datetime.date, ax) -> None:
boxes = convert_event_to_boxes(e)
# An index representing the first day that the start of this event should be on
day_offset = (e.start_datetime.date() - earliest_date).days
color = rand_hex_col()
start_hour = e.start_datetime.hour
start_min = e.start_datetime.minute
event_label = '{0}:{1:0>2} {2}'.format(start_hour, start_min, e.name)
for box_idx in range(len(boxes)):
label = event_label if box_idx == 0 else ""
draw_event_box(boxes[box_idx], day_offset, label, color, ax)
# An `EventBox` represents a window of time that can be drawn with one rectangle on a calendar with multiple days in
# different columns. E.g 9am - 10am would be valid `EventBox`, but 11pm - 1am would not as this would have to be broken
# down into two windows.
EventBox = namedtuple('EventBox', ['column_idx', 'start_time_float', 'end_time_float'])
def draw_event_box(box: EventBox, day_offset: int, label: str, color: str, ax):
"""Draws an event box on the plot using a day index (used internally to calculate the horizontal components of the
box, and two start/end floats representing percentages through the day, used to calculate the vertical components."""
top = box.start_time_float
bottom = box.end_time_float
left = 0.5 + box.column_idx + day_offset
# If this event would be drawn outside the view of the plot
if left >= 7.0:
return
padding_between_days = 0.05
right = left + 1 - padding_between_days
# Draw boxes and labels on top of everything else
z = 2.0
    patch = FancyBboxPatch(
        (left, top),
        abs(right - left),
        abs(bottom - top),
        boxstyle="round,pad=-0.0040,rounding_size=0.02",
        ec="black",
        fc=color,
        lw=0.2,
        zorder=z,
        mutation_aspect=1)
    ax.add_patch(patch)
plt.text(left + 0.01, top + 0.01, label, va='top', fontsize=3, zorder=z)
def convert_event_to_boxes(event: Event) -> List[EventBox]:
"""Takes in an event and converts this into a list of boxes that when combined completely cover the time allocated
to this event. Usually, this list will contain a single EventBox as many events start and end on the same day, but
any events split across multiple day boundaries will be split into multiple boxes."""
start_date = event.start_datetime.date()
end_date = event.end_datetime.date()
start_time_float = time_to_float(event.start_datetime.time())
end_time_float = time_to_float(event.end_datetime.time())
days_spanned = (end_date - start_date).days + 1
boxes = []
if days_spanned == 1:
boxes.append(EventBox(0, start_time_float, end_time_float))
else:
boxes.append(EventBox(0, start_time_float, 24.0))
for i in range(max(0, days_spanned - 2)):
boxes.append(EventBox(i + 1, 0.0, 24.0))
boxes.append(EventBox(days_spanned - 1, 0.0, end_time_float))
return boxes
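# For example, an event running from 22:00 on its first day to 01:30 on the
# next day yields EventBox(0, 22.0, 24.0) followed by EventBox(1, 0.0, 1.5).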
# Create rounded box for the event with a random colour
# https://stackoverflow.com/questions/58425392/bar-chart-with-rounded-corners-in-matplotlib
def rand_hex_col() -> str:
r = lambda: 128 + random.randint(0, 127)
return '#%02X%02X%02X' % (r(),r(),r())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
py | 1a48091a3129d6e7c28f9518907d73923a66ad62 | # Copyright 2020 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from typing import TYPE_CHECKING, List, Any
from dragonchain import exceptions
from dragonchain.lib.dto import eth
from dragonchain.lib.dto import btc
from dragonchain.lib.dto import bnb
from dragonchain.lib.interfaces import storage
if TYPE_CHECKING:
from dragonchain.lib.dto import model
FOLDER = "INTERCHAINS"
def save_interchain_client(interchain_client: "model.InterchainModel") -> None:
"""Save an interchain model to storage"""
storage.put_object_as_json(f"{FOLDER}/{interchain_client.blockchain}/{interchain_client.name}", interchain_client.export_as_at_rest())
def does_interchain_exist(blockchain: str, name: str) -> bool:
"""Check if a specific interchain exists
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to get (user defined on the creation of the interchain)
"""
if blockchain == "bitcoin":
return storage.does_object_exist(f"{FOLDER}/bitcoin/{name}")
elif blockchain == "ethereum":
return storage.does_object_exist(f"{FOLDER}/ethereum/{name}")
elif blockchain == "binance":
return storage.does_object_exist(f"{FOLDER}/binance/{name}")
else:
return False
def get_interchain_client(blockchain: str, name: str) -> "model.InterchainModel":
"""Get a specific interchain client
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to get (user defined on the creation of the interchain)
Raises:
exceptions.NotFound: When the requested client can't be found
"""
if blockchain == "bitcoin":
return btc.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/bitcoin/{name}"))
elif blockchain == "ethereum":
return eth.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/ethereum/{name}"))
elif blockchain == "binance":
return bnb.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/binance/{name}"))
else:
raise exceptions.NotFound(f"Blockchain network {blockchain} is not supported")
def list_interchain_clients(blockchain: str) -> List["model.InterchainModel"]:
"""Get all of the interchain clients for a specific blockchain type
Args:
blockchain: The blockchain of the desired clients to get
Returns:
List of instantiated interchain clients for the specified blockchain
"""
from_rest_function: Any = None
if blockchain == "bitcoin":
from_rest_function = btc.new_from_at_rest
elif blockchain == "ethereum":
from_rest_function = eth.new_from_at_rest
elif blockchain == "binance":
from_rest_function = bnb.new_from_at_rest
else:
raise exceptions.NotFound(f"Blockchain network {blockchain} is not supported")
return [from_rest_function(storage.get_json_from_object(x)) for x in storage.list_objects(f"{FOLDER}/{blockchain}/")]
def set_default_interchain_client(blockchain: str, name: str) -> "model.InterchainModel":
"""Set the default interchain model for this chain
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to set as default (user defined on the creation of the interchain)
Returns:
The client for the interchain which was set as default
Raises:
exceptions.NotFound: When trying to set a default to an interchain that doesn't exist on this chain
"""
# Make sure the specified interchain exists before setting as default
client = get_interchain_client(blockchain, name)
storage.put_object_as_json(f"{FOLDER}/default", {"version": "1", "blockchain": blockchain, "name": name})
return client
def get_default_interchain_client() -> "model.InterchainModel":
"""Get the interchain model which has been set as the default for this chain
Returns:
Instantiated InterchainModel
Raises:
exceptions.NotFound: When default has not been set, or set default cannot be found
        NotImplementedError: When the saved default is a bad version
"""
default_dto = storage.get_json_from_object(f"{FOLDER}/default")
if default_dto.get("version") == "1":
return get_interchain_client(default_dto.get("blockchain"), default_dto.get("name"))
else:
raise NotImplementedError(f"Default dto error. Version {default_dto.get('version')} not supported")
def delete_interchain_client(blockchain: str, name: str) -> None:
"""Delete an interchain client from this chain
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to delete (user defined on the creation of the interchain)
"""
storage.delete(f"{FOLDER}/{blockchain}/{name}")
|
py | 1a480a44a42b8fe21f6c87dec7d0e460a24d7a0c | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for openvswitch rpc
"""
import stubout
from neutron.agent import rpc as agent_rpc
from neutron.common import topics
from neutron.openstack.common import context
from neutron.openstack.common import rpc
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_neutron_plugin as povs
from neutron.tests import base
class rpcApiTestCase(base.BaseTestCase):
def _test_ovs_api(self, rpcapi, topic, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
expected_retval = 'foo' if method == 'call' else None
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
if rpc_method == 'cast' and method == 'run_instance':
kwargs['call'] = False
self.fake_args = None
self.fake_kwargs = None
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [ctxt, topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_delete_network(self):
rpcapi = povs.AgentNotifierApi(topics.AGENT)
self._test_ovs_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.NETWORK,
topics.DELETE),
'network_delete', rpc_method='fanout_cast',
network_id='fake_request_spec')
def test_port_update(self):
rpcapi = povs.AgentNotifierApi(topics.AGENT)
self._test_ovs_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.PORT,
topics.UPDATE),
'port_update', rpc_method='fanout_cast',
port='fake_port',
network_type='fake_network_type',
segmentation_id='fake_segmentation_id',
physical_network='fake_physical_network')
def test_tunnel_update(self):
rpcapi = povs.AgentNotifierApi(topics.AGENT)
self._test_ovs_api(rpcapi,
topics.get_topic_name(topics.AGENT,
constants.TUNNEL,
topics.UPDATE),
'tunnel_update', rpc_method='fanout_cast',
tunnel_ip='fake_ip', tunnel_id='fake_id',
tunnel_type=None)
def test_device_details(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'get_device_details', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id')
def test_update_device_down(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'update_device_down', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
def test_tunnel_sync(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'tunnel_sync', rpc_method='call',
tunnel_ip='fake_tunnel_ip',
tunnel_type=None)
def test_update_device_up(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'update_device_up', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
|
py | 1a480c818721278b57d783ee39df965381078f79 | """Tests for HTMLParser.py."""
import html.parser
import pprint
import unittest
from test import support
class EventCollector(html.parser.HTMLParser):
def __init__(self, *args, **kw):
self.events = []
self.append = self.events.append
html.parser.HTMLParser.__init__(self, *args, **kw)
def get_events(self):
# Normalize the list of events so that buffer artefacts don't
# separate runs of contiguous characters.
L = []
prevtype = None
for event in self.events:
type = event[0]
if type == prevtype == "data":
L[-1] = ("data", L[-1][1] + event[1])
else:
L.append(event)
prevtype = type
self.events = L
return L
# structure markup
def handle_starttag(self, tag, attrs):
self.append(("starttag", tag, attrs))
def handle_startendtag(self, tag, attrs):
self.append(("startendtag", tag, attrs))
def handle_endtag(self, tag):
self.append(("endtag", tag))
# all other markup
def handle_comment(self, data):
self.append(("comment", data))
def handle_charref(self, data):
self.append(("charref", data))
def handle_data(self, data):
self.append(("data", data))
def handle_decl(self, data):
self.append(("decl", data))
def handle_entityref(self, data):
self.append(("entityref", data))
def handle_pi(self, data):
self.append(("pi", data))
def unknown_decl(self, decl):
self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
def handle_starttag(self, tag, attrs):
EventCollector.handle_starttag(self, tag, attrs)
self.append(("starttag_text", self.get_starttag_text()))
class EventCollectorCharrefs(EventCollector):
def get_events(self):
return self.events
def handle_charref(self, data):
self.fail('This should never be called with convert_charrefs=True')
def handle_entityref(self, data):
self.fail('This should never be called with convert_charrefs=True')
class TestCaseBase(unittest.TestCase):
def get_collector(self):
raise NotImplementedError
def _run_check(self, source, expected_events, collector=None):
if collector is None:
collector = self.get_collector()
parser = collector
for s in source:
parser.feed(s)
parser.close()
events = parser.get_events()
if events != expected_events:
self.fail("received events did not match expected events" +
"\nSource:\n" + repr(source) +
"\nExpected:\n" + pprint.pformat(expected_events) +
"\nReceived:\n" + pprint.pformat(events))
def _run_check_extra(self, source, events):
self._run_check(source, events,
EventCollectorExtra(convert_charrefs=False))
def _parse_error(self, source):
def parse(source=source):
parser = self.get_collector()
parser.feed(source)
parser.close()
with self.assertRaises(html.parser.HTMLParseError):
with self.assertWarns(DeprecationWarning):
parse()
class HTMLParserStrictTestCase(TestCaseBase):
def get_collector(self):
with support.check_warnings(("", DeprecationWarning), quite=False):
return EventCollector(strict=True, convert_charrefs=False)
def test_processing_instruction_only(self):
self._run_check("<?processing instruction>", [
("pi", "processing instruction"),
])
self._run_check("<?processing instruction ?>", [
("pi", "processing instruction ?"),
])
def test_simple_html(self):
self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity;&#32;
<!--comment1a
-></foo><bar><<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
&#x201C;
<!--comment2a-- --comment2b-->
</Html>
""", [
("data", "\n"),
("decl", "DOCTYPE html PUBLIC 'foo'"),
("data", "\n"),
("starttag", "html", []),
("entityref", "entity"),
("charref", "32"),
("data", "\n"),
("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
("data", "\n"),
("starttag", "img", [("src", "Bar"), ("ismap", None)]),
("data", "sample\ntext\n"),
("charref", "x201C"),
("data", "\n"),
("comment", "comment2a-- --comment2b"),
("data", "\n"),
("endtag", "html"),
("data", "\n"),
])
def test_malformatted_charref(self):
self._run_check("<p>&#bad;</p>", [
("starttag", "p", []),
("data", "&#bad;"),
("endtag", "p"),
])
def test_unclosed_entityref(self):
self._run_check("&entityref foo", [
("entityref", "entityref"),
("data", " foo"),
])
def test_bad_nesting(self):
# Strangely, this *is* supposed to test that overlapping
# elements are allowed. HTMLParser is more geared toward
        # lexing the input than parsing the structure.
self._run_check("<a><b></a></b>", [
("starttag", "a", []),
("starttag", "b", []),
("endtag", "a"),
("endtag", "b"),
])
def test_bare_ampersands(self):
self._run_check("this text & contains & ampersands &", [
("data", "this text & contains & ampersands &"),
])
def test_bare_pointy_brackets(self):
self._run_check("this < text > contains < bare>pointy< brackets", [
("data", "this < text > contains < bare>pointy< brackets"),
])
def test_illegal_declarations(self):
self._parse_error('<!spacer type="block" height="25">')
def test_starttag_end_boundary(self):
self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
def test_buffer_artefacts(self):
output = [("starttag", "a", [("b", "<")])]
self._run_check(["<a b='<'>"], output)
self._run_check(["<a ", "b='<'>"], output)
self._run_check(["<a b", "='<'>"], output)
self._run_check(["<a b=", "'<'>"], output)
self._run_check(["<a b='<", "'>"], output)
self._run_check(["<a b='<'", ">"], output)
output = [("starttag", "a", [("b", ">")])]
self._run_check(["<a b='>'>"], output)
self._run_check(["<a ", "b='>'>"], output)
self._run_check(["<a b", "='>'>"], output)
self._run_check(["<a b=", "'>'>"], output)
self._run_check(["<a b='>", "'>"], output)
self._run_check(["<a b='>'", ">"], output)
output = [("comment", "abc")]
self._run_check(["", "<!--abc-->"], output)
self._run_check(["<", "!--abc-->"], output)
self._run_check(["<!", "--abc-->"], output)
self._run_check(["<!-", "-abc-->"], output)
self._run_check(["<!--", "abc-->"], output)
self._run_check(["<!--a", "bc-->"], output)
self._run_check(["<!--ab", "c-->"], output)
self._run_check(["<!--abc", "-->"], output)
self._run_check(["<!--abc-", "->"], output)
self._run_check(["<!--abc--", ">"], output)
self._run_check(["<!--abc-->", ""], output)
def test_starttag_junk_chars(self):
self._parse_error("</>")
self._parse_error("</$>")
self._parse_error("</")
self._parse_error("</a")
self._parse_error("<a<a>")
self._parse_error("</a<a>")
self._parse_error("<!")
self._parse_error("<a")
self._parse_error("<a foo='bar'")
self._parse_error("<a foo='bar")
self._parse_error("<a foo='>'")
self._parse_error("<a foo='>")
self._parse_error("<a$>")
self._parse_error("<a$b>")
self._parse_error("<a$b/>")
self._parse_error("<a$b >")
self._parse_error("<a$b />")
def test_valid_doctypes(self):
# from http://www.w3.org/QA/2002/04/valid-dtd-list.html
dtds = ['HTML', # HTML5 doctype
('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd"'),
('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
'"http://www.w3.org/TR/html4/loose.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd"'),
('math PUBLIC "-//W3C//DTD MathML 2.0//EN" '
'"http://www.w3.org/Math/DTD/mathml2/mathml2.dtd"'),
('html PUBLIC "-//W3C//DTD '
'XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" '
'"http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"'),
('svg PUBLIC "-//W3C//DTD SVG 1.1//EN" '
'"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"'),
'html PUBLIC "-//IETF//DTD HTML 2.0//EN"',
'html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"']
for dtd in dtds:
self._run_check("<!DOCTYPE %s>" % dtd,
[('decl', 'DOCTYPE ' + dtd)])
def test_declaration_junk_chars(self):
self._parse_error("<!DOCTYPE foo $ >")
def test_startendtag(self):
self._run_check("<p/>", [
("startendtag", "p", []),
])
self._run_check("<p></p>", [
("starttag", "p", []),
("endtag", "p"),
])
self._run_check("<p><img src='foo' /></p>", [
("starttag", "p", []),
("startendtag", "img", [("src", "foo")]),
("endtag", "p"),
])
def test_get_starttag_text(self):
s = """<foo:bar \n one="1"\ttwo=2 >"""
self._run_check_extra(s, [
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
def test_cdata_content(self):
contents = [
            '<!-- not a comment --> &not-an-entity-ref;',
"<not a='start tag'>",
'<a href="" /> <p> <span></span>',
'foo = "</scr" + "ipt>";',
'foo = "</SCRIPT" + ">";',
'foo = <\n/script> ',
'<!-- document.write("</scr" + "ipt>"); -->',
('\n//<![CDATA[\n'
'document.write(\'<s\'+\'cript type="text/javascript" '
'src="http://www.example.org/r=\'+new '
'Date().getTime()+\'"><\\/s\'+\'cript>\');\n//]]>'),
'\n<!-- //\nvar foo = 3.14;\n// -->\n',
'foo = "</sty" + "le>";',
'<!-- \u2603 -->',
# these two should be invalid according to the HTML 5 spec,
# section 8.1.2.2
#'foo = </\nscript>',
#'foo = </ script>',
]
elements = ['script', 'style', 'SCRIPT', 'STYLE', 'Script', 'Style']
for content in contents:
for element in elements:
element_lower = element.lower()
s = '<{element}>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)])
def test_cdata_with_closing_tags(self):
# see issue #13358
# make sure that HTMLParser calls handle_data only once for each CDATA.
# The normal event collector normalizes the events in get_events,
# so we override it to return the original list of events.
class Collector(EventCollector):
def get_events(self):
return self.events
content = """<!-- not a comment --> ¬-an-entity-ref;
<a href="" /> </p><p> <span></span></style>
'</script' + '>'"""
for element in [' script', 'script ', ' script ',
'\nscript', 'script\n', '\nscript\n']:
element_lower = element.lower().strip()
s = '<script>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)],
collector=Collector(convert_charrefs=False))
def test_comments(self):
html = ("<!-- I'm a valid comment -->"
'<!--me too!-->'
'<!------>'
'<!---->'
'<!----I have many hyphens---->'
'<!-- I have a > in the middle -->'
'<!-- and I have -- in the middle! -->')
expected = [('comment', " I'm a valid comment "),
('comment', 'me too!'),
('comment', '--'),
('comment', ''),
('comment', '--I have many hyphens--'),
('comment', ' I have a > in the middle '),
('comment', ' and I have -- in the middle! ')]
self._run_check(html, expected)
def test_condcoms(self):
html = ('<!--[if IE & !(lte IE 8)]>aren\'t<![endif]-->'
'<!--[if IE 8]>condcoms<![endif]-->'
'<!--[if lte IE 7]>pretty?<![endif]-->')
expected = [('comment', "[if IE & !(lte IE 8)]>aren't<![endif]"),
('comment', '[if IE 8]>condcoms<![endif]'),
('comment', '[if lte IE 7]>pretty?<![endif]')]
self._run_check(html, expected)
def test_convert_charrefs(self):
collector = lambda: EventCollectorCharrefs(convert_charrefs=True)
self.assertTrue(collector().convert_charrefs)
charrefs = ['"', '"', '"', '"', '"', '"']
# check charrefs in the middle of the text/attributes
expected = [('starttag', 'a', [('href', 'foo"zar')]),
('data', 'a"z'), ('endtag', 'a')]
for charref in charrefs:
self._run_check('<a href="foo{0}zar">a{0}z</a>'.format(charref),
expected, collector=collector())
# check charrefs at the beginning/end of the text/attributes
expected = [('data', '"'),
('starttag', 'a', [('x', '"'), ('y', '"X'), ('z', 'X"')]),
('data', '"'), ('endtag', 'a'), ('data', '"')]
for charref in charrefs:
self._run_check('{0}<a x="{0}" y="{0}X" z="X{0}">'
'{0}</a>{0}'.format(charref),
expected, collector=collector())
# check charrefs in <script>/<style> elements
for charref in charrefs:
text = 'X'.join([charref]*3)
expected = [('data', '"'),
('starttag', 'script', []), ('data', text),
('endtag', 'script'), ('data', '"'),
('starttag', 'style', []), ('data', text),
('endtag', 'style'), ('data', '"')]
self._run_check('{1}<script>{0}</script>{1}'
'<style>{0}</style>{1}'.format(text, charref),
expected, collector=collector())
# check truncated charrefs at the end of the file
html = '&quo &# &#x'
for x in range(1, len(html)):
self._run_check(html[:x], [('data', html[:x])],
collector=collector())
# check a string with no charrefs
self._run_check('no charrefs here', [('data', 'no charrefs here')],
collector=collector())
class HTMLParserTolerantTestCase(HTMLParserStrictTestCase):
def get_collector(self):
return EventCollector(convert_charrefs=False)
def test_deprecation_warnings(self):
with self.assertWarns(DeprecationWarning):
EventCollector() # convert_charrefs not passed explicitly
with self.assertWarns(DeprecationWarning):
EventCollector(strict=True)
with self.assertWarns(DeprecationWarning):
EventCollector(strict=False)
with self.assertRaises(html.parser.HTMLParseError):
with self.assertWarns(DeprecationWarning):
EventCollector().error('test')
def test_tolerant_parsing(self):
self._run_check('<html <html>te>>xt&a<<bc</a></html>\n'
'<img src="URL><//img></html</html>', [
('starttag', 'html', [('<html', None)]),
('data', 'te>>xt'),
('entityref', 'a'),
('data', '<'),
('starttag', 'bc<', [('a', None)]),
('endtag', 'html'),
('data', '\n<img src="URL>'),
('comment', '/img'),
('endtag', 'html<')])
def test_starttag_junk_chars(self):
self._run_check("</>", [])
self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')])
self._run_check("</a", [('data', '</a')])
self._run_check("<a<a>", [('starttag', 'a<a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')])
self._run_check("<!", [('data', '<!')])
self._run_check("<a", [('data', '<a')])
self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
self._run_check("<a foo='bar", [('data', "<a foo='bar")])
self._run_check("<a foo='>'", [('data', "<a foo='>'")])
self._run_check("<a foo='>", [('data', "<a foo='>")])
self._run_check("<a$>", [('starttag', 'a$', [])])
self._run_check("<a$b>", [('starttag', 'a$b', [])])
self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
self._run_check("<a$b >", [('starttag', 'a$b', [])])
self._run_check("<a$b />", [('startendtag', 'a$b', [])])
def test_slashes_in_starttag(self):
self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
html = ('<img width=902 height=250px '
'src="/sites/default/files/images/homepage/foo.jpg" '
'/*what am I doing here*/ />')
expected = [(
'startendtag', 'img',
[('width', '902'), ('height', '250px'),
('src', '/sites/default/files/images/homepage/foo.jpg'),
('*what', None), ('am', None), ('i', None),
('doing', None), ('here*', None)]
)]
self._run_check(html, expected)
html = ('<a / /foo/ / /=/ / /bar/ / />'
'<a / /foo/ / /=/ / /bar/ / >')
expected = [
('startendtag', 'a', [('foo', None), ('=', None), ('bar', None)]),
('starttag', 'a', [('foo', None), ('=', None), ('bar', None)])
]
self._run_check(html, expected)
#see issue #14538
html = ('<meta><meta / ><meta // ><meta / / >'
'<meta/><meta /><meta //><meta//>')
expected = [
('starttag', 'meta', []), ('starttag', 'meta', []),
('starttag', 'meta', []), ('starttag', 'meta', []),
('startendtag', 'meta', []), ('startendtag', 'meta', []),
('startendtag', 'meta', []), ('startendtag', 'meta', []),
]
self._run_check(html, expected)
def test_declaration_junk_chars(self):
self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
def test_illegal_declarations(self):
self._run_check('<!spacer type="block" height="25">',
[('comment', 'spacer type="block" height="25"')])
def test_with_unquoted_attributes(self):
# see #12008
html = ("<html><body bgcolor=d0ca90 text='181008'>"
"<table cellspacing=0 cellpadding=1 width=100% ><tr>"
"<td align=left><font size=-1>"
"- <a href=/rabota/><span class=en> software-and-i</span></a>"
"- <a href='/1/'><span class=en> library</span></a></table>")
expected = [
('starttag', 'html', []),
('starttag', 'body', [('bgcolor', 'd0ca90'), ('text', '181008')]),
('starttag', 'table',
[('cellspacing', '0'), ('cellpadding', '1'), ('width', '100%')]),
('starttag', 'tr', []),
('starttag', 'td', [('align', 'left')]),
('starttag', 'font', [('size', '-1')]),
('data', '- '), ('starttag', 'a', [('href', '/rabota/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' software-and-i'),
('endtag', 'span'), ('endtag', 'a'),
('data', '- '), ('starttag', 'a', [('href', '/1/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' library'),
('endtag', 'span'), ('endtag', 'a'), ('endtag', 'table')
]
self._run_check(html, expected)
def test_comma_between_attributes(self):
self._run_check('<form action="/xxx.php?a=1&b=2&", '
'method="post">', [
('starttag', 'form',
[('action', '/xxx.php?a=1&b=2&'),
(',', None), ('method', 'post')])])
def test_weird_chars_in_unquoted_attribute_values(self):
self._run_check('<form action=bogus|&#()value>', [
('starttag', 'form',
[('action', 'bogus|&#()value')])])
def test_invalid_end_tags(self):
# A collection of broken end tags. <br> is used as separator.
# see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state
# and #13993
html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>'
'</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>')
expected = [('starttag', 'br', []),
# < is part of the name, / is discarded, p is an attribute
('endtag', 'label<'),
('starttag', 'br', []),
# text and attributes are discarded
('endtag', 'div'),
('starttag', 'br', []),
# comment because the first char after </ is not a-zA-Z
('comment', '<h4'),
('starttag', 'br', []),
# attributes are discarded
('endtag', 'li'),
('starttag', 'br', []),
# everything till ul (included) is discarded
('endtag', 'li'),
('starttag', 'br', []),
# </> is ignored
('starttag', 'br', [])]
self._run_check(html, expected)
def test_broken_invalid_end_tag(self):
# This is technically wrong (the "> shouldn't be included in the 'data')
# but is probably not worth fixing it (in addition to all the cases of
# the previous test, it would require a full attribute parsing).
# see #13993
html = '<b>This</b attr=">"> confuses the parser'
expected = [('starttag', 'b', []),
('data', 'This'),
('endtag', 'b'),
('data', '"> confuses the parser')]
self._run_check(html, expected)
def test_correct_detection_of_start_tags(self):
# see #13273
html = ('<div style="" ><b>The <a href="some_url">rain</a> '
'<br /> in <span>Spain</span></b></div>')
expected = [
('starttag', 'div', [('style', '')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
('data', ' '),
('startendtag', 'br', []),
('data', ' in '),
('starttag', 'span', []),
('data', 'Spain'),
('endtag', 'span'),
('endtag', 'b'),
('endtag', 'div')
]
self._run_check(html, expected)
html = '<div style="", foo = "bar" ><b>The <a href="some_url">rain</a>'
expected = [
('starttag', 'div', [('style', ''), (',', None), ('foo', 'bar')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
]
self._run_check(html, expected)
def test_EOF_in_charref(self):
# see #17802
# This test checks that the UnboundLocalError reported in the issue
# is not raised, however I'm not sure the returned values are correct.
# Maybe HTMLParser should use self.unescape for these
data = [
('a&', [('data', 'a&')]),
('a&b', [('data', 'ab')]),
('a&b ', [('data', 'a'), ('entityref', 'b'), ('data', ' ')]),
('a&b;', [('data', 'a'), ('entityref', 'b')]),
]
for html, expected in data:
self._run_check(html, expected)
def test_unescape_method(self):
from html import unescape
p = self.get_collector()
with self.assertWarns(DeprecationWarning):
s = '""""""&#bad;'
self.assertEqual(p.unescape(s), unescape(s))
def test_broken_comments(self):
html = ('<! not really a comment >'
'<! not a comment either -->'
'<! -- close enough -->'
'<!><!<-- this was an empty comment>'
'<!!! another bogus comment !!!>')
expected = [
('comment', ' not really a comment '),
('comment', ' not a comment either --'),
('comment', ' -- close enough --'),
('comment', ''),
('comment', '<-- this was an empty comment'),
('comment', '!! another bogus comment !!!'),
]
self._run_check(html, expected)
def test_broken_condcoms(self):
# these condcoms are missing the '--' after '<!' and before the '>'
html = ('<![if !(IE)]>broken condcom<![endif]>'
'<![if ! IE]><link href="favicon.tiff"/><![endif]>'
'<![if !IE 6]><img src="firefox.png" /><![endif]>'
'<![if !ie 6]><b>foo</b><![endif]>'
'<![if (!IE)|(lt IE 9)]><img src="mammoth.bmp" /><![endif]>')
# According to the HTML5 specs sections "8.2.4.44 Bogus comment state"
# and "8.2.4.45 Markup declaration open state", comment tokens should
# be emitted instead of 'unknown decl', but calling unknown_decl
# provides more flexibility.
# See also Lib/_markupbase.py:parse_declaration
expected = [
('unknown decl', 'if !(IE)'),
('data', 'broken condcom'),
('unknown decl', 'endif'),
('unknown decl', 'if ! IE'),
('startendtag', 'link', [('href', 'favicon.tiff')]),
('unknown decl', 'endif'),
('unknown decl', 'if !IE 6'),
('startendtag', 'img', [('src', 'firefox.png')]),
('unknown decl', 'endif'),
('unknown decl', 'if !ie 6'),
('starttag', 'b', []),
('data', 'foo'),
('endtag', 'b'),
('unknown decl', 'endif'),
('unknown decl', 'if (!IE)|(lt IE 9)'),
('startendtag', 'img', [('src', 'mammoth.bmp')]),
('unknown decl', 'endif')
]
self._run_check(html, expected)
class AttributesStrictTestCase(TestCaseBase):
def get_collector(self):
with support.check_warnings(("", DeprecationWarning), quite=False):
return EventCollector(strict=True, convert_charrefs=False)
def test_attr_syntax(self):
output = [
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
[("starttag", "a", [("b", "xxx\n\txxx"),
("c", "yyy\t\nyyy"),
("d", "\txyz\n")])])
self._run_check("""<a b='' c="">""",
[("starttag", "a", [("b", ""), ("c", "")])])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>",
[("starttag", "e", [("a", "rgb(1,2,3)")])])
# Regression test for SF bug #921657.
self._run_check(
"<a href=mailto:[email protected]>",
[("starttag", "a", [("href", "mailto:[email protected]")])])
def test_attr_nonascii(self):
# see issue 7311
self._run_check(
"<img src=/foo/bar.png alt=\u4e2d\u6587>",
[("starttag", "img", [("src", "/foo/bar.png"),
("alt", "\u4e2d\u6587")])])
self._run_check(
"<a title='\u30c6\u30b9\u30c8' href='\u30c6\u30b9\u30c8.html'>",
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
self._run_check(
'<a title="\u30c6\u30b9\u30c8" href="\u30c6\u30b9\u30c8.html">',
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
def test_attr_entity_replacement(self):
self._run_check(
"<a b='&><"''>",
[("starttag", "a", [("b", "&><\"'")])])
def test_attr_funky_names(self):
self._run_check(
"<a a.b='v' c:d=v e-f=v>",
[("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")])])
def test_entityrefs_in_attributes(self):
self._run_check(
"<html foo='€&aa&unsupported;'>",
[("starttag", "html", [("foo", "\u20AC&aa&unsupported;")])])
class AttributesTolerantTestCase(AttributesStrictTestCase):
def get_collector(self):
return EventCollector(convert_charrefs=False)
def test_attr_funky_names2(self):
self._run_check(
"<a $><b $=%><c \=/>",
[("starttag", "a", [("$", None)]),
("starttag", "b", [("$", "%")]),
("starttag", "c", [("\\", "/")])])
def test_entities_in_attribute_value(self):
# see #1200313
        for entity in ['&', '&amp;', '&#38;', '&#x26;']:
self._run_check('<a href="%s">' % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href='%s'>" % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href=%s>" % entity,
[("starttag", "a", [("href", "&")])])
def test_malformed_attributes(self):
# see #13357
html = (
"<a href=test'style='color:red;bad1'>test - bad1</a>"
"<a href=test'+style='color:red;ba2'>test - bad2</a>"
"<a href=test' style='color:red;bad3'>test - bad3</a>"
"<a href = test' style='color:red;bad4' >test - bad4</a>"
)
expected = [
('starttag', 'a', [('href', "test'style='color:red;bad1'")]),
('data', 'test - bad1'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'+style='color:red;ba2'")]),
('data', 'test - bad2'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad3'")]),
('data', 'test - bad3'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad4'")]),
('data', 'test - bad4'), ('endtag', 'a')
]
self._run_check(html, expected)
def test_malformed_adjacent_attributes(self):
# see #12629
self._run_check('<x><y z=""o"" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('o""', None)]),
('endtag', 'x')])
self._run_check('<x><y z="""" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('""', None)]),
('endtag', 'x')])
# see #755670 for the following 3 tests
def test_adjacent_attributes(self):
self._run_check('<a width="100%"cellspacing=0>',
[("starttag", "a",
[("width", "100%"), ("cellspacing","0")])])
self._run_check('<a id="foo"class="bar">',
[("starttag", "a",
[("id", "foo"), ("class","bar")])])
def test_missing_attribute_value(self):
self._run_check('<a v=>',
[("starttag", "a", [("v", "")])])
def test_javascript_attribute_value(self):
self._run_check("<a href=javascript:popup('/popup/help.html')>",
[("starttag", "a",
[("href", "javascript:popup('/popup/help.html')")])])
def test_end_tag_in_attribute_value(self):
# see #1745761
self._run_check("<a href='http://www.example.org/\">;'>spam</a>",
[("starttag", "a",
[("href", "http://www.example.org/\">;")]),
("data", "spam"), ("endtag", "a")])
if __name__ == "__main__":
unittest.main()
|
py | 1a480dd4fd200a5e13720b6e3d8954d9878dd585 | import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
from PIL import Image
import glob
from scipy.misc import imresize
import os
from os import listdir
from os.path import isfile, join
import shutil
import stat
import collections
from collections import defaultdict
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
import json
import h5py
import matplotlib.image as img
import glob
import cv2
from model_loader import ModelLoader
# Total number of classes
ix_to_class={"0": "apple_pie", "1": "baby_back_ribs", "2": "baklava", "3": "beef_carpaccio", "4": "beef_tartare", "5": "beet_salad", "6": "beignets", "7": "bibimbap", "8": "bread_pudding", "9": "breakfast_burrito", "10": "bruschetta", "11": "caesar_salad", "12": "cannoli", "13": "caprese_salad", "14": "carrot_cake", "15": "ceviche", "16": "cheesecake", "17": "cheese_plate", "18": "chicken_curry", "19": "chicken_quesadilla", "20": "chicken_wings", "21": "chocolate_cake", "22": "chocolate_mousse", "23": "churros", "24": "clam_chowder", "25": "club_sandwich", "26": "crab_cakes", "27": "creme_brulee", "28": "croque_madame", "29": "cup_cakes", "30": "deviled_eggs", "31": "donuts", "32": "dumplings", "33": "edamame", "34": "eggs_benedict", "35": "escargots", "36": "falafel", "37": "filet_mignon", "38": "fish_and_chips", "39": "foie_gras", "40": "french_fries", "41": "french_onion_soup", "42": "french_toast", "43": "fried_calamari", "44": "fried_rice", "45": "frozen_yogurt", "46": "garlic_bread", "47": "gnocchi", "48": "greek_salad", "49": "grilled_cheese_sandwich", "50": "grilled_salmon", "51": "guacamole", "52": "gyoza", "53": "hamburger", "54": "hot_and_sour_soup", "55": "hot_dog", "56": "huevos_rancheros", "57": "hummus", "58": "ice_cream", "59": "lasagna", "60": "lobster_bisque", "61": "lobster_roll_sandwich", "62": "macaroni_and_cheese", "63": "macarons", "64": "miso_soup", "65": "mussels", "66": "nachos", "67": "omelette", "68": "onion_rings", "69": "oysters", "70": "pad_thai", "71": "paella", "72": "pancakes", "73": "panna_cotta", "74": "peking_duck", "75": "pho", "76": "pizza", "77": "pork_chop", "78": "poutine", "79": "prime_rib", "80": "pulled_pork_sandwich", "81": "ramen", "82": "ravioli", "83": "red_velvet_cake", "84": "risotto", "85": "samosa", "86": "sashimi", "87": "scallops", "88": "seaweed_salad", "89": "shrimp_and_grits", "90": "spaghetti_bolognese", "91": "spaghetti_carbonara", "92": "spring_rolls", "93": "steak", "94": "strawberry_shortcake", "95": "sushi", "96": "tacos", "97": "takoyaki", "98": "tiramisu", "99": "tuna_tartare", "100": "waffles"}
class FoodRecognition:
model = None
process_image = None
def __init__(self, filepath, process_image):
self.filepath = filepath
self.model = ModelLoader()
self.process_image = process_image
def load_images(self):
root = self.filepath
min_side=299
all_imgs = []
all_classes = []
resize_count = 0
invalid_count = 0
for img_name in sorted(glob.glob(root)):
img_arr = img.imread(img_name)
            if img_arr.shape[2] == 4:
                img_arr = cv2.cvtColor(img_arr, cv2.COLOR_RGBA2RGB)
img_arr_rs = img_arr
try:
w, h, _ = img_arr.shape
if w < min_side:
wpercent = (min_side/float(w))
hsize = int((float(h)*float(wpercent)))
#print('new dims:', min_side, hsize)
img_arr_rs = imresize(img_arr, (min_side, hsize))
resize_count += 1
elif h < min_side:
hpercent = (min_side/float(h))
wsize = int((float(w)*float(hpercent)))
#print('new dims:', wsize, min_side)
img_arr_rs = imresize(img_arr, (wsize, min_side))
resize_count += 1
all_imgs.append(img_arr_rs)
except:
print('Skipping bad image: ',img_name)
invalid_count += 1
img1 = np.array(all_imgs)
#print(img1.shape)
        img1 = np.reshape(img1, (img1.shape[1], img1.shape[2], 3))
img_resized = cv2.resize(img1,(299,299))
return img_resized
def model_predict(self,img,model):
top_n=5
x=np.reshape(img,(1,299,299,3))
x =self.process_image(x.astype('float32'))
y_pred = model.predict(x)
#print(x.shape)
preds = np.argmax(y_pred, axis=1)
score = y_pred[0][preds[0]]
top_n_preds= np.argpartition(y_pred, -top_n)[:,-top_n:]
pred_class = ix_to_class[str(preds[0])]
result ={}
for i in top_n_preds[0]:
pred_class = ix_to_class[str(i)]
score = y_pred[0][i]
result[str(pred_class)]=score
sorted_result = sorted(result.items(), key=lambda x: x[1], reverse=True)
return sorted_result
def main(self):
img = self.load_images()
result= self.model_predict(img,self.model.getModel())
return result
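# A minimal usage sketch. The glob pattern and the scaling function below are
# illustrative assumptions; in practice `process_image` would be whichever
# preprocessing function matches the model returned by ModelLoader (e.g.
# Inception-style scaling to [-1, 1]).
if __name__ == '__main__':
    recognizer = FoodRecognition('sample_images/*.jpg',
                                 process_image=lambda x: x / 127.5 - 1.0)
    for dish, score in recognizer.main():
        print(dish, score)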
|
py | 1a480fdac642985f6441bac4ce0ef8c0e334f06d | import os
import unittest
from openeo_pg_parser.translate import translate_process_graph
class GraphTester(unittest.TestCase):
""" Tests all functionalities of the class `Graph`. """
def setUp(self):
""" Setting up variables for one test. """
pg_dirpath = os.path.join(os.path.dirname(__file__), 'process_graphs')
self.max_ndvi_pg_filepath = os.path.join(pg_dirpath, "s2_max_ndvi.json")
def test_sort_process_graph(self):
""" Tests sorting of a process graph. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
assert list(graph.ids) == ["apply_0", "linear_scale_range_1", "load_collection_2", "reduce_bands_3", "red_4",
"nir_5", "ndvi_6", "reduce_time_7", "max_8", "save_9"]
sorted_graph = graph.sort(by='dependency')
assert list(sorted_graph.ids) == ["load_collection_2", "reduce_bands_3", "red_4", "nir_5", "ndvi_6",
"reduce_time_7", "max_8", "apply_0", "linear_scale_range_1", "save_9"]
def test_get_parent_process(self):
""" Tests to retrieve the parent process of an embedded process graph. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
lsr_node = graph['linear_scale_range_1']
apply_node = graph['apply_0']
assert lsr_node.parent_process == apply_node
def test_is_reducer(self):
""" Tests reducer identification. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply_0']
assert not apply_node.is_reducer
reduce_node = graph['reduce_time_7']
assert reduce_node.is_reducer
def test_get_dimension(self):
""" Tests dimension retrieval. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply_0']
assert apply_node.dimension is None
reduce_node = graph['reduce_time_7']
assert reduce_node.dimension == 't'
def test_get_node_by_id(self):
""" Tests node access in a graph by node id. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply_0']
assert apply_node.id == 'apply_0'
def test_get_node_by_name(self):
""" Tests node access in a graph by node name. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply']
assert apply_node.id == 'apply_0'
def test_has_descendant_process(self):
""" Tests if a node has a descendant process. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
dc_node = graph['load_collection_2']
assert dc_node.has_descendant_process(graph, 'save_result')
def test_to_igraph(self):
""" Tests conversion of internal graph to an iGraph object. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
graph.to_igraph(edge_name="process")
assert True
if __name__ == '__main__':
unittest.main() |
py | 1a4811782f85588975396370ed81f3480edeac80 |
import fmkit_utilities
print(fmkit_utilities.dtw_c)
|
py | 1a4811fb2bffa321c1b56e3d7cadabafe2c4b0f7 | """Generate CPython API wrapper functions for native functions.
The wrapper functions are used by the CPython runtime when calling
native functions from interpreted code, and when the called function
can't be determined statically in compiled code. They validate, match,
unbox and type check function arguments, and box return values as
needed. All wrappers accept and return 'PyObject *' (boxed) values.
The wrappers aren't used for most calls between two native functions
or methods in a single compilation unit.
"""
from typing import List, Optional, Sequence
from mypy.nodes import ARG_POS, ARG_OPT, ARG_NAMED_OPT, ARG_NAMED, ARG_STAR, ARG_STAR2
from mypy.operators import op_methods_to_symbols, reverse_op_methods, reverse_op_method_names
from mypyc.common import PREFIX, NATIVE_PREFIX, DUNDER_PREFIX, use_vectorcall
from mypyc.codegen.emit import Emitter, ErrorHandler, GotoHandler, AssignHandler, ReturnHandler
from mypyc.ir.rtypes import (
RType, RInstance, is_object_rprimitive, is_int_rprimitive, is_bool_rprimitive,
object_rprimitive
)
from mypyc.ir.func_ir import FuncIR, RuntimeArg, FUNC_STATICMETHOD
from mypyc.ir.class_ir import ClassIR
from mypyc.namegen import NameGenerator
# Generic vectorcall wrapper functions (Python 3.7+)
#
# A wrapper function has a signature like this:
#
# PyObject *fn(PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
#
# The function takes a self object, pointer to an array of arguments,
# the number of positional arguments, and a tuple of keyword argument
# names (that are stored starting in args[nargs]).
#
# It returns the returned object, or NULL on an exception.
#
# These are more efficient than legacy wrapper functions, since
# usually no tuple or dict objects need to be created for the
# arguments. Vectorcalls also use pre-constructed str objects for
# keyword argument names and other pre-computed information, instead
# of processing the argument format string on each call.
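# For orientation, given a compiled function such as
#
#     def add(x: int, y: int) -> int: ...
#
# the generated wrapper conceptually performs the following steps (this is an
# illustrative Python sketch of the emitted C logic, not actual generated
# output; the helper names are placeholders):
#
#     def wrapper(args):
#         x = check_and_unbox_int(args[0])   # validate + unbox argument 0
#         y = check_and_unbox_int(args[1])   # validate + unbox argument 1
#         return box_int(native_add(x, y))   # box the unboxed native result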
def wrapper_function_header(fn: FuncIR, names: NameGenerator) -> str:
"""Return header of a vectorcall wrapper function.
See comment above for a summary of the arguments.
"""
return (
'PyObject *{prefix}{name}('
'PyObject *self, PyObject *const *args, size_t nargs, PyObject *kwnames)').format(
prefix=PREFIX,
name=fn.cname(names))
def generate_traceback_code(fn: FuncIR,
emitter: Emitter,
source_path: str,
module_name: str) -> str:
# If we hit an error while processing arguments, then we emit a
# traceback frame to make it possible to debug where it happened.
# Unlike traceback frames added for exceptions seen in IR, we do this
# even if there is no `traceback_name`. This is because the error will
# have originated here and so we need it in the traceback.
globals_static = emitter.static_name('globals', module_name)
traceback_code = 'CPy_AddTraceback("%s", "%s", %d, %s);' % (
source_path.replace("\\", "\\\\"),
fn.traceback_name or fn.name,
fn.line,
globals_static)
return traceback_code
def make_arg_groups(args: List[RuntimeArg]) -> List[List[RuntimeArg]]:
"""Group arguments by kind."""
return [[arg for arg in args if arg.kind == k] for k in range(ARG_NAMED_OPT + 1)]
def reorder_arg_groups(groups: List[List[RuntimeArg]]) -> List[RuntimeArg]:
"""Reorder argument groups to match their order in a format string."""
return groups[ARG_POS] + groups[ARG_OPT] + groups[ARG_NAMED_OPT] + groups[ARG_NAMED]
def make_static_kwlist(args: List[RuntimeArg]) -> str:
arg_names = ''.join('"{}", '.format(arg.name) for arg in args)
return 'static const char * const kwlist[] = {{{}0}};'.format(arg_names)
def make_format_string(func_name: Optional[str], groups: List[List[RuntimeArg]]) -> str:
"""Return a format string that specifies the accepted arguments.
The format string is an extended subset of what is supported by
PyArg_ParseTupleAndKeywords(). Only the type 'O' is used, and we
also support some extensions:
- Required keyword-only arguments are introduced after '@'
- If the function receives *args or **kwargs, we add a '%' prefix
Each group requires the previous groups' delimiters to be present
first.
These are used by both vectorcall and legacy wrapper functions.
"""
format = ''
if groups[ARG_STAR] or groups[ARG_STAR2]:
format += '%'
format += 'O' * len(groups[ARG_POS])
if groups[ARG_OPT] or groups[ARG_NAMED_OPT] or groups[ARG_NAMED]:
format += '|' + 'O' * len(groups[ARG_OPT])
if groups[ARG_NAMED_OPT] or groups[ARG_NAMED]:
format += '$' + 'O' * len(groups[ARG_NAMED_OPT])
if groups[ARG_NAMED]:
format += '@' + 'O' * len(groups[ARG_NAMED])
if func_name is not None:
format += ':{}'.format(func_name)
return format
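# For illustration, a signature like (a, b=1, *, c) would be grouped and
# formatted roughly as follows (a sketch based on the logic above; the exact
# RuntimeArg construction is elided):
#
#     groups[ARG_POS]   -> [a]
#     groups[ARG_OPT]   -> [b]
#     groups[ARG_NAMED] -> [c]
#     make_format_string('f', groups) == 'O|O$@O:f'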
def generate_wrapper_function(fn: FuncIR,
emitter: Emitter,
source_path: str,
module_name: str) -> None:
"""Generate a CPython-compatible vectorcall wrapper for a native function.
In particular, this handles unboxing the arguments, calling the native function, and
then boxing the return value.
"""
emitter.emit_line('{} {{'.format(wrapper_function_header(fn, emitter.names)))
# If fn is a method, then the first argument is a self param
real_args = list(fn.args)
if fn.class_name and not fn.decl.kind == FUNC_STATICMETHOD:
arg = real_args.pop(0)
emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name))
# Need to order args as: required, optional, kwonly optional, kwonly required
# This is because CPyArg_ParseStackAndKeywords format string requires
# them grouped in that way.
groups = make_arg_groups(real_args)
reordered_args = reorder_arg_groups(groups)
emitter.emit_line(make_static_kwlist(reordered_args))
fmt = make_format_string(fn.name, groups)
# Define the arguments the function accepts (but no types yet)
emitter.emit_line('static CPyArg_Parser parser = {{"{}", kwlist, 0}};'.format(fmt))
for arg in real_args:
emitter.emit_line('PyObject *obj_{}{};'.format(
arg.name, ' = NULL' if arg.optional else ''))
cleanups = ['CPy_DECREF(obj_{});'.format(arg.name)
for arg in groups[ARG_STAR] + groups[ARG_STAR2]]
arg_ptrs: List[str] = []
if groups[ARG_STAR] or groups[ARG_STAR2]:
arg_ptrs += ['&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL']
arg_ptrs += ['&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL']
arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args]
if fn.name == '__call__' and use_vectorcall(emitter.capi_version):
nargs = 'PyVectorcall_NARGS(nargs)'
else:
nargs = 'nargs'
parse_fn = 'CPyArg_ParseStackAndKeywords'
# Special case some common signatures
if len(real_args) == 0:
# No args
parse_fn = 'CPyArg_ParseStackAndKeywordsNoArgs'
elif len(real_args) == 1 and len(groups[ARG_POS]) == 1:
# Single positional arg
parse_fn = 'CPyArg_ParseStackAndKeywordsOneArg'
elif len(real_args) == len(groups[ARG_POS]) + len(groups[ARG_OPT]):
# No keyword-only args, *args or **kwargs
parse_fn = 'CPyArg_ParseStackAndKeywordsSimple'
emitter.emit_lines(
'if (!{}(args, {}, kwnames, &parser{})) {{'.format(
parse_fn, nargs, ''.join(', ' + n for n in arg_ptrs)),
'return NULL;',
'}')
traceback_code = generate_traceback_code(fn, emitter, source_path, module_name)
generate_wrapper_core(fn, emitter, groups[ARG_OPT] + groups[ARG_NAMED_OPT],
cleanups=cleanups,
traceback_code=traceback_code)
emitter.emit_line('}')
# Legacy generic wrapper functions
#
# These take a self object, a Python tuple of positional arguments,
# and a dict of keyword arguments. These are a lot slower than
# vectorcall wrappers, especially in calls involving keyword
# arguments.
def legacy_wrapper_function_header(fn: FuncIR, names: NameGenerator) -> str:
return 'PyObject *{prefix}{name}(PyObject *self, PyObject *args, PyObject *kw)'.format(
prefix=PREFIX,
name=fn.cname(names))
def generate_legacy_wrapper_function(fn: FuncIR,
emitter: Emitter,
source_path: str,
module_name: str) -> None:
"""Generates a CPython-compatible legacy wrapper for a native function.
In particular, this handles unboxing the arguments, calling the native function, and
then boxing the return value.
"""
emitter.emit_line('{} {{'.format(legacy_wrapper_function_header(fn, emitter.names)))
# If fn is a method, then the first argument is a self param
real_args = list(fn.args)
if fn.class_name and not fn.decl.kind == FUNC_STATICMETHOD:
arg = real_args.pop(0)
emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name))
# Need to order args as: required, optional, kwonly optional, kwonly required
# This is because CPyArg_ParseTupleAndKeywords format string requires
# them grouped in that way.
groups = make_arg_groups(real_args)
reordered_args = reorder_arg_groups(groups)
emitter.emit_line(make_static_kwlist(reordered_args))
for arg in real_args:
emitter.emit_line('PyObject *obj_{}{};'.format(
arg.name, ' = NULL' if arg.optional else ''))
cleanups = ['CPy_DECREF(obj_{});'.format(arg.name)
for arg in groups[ARG_STAR] + groups[ARG_STAR2]]
arg_ptrs: List[str] = []
if groups[ARG_STAR] or groups[ARG_STAR2]:
arg_ptrs += ['&obj_{}'.format(groups[ARG_STAR][0].name) if groups[ARG_STAR] else 'NULL']
arg_ptrs += ['&obj_{}'.format(groups[ARG_STAR2][0].name) if groups[ARG_STAR2] else 'NULL']
arg_ptrs += ['&obj_{}'.format(arg.name) for arg in reordered_args]
emitter.emit_lines(
'if (!CPyArg_ParseTupleAndKeywords(args, kw, "{}", "{}", kwlist{})) {{'.format(
make_format_string(None, groups), fn.name, ''.join(', ' + n for n in arg_ptrs)),
'return NULL;',
'}')
traceback_code = generate_traceback_code(fn, emitter, source_path, module_name)
generate_wrapper_core(fn, emitter, groups[ARG_OPT] + groups[ARG_NAMED_OPT],
cleanups=cleanups,
traceback_code=traceback_code)
emitter.emit_line('}')
# Specialized wrapper functions
def generate_dunder_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for native __dunder__ methods to be able to fit into the mapping
protocol slot. This specifically means that the arguments are taken as *PyObjects and returned
as *PyObjects.
"""
gen = WrapperGenerator(cl, emitter)
gen.set_target(fn)
gen.emit_header()
gen.emit_arg_processing()
gen.emit_call()
gen.finish()
return gen.wrapper_name()
def generate_bin_op_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for a native binary dunder method.
The same wrapper that handles the forward method (e.g. __add__) also handles
the corresponding reverse method (e.g. __radd__), if defined.
Both arguments and the return value are PyObject *.
"""
gen = WrapperGenerator(cl, emitter)
gen.set_target(fn)
gen.arg_names = ['left', 'right']
wrapper_name = gen.wrapper_name()
gen.emit_header()
if fn.name not in reverse_op_methods and fn.name in reverse_op_method_names:
# There's only a reverse operator method.
generate_bin_op_reverse_only_wrapper(cl, fn, emitter, gen)
else:
rmethod = reverse_op_methods[fn.name]
fn_rev = cl.get_method(rmethod)
if fn_rev is None:
# There's only a forward operator method.
generate_bin_op_forward_only_wrapper(cl, fn, emitter, gen)
else:
# There's both a forward and a reverse operator method.
generate_bin_op_both_wrappers(cl, fn, fn_rev, emitter, gen)
return wrapper_name
def generate_bin_op_forward_only_wrapper(cl: ClassIR,
fn: FuncIR,
emitter: Emitter,
gen: 'WrapperGenerator') -> None:
gen.emit_arg_processing(error=GotoHandler('typefail'), raise_exception=False)
gen.emit_call(not_implemented_handler='goto typefail;')
gen.emit_error_handling()
emitter.emit_label('typefail')
# If some argument has an incompatible type, treat this the same as
# returning NotImplemented, and try to call the reverse operator method.
#
    # Note that in normal Python you'd return NotImplemented
    # explicitly, but that doesn't generally work here since the
    # body won't be executed at all if there is an argument
    # type check failure.
#
# The recommended way is to still use a type check in the
# body. This will only be used in interpreted mode:
#
# def __add__(self, other: int) -> Foo:
# if not isinstance(other, int):
# return NotImplemented
# ...
rmethod = reverse_op_methods[fn.name]
emitter.emit_line(
'return CPy_CallReverseOpMethod(obj_left, obj_right, "{}", "{}");'.format(
op_methods_to_symbols[fn.name],
rmethod))
gen.finish()
def generate_bin_op_reverse_only_wrapper(cl: ClassIR,
fn_rev: FuncIR,
emitter: Emitter,
gen: 'WrapperGenerator') -> None:
gen.arg_names = ['right', 'left']
gen.emit_arg_processing(error=GotoHandler('typefail'), raise_exception=False)
gen.emit_call()
gen.emit_error_handling()
emitter.emit_label('typefail')
emitter.emit_line('Py_INCREF(Py_NotImplemented);')
emitter.emit_line('return Py_NotImplemented;')
gen.finish()
def generate_bin_op_both_wrappers(cl: ClassIR,
fn: FuncIR,
fn_rev: FuncIR,
emitter: Emitter,
gen: 'WrapperGenerator') -> None:
# There's both a forward and a reverse operator method. First
# check if we should try calling the forward one. If the
# argument type check fails, fall back to the reverse method.
#
# Similar to above, we can't perfectly match Python semantics.
# In regular Python code you'd return NotImplemented if the
# operand has the wrong type, but in compiled code we'll never
# get to execute the type check.
emitter.emit_line('if (PyObject_IsInstance(obj_left, (PyObject *){})) {{'.format(
emitter.type_struct_name(cl)))
gen.emit_arg_processing(error=GotoHandler('typefail'), raise_exception=False)
gen.emit_call(not_implemented_handler='goto typefail;')
gen.emit_error_handling()
emitter.emit_line('}')
emitter.emit_label('typefail')
emitter.emit_line('if (PyObject_IsInstance(obj_right, (PyObject *){})) {{'.format(
emitter.type_struct_name(cl)))
gen.set_target(fn_rev)
gen.arg_names = ['right', 'left']
gen.emit_arg_processing(error=GotoHandler('typefail2'), raise_exception=False)
gen.emit_call()
gen.emit_error_handling()
emitter.emit_line('} else {')
emitter.emit_line(
'return CPy_CallReverseOpMethod(obj_left, obj_right, "{}", "{}");'.format(
op_methods_to_symbols[fn.name],
fn_rev.name))
emitter.emit_line('}')
emitter.emit_label('typefail2')
emitter.emit_line('Py_INCREF(Py_NotImplemented);')
emitter.emit_line('return Py_NotImplemented;')
gen.finish()
RICHCOMPARE_OPS = {
'__lt__': 'Py_LT',
'__gt__': 'Py_GT',
'__le__': 'Py_LE',
'__ge__': 'Py_GE',
'__eq__': 'Py_EQ',
'__ne__': 'Py_NE',
}
def generate_richcompare_wrapper(cl: ClassIR, emitter: Emitter) -> Optional[str]:
"""Generates a wrapper for richcompare dunder methods."""
# Sort for determinism on Python 3.5
matches = sorted([name for name in RICHCOMPARE_OPS if cl.has_method(name)])
if not matches:
return None
name = '{}_RichCompare_{}'.format(DUNDER_PREFIX, cl.name_prefix(emitter.names))
emitter.emit_line(
'static PyObject *{name}(PyObject *obj_lhs, PyObject *obj_rhs, int op) {{'.format(
name=name)
)
emitter.emit_line('switch (op) {')
for func in matches:
emitter.emit_line('case {}: {{'.format(RICHCOMPARE_OPS[func]))
method = cl.get_method(func)
assert method is not None
generate_wrapper_core(method, emitter, arg_names=['lhs', 'rhs'])
emitter.emit_line('}')
emitter.emit_line('}')
emitter.emit_line('Py_INCREF(Py_NotImplemented);')
emitter.emit_line('return Py_NotImplemented;')
emitter.emit_line('}')
return name
def generate_get_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for native __get__ methods."""
name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
emitter.emit_line(
'static PyObject *{name}(PyObject *self, PyObject *instance, PyObject *owner) {{'.
format(name=name))
emitter.emit_line('instance = instance ? instance : Py_None;')
emitter.emit_line('return {}{}(self, instance, owner);'.format(
NATIVE_PREFIX,
fn.cname(emitter.names)))
emitter.emit_line('}')
return name
def generate_hash_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for native __hash__ methods."""
name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(
name=name
))
emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type),
emitter.get_group_prefix(fn.decl),
NATIVE_PREFIX,
fn.cname(emitter.names)))
emitter.emit_error_check('retval', fn.ret_type, 'return -1;')
if is_int_rprimitive(fn.ret_type):
emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);')
else:
emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);')
emitter.emit_dec_ref('retval', fn.ret_type)
emitter.emit_line('if (PyErr_Occurred()) return -1;')
    # We can't return -1 from a hash function (CPython reserves it for errors).
emitter.emit_line('if (val == -1) return -2;')
emitter.emit_line('return val;')
emitter.emit_line('}')
return name
def generate_len_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for native __len__ methods."""
name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(
name=name
))
emitter.emit_line('{}retval = {}{}{}(self);'.format(emitter.ctype_spaced(fn.ret_type),
emitter.get_group_prefix(fn.decl),
NATIVE_PREFIX,
fn.cname(emitter.names)))
emitter.emit_error_check('retval', fn.ret_type, 'return -1;')
if is_int_rprimitive(fn.ret_type):
emitter.emit_line('Py_ssize_t val = CPyTagged_AsSsize_t(retval);')
else:
emitter.emit_line('Py_ssize_t val = PyLong_AsSsize_t(retval);')
emitter.emit_dec_ref('retval', fn.ret_type)
emitter.emit_line('if (PyErr_Occurred()) return -1;')
emitter.emit_line('return val;')
emitter.emit_line('}')
return name
def generate_bool_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for native __bool__ methods."""
name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
emitter.emit_line('static int {name}(PyObject *self) {{'.format(
name=name
))
emitter.emit_line('{}val = {}{}(self);'.format(emitter.ctype_spaced(fn.ret_type),
NATIVE_PREFIX,
fn.cname(emitter.names)))
emitter.emit_error_check('val', fn.ret_type, 'return -1;')
# This wouldn't be that hard to fix but it seems unimportant and
# getting error handling and unboxing right would be fiddly. (And
# way easier to do in IR!)
assert is_bool_rprimitive(fn.ret_type), "Only bool return supported for __bool__"
emitter.emit_line('return val;')
emitter.emit_line('}')
return name
def generate_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for native __delitem__.
This is only called from a combined __delitem__/__setitem__ wrapper.
"""
name = '{}{}{}'.format(DUNDER_PREFIX, '__delitem__', cl.name_prefix(emitter.names))
input_args = ', '.join('PyObject *obj_{}'.format(arg.name) for arg in fn.args)
emitter.emit_line('static int {name}({input_args}) {{'.format(
name=name,
input_args=input_args,
))
generate_set_del_item_wrapper_inner(fn, emitter, fn.args)
return name
def generate_set_del_item_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for native __setitem__ method (also works for __delitem__).
This is used with the mapping protocol slot. Arguments are taken as *PyObjects and we
return a negative C int on error.
Create a separate wrapper function for __delitem__ as needed and have the
__setitem__ wrapper call it if the value is NULL. Return the name
of the outer (__setitem__) wrapper.
"""
method_cls = cl.get_method_and_class('__delitem__')
del_name = None
if method_cls and method_cls[1] == cl:
# Generate a separate wrapper for __delitem__
del_name = generate_del_item_wrapper(cl, method_cls[0], emitter)
args = fn.args
if fn.name == '__delitem__':
# Add an extra argument for value that we expect to be NULL.
args = list(args) + [RuntimeArg('___value', object_rprimitive, ARG_POS)]
name = '{}{}{}'.format(DUNDER_PREFIX, '__setitem__', cl.name_prefix(emitter.names))
input_args = ', '.join('PyObject *obj_{}'.format(arg.name) for arg in args)
emitter.emit_line('static int {name}({input_args}) {{'.format(
name=name,
input_args=input_args,
))
# First check if this is __delitem__
emitter.emit_line('if (obj_{} == NULL) {{'.format(args[2].name))
if del_name is not None:
# We have a native implementation, so call it
emitter.emit_line('return {}(obj_{}, obj_{});'.format(del_name,
args[0].name,
args[1].name))
else:
# Try to call superclass method instead
emitter.emit_line(
'PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name))
emitter.emit_line('if (super == NULL) return -1;')
emitter.emit_line(
'PyObject *result = PyObject_CallMethod(super, "__delitem__", "O", obj_{});'.format(
args[1].name))
emitter.emit_line('Py_DECREF(super);')
emitter.emit_line('Py_XDECREF(result);')
emitter.emit_line('return result == NULL ? -1 : 0;')
emitter.emit_line('}')
method_cls = cl.get_method_and_class('__setitem__')
if method_cls and method_cls[1] == cl:
generate_set_del_item_wrapper_inner(fn, emitter, args)
else:
emitter.emit_line(
'PyObject *super = CPy_Super(CPyModule_builtins, obj_{});'.format(args[0].name))
emitter.emit_line('if (super == NULL) return -1;')
emitter.emit_line('PyObject *result;')
if method_cls is None and cl.builtin_base is None:
msg = "'{}' object does not support item assignment".format(cl.name)
emitter.emit_line(
'PyErr_SetString(PyExc_TypeError, "{}");'.format(msg))
emitter.emit_line('result = NULL;')
else:
# A base class may have __setitem__
emitter.emit_line(
'result = PyObject_CallMethod(super, "__setitem__", "OO", obj_{}, obj_{});'.format(
args[1].name, args[2].name))
emitter.emit_line('Py_DECREF(super);')
emitter.emit_line('Py_XDECREF(result);')
emitter.emit_line('return result == NULL ? -1 : 0;')
emitter.emit_line('}')
return name
def generate_set_del_item_wrapper_inner(fn: FuncIR, emitter: Emitter,
args: Sequence[RuntimeArg]) -> None:
for arg in args:
generate_arg_check(arg.name, arg.type, emitter, GotoHandler('fail'))
native_args = ', '.join('arg_{}'.format(arg.name) for arg in args)
emitter.emit_line('{}val = {}{}({});'.format(emitter.ctype_spaced(fn.ret_type),
NATIVE_PREFIX,
fn.cname(emitter.names),
native_args))
emitter.emit_error_check('val', fn.ret_type, 'goto fail;')
emitter.emit_dec_ref('val', fn.ret_type)
emitter.emit_line('return 0;')
emitter.emit_label('fail')
emitter.emit_line('return -1;')
emitter.emit_line('}')
def generate_contains_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
"""Generates a wrapper for a native __contains__ method."""
name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
emitter.emit_line(
'static int {name}(PyObject *self, PyObject *obj_item) {{'.
format(name=name))
generate_arg_check('item', fn.args[1].type, emitter, ReturnHandler('-1'))
emitter.emit_line('{}val = {}{}(self, arg_item);'.format(emitter.ctype_spaced(fn.ret_type),
NATIVE_PREFIX,
fn.cname(emitter.names)))
emitter.emit_error_check('val', fn.ret_type, 'return -1;')
if is_bool_rprimitive(fn.ret_type):
emitter.emit_line('return val;')
else:
emitter.emit_line('int boolval = PyObject_IsTrue(val);')
emitter.emit_dec_ref('val', fn.ret_type)
emitter.emit_line('return boolval;')
emitter.emit_line('}')
return name
# Helpers
def generate_wrapper_core(fn: FuncIR,
emitter: Emitter,
optional_args: Optional[List[RuntimeArg]] = None,
arg_names: Optional[List[str]] = None,
cleanups: Optional[List[str]] = None,
traceback_code: Optional[str] = None) -> None:
"""Generates the core part of a wrapper function for a native function.
This expects each argument as a PyObject * named obj_{arg} as a precondition.
It converts the PyObject *s to the necessary types, checking and unboxing if necessary,
makes the call, then boxes the result if necessary and returns it.
"""
optional_args = optional_args or []
cleanups = cleanups or []
use_goto = bool(cleanups or traceback_code)
error = ReturnHandler('NULL') if not use_goto else GotoHandler('fail')
arg_names = arg_names or [arg.name for arg in fn.args]
for arg_name, arg in zip(arg_names, fn.args):
# Suppress the argument check for *args/**kwargs, since we know it must be right.
typ = arg.type if arg.kind not in (ARG_STAR, ARG_STAR2) else object_rprimitive
generate_arg_check(arg_name,
typ,
emitter,
error,
optional=arg in optional_args)
native_args = ', '.join('arg_{}'.format(arg) for arg in arg_names)
if fn.ret_type.is_unboxed or use_goto:
# TODO: The Py_RETURN macros return the correct PyObject * with reference count handling.
# Are they relevant?
emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(fn.ret_type),
NATIVE_PREFIX,
fn.cname(emitter.names),
native_args))
emitter.emit_lines(*cleanups)
if fn.ret_type.is_unboxed:
emitter.emit_error_check('retval', fn.ret_type, 'return NULL;')
emitter.emit_box('retval', 'retbox', fn.ret_type, declare_dest=True)
emitter.emit_line('return {};'.format('retbox' if fn.ret_type.is_unboxed else 'retval'))
else:
emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX,
fn.cname(emitter.names),
native_args))
# TODO: Tracebacks?
if use_goto:
emitter.emit_label('fail')
emitter.emit_lines(*cleanups)
if traceback_code:
emitter.emit_lines(traceback_code)
emitter.emit_lines('return NULL;')
def generate_arg_check(name: str,
typ: RType,
emitter: Emitter,
error: Optional[ErrorHandler] = None,
*,
optional: bool = False,
raise_exception: bool = True) -> None:
"""Insert a runtime check for argument and unbox if necessary.
The object is named PyObject *obj_{}. This is expected to generate
a value of name arg_{} (unboxed if necessary). For each primitive a runtime
check ensures the correct type.
"""
error = error or AssignHandler()
if typ.is_unboxed:
# Borrow when unboxing to avoid reference count manipulation.
emitter.emit_unbox('obj_{}'.format(name),
'arg_{}'.format(name),
typ,
declare_dest=True,
raise_exception=raise_exception,
error=error,
borrow=True,
optional=optional)
elif is_object_rprimitive(typ):
# Object is trivial since any object is valid
if optional:
emitter.emit_line('PyObject *arg_{};'.format(name))
emitter.emit_line('if (obj_{} == NULL) {{'.format(name))
emitter.emit_line('arg_{} = {};'.format(name, emitter.c_error_value(typ)))
emitter.emit_lines('} else {', 'arg_{} = obj_{}; '.format(name, name), '}')
else:
emitter.emit_line('PyObject *arg_{} = obj_{};'.format(name, name))
else:
emitter.emit_cast('obj_{}'.format(name),
'arg_{}'.format(name),
typ,
declare_dest=True,
raise_exception=raise_exception,
error=error,
optional=optional)
class WrapperGenerator:
"""Helper that simplifies the generation of wrapper functions."""
# TODO: Use this for more wrappers
def __init__(self, cl: ClassIR, emitter: Emitter) -> None:
self.cl = cl
self.emitter = emitter
self.cleanups: List[str] = []
self.optional_args: List[RuntimeArg] = []
self.traceback_code = ''
def set_target(self, fn: FuncIR) -> None:
"""Set the wrapped function.
It's fine to modify the attributes initialized here later to customize
the wrapper function.
"""
self.target_name = fn.name
self.target_cname = fn.cname(self.emitter.names)
self.arg_names = [arg.name for arg in fn.args]
self.args = fn.args[:]
self.ret_type = fn.ret_type
def wrapper_name(self) -> str:
"""Return the name of the wrapper function."""
return '{}{}{}'.format(DUNDER_PREFIX,
self.target_name,
self.cl.name_prefix(self.emitter.names))
def use_goto(self) -> bool:
"""Do we use a goto for error handling (instead of straight return)?"""
return bool(self.cleanups or self.traceback_code)
def emit_header(self) -> None:
"""Emit the function header of the wrapper implementation."""
input_args = ', '.join('PyObject *obj_{}'.format(arg) for arg in self.arg_names)
self.emitter.emit_line('static PyObject *{name}({input_args}) {{'.format(
name=self.wrapper_name(),
input_args=input_args,
))
def emit_arg_processing(self,
error: Optional[ErrorHandler] = None,
raise_exception: bool = True) -> None:
"""Emit validation and unboxing of arguments."""
error = error or self.error()
for arg_name, arg in zip(self.arg_names, self.args):
# Suppress the argument check for *args/**kwargs, since we know it must be right.
typ = arg.type if arg.kind not in (ARG_STAR, ARG_STAR2) else object_rprimitive
generate_arg_check(arg_name,
typ,
self.emitter,
error,
raise_exception=raise_exception,
optional=arg in self.optional_args)
def emit_call(self, not_implemented_handler: str = '') -> None:
"""Emit call to the wrapper function.
If not_implemented_handler is non-empty, use this C code to handle
a NotImplemented return value (if it's possible based on the return type).
"""
native_args = ', '.join('arg_{}'.format(arg) for arg in self.arg_names)
ret_type = self.ret_type
emitter = self.emitter
if ret_type.is_unboxed or self.use_goto():
# TODO: The Py_RETURN macros return the correct PyObject * with reference count
# handling. Are they relevant?
emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(ret_type),
NATIVE_PREFIX,
self.target_cname,
native_args))
emitter.emit_lines(*self.cleanups)
if ret_type.is_unboxed:
emitter.emit_error_check('retval', ret_type, 'return NULL;')
emitter.emit_box('retval', 'retbox', ret_type, declare_dest=True)
emitter.emit_line(
'return {};'.format('retbox' if ret_type.is_unboxed else 'retval'))
else:
if not_implemented_handler and not isinstance(ret_type, RInstance):
# The return value type may overlap with NotImplemented.
emitter.emit_line('PyObject *retbox = {}{}({});'.format(NATIVE_PREFIX,
self.target_cname,
native_args))
emitter.emit_lines('if (retbox == Py_NotImplemented) {',
not_implemented_handler,
'}',
'return retbox;')
else:
emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX,
self.target_cname,
native_args))
# TODO: Tracebacks?
def error(self) -> ErrorHandler:
"""Figure out how to deal with errors in the wrapper."""
if self.cleanups or self.traceback_code:
# We'll have a label at the end with error handling code.
return GotoHandler('fail')
else:
# Nothing special needs to done to handle errors, so just return.
return ReturnHandler('NULL')
def emit_error_handling(self) -> None:
"""Emit error handling block at the end of the wrapper, if needed."""
emitter = self.emitter
if self.use_goto():
emitter.emit_label('fail')
emitter.emit_lines(*self.cleanups)
if self.traceback_code:
emitter.emit_line(self.traceback_code)
emitter.emit_line('return NULL;')
def finish(self) -> None:
self.emitter.emit_line('}')
|
py | 1a4812b6757661245348537a9d3696ee75f2a47e | # coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for the fairness example."""
import functools
import jax
import jax.numpy as jnp
import ott
def binary_cross_entropy(logits, labels):
return jnp.sum(
-labels * jnp.log(logits) - (1 - labels) * jnp.log(1 - logits))
def compute_metrics(logits, labels):
loss = binary_cross_entropy(logits, labels)
accuracy = jnp.mean((logits > 0.5) == labels)
metrics = {
'loss': loss,
'accuracy': accuracy,
}
metrics = jax.lax.pmean(metrics, axis_name='batch')
return metrics
@functools.partial(jax.jit, static_argnums=(2, 3))
def sort_group(inputs: jnp.ndarray,
in_group: jnp.ndarray,
quantization: int,
epsilon: float):
"""Sorts and quantizes only the member of the given group.
Args:
inputs: 1D array to be sorted.
in_group: a 1D array of 0s and 1s indicating if the element is part of the
group or not.
quantization: the number of values the sorted values output should be mapped
onto.
epsilon: sinkhorn entropic regularization.
Returns:
A sorted array of size `quantization`.
"""
a = in_group / jnp.sum(in_group)
b = jnp.ones(quantization) / quantization
ot = ott.tools.soft_sort.transport_for_sort(
inputs, a, b, dict(epsilon=epsilon))
return 1.0 / b * ot.apply(inputs, axis=0)
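# A minimal usage sketch (values and shapes are illustrative assumptions;
# requires JAX and ott to be importable):
#
#   scores = jnp.array([0.2, 0.9, 0.4, 0.7])
#   in_group = jnp.array([1.0, 0.0, 1.0, 1.0])  # three of four points in-group
#   q = sort_group(scores, in_group, quantization=2, epsilon=1e-2)
#   # q approximates the sorted in-group scores mapped onto 2 quantiles.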
def fairness_regularizer(inputs: jnp.ndarray,
groups: jnp.ndarray,
quantization: int = 16,
epsilon: float = 1e-2,
num_groups: int = 2):
"""Approximation of the wasserstein between the per-group distributions."""
quantiles = jnp.stack([sort_group(inputs, groups == g, quantization, epsilon)
for g in range(num_groups)])
weights = jnp.stack(
[jnp.sum(groups == g) for g in range(num_groups)]) / groups.shape[0]
mean_quantile = jnp.sum(weights[:, None] * quantiles, axis=0)
delta = jnp.where(quantiles,
quantiles - mean_quantile,
jnp.zeros_like(mean_quantile))
return jnp.mean(delta ** 2)
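# One way to combine the classification loss with the fairness penalty; the
# 0.1 default weight is an arbitrary illustration, not a tuned value.
def total_loss(logits, labels, groups, fairness_weight=0.1):
  return (binary_cross_entropy(logits, labels)
          + fairness_weight * fairness_regularizer(logits, groups))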
|
py | 1a4813452073bf8ee8872c7445abc1803a00f64d | import tensorflow as tf
import numpy as np
import csv
import random
from keras.optimizers import Adam
from keras.layers import (
Flatten, Dense, Dropout, Convolution2D, Activation, BatchNormalization
)
from keras.models import Model, Sequential, model_from_json
from scipy.misc import imread, imresize
from sklearn.model_selection import train_test_split
import json
flags = tf.app.flags
FLAGS = flags.FLAGS
# command line flags
flags.DEFINE_string('save_file', 'output_model', "The model and weights file to save (.json and .h5)")
flags.DEFINE_string('driving_log', 'driving_log.csv', 'The driving log.')
flags.DEFINE_integer('epochs', 8, "The number of epochs.")
flags.DEFINE_integer('batch_size', 64, "The batch size.")
flags.DEFINE_integer('epoch_sample', 1000, 'The epoch sample.')
flags.DEFINE_float('lrate', 0.001, 'The learning rate')
flags.DEFINE_integer('validation_sample', 1000, 'The validation sample.')
flags.DEFINE_integer('testing_sample', 1000, 'The testing sample.')
CSV_CENTER_IMAGE_INDEX = 0
CSV_LEFT_IMAGE_INDEX = 1
CSV_RIGHT_IMAGE_INDEX = 2
CSV_STEERING_IMAGE_INDEX = 3
CSV_THROTTLE_IMAGE_INDEX = 4
CSV_BRAKE_IMAGE_INDEX = 5
CSV_SPEED_IMAGE_INDEX = 6
def nvidia_model(image):
model = Sequential()
model.add(BatchNormalization(axis=1, input_shape=image.shape))
model.add(Convolution2D(16, 3, 3, border_mode='valid', subsample=(2, 2), activation='elu'))
model.add(Convolution2D(24, 3, 3, border_mode='valid', subsample=(1, 2), activation='elu'))
model.add(Convolution2D(36, 3, 3, border_mode='valid', activation='elu'))
model.add(Convolution2D(48, 2, 2, border_mode='valid', activation='elu'))
model.add(Convolution2D(48, 2, 2, border_mode='valid', activation='elu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Dropout(.5))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('elu'))
model.add(Dense(1))
model.summary()
adam = Adam(lr=0.0001)
model.compile(loss='mse',
optimizer=adam)
return model
def load_csv(path):
csv_rows = []
with open(path, 'r') as infile:
reader = csv.reader(infile)
for row in reader:
csv_rows.append(row)
preprocess(csv_rows)
csv_rows_main, csv_rows_test = train_test_split(csv_rows, test_size=0.1)
csv_rows_train, csv_rows_val = train_test_split(csv_rows_main, test_size=0.1)
return (csv_rows_train, csv_rows_val, csv_rows_test)
def normalize(imgs):
"""
Normalize images between [-1, 1].
"""
return (imgs / 255.0) - 0.5
def flip(image, steering):
return (np.fliplr(image), -steering)
def crop(imgs):
result = []
for img in imgs:
result_img = img[10: , :, :]
result.append(result_img)
return result
def resize(imgs, shape=(20, 64, 3)):
"""
Resize images to shape.
"""
height, width, channels = shape
imgs_resized = np.empty([len(imgs), height, width, channels])
for i, img in enumerate(imgs):
imgs_resized[i] = imresize(img, shape)
return imgs_resized
def preprocess_image(img):
img = crop(img)
img = resize(img)
img = normalize(img)
return img
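# Quick sanity check of the pipeline above (illustrative; assumes a sample
# image exists on disk and uses the (20, 64, 3) target shape from `resize`):
#
#   sample = [imread('sample.jpg').astype(np.float32)]
#   processed = preprocess_image(sample)
#   assert processed.shape == (1, 20, 64, 3)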
def generator_from(csv_rows):
    while True:
        for i in range(0, len(csv_rows), FLAGS.batch_size):
            current_images = []
            current_angles = []
            for row in csv_rows[i:i + FLAGS.batch_size]:
                angle = float(row[3])
                current_images.append(imread(row[0].strip()).astype(np.float32))
                current_angles.append(angle)
                if row[1] != '':
                    current_images.append(imread(row[1].strip()).astype(np.float32))
                    current_angles.append(angle + .25)
                if row[2] != '':
                    current_images.append(imread(row[2].strip()).astype(np.float32))
                    current_angles.append(angle - .25)
                # Augment with a horizontally flipped copy of the center image.
                new_image, new_angle = flip(imread(row[0].strip()).astype(np.float32), angle)
                current_images.append(new_image)
                current_angles.append(new_angle)
            current_images = preprocess_image(current_images)
            yield (current_images, np.array(current_angles))
def get_image(path):
    with open(path, mode='r') as infile:
        reader = csv.reader(infile)
        for rows in reader:
            image = imread(rows[0]).astype(np.float32)
            image = image - np.mean(image)
            return preprocess_image(np.array([image]))[0]
def save_model(model):
    print("Saving model...")
    model.save_weights(FLAGS.save_file + '.h5')
    model_as_json = model.to_json()
    with open(FLAGS.save_file + '.json', "w") as model_file:
        model_file.write(model_as_json)
    print("Model saved.")
def save(model, prefix):
"""save model for future inspection and continuous training
"""
model_file = prefix + ".json"
weight_file = prefix + ".h5"
json.dump(model.to_json(), open(model_file, "w"))
model.save_weights(weight_file)
print("Model saved.")
return model
def restore(prefix):
"""restore a saved model
"""
model_file = prefix + ".json"
weight_file = prefix + ".h5"
model = model_from_json(json.load(open(model_file)))
model.load_weights(weight_file)
print("Model loaded.")
return model
def shuffle(csv_rows):
print("Shuffled the data.")
random.shuffle(csv_rows)
return csv_rows
def preprocess(csv_rows):
csv_rows = shuffle(csv_rows)
return csv_rows
def main(_):
(csv_rows_train, csv_rows_val, csv_rows_test) = load_csv(FLAGS.driving_log)
image = get_image(FLAGS.driving_log)
model = nvidia_model(image)
model.fit_generator(
generator=generator_from(csv_rows_train),
samples_per_epoch=FLAGS.epoch_sample,
nb_epoch=FLAGS.epochs,
validation_data=generator_from(csv_rows_val),
nb_val_samples=FLAGS.validation_sample,
)
# Evaluate the model
model.evaluate_generator(
generator=generator_from(csv_rows_test),
val_samples=FLAGS.testing_sample,
)
save(model, FLAGS.save_file)
# parses flags and calls the `main` function above
if __name__ == '__main__':
tf.app.run()
|
py | 1a4813daa719d7f976f6e765204fa73a5f2267d5 | from discord.ext import commands
import discord
import youtube_dl
import asyncio
from discord import utils
import sqlite3
class Player(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.db = sqlite3.connect('playlist.db')
@commands.command(name="정크랫")
async def 정크랫(self, ctx):
await ctx.send('왜 불렀음?')
@commands.command(name="아", aliases=["재생"])
    async def 아(self, ctx: commands.Context, *q: str):
url = ' '.join(q)
ytdl_options = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'outtmpl': f'./songs/{ctx.guild.id}/%(extractor)s-%(id)s.mp3'
}
ytdl = youtube_dl.YoutubeDL(ytdl_options)
loop = asyncio.get_event_loop()
if url.startswith('https://') or url.startswith('http://'):
utype = 'url'
else:
utype = 'query'
def fetch_video():
if utype == 'url':
return ytdl.extract_info(url)
elif utype == 'query':
return ytdl.extract_info(f"ytsearch:{url}")
data = await loop.run_in_executor(None, lambda: fetch_video())
if 'entries' in data:
data = data['entries'][0]
vc = self.get_voice_client(ctx)
source = ytdl.prepare_filename(data)
vc.play(source=discord.FFmpegPCMAudio(source=source))
embed = discord.Embed(title="플레이중")
embed.add_field(name="곡 정보", value=f"제목: {data.get('title')}")
await ctx.send(embed=embed)
@commands.command(name="stop", aliases=["정지"])
    async def stop(self, ctx: commands.Context):
self.get_voice_client(ctx).stop()
@commands.command(name="플레이리스트", aliases=['playlist'])
async def playlist(self, ctx: commands.Context):
con = self.db
con.execute(f'CREATE TABLE IF NOT EXISTS pl_{ctx.guild.id}(query varchar(255))')
con.commit()
await ctx.send('플레이리스트')
def get_voice_client(self, ctx: commands.Context) -> discord.VoiceClient:
return utils.get(self.bot.voice_clients, guild=ctx.guild)
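# Minimal wiring sketch (the prefix and token are placeholders; requires
# FFmpeg on PATH and youtube_dl installed):
#
#   bot = commands.Bot(command_prefix='!')
#   bot.add_cog(Player(bot))
#   bot.run('YOUR_BOT_TOKEN')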
|
py | 1a48144780037e8a2df9a8ccb7438519372b680a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AS2AcknowledgementConnectionSettingsResponse',
'AS2AgreementContentResponse',
'AS2EnvelopeSettingsResponse',
'AS2ErrorSettingsResponse',
'AS2MdnSettingsResponse',
'AS2MessageConnectionSettingsResponse',
'AS2OneWayAgreementResponse',
'AS2ProtocolSettingsResponse',
'AS2SecuritySettingsResponse',
'AS2ValidationSettingsResponse',
'AgreementContentResponse',
'AssemblyPropertiesResponse',
'AzureResourceErrorInfoResponse',
'B2BPartnerContentResponse',
'BatchConfigurationPropertiesResponse',
'BatchReleaseCriteriaResponse',
'BusinessIdentityResponse',
'ContentHashResponse',
'ContentLinkResponse',
'EdifactAcknowledgementSettingsResponse',
'EdifactAgreementContentResponse',
'EdifactDelimiterOverrideResponse',
'EdifactEnvelopeOverrideResponse',
'EdifactEnvelopeSettingsResponse',
'EdifactFramingSettingsResponse',
'EdifactMessageFilterResponse',
'EdifactMessageIdentifierResponse',
'EdifactOneWayAgreementResponse',
'EdifactProcessingSettingsResponse',
'EdifactProtocolSettingsResponse',
'EdifactSchemaReferenceResponse',
'EdifactValidationOverrideResponse',
'EdifactValidationSettingsResponse',
'ExpressionResponse',
'ExpressionRootResponse',
'IntegrationAccountMapPropertiesResponseParametersSchema',
'IntegrationAccountSkuResponse',
'KeyVaultKeyReferenceResponse',
'KeyVaultKeyReferenceResponseKeyVault',
'KeyVaultKeyResponse',
'KeyVaultKeyResponseAttributes',
'PartnerContentResponse',
'RecurrenceScheduleOccurrenceResponse',
'RecurrenceScheduleResponse',
'ResourceReferenceResponse',
'SkuResponse',
'WorkflowParameterResponse',
'WorkflowTriggerListCallbackUrlQueriesResponse',
'WorkflowTriggerRecurrenceResponse',
'X12AcknowledgementSettingsResponse',
'X12AgreementContentResponse',
'X12DelimiterOverridesResponse',
'X12EnvelopeOverrideResponse',
'X12EnvelopeSettingsResponse',
'X12FramingSettingsResponse',
'X12MessageFilterResponse',
'X12MessageIdentifierResponse',
'X12OneWayAgreementResponse',
'X12ProcessingSettingsResponse',
'X12ProtocolSettingsResponse',
'X12SchemaReferenceResponse',
'X12SecuritySettingsResponse',
'X12ValidationOverrideResponse',
'X12ValidationSettingsResponse',
]
@pulumi.output_type
class AS2AcknowledgementConnectionSettingsResponse(dict):
"""
The AS2 agreement acknowledgement connection settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ignoreCertificateNameMismatch":
suggest = "ignore_certificate_name_mismatch"
elif key == "keepHttpConnectionAlive":
suggest = "keep_http_connection_alive"
elif key == "supportHttpStatusCodeContinue":
suggest = "support_http_status_code_continue"
elif key == "unfoldHttpHeaders":
suggest = "unfold_http_headers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2AcknowledgementConnectionSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2AcknowledgementConnectionSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2AcknowledgementConnectionSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ignore_certificate_name_mismatch: bool,
keep_http_connection_alive: bool,
support_http_status_code_continue: bool,
unfold_http_headers: bool):
"""
The AS2 agreement acknowledgement connection settings.
:param bool ignore_certificate_name_mismatch: The value indicating whether to ignore mismatch in certificate name.
:param bool keep_http_connection_alive: The value indicating whether to keep the connection alive.
:param bool support_http_status_code_continue: The value indicating whether to support HTTP status code 'CONTINUE'.
:param bool unfold_http_headers: The value indicating whether to unfold the HTTP headers.
"""
pulumi.set(__self__, "ignore_certificate_name_mismatch", ignore_certificate_name_mismatch)
pulumi.set(__self__, "keep_http_connection_alive", keep_http_connection_alive)
pulumi.set(__self__, "support_http_status_code_continue", support_http_status_code_continue)
pulumi.set(__self__, "unfold_http_headers", unfold_http_headers)
@property
@pulumi.getter(name="ignoreCertificateNameMismatch")
def ignore_certificate_name_mismatch(self) -> bool:
"""
The value indicating whether to ignore mismatch in certificate name.
"""
return pulumi.get(self, "ignore_certificate_name_mismatch")
@property
@pulumi.getter(name="keepHttpConnectionAlive")
def keep_http_connection_alive(self) -> bool:
"""
The value indicating whether to keep the connection alive.
"""
return pulumi.get(self, "keep_http_connection_alive")
@property
@pulumi.getter(name="supportHttpStatusCodeContinue")
def support_http_status_code_continue(self) -> bool:
"""
The value indicating whether to support HTTP status code 'CONTINUE'.
"""
return pulumi.get(self, "support_http_status_code_continue")
@property
@pulumi.getter(name="unfoldHttpHeaders")
def unfold_http_headers(self) -> bool:
"""
The value indicating whether to unfold the HTTP headers.
"""
return pulumi.get(self, "unfold_http_headers")
@pulumi.output_type
class AS2AgreementContentResponse(dict):
"""
The integration account AS2 agreement content.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "receiveAgreement":
suggest = "receive_agreement"
elif key == "sendAgreement":
suggest = "send_agreement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2AgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2AgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2AgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
receive_agreement: 'outputs.AS2OneWayAgreementResponse',
send_agreement: 'outputs.AS2OneWayAgreementResponse'):
"""
The integration account AS2 agreement content.
:param 'AS2OneWayAgreementResponse' receive_agreement: The AS2 one-way receive agreement.
:param 'AS2OneWayAgreementResponse' send_agreement: The AS2 one-way send agreement.
"""
pulumi.set(__self__, "receive_agreement", receive_agreement)
pulumi.set(__self__, "send_agreement", send_agreement)
@property
@pulumi.getter(name="receiveAgreement")
def receive_agreement(self) -> 'outputs.AS2OneWayAgreementResponse':
"""
The AS2 one-way receive agreement.
"""
return pulumi.get(self, "receive_agreement")
@property
@pulumi.getter(name="sendAgreement")
def send_agreement(self) -> 'outputs.AS2OneWayAgreementResponse':
"""
The AS2 one-way send agreement.
"""
return pulumi.get(self, "send_agreement")
@pulumi.output_type
class AS2EnvelopeSettingsResponse(dict):
"""
The AS2 agreement envelope settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "autogenerateFileName":
suggest = "autogenerate_file_name"
elif key == "fileNameTemplate":
suggest = "file_name_template"
elif key == "messageContentType":
suggest = "message_content_type"
elif key == "suspendMessageOnFileNameGenerationError":
suggest = "suspend_message_on_file_name_generation_error"
elif key == "transmitFileNameInMimeHeader":
suggest = "transmit_file_name_in_mime_header"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2EnvelopeSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2EnvelopeSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2EnvelopeSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
autogenerate_file_name: bool,
file_name_template: str,
message_content_type: str,
suspend_message_on_file_name_generation_error: bool,
transmit_file_name_in_mime_header: bool):
"""
The AS2 agreement envelope settings.
        :param bool autogenerate_file_name: The value indicating whether to auto-generate the file name.
        :param str file_name_template: The template for the file name.
        :param str message_content_type: The message content type.
        :param bool suspend_message_on_file_name_generation_error: The value indicating whether to suspend the message on a file name generation error.
        :param bool transmit_file_name_in_mime_header: The value indicating whether to transmit the file name in the MIME header.
"""
pulumi.set(__self__, "autogenerate_file_name", autogenerate_file_name)
pulumi.set(__self__, "file_name_template", file_name_template)
pulumi.set(__self__, "message_content_type", message_content_type)
pulumi.set(__self__, "suspend_message_on_file_name_generation_error", suspend_message_on_file_name_generation_error)
pulumi.set(__self__, "transmit_file_name_in_mime_header", transmit_file_name_in_mime_header)
@property
@pulumi.getter(name="autogenerateFileName")
def autogenerate_file_name(self) -> bool:
"""
        The value indicating whether to auto-generate the file name.
"""
return pulumi.get(self, "autogenerate_file_name")
@property
@pulumi.getter(name="fileNameTemplate")
def file_name_template(self) -> str:
"""
        The template for the file name.
"""
return pulumi.get(self, "file_name_template")
@property
@pulumi.getter(name="messageContentType")
def message_content_type(self) -> str:
"""
The message content type.
"""
return pulumi.get(self, "message_content_type")
@property
@pulumi.getter(name="suspendMessageOnFileNameGenerationError")
def suspend_message_on_file_name_generation_error(self) -> bool:
"""
        The value indicating whether to suspend the message on a file name generation error.
"""
return pulumi.get(self, "suspend_message_on_file_name_generation_error")
@property
@pulumi.getter(name="transmitFileNameInMimeHeader")
def transmit_file_name_in_mime_header(self) -> bool:
"""
        The value indicating whether to transmit the file name in the MIME header.
"""
return pulumi.get(self, "transmit_file_name_in_mime_header")
@pulumi.output_type
class AS2ErrorSettingsResponse(dict):
"""
The AS2 agreement error settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "resendIfMDNNotReceived":
suggest = "resend_if_mdn_not_received"
elif key == "suspendDuplicateMessage":
suggest = "suspend_duplicate_message"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2ErrorSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2ErrorSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2ErrorSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
resend_if_mdn_not_received: bool,
suspend_duplicate_message: bool):
"""
The AS2 agreement error settings.
        :param bool resend_if_mdn_not_received: The value indicating whether to resend the message if an MDN is not received.
:param bool suspend_duplicate_message: The value indicating whether to suspend duplicate message.
"""
pulumi.set(__self__, "resend_if_mdn_not_received", resend_if_mdn_not_received)
pulumi.set(__self__, "suspend_duplicate_message", suspend_duplicate_message)
@property
@pulumi.getter(name="resendIfMDNNotReceived")
def resend_if_mdn_not_received(self) -> bool:
"""
        The value indicating whether to resend the message if an MDN is not received.
"""
return pulumi.get(self, "resend_if_mdn_not_received")
@property
@pulumi.getter(name="suspendDuplicateMessage")
def suspend_duplicate_message(self) -> bool:
"""
The value indicating whether to suspend duplicate message.
"""
return pulumi.get(self, "suspend_duplicate_message")
@pulumi.output_type
class AS2MdnSettingsResponse(dict):
"""
The AS2 agreement mdn settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "micHashingAlgorithm":
suggest = "mic_hashing_algorithm"
elif key == "needMDN":
suggest = "need_mdn"
elif key == "sendInboundMDNToMessageBox":
suggest = "send_inbound_mdn_to_message_box"
elif key == "sendMDNAsynchronously":
suggest = "send_mdnasynchronously"
elif key == "signMDN":
suggest = "sign_mdn"
elif key == "signOutboundMDNIfOptional":
suggest = "sign_outbound_mdn_if_optional"
elif key == "dispositionNotificationTo":
suggest = "disposition_notification_to"
elif key == "mdnText":
suggest = "mdn_text"
elif key == "receiptDeliveryUrl":
suggest = "receipt_delivery_url"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2MdnSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2MdnSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2MdnSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
mic_hashing_algorithm: str,
need_mdn: bool,
send_inbound_mdn_to_message_box: bool,
send_mdnasynchronously: bool,
sign_mdn: bool,
sign_outbound_mdn_if_optional: bool,
disposition_notification_to: Optional[str] = None,
mdn_text: Optional[str] = None,
receipt_delivery_url: Optional[str] = None):
"""
The AS2 agreement mdn settings.
:param str mic_hashing_algorithm: The signing or hashing algorithm.
        :param bool need_mdn: The value indicating whether to send or request an MDN.
:param bool send_inbound_mdn_to_message_box: The value indicating whether to send inbound MDN to message box.
:param bool send_mdnasynchronously: The value indicating whether to send the asynchronous MDN.
:param bool sign_mdn: The value indicating whether the MDN needs to be signed or not.
:param bool sign_outbound_mdn_if_optional: The value indicating whether to sign the outbound MDN if optional.
:param str disposition_notification_to: The disposition notification to header value.
:param str mdn_text: The MDN text.
:param str receipt_delivery_url: The receipt delivery URL.
"""
pulumi.set(__self__, "mic_hashing_algorithm", mic_hashing_algorithm)
pulumi.set(__self__, "need_mdn", need_mdn)
pulumi.set(__self__, "send_inbound_mdn_to_message_box", send_inbound_mdn_to_message_box)
pulumi.set(__self__, "send_mdnasynchronously", send_mdnasynchronously)
pulumi.set(__self__, "sign_mdn", sign_mdn)
pulumi.set(__self__, "sign_outbound_mdn_if_optional", sign_outbound_mdn_if_optional)
if disposition_notification_to is not None:
pulumi.set(__self__, "disposition_notification_to", disposition_notification_to)
if mdn_text is not None:
pulumi.set(__self__, "mdn_text", mdn_text)
if receipt_delivery_url is not None:
pulumi.set(__self__, "receipt_delivery_url", receipt_delivery_url)
@property
@pulumi.getter(name="micHashingAlgorithm")
def mic_hashing_algorithm(self) -> str:
"""
The signing or hashing algorithm.
"""
return pulumi.get(self, "mic_hashing_algorithm")
@property
@pulumi.getter(name="needMDN")
def need_mdn(self) -> bool:
"""
        The value indicating whether to send or request an MDN.
"""
return pulumi.get(self, "need_mdn")
@property
@pulumi.getter(name="sendInboundMDNToMessageBox")
def send_inbound_mdn_to_message_box(self) -> bool:
"""
The value indicating whether to send inbound MDN to message box.
"""
return pulumi.get(self, "send_inbound_mdn_to_message_box")
@property
@pulumi.getter(name="sendMDNAsynchronously")
def send_mdnasynchronously(self) -> bool:
"""
The value indicating whether to send the asynchronous MDN.
"""
return pulumi.get(self, "send_mdnasynchronously")
@property
@pulumi.getter(name="signMDN")
def sign_mdn(self) -> bool:
"""
The value indicating whether the MDN needs to be signed or not.
"""
return pulumi.get(self, "sign_mdn")
@property
@pulumi.getter(name="signOutboundMDNIfOptional")
def sign_outbound_mdn_if_optional(self) -> bool:
"""
The value indicating whether to sign the outbound MDN if optional.
"""
return pulumi.get(self, "sign_outbound_mdn_if_optional")
@property
@pulumi.getter(name="dispositionNotificationTo")
def disposition_notification_to(self) -> Optional[str]:
"""
The disposition notification to header value.
"""
return pulumi.get(self, "disposition_notification_to")
@property
@pulumi.getter(name="mdnText")
def mdn_text(self) -> Optional[str]:
"""
The MDN text.
"""
return pulumi.get(self, "mdn_text")
@property
@pulumi.getter(name="receiptDeliveryUrl")
def receipt_delivery_url(self) -> Optional[str]:
"""
The receipt delivery URL.
"""
return pulumi.get(self, "receipt_delivery_url")
@pulumi.output_type
class AS2MessageConnectionSettingsResponse(dict):
"""
The AS2 agreement message connection settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ignoreCertificateNameMismatch":
suggest = "ignore_certificate_name_mismatch"
elif key == "keepHttpConnectionAlive":
suggest = "keep_http_connection_alive"
elif key == "supportHttpStatusCodeContinue":
suggest = "support_http_status_code_continue"
elif key == "unfoldHttpHeaders":
suggest = "unfold_http_headers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2MessageConnectionSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2MessageConnectionSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2MessageConnectionSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ignore_certificate_name_mismatch: bool,
keep_http_connection_alive: bool,
support_http_status_code_continue: bool,
unfold_http_headers: bool):
"""
The AS2 agreement message connection settings.
:param bool ignore_certificate_name_mismatch: The value indicating whether to ignore mismatch in certificate name.
:param bool keep_http_connection_alive: The value indicating whether to keep the connection alive.
:param bool support_http_status_code_continue: The value indicating whether to support HTTP status code 'CONTINUE'.
:param bool unfold_http_headers: The value indicating whether to unfold the HTTP headers.
"""
pulumi.set(__self__, "ignore_certificate_name_mismatch", ignore_certificate_name_mismatch)
pulumi.set(__self__, "keep_http_connection_alive", keep_http_connection_alive)
pulumi.set(__self__, "support_http_status_code_continue", support_http_status_code_continue)
pulumi.set(__self__, "unfold_http_headers", unfold_http_headers)
@property
@pulumi.getter(name="ignoreCertificateNameMismatch")
def ignore_certificate_name_mismatch(self) -> bool:
"""
The value indicating whether to ignore mismatch in certificate name.
"""
return pulumi.get(self, "ignore_certificate_name_mismatch")
@property
@pulumi.getter(name="keepHttpConnectionAlive")
def keep_http_connection_alive(self) -> bool:
"""
The value indicating whether to keep the connection alive.
"""
return pulumi.get(self, "keep_http_connection_alive")
@property
@pulumi.getter(name="supportHttpStatusCodeContinue")
def support_http_status_code_continue(self) -> bool:
"""
The value indicating whether to support HTTP status code 'CONTINUE'.
"""
return pulumi.get(self, "support_http_status_code_continue")
@property
@pulumi.getter(name="unfoldHttpHeaders")
def unfold_http_headers(self) -> bool:
"""
The value indicating whether to unfold the HTTP headers.
"""
return pulumi.get(self, "unfold_http_headers")
@pulumi.output_type
class AS2OneWayAgreementResponse(dict):
"""
The integration account AS2 one-way agreement.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "protocolSettings":
suggest = "protocol_settings"
elif key == "receiverBusinessIdentity":
suggest = "receiver_business_identity"
elif key == "senderBusinessIdentity":
suggest = "sender_business_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2OneWayAgreementResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2OneWayAgreementResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2OneWayAgreementResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
protocol_settings: 'outputs.AS2ProtocolSettingsResponse',
receiver_business_identity: 'outputs.BusinessIdentityResponse',
sender_business_identity: 'outputs.BusinessIdentityResponse'):
"""
The integration account AS2 one-way agreement.
:param 'AS2ProtocolSettingsResponse' protocol_settings: The AS2 protocol settings.
        :param 'BusinessIdentityResponse' receiver_business_identity: The receiver business identity.
        :param 'BusinessIdentityResponse' sender_business_identity: The sender business identity.
"""
pulumi.set(__self__, "protocol_settings", protocol_settings)
pulumi.set(__self__, "receiver_business_identity", receiver_business_identity)
pulumi.set(__self__, "sender_business_identity", sender_business_identity)
@property
@pulumi.getter(name="protocolSettings")
def protocol_settings(self) -> 'outputs.AS2ProtocolSettingsResponse':
"""
The AS2 protocol settings.
"""
return pulumi.get(self, "protocol_settings")
@property
@pulumi.getter(name="receiverBusinessIdentity")
def receiver_business_identity(self) -> 'outputs.BusinessIdentityResponse':
"""
        The receiver business identity.
"""
return pulumi.get(self, "receiver_business_identity")
@property
@pulumi.getter(name="senderBusinessIdentity")
def sender_business_identity(self) -> 'outputs.BusinessIdentityResponse':
"""
        The sender business identity.
"""
return pulumi.get(self, "sender_business_identity")
@pulumi.output_type
class AS2ProtocolSettingsResponse(dict):
"""
The AS2 agreement protocol settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementConnectionSettings":
suggest = "acknowledgement_connection_settings"
elif key == "envelopeSettings":
suggest = "envelope_settings"
elif key == "errorSettings":
suggest = "error_settings"
elif key == "mdnSettings":
suggest = "mdn_settings"
elif key == "messageConnectionSettings":
suggest = "message_connection_settings"
elif key == "securitySettings":
suggest = "security_settings"
elif key == "validationSettings":
suggest = "validation_settings"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2ProtocolSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2ProtocolSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2ProtocolSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_connection_settings: 'outputs.AS2AcknowledgementConnectionSettingsResponse',
envelope_settings: 'outputs.AS2EnvelopeSettingsResponse',
error_settings: 'outputs.AS2ErrorSettingsResponse',
mdn_settings: 'outputs.AS2MdnSettingsResponse',
message_connection_settings: 'outputs.AS2MessageConnectionSettingsResponse',
security_settings: 'outputs.AS2SecuritySettingsResponse',
validation_settings: 'outputs.AS2ValidationSettingsResponse'):
"""
The AS2 agreement protocol settings.
:param 'AS2AcknowledgementConnectionSettingsResponse' acknowledgement_connection_settings: The acknowledgement connection settings.
:param 'AS2EnvelopeSettingsResponse' envelope_settings: The envelope settings.
:param 'AS2ErrorSettingsResponse' error_settings: The error settings.
:param 'AS2MdnSettingsResponse' mdn_settings: The MDN settings.
:param 'AS2MessageConnectionSettingsResponse' message_connection_settings: The message connection settings.
:param 'AS2SecuritySettingsResponse' security_settings: The security settings.
:param 'AS2ValidationSettingsResponse' validation_settings: The validation settings.
"""
pulumi.set(__self__, "acknowledgement_connection_settings", acknowledgement_connection_settings)
pulumi.set(__self__, "envelope_settings", envelope_settings)
pulumi.set(__self__, "error_settings", error_settings)
pulumi.set(__self__, "mdn_settings", mdn_settings)
pulumi.set(__self__, "message_connection_settings", message_connection_settings)
pulumi.set(__self__, "security_settings", security_settings)
pulumi.set(__self__, "validation_settings", validation_settings)
@property
@pulumi.getter(name="acknowledgementConnectionSettings")
def acknowledgement_connection_settings(self) -> 'outputs.AS2AcknowledgementConnectionSettingsResponse':
"""
The acknowledgement connection settings.
"""
return pulumi.get(self, "acknowledgement_connection_settings")
@property
@pulumi.getter(name="envelopeSettings")
def envelope_settings(self) -> 'outputs.AS2EnvelopeSettingsResponse':
"""
The envelope settings.
"""
return pulumi.get(self, "envelope_settings")
@property
@pulumi.getter(name="errorSettings")
def error_settings(self) -> 'outputs.AS2ErrorSettingsResponse':
"""
The error settings.
"""
return pulumi.get(self, "error_settings")
@property
@pulumi.getter(name="mdnSettings")
def mdn_settings(self) -> 'outputs.AS2MdnSettingsResponse':
"""
The MDN settings.
"""
return pulumi.get(self, "mdn_settings")
@property
@pulumi.getter(name="messageConnectionSettings")
def message_connection_settings(self) -> 'outputs.AS2MessageConnectionSettingsResponse':
"""
The message connection settings.
"""
return pulumi.get(self, "message_connection_settings")
@property
@pulumi.getter(name="securitySettings")
def security_settings(self) -> 'outputs.AS2SecuritySettingsResponse':
"""
The security settings.
"""
return pulumi.get(self, "security_settings")
@property
@pulumi.getter(name="validationSettings")
def validation_settings(self) -> 'outputs.AS2ValidationSettingsResponse':
"""
The validation settings.
"""
return pulumi.get(self, "validation_settings")
@pulumi.output_type
class AS2SecuritySettingsResponse(dict):
"""
The AS2 agreement security settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enableNRRForInboundDecodedMessages":
suggest = "enable_nrr_for_inbound_decoded_messages"
elif key == "enableNRRForInboundEncodedMessages":
suggest = "enable_nrr_for_inbound_encoded_messages"
elif key == "enableNRRForInboundMDN":
suggest = "enable_nrr_for_inbound_mdn"
elif key == "enableNRRForOutboundDecodedMessages":
suggest = "enable_nrr_for_outbound_decoded_messages"
elif key == "enableNRRForOutboundEncodedMessages":
suggest = "enable_nrr_for_outbound_encoded_messages"
elif key == "enableNRRForOutboundMDN":
suggest = "enable_nrr_for_outbound_mdn"
elif key == "overrideGroupSigningCertificate":
suggest = "override_group_signing_certificate"
elif key == "encryptionCertificateName":
suggest = "encryption_certificate_name"
elif key == "sha2AlgorithmFormat":
suggest = "sha2_algorithm_format"
elif key == "signingCertificateName":
suggest = "signing_certificate_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2SecuritySettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2SecuritySettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2SecuritySettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enable_nrr_for_inbound_decoded_messages: bool,
enable_nrr_for_inbound_encoded_messages: bool,
enable_nrr_for_inbound_mdn: bool,
enable_nrr_for_outbound_decoded_messages: bool,
enable_nrr_for_outbound_encoded_messages: bool,
enable_nrr_for_outbound_mdn: bool,
override_group_signing_certificate: bool,
encryption_certificate_name: Optional[str] = None,
sha2_algorithm_format: Optional[str] = None,
signing_certificate_name: Optional[str] = None):
"""
The AS2 agreement security settings.
:param bool enable_nrr_for_inbound_decoded_messages: The value indicating whether to enable NRR for inbound decoded messages.
:param bool enable_nrr_for_inbound_encoded_messages: The value indicating whether to enable NRR for inbound encoded messages.
:param bool enable_nrr_for_inbound_mdn: The value indicating whether to enable NRR for inbound MDN.
:param bool enable_nrr_for_outbound_decoded_messages: The value indicating whether to enable NRR for outbound decoded messages.
:param bool enable_nrr_for_outbound_encoded_messages: The value indicating whether to enable NRR for outbound encoded messages.
:param bool enable_nrr_for_outbound_mdn: The value indicating whether to enable NRR for outbound MDN.
        :param bool override_group_signing_certificate: The value indicating whether to override the group signing certificate.
:param str encryption_certificate_name: The name of the encryption certificate.
:param str sha2_algorithm_format: The Sha2 algorithm format. Valid values are Sha2, ShaHashSize, ShaHyphenHashSize, Sha2UnderscoreHashSize.
:param str signing_certificate_name: The name of the signing certificate.
"""
pulumi.set(__self__, "enable_nrr_for_inbound_decoded_messages", enable_nrr_for_inbound_decoded_messages)
pulumi.set(__self__, "enable_nrr_for_inbound_encoded_messages", enable_nrr_for_inbound_encoded_messages)
pulumi.set(__self__, "enable_nrr_for_inbound_mdn", enable_nrr_for_inbound_mdn)
pulumi.set(__self__, "enable_nrr_for_outbound_decoded_messages", enable_nrr_for_outbound_decoded_messages)
pulumi.set(__self__, "enable_nrr_for_outbound_encoded_messages", enable_nrr_for_outbound_encoded_messages)
pulumi.set(__self__, "enable_nrr_for_outbound_mdn", enable_nrr_for_outbound_mdn)
pulumi.set(__self__, "override_group_signing_certificate", override_group_signing_certificate)
if encryption_certificate_name is not None:
pulumi.set(__self__, "encryption_certificate_name", encryption_certificate_name)
if sha2_algorithm_format is not None:
pulumi.set(__self__, "sha2_algorithm_format", sha2_algorithm_format)
if signing_certificate_name is not None:
pulumi.set(__self__, "signing_certificate_name", signing_certificate_name)
@property
@pulumi.getter(name="enableNRRForInboundDecodedMessages")
def enable_nrr_for_inbound_decoded_messages(self) -> bool:
"""
The value indicating whether to enable NRR for inbound decoded messages.
"""
return pulumi.get(self, "enable_nrr_for_inbound_decoded_messages")
@property
@pulumi.getter(name="enableNRRForInboundEncodedMessages")
def enable_nrr_for_inbound_encoded_messages(self) -> bool:
"""
The value indicating whether to enable NRR for inbound encoded messages.
"""
return pulumi.get(self, "enable_nrr_for_inbound_encoded_messages")
@property
@pulumi.getter(name="enableNRRForInboundMDN")
def enable_nrr_for_inbound_mdn(self) -> bool:
"""
The value indicating whether to enable NRR for inbound MDN.
"""
return pulumi.get(self, "enable_nrr_for_inbound_mdn")
@property
@pulumi.getter(name="enableNRRForOutboundDecodedMessages")
def enable_nrr_for_outbound_decoded_messages(self) -> bool:
"""
The value indicating whether to enable NRR for outbound decoded messages.
"""
return pulumi.get(self, "enable_nrr_for_outbound_decoded_messages")
@property
@pulumi.getter(name="enableNRRForOutboundEncodedMessages")
def enable_nrr_for_outbound_encoded_messages(self) -> bool:
"""
The value indicating whether to enable NRR for outbound encoded messages.
"""
return pulumi.get(self, "enable_nrr_for_outbound_encoded_messages")
@property
@pulumi.getter(name="enableNRRForOutboundMDN")
def enable_nrr_for_outbound_mdn(self) -> bool:
"""
The value indicating whether to enable NRR for outbound MDN.
"""
return pulumi.get(self, "enable_nrr_for_outbound_mdn")
@property
@pulumi.getter(name="overrideGroupSigningCertificate")
def override_group_signing_certificate(self) -> bool:
"""
        The value indicating whether to override the group signing certificate.
"""
return pulumi.get(self, "override_group_signing_certificate")
@property
@pulumi.getter(name="encryptionCertificateName")
def encryption_certificate_name(self) -> Optional[str]:
"""
The name of the encryption certificate.
"""
return pulumi.get(self, "encryption_certificate_name")
@property
@pulumi.getter(name="sha2AlgorithmFormat")
def sha2_algorithm_format(self) -> Optional[str]:
"""
The Sha2 algorithm format. Valid values are Sha2, ShaHashSize, ShaHyphenHashSize, Sha2UnderscoreHashSize.
"""
return pulumi.get(self, "sha2_algorithm_format")
@property
@pulumi.getter(name="signingCertificateName")
def signing_certificate_name(self) -> Optional[str]:
"""
The name of the signing certificate.
"""
return pulumi.get(self, "signing_certificate_name")
@pulumi.output_type
class AS2ValidationSettingsResponse(dict):
"""
The AS2 agreement validation settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "checkCertificateRevocationListOnReceive":
suggest = "check_certificate_revocation_list_on_receive"
elif key == "checkCertificateRevocationListOnSend":
suggest = "check_certificate_revocation_list_on_send"
elif key == "checkDuplicateMessage":
suggest = "check_duplicate_message"
elif key == "compressMessage":
suggest = "compress_message"
elif key == "encryptMessage":
suggest = "encrypt_message"
elif key == "encryptionAlgorithm":
suggest = "encryption_algorithm"
elif key == "interchangeDuplicatesValidityDays":
suggest = "interchange_duplicates_validity_days"
elif key == "overrideMessageProperties":
suggest = "override_message_properties"
elif key == "signMessage":
suggest = "sign_message"
elif key == "signingAlgorithm":
suggest = "signing_algorithm"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AS2ValidationSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AS2ValidationSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AS2ValidationSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
check_certificate_revocation_list_on_receive: bool,
check_certificate_revocation_list_on_send: bool,
check_duplicate_message: bool,
compress_message: bool,
encrypt_message: bool,
encryption_algorithm: str,
interchange_duplicates_validity_days: int,
override_message_properties: bool,
sign_message: bool,
signing_algorithm: Optional[str] = None):
"""
The AS2 agreement validation settings.
:param bool check_certificate_revocation_list_on_receive: The value indicating whether to check for certificate revocation list on receive.
:param bool check_certificate_revocation_list_on_send: The value indicating whether to check for certificate revocation list on send.
:param bool check_duplicate_message: The value indicating whether to check for duplicate message.
:param bool compress_message: The value indicating whether the message has to be compressed.
:param bool encrypt_message: The value indicating whether the message has to be encrypted.
:param str encryption_algorithm: The encryption algorithm.
:param int interchange_duplicates_validity_days: The number of days to look back for duplicate interchange.
        :param bool override_message_properties: The value indicating whether to override incoming message properties with those in the agreement.
:param bool sign_message: The value indicating whether the message has to be signed.
:param str signing_algorithm: The signing algorithm.
"""
pulumi.set(__self__, "check_certificate_revocation_list_on_receive", check_certificate_revocation_list_on_receive)
pulumi.set(__self__, "check_certificate_revocation_list_on_send", check_certificate_revocation_list_on_send)
pulumi.set(__self__, "check_duplicate_message", check_duplicate_message)
pulumi.set(__self__, "compress_message", compress_message)
pulumi.set(__self__, "encrypt_message", encrypt_message)
pulumi.set(__self__, "encryption_algorithm", encryption_algorithm)
pulumi.set(__self__, "interchange_duplicates_validity_days", interchange_duplicates_validity_days)
pulumi.set(__self__, "override_message_properties", override_message_properties)
pulumi.set(__self__, "sign_message", sign_message)
if signing_algorithm is not None:
pulumi.set(__self__, "signing_algorithm", signing_algorithm)
@property
@pulumi.getter(name="checkCertificateRevocationListOnReceive")
def check_certificate_revocation_list_on_receive(self) -> bool:
"""
The value indicating whether to check for certificate revocation list on receive.
"""
return pulumi.get(self, "check_certificate_revocation_list_on_receive")
@property
@pulumi.getter(name="checkCertificateRevocationListOnSend")
def check_certificate_revocation_list_on_send(self) -> bool:
"""
The value indicating whether to check for certificate revocation list on send.
"""
return pulumi.get(self, "check_certificate_revocation_list_on_send")
@property
@pulumi.getter(name="checkDuplicateMessage")
def check_duplicate_message(self) -> bool:
"""
The value indicating whether to check for duplicate message.
"""
return pulumi.get(self, "check_duplicate_message")
@property
@pulumi.getter(name="compressMessage")
def compress_message(self) -> bool:
"""
The value indicating whether the message has to be compressed.
"""
return pulumi.get(self, "compress_message")
@property
@pulumi.getter(name="encryptMessage")
def encrypt_message(self) -> bool:
"""
The value indicating whether the message has to be encrypted.
"""
return pulumi.get(self, "encrypt_message")
@property
@pulumi.getter(name="encryptionAlgorithm")
def encryption_algorithm(self) -> str:
"""
The encryption algorithm.
"""
return pulumi.get(self, "encryption_algorithm")
@property
@pulumi.getter(name="interchangeDuplicatesValidityDays")
def interchange_duplicates_validity_days(self) -> int:
"""
The number of days to look back for duplicate interchange.
"""
return pulumi.get(self, "interchange_duplicates_validity_days")
@property
@pulumi.getter(name="overrideMessageProperties")
def override_message_properties(self) -> bool:
"""
        The value indicating whether to override incoming message properties with those in the agreement.
"""
return pulumi.get(self, "override_message_properties")
@property
@pulumi.getter(name="signMessage")
def sign_message(self) -> bool:
"""
The value indicating whether the message has to be signed.
"""
return pulumi.get(self, "sign_message")
@property
@pulumi.getter(name="signingAlgorithm")
def signing_algorithm(self) -> Optional[str]:
"""
The signing algorithm.
"""
return pulumi.get(self, "signing_algorithm")
@pulumi.output_type
class AgreementContentResponse(dict):
"""
The integration account agreement content.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "aS2":
suggest = "a_s2"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
a_s2: Optional['outputs.AS2AgreementContentResponse'] = None,
edifact: Optional['outputs.EdifactAgreementContentResponse'] = None,
x12: Optional['outputs.X12AgreementContentResponse'] = None):
"""
The integration account agreement content.
:param 'AS2AgreementContentResponse' a_s2: The AS2 agreement content.
:param 'EdifactAgreementContentResponse' edifact: The EDIFACT agreement content.
:param 'X12AgreementContentResponse' x12: The X12 agreement content.
"""
if a_s2 is not None:
pulumi.set(__self__, "a_s2", a_s2)
if edifact is not None:
pulumi.set(__self__, "edifact", edifact)
if x12 is not None:
pulumi.set(__self__, "x12", x12)
@property
@pulumi.getter(name="aS2")
def a_s2(self) -> Optional['outputs.AS2AgreementContentResponse']:
"""
The AS2 agreement content.
"""
return pulumi.get(self, "a_s2")
@property
@pulumi.getter
def edifact(self) -> Optional['outputs.EdifactAgreementContentResponse']:
"""
The EDIFACT agreement content.
"""
return pulumi.get(self, "edifact")
@property
@pulumi.getter
def x12(self) -> Optional['outputs.X12AgreementContentResponse']:
"""
The X12 agreement content.
"""
return pulumi.get(self, "x12")
@pulumi.output_type
class AssemblyPropertiesResponse(dict):
"""
The assembly properties definition.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "assemblyName":
suggest = "assembly_name"
elif key == "assemblyCulture":
suggest = "assembly_culture"
elif key == "assemblyPublicKeyToken":
suggest = "assembly_public_key_token"
elif key == "assemblyVersion":
suggest = "assembly_version"
elif key == "changedTime":
suggest = "changed_time"
elif key == "contentLink":
suggest = "content_link"
elif key == "contentType":
suggest = "content_type"
elif key == "createdTime":
suggest = "created_time"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AssemblyPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AssemblyPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AssemblyPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
assembly_name: str,
assembly_culture: Optional[str] = None,
assembly_public_key_token: Optional[str] = None,
assembly_version: Optional[str] = None,
changed_time: Optional[str] = None,
content: Optional[Any] = None,
content_link: Optional['outputs.ContentLinkResponse'] = None,
content_type: Optional[str] = None,
created_time: Optional[str] = None,
metadata: Optional[Any] = None):
"""
The assembly properties definition.
:param str assembly_name: The assembly name.
:param str assembly_culture: The assembly culture.
:param str assembly_public_key_token: The assembly public key token.
:param str assembly_version: The assembly version.
        :param str changed_time: The artifact changed time.
        :param Any content: The assembly content.
        :param 'ContentLinkResponse' content_link: The content link.
        :param str content_type: The content type.
        :param str created_time: The artifact creation time.
        :param Any metadata: The metadata.
"""
pulumi.set(__self__, "assembly_name", assembly_name)
if assembly_culture is not None:
pulumi.set(__self__, "assembly_culture", assembly_culture)
if assembly_public_key_token is not None:
pulumi.set(__self__, "assembly_public_key_token", assembly_public_key_token)
if assembly_version is not None:
pulumi.set(__self__, "assembly_version", assembly_version)
if changed_time is not None:
pulumi.set(__self__, "changed_time", changed_time)
if content is not None:
pulumi.set(__self__, "content", content)
if content_link is not None:
pulumi.set(__self__, "content_link", content_link)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
@property
@pulumi.getter(name="assemblyName")
def assembly_name(self) -> str:
"""
The assembly name.
"""
return pulumi.get(self, "assembly_name")
@property
@pulumi.getter(name="assemblyCulture")
def assembly_culture(self) -> Optional[str]:
"""
The assembly culture.
"""
return pulumi.get(self, "assembly_culture")
@property
@pulumi.getter(name="assemblyPublicKeyToken")
def assembly_public_key_token(self) -> Optional[str]:
"""
The assembly public key token.
"""
return pulumi.get(self, "assembly_public_key_token")
@property
@pulumi.getter(name="assemblyVersion")
def assembly_version(self) -> Optional[str]:
"""
The assembly version.
"""
return pulumi.get(self, "assembly_version")
@property
@pulumi.getter(name="changedTime")
def changed_time(self) -> Optional[str]:
"""
The artifact changed time.
"""
return pulumi.get(self, "changed_time")
    @property
    @pulumi.getter
    def content(self) -> Optional[Any]:
        """
        The assembly content.
        """
        return pulumi.get(self, "content")
@property
@pulumi.getter(name="contentLink")
def content_link(self) -> Optional['outputs.ContentLinkResponse']:
"""
The content link.
"""
return pulumi.get(self, "content_link")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[str]:
"""
The content type.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[str]:
"""
The artifact creation time.
"""
return pulumi.get(self, "created_time")
    @property
    @pulumi.getter
    def metadata(self) -> Optional[Any]:
        """
        The metadata.
        """
        return pulumi.get(self, "metadata")
@pulumi.output_type
class AzureResourceErrorInfoResponse(dict):
"""
    The Azure resource error info.
"""
def __init__(__self__, *,
code: str,
message: str,
details: Optional[Sequence['outputs.AzureResourceErrorInfoResponse']] = None):
"""
        The Azure resource error info.
:param str code: The error code.
:param str message: The error message.
:param Sequence['AzureResourceErrorInfoResponse'] details: The error details.
"""
pulumi.set(__self__, "code", code)
pulumi.set(__self__, "message", message)
if details is not None:
pulumi.set(__self__, "details", details)
@property
@pulumi.getter
def code(self) -> str:
"""
The error code.
"""
return pulumi.get(self, "code")
@property
@pulumi.getter
def message(self) -> str:
"""
The error message.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter
def details(self) -> Optional[Sequence['outputs.AzureResourceErrorInfoResponse']]:
"""
The error details.
"""
return pulumi.get(self, "details")
@pulumi.output_type
class B2BPartnerContentResponse(dict):
"""
The B2B partner content.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "businessIdentities":
suggest = "business_identities"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in B2BPartnerContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
B2BPartnerContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
B2BPartnerContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
business_identities: Optional[Sequence['outputs.BusinessIdentityResponse']] = None):
"""
The B2B partner content.
:param Sequence['BusinessIdentityResponse'] business_identities: The list of partner business identities.
"""
if business_identities is not None:
pulumi.set(__self__, "business_identities", business_identities)
@property
@pulumi.getter(name="businessIdentities")
def business_identities(self) -> Optional[Sequence['outputs.BusinessIdentityResponse']]:
"""
The list of partner business identities.
"""
return pulumi.get(self, "business_identities")
@pulumi.output_type
class BatchConfigurationPropertiesResponse(dict):
"""
The batch configuration properties definition.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "batchGroupName":
suggest = "batch_group_name"
elif key == "releaseCriteria":
suggest = "release_criteria"
elif key == "changedTime":
suggest = "changed_time"
elif key == "createdTime":
suggest = "created_time"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BatchConfigurationPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BatchConfigurationPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BatchConfigurationPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
batch_group_name: str,
release_criteria: 'outputs.BatchReleaseCriteriaResponse',
changed_time: Optional[str] = None,
created_time: Optional[str] = None,
metadata: Optional[Any] = None):
"""
The batch configuration properties definition.
:param str batch_group_name: The name of the batch group.
:param 'BatchReleaseCriteriaResponse' release_criteria: The batch release criteria.
:param str changed_time: The artifact changed time.
        :param str created_time: The artifact creation time.
        :param Any metadata: The metadata.
"""
pulumi.set(__self__, "batch_group_name", batch_group_name)
pulumi.set(__self__, "release_criteria", release_criteria)
if changed_time is not None:
pulumi.set(__self__, "changed_time", changed_time)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
@property
@pulumi.getter(name="batchGroupName")
def batch_group_name(self) -> str:
"""
The name of the batch group.
"""
return pulumi.get(self, "batch_group_name")
@property
@pulumi.getter(name="releaseCriteria")
def release_criteria(self) -> 'outputs.BatchReleaseCriteriaResponse':
"""
The batch release criteria.
"""
return pulumi.get(self, "release_criteria")
@property
@pulumi.getter(name="changedTime")
def changed_time(self) -> Optional[str]:
"""
The artifact changed time.
"""
return pulumi.get(self, "changed_time")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[str]:
"""
The artifact creation time.
"""
return pulumi.get(self, "created_time")
    @property
    @pulumi.getter
    def metadata(self) -> Optional[Any]:
        """
        The metadata.
        """
        return pulumi.get(self, "metadata")
@pulumi.output_type
class BatchReleaseCriteriaResponse(dict):
"""
The batch release criteria.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "batchSize":
suggest = "batch_size"
elif key == "messageCount":
suggest = "message_count"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BatchReleaseCriteriaResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BatchReleaseCriteriaResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BatchReleaseCriteriaResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
batch_size: Optional[int] = None,
message_count: Optional[int] = None,
recurrence: Optional['outputs.WorkflowTriggerRecurrenceResponse'] = None):
"""
The batch release criteria.
:param int batch_size: The batch size in bytes.
:param int message_count: The message count.
:param 'WorkflowTriggerRecurrenceResponse' recurrence: The recurrence.
"""
if batch_size is not None:
pulumi.set(__self__, "batch_size", batch_size)
if message_count is not None:
pulumi.set(__self__, "message_count", message_count)
if recurrence is not None:
pulumi.set(__self__, "recurrence", recurrence)
@property
@pulumi.getter(name="batchSize")
def batch_size(self) -> Optional[int]:
"""
The batch size in bytes.
"""
return pulumi.get(self, "batch_size")
@property
@pulumi.getter(name="messageCount")
def message_count(self) -> Optional[int]:
"""
The message count.
"""
return pulumi.get(self, "message_count")
@property
@pulumi.getter
def recurrence(self) -> Optional['outputs.WorkflowTriggerRecurrenceResponse']:
"""
The recurrence.
"""
return pulumi.get(self, "recurrence")
@pulumi.output_type
class BusinessIdentityResponse(dict):
"""
The integration account partner's business identity.
"""
def __init__(__self__, *,
qualifier: str,
value: str):
"""
The integration account partner's business identity.
        :param str qualifier: The business identity qualifier, e.g. as2identity, ZZ, ZZZ, 31, 32.
:param str value: The user defined business identity value.
"""
pulumi.set(__self__, "qualifier", qualifier)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def qualifier(self) -> str:
"""
        The business identity qualifier, e.g. as2identity, ZZ, ZZZ, 31, 32.
"""
return pulumi.get(self, "qualifier")
@property
@pulumi.getter
def value(self) -> str:
"""
The user defined business identity value.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ContentHashResponse(dict):
"""
The content hash.
"""
def __init__(__self__, *,
algorithm: Optional[str] = None,
value: Optional[str] = None):
"""
The content hash.
:param str algorithm: The algorithm of the content hash.
:param str value: The value of the content hash.
"""
if algorithm is not None:
pulumi.set(__self__, "algorithm", algorithm)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def algorithm(self) -> Optional[str]:
"""
The algorithm of the content hash.
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
The value of the content hash.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ContentLinkResponse(dict):
"""
The content link.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "contentHash":
suggest = "content_hash"
elif key == "contentSize":
suggest = "content_size"
elif key == "contentVersion":
suggest = "content_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ContentLinkResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ContentLinkResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ContentLinkResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
content_hash: Optional['outputs.ContentHashResponse'] = None,
content_size: Optional[float] = None,
content_version: Optional[str] = None,
metadata: Optional[Any] = None,
uri: Optional[str] = None):
"""
The content link.
:param 'ContentHashResponse' content_hash: The content hash.
:param float content_size: The content size.
:param str content_version: The content version.
:param Any metadata: The metadata.
:param str uri: The content link URI.
"""
if content_hash is not None:
pulumi.set(__self__, "content_hash", content_hash)
if content_size is not None:
pulumi.set(__self__, "content_size", content_size)
if content_version is not None:
pulumi.set(__self__, "content_version", content_version)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="contentHash")
def content_hash(self) -> Optional['outputs.ContentHashResponse']:
"""
The content hash.
"""
return pulumi.get(self, "content_hash")
@property
@pulumi.getter(name="contentSize")
def content_size(self) -> Optional[float]:
"""
The content size.
"""
return pulumi.get(self, "content_size")
@property
@pulumi.getter(name="contentVersion")
def content_version(self) -> Optional[str]:
"""
The content version.
"""
return pulumi.get(self, "content_version")
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The content link URI.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class EdifactAcknowledgementSettingsResponse(dict):
"""
The Edifact agreement acknowledgement settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementControlNumberLowerBound":
suggest = "acknowledgement_control_number_lower_bound"
elif key == "acknowledgementControlNumberUpperBound":
suggest = "acknowledgement_control_number_upper_bound"
elif key == "batchFunctionalAcknowledgements":
suggest = "batch_functional_acknowledgements"
elif key == "batchTechnicalAcknowledgements":
suggest = "batch_technical_acknowledgements"
elif key == "needFunctionalAcknowledgement":
suggest = "need_functional_acknowledgement"
elif key == "needLoopForValidMessages":
suggest = "need_loop_for_valid_messages"
elif key == "needTechnicalAcknowledgement":
suggest = "need_technical_acknowledgement"
elif key == "rolloverAcknowledgementControlNumber":
suggest = "rollover_acknowledgement_control_number"
elif key == "sendSynchronousAcknowledgement":
suggest = "send_synchronous_acknowledgement"
elif key == "acknowledgementControlNumberPrefix":
suggest = "acknowledgement_control_number_prefix"
elif key == "acknowledgementControlNumberSuffix":
suggest = "acknowledgement_control_number_suffix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactAcknowledgementSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactAcknowledgementSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactAcknowledgementSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_control_number_lower_bound: int,
acknowledgement_control_number_upper_bound: int,
batch_functional_acknowledgements: bool,
batch_technical_acknowledgements: bool,
need_functional_acknowledgement: bool,
need_loop_for_valid_messages: bool,
need_technical_acknowledgement: bool,
rollover_acknowledgement_control_number: bool,
send_synchronous_acknowledgement: bool,
acknowledgement_control_number_prefix: Optional[str] = None,
acknowledgement_control_number_suffix: Optional[str] = None):
"""
The Edifact agreement acknowledgement settings.
:param int acknowledgement_control_number_lower_bound: The acknowledgement control number lower bound.
:param int acknowledgement_control_number_upper_bound: The acknowledgement control number upper bound.
:param bool batch_functional_acknowledgements: The value indicating whether to batch functional acknowledgements.
:param bool batch_technical_acknowledgements: The value indicating whether to batch the technical acknowledgements.
:param bool need_functional_acknowledgement: The value indicating whether functional acknowledgement is needed.
:param bool need_loop_for_valid_messages: The value indicating whether a loop is needed for valid messages.
:param bool need_technical_acknowledgement: The value indicating whether technical acknowledgement is needed.
        :param bool rollover_acknowledgement_control_number: The value indicating whether to roll over the acknowledgement control number.
        :param bool send_synchronous_acknowledgement: The value indicating whether to send a synchronous acknowledgement.
:param str acknowledgement_control_number_prefix: The acknowledgement control number prefix.
:param str acknowledgement_control_number_suffix: The acknowledgement control number suffix.
"""
pulumi.set(__self__, "acknowledgement_control_number_lower_bound", acknowledgement_control_number_lower_bound)
pulumi.set(__self__, "acknowledgement_control_number_upper_bound", acknowledgement_control_number_upper_bound)
pulumi.set(__self__, "batch_functional_acknowledgements", batch_functional_acknowledgements)
pulumi.set(__self__, "batch_technical_acknowledgements", batch_technical_acknowledgements)
pulumi.set(__self__, "need_functional_acknowledgement", need_functional_acknowledgement)
pulumi.set(__self__, "need_loop_for_valid_messages", need_loop_for_valid_messages)
pulumi.set(__self__, "need_technical_acknowledgement", need_technical_acknowledgement)
pulumi.set(__self__, "rollover_acknowledgement_control_number", rollover_acknowledgement_control_number)
pulumi.set(__self__, "send_synchronous_acknowledgement", send_synchronous_acknowledgement)
if acknowledgement_control_number_prefix is not None:
pulumi.set(__self__, "acknowledgement_control_number_prefix", acknowledgement_control_number_prefix)
if acknowledgement_control_number_suffix is not None:
pulumi.set(__self__, "acknowledgement_control_number_suffix", acknowledgement_control_number_suffix)
@property
@pulumi.getter(name="acknowledgementControlNumberLowerBound")
def acknowledgement_control_number_lower_bound(self) -> int:
"""
The acknowledgement control number lower bound.
"""
return pulumi.get(self, "acknowledgement_control_number_lower_bound")
@property
@pulumi.getter(name="acknowledgementControlNumberUpperBound")
def acknowledgement_control_number_upper_bound(self) -> int:
"""
The acknowledgement control number upper bound.
"""
return pulumi.get(self, "acknowledgement_control_number_upper_bound")
@property
@pulumi.getter(name="batchFunctionalAcknowledgements")
def batch_functional_acknowledgements(self) -> bool:
"""
The value indicating whether to batch functional acknowledgements.
"""
return pulumi.get(self, "batch_functional_acknowledgements")
@property
@pulumi.getter(name="batchTechnicalAcknowledgements")
def batch_technical_acknowledgements(self) -> bool:
"""
The value indicating whether to batch the technical acknowledgements.
"""
return pulumi.get(self, "batch_technical_acknowledgements")
@property
@pulumi.getter(name="needFunctionalAcknowledgement")
def need_functional_acknowledgement(self) -> bool:
"""
The value indicating whether functional acknowledgement is needed.
"""
return pulumi.get(self, "need_functional_acknowledgement")
@property
@pulumi.getter(name="needLoopForValidMessages")
def need_loop_for_valid_messages(self) -> bool:
"""
The value indicating whether a loop is needed for valid messages.
"""
return pulumi.get(self, "need_loop_for_valid_messages")
@property
@pulumi.getter(name="needTechnicalAcknowledgement")
def need_technical_acknowledgement(self) -> bool:
"""
The value indicating whether technical acknowledgement is needed.
"""
return pulumi.get(self, "need_technical_acknowledgement")
@property
@pulumi.getter(name="rolloverAcknowledgementControlNumber")
def rollover_acknowledgement_control_number(self) -> bool:
"""
        The value indicating whether to roll over the acknowledgement control number.
"""
return pulumi.get(self, "rollover_acknowledgement_control_number")
@property
@pulumi.getter(name="sendSynchronousAcknowledgement")
def send_synchronous_acknowledgement(self) -> bool:
"""
        The value indicating whether to send a synchronous acknowledgement.
"""
return pulumi.get(self, "send_synchronous_acknowledgement")
@property
@pulumi.getter(name="acknowledgementControlNumberPrefix")
def acknowledgement_control_number_prefix(self) -> Optional[str]:
"""
The acknowledgement control number prefix.
"""
return pulumi.get(self, "acknowledgement_control_number_prefix")
@property
@pulumi.getter(name="acknowledgementControlNumberSuffix")
def acknowledgement_control_number_suffix(self) -> Optional[str]:
"""
The acknowledgement control number suffix.
"""
return pulumi.get(self, "acknowledgement_control_number_suffix")
@pulumi.output_type
class EdifactAgreementContentResponse(dict):
"""
The Edifact agreement content.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "receiveAgreement":
suggest = "receive_agreement"
elif key == "sendAgreement":
suggest = "send_agreement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactAgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactAgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactAgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
receive_agreement: 'outputs.EdifactOneWayAgreementResponse',
send_agreement: 'outputs.EdifactOneWayAgreementResponse'):
"""
The Edifact agreement content.
:param 'EdifactOneWayAgreementResponse' receive_agreement: The EDIFACT one-way receive agreement.
:param 'EdifactOneWayAgreementResponse' send_agreement: The EDIFACT one-way send agreement.
"""
pulumi.set(__self__, "receive_agreement", receive_agreement)
pulumi.set(__self__, "send_agreement", send_agreement)
@property
@pulumi.getter(name="receiveAgreement")
def receive_agreement(self) -> 'outputs.EdifactOneWayAgreementResponse':
"""
The EDIFACT one-way receive agreement.
"""
return pulumi.get(self, "receive_agreement")
@property
@pulumi.getter(name="sendAgreement")
def send_agreement(self) -> 'outputs.EdifactOneWayAgreementResponse':
"""
The EDIFACT one-way send agreement.
"""
return pulumi.get(self, "send_agreement")
@pulumi.output_type
class EdifactDelimiterOverrideResponse(dict):
"""
The Edifact delimiter override settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "decimalPointIndicator":
suggest = "decimal_point_indicator"
elif key == "releaseIndicator":
suggest = "release_indicator"
elif key == "repetitionSeparator":
suggest = "repetition_separator"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
elif key == "messageAssociationAssignedCode":
suggest = "message_association_assigned_code"
elif key == "messageId":
suggest = "message_id"
elif key == "messageRelease":
suggest = "message_release"
elif key == "messageVersion":
suggest = "message_version"
elif key == "targetNamespace":
suggest = "target_namespace"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactDelimiterOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactDelimiterOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactDelimiterOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
component_separator: int,
data_element_separator: int,
decimal_point_indicator: str,
release_indicator: int,
repetition_separator: int,
segment_terminator: int,
segment_terminator_suffix: str,
message_association_assigned_code: Optional[str] = None,
message_id: Optional[str] = None,
message_release: Optional[str] = None,
message_version: Optional[str] = None,
target_namespace: Optional[str] = None):
"""
The Edifact delimiter override settings.
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
:param str decimal_point_indicator: The decimal point indicator.
:param int release_indicator: The release indicator.
:param int repetition_separator: The repetition separator.
:param int segment_terminator: The segment terminator.
:param str segment_terminator_suffix: The segment terminator suffix.
:param str message_association_assigned_code: The message association assigned code.
:param str message_id: The message id.
:param str message_release: The message release.
:param str message_version: The message version.
:param str target_namespace: The target namespace to which these delimiter settings apply.
"""
pulumi.set(__self__, "component_separator", component_separator)
pulumi.set(__self__, "data_element_separator", data_element_separator)
pulumi.set(__self__, "decimal_point_indicator", decimal_point_indicator)
pulumi.set(__self__, "release_indicator", release_indicator)
pulumi.set(__self__, "repetition_separator", repetition_separator)
pulumi.set(__self__, "segment_terminator", segment_terminator)
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
if message_association_assigned_code is not None:
pulumi.set(__self__, "message_association_assigned_code", message_association_assigned_code)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if message_release is not None:
pulumi.set(__self__, "message_release", message_release)
if message_version is not None:
pulumi.set(__self__, "message_version", message_version)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> int:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> int:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="decimalPointIndicator")
def decimal_point_indicator(self) -> str:
"""
The decimal point indicator.
"""
return pulumi.get(self, "decimal_point_indicator")
@property
@pulumi.getter(name="releaseIndicator")
def release_indicator(self) -> int:
"""
The release indicator.
"""
return pulumi.get(self, "release_indicator")
@property
@pulumi.getter(name="repetitionSeparator")
def repetition_separator(self) -> int:
"""
The repetition separator.
"""
return pulumi.get(self, "repetition_separator")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> int:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> str:
"""
The segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@property
@pulumi.getter(name="messageAssociationAssignedCode")
def message_association_assigned_code(self) -> Optional[str]:
"""
The message association assigned code.
"""
return pulumi.get(self, "message_association_assigned_code")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> Optional[str]:
"""
The message release.
"""
return pulumi.get(self, "message_release")
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> Optional[str]:
"""
The message version.
"""
return pulumi.get(self, "message_version")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[str]:
"""
The target namespace to which these delimiter settings apply.
"""
return pulumi.get(self, "target_namespace")
@pulumi.output_type
class EdifactEnvelopeOverrideResponse(dict):
"""
The Edifact envelope override settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "applicationPassword":
suggest = "application_password"
elif key == "associationAssignedCode":
suggest = "association_assigned_code"
elif key == "controllingAgencyCode":
suggest = "controlling_agency_code"
elif key == "functionalGroupId":
suggest = "functional_group_id"
elif key == "groupHeaderMessageRelease":
suggest = "group_header_message_release"
elif key == "groupHeaderMessageVersion":
suggest = "group_header_message_version"
elif key == "messageAssociationAssignedCode":
suggest = "message_association_assigned_code"
elif key == "messageId":
suggest = "message_id"
elif key == "messageRelease":
suggest = "message_release"
elif key == "messageVersion":
suggest = "message_version"
elif key == "receiverApplicationId":
suggest = "receiver_application_id"
elif key == "receiverApplicationQualifier":
suggest = "receiver_application_qualifier"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "senderApplicationQualifier":
suggest = "sender_application_qualifier"
elif key == "targetNamespace":
suggest = "target_namespace"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactEnvelopeOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactEnvelopeOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactEnvelopeOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
application_password: Optional[str] = None,
association_assigned_code: Optional[str] = None,
controlling_agency_code: Optional[str] = None,
functional_group_id: Optional[str] = None,
group_header_message_release: Optional[str] = None,
group_header_message_version: Optional[str] = None,
message_association_assigned_code: Optional[str] = None,
message_id: Optional[str] = None,
message_release: Optional[str] = None,
message_version: Optional[str] = None,
receiver_application_id: Optional[str] = None,
receiver_application_qualifier: Optional[str] = None,
sender_application_id: Optional[str] = None,
sender_application_qualifier: Optional[str] = None,
target_namespace: Optional[str] = None):
"""
The Edifact envelope override settings.
:param str application_password: The application password.
:param str association_assigned_code: The association assigned code.
:param str controlling_agency_code: The controlling agency code.
:param str functional_group_id: The functional group id.
:param str group_header_message_release: The group header message release.
:param str group_header_message_version: The group header message version.
:param str message_association_assigned_code: The message association assigned code.
:param str message_id: The message id to which these envelope settings apply.
:param str message_release: The message release version to which these envelope settings apply.
:param str message_version: The message version to which these envelope settings apply.
:param str receiver_application_id: The receiver application id.
:param str receiver_application_qualifier: The receiver application qualifier.
:param str sender_application_id: The sender application id.
:param str sender_application_qualifier: The sender application qualifier.
:param str target_namespace: The target namespace to which these envelope settings apply.
"""
if application_password is not None:
pulumi.set(__self__, "application_password", application_password)
if association_assigned_code is not None:
pulumi.set(__self__, "association_assigned_code", association_assigned_code)
if controlling_agency_code is not None:
pulumi.set(__self__, "controlling_agency_code", controlling_agency_code)
if functional_group_id is not None:
pulumi.set(__self__, "functional_group_id", functional_group_id)
if group_header_message_release is not None:
pulumi.set(__self__, "group_header_message_release", group_header_message_release)
if group_header_message_version is not None:
pulumi.set(__self__, "group_header_message_version", group_header_message_version)
if message_association_assigned_code is not None:
pulumi.set(__self__, "message_association_assigned_code", message_association_assigned_code)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if message_release is not None:
pulumi.set(__self__, "message_release", message_release)
if message_version is not None:
pulumi.set(__self__, "message_version", message_version)
if receiver_application_id is not None:
pulumi.set(__self__, "receiver_application_id", receiver_application_id)
if receiver_application_qualifier is not None:
pulumi.set(__self__, "receiver_application_qualifier", receiver_application_qualifier)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
if sender_application_qualifier is not None:
pulumi.set(__self__, "sender_application_qualifier", sender_application_qualifier)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="applicationPassword")
def application_password(self) -> Optional[str]:
"""
The application password.
"""
return pulumi.get(self, "application_password")
@property
@pulumi.getter(name="associationAssignedCode")
def association_assigned_code(self) -> Optional[str]:
"""
The association assigned code.
"""
return pulumi.get(self, "association_assigned_code")
@property
@pulumi.getter(name="controllingAgencyCode")
def controlling_agency_code(self) -> Optional[str]:
"""
The controlling agency code.
"""
return pulumi.get(self, "controlling_agency_code")
@property
@pulumi.getter(name="functionalGroupId")
def functional_group_id(self) -> Optional[str]:
"""
The functional group id.
"""
return pulumi.get(self, "functional_group_id")
@property
@pulumi.getter(name="groupHeaderMessageRelease")
def group_header_message_release(self) -> Optional[str]:
"""
The group header message release.
"""
return pulumi.get(self, "group_header_message_release")
@property
@pulumi.getter(name="groupHeaderMessageVersion")
def group_header_message_version(self) -> Optional[str]:
"""
The group header message version.
"""
return pulumi.get(self, "group_header_message_version")
@property
@pulumi.getter(name="messageAssociationAssignedCode")
def message_association_assigned_code(self) -> Optional[str]:
"""
The message association assigned code.
"""
return pulumi.get(self, "message_association_assigned_code")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id to which these envelope settings apply.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> Optional[str]:
"""
The message release version to which these envelope settings apply.
"""
return pulumi.get(self, "message_release")
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> Optional[str]:
"""
The message version to which these envelope settings apply.
"""
return pulumi.get(self, "message_version")
@property
@pulumi.getter(name="receiverApplicationId")
def receiver_application_id(self) -> Optional[str]:
"""
The receiver application id.
"""
return pulumi.get(self, "receiver_application_id")
@property
@pulumi.getter(name="receiverApplicationQualifier")
def receiver_application_qualifier(self) -> Optional[str]:
"""
The receiver application qualifier.
"""
return pulumi.get(self, "receiver_application_qualifier")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="senderApplicationQualifier")
def sender_application_qualifier(self) -> Optional[str]:
"""
The sender application qualifier.
"""
return pulumi.get(self, "sender_application_qualifier")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[str]:
"""
The target namespace to which these envelope settings apply.
"""
return pulumi.get(self, "target_namespace")
@pulumi.output_type
class EdifactEnvelopeSettingsResponse(dict):
"""
The Edifact agreement envelope settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "applyDelimiterStringAdvice":
suggest = "apply_delimiter_string_advice"
elif key == "createGroupingSegments":
suggest = "create_grouping_segments"
elif key == "enableDefaultGroupHeaders":
suggest = "enable_default_group_headers"
elif key == "groupControlNumberLowerBound":
suggest = "group_control_number_lower_bound"
elif key == "groupControlNumberUpperBound":
suggest = "group_control_number_upper_bound"
elif key == "interchangeControlNumberLowerBound":
suggest = "interchange_control_number_lower_bound"
elif key == "interchangeControlNumberUpperBound":
suggest = "interchange_control_number_upper_bound"
elif key == "isTestInterchange":
suggest = "is_test_interchange"
elif key == "overwriteExistingTransactionSetControlNumber":
suggest = "overwrite_existing_transaction_set_control_number"
elif key == "rolloverGroupControlNumber":
suggest = "rollover_group_control_number"
elif key == "rolloverInterchangeControlNumber":
suggest = "rollover_interchange_control_number"
elif key == "rolloverTransactionSetControlNumber":
suggest = "rollover_transaction_set_control_number"
elif key == "transactionSetControlNumberLowerBound":
suggest = "transaction_set_control_number_lower_bound"
elif key == "transactionSetControlNumberUpperBound":
suggest = "transaction_set_control_number_upper_bound"
elif key == "applicationReferenceId":
suggest = "application_reference_id"
elif key == "communicationAgreementId":
suggest = "communication_agreement_id"
elif key == "functionalGroupId":
suggest = "functional_group_id"
elif key == "groupApplicationPassword":
suggest = "group_application_password"
elif key == "groupApplicationReceiverId":
suggest = "group_application_receiver_id"
elif key == "groupApplicationReceiverQualifier":
suggest = "group_application_receiver_qualifier"
elif key == "groupApplicationSenderId":
suggest = "group_application_sender_id"
elif key == "groupApplicationSenderQualifier":
suggest = "group_application_sender_qualifier"
elif key == "groupAssociationAssignedCode":
suggest = "group_association_assigned_code"
elif key == "groupControlNumberPrefix":
suggest = "group_control_number_prefix"
elif key == "groupControlNumberSuffix":
suggest = "group_control_number_suffix"
elif key == "groupControllingAgencyCode":
suggest = "group_controlling_agency_code"
elif key == "groupMessageRelease":
suggest = "group_message_release"
elif key == "groupMessageVersion":
suggest = "group_message_version"
elif key == "interchangeControlNumberPrefix":
suggest = "interchange_control_number_prefix"
elif key == "interchangeControlNumberSuffix":
suggest = "interchange_control_number_suffix"
elif key == "processingPriorityCode":
suggest = "processing_priority_code"
elif key == "receiverInternalIdentification":
suggest = "receiver_internal_identification"
elif key == "receiverInternalSubIdentification":
suggest = "receiver_internal_sub_identification"
elif key == "receiverReverseRoutingAddress":
suggest = "receiver_reverse_routing_address"
elif key == "recipientReferencePasswordQualifier":
suggest = "recipient_reference_password_qualifier"
elif key == "recipientReferencePasswordValue":
suggest = "recipient_reference_password_value"
elif key == "senderInternalIdentification":
suggest = "sender_internal_identification"
elif key == "senderInternalSubIdentification":
suggest = "sender_internal_sub_identification"
elif key == "senderReverseRoutingAddress":
suggest = "sender_reverse_routing_address"
elif key == "transactionSetControlNumberPrefix":
suggest = "transaction_set_control_number_prefix"
elif key == "transactionSetControlNumberSuffix":
suggest = "transaction_set_control_number_suffix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactEnvelopeSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactEnvelopeSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactEnvelopeSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
apply_delimiter_string_advice: bool,
create_grouping_segments: bool,
enable_default_group_headers: bool,
group_control_number_lower_bound: float,
group_control_number_upper_bound: float,
interchange_control_number_lower_bound: float,
interchange_control_number_upper_bound: float,
is_test_interchange: bool,
overwrite_existing_transaction_set_control_number: bool,
rollover_group_control_number: bool,
rollover_interchange_control_number: bool,
rollover_transaction_set_control_number: bool,
transaction_set_control_number_lower_bound: float,
transaction_set_control_number_upper_bound: float,
application_reference_id: Optional[str] = None,
communication_agreement_id: Optional[str] = None,
functional_group_id: Optional[str] = None,
group_application_password: Optional[str] = None,
group_application_receiver_id: Optional[str] = None,
group_application_receiver_qualifier: Optional[str] = None,
group_application_sender_id: Optional[str] = None,
group_application_sender_qualifier: Optional[str] = None,
group_association_assigned_code: Optional[str] = None,
group_control_number_prefix: Optional[str] = None,
group_control_number_suffix: Optional[str] = None,
group_controlling_agency_code: Optional[str] = None,
group_message_release: Optional[str] = None,
group_message_version: Optional[str] = None,
interchange_control_number_prefix: Optional[str] = None,
interchange_control_number_suffix: Optional[str] = None,
processing_priority_code: Optional[str] = None,
receiver_internal_identification: Optional[str] = None,
receiver_internal_sub_identification: Optional[str] = None,
receiver_reverse_routing_address: Optional[str] = None,
recipient_reference_password_qualifier: Optional[str] = None,
recipient_reference_password_value: Optional[str] = None,
sender_internal_identification: Optional[str] = None,
sender_internal_sub_identification: Optional[str] = None,
sender_reverse_routing_address: Optional[str] = None,
transaction_set_control_number_prefix: Optional[str] = None,
transaction_set_control_number_suffix: Optional[str] = None):
"""
The Edifact agreement envelope settings.
:param bool apply_delimiter_string_advice: The value indicating whether to apply delimiter string advice.
:param bool create_grouping_segments: The value indicating whether to create grouping segments.
:param bool enable_default_group_headers: The value indicating whether to enable default group headers.
:param float group_control_number_lower_bound: The group control number lower bound.
:param float group_control_number_upper_bound: The group control number upper bound.
:param float interchange_control_number_lower_bound: The interchange control number lower bound.
:param float interchange_control_number_upper_bound: The interchange control number upper bound.
:param bool is_test_interchange: The value indicating whether the message is a test interchange.
:param bool overwrite_existing_transaction_set_control_number: The value indicating whether to overwrite the existing transaction set control number.
:param bool rollover_group_control_number: The value indicating whether to roll over the group control number.
:param bool rollover_interchange_control_number: The value indicating whether to roll over the interchange control number.
:param bool rollover_transaction_set_control_number: The value indicating whether to roll over the transaction set control number.
:param float transaction_set_control_number_lower_bound: The transaction set control number lower bound.
:param float transaction_set_control_number_upper_bound: The transaction set control number upper bound.
:param str application_reference_id: The application reference id.
:param str communication_agreement_id: The communication agreement id.
:param str functional_group_id: The functional group id.
:param str group_application_password: The group application password.
:param str group_application_receiver_id: The group application receiver id.
:param str group_application_receiver_qualifier: The group application receiver qualifier.
:param str group_application_sender_id: The group application sender id.
:param str group_application_sender_qualifier: The group application sender qualifier.
:param str group_association_assigned_code: The group association assigned code.
:param str group_control_number_prefix: The group control number prefix.
:param str group_control_number_suffix: The group control number suffix.
:param str group_controlling_agency_code: The group controlling agency code.
:param str group_message_release: The group message release.
:param str group_message_version: The group message version.
:param str interchange_control_number_prefix: The interchange control number prefix.
:param str interchange_control_number_suffix: The interchange control number suffix.
:param str processing_priority_code: The processing priority code.
:param str receiver_internal_identification: The receiver internal identification.
:param str receiver_internal_sub_identification: The receiver internal sub identification.
:param str receiver_reverse_routing_address: The receiver reverse routing address.
:param str recipient_reference_password_qualifier: The recipient reference password qualifier.
:param str recipient_reference_password_value: The recipient reference password value.
:param str sender_internal_identification: The sender internal identification.
:param str sender_internal_sub_identification: The sender internal sub identification.
:param str sender_reverse_routing_address: The sender reverse routing address.
:param str transaction_set_control_number_prefix: The transaction set control number prefix.
:param str transaction_set_control_number_suffix: The transaction set control number suffix.
"""
pulumi.set(__self__, "apply_delimiter_string_advice", apply_delimiter_string_advice)
pulumi.set(__self__, "create_grouping_segments", create_grouping_segments)
pulumi.set(__self__, "enable_default_group_headers", enable_default_group_headers)
pulumi.set(__self__, "group_control_number_lower_bound", group_control_number_lower_bound)
pulumi.set(__self__, "group_control_number_upper_bound", group_control_number_upper_bound)
pulumi.set(__self__, "interchange_control_number_lower_bound", interchange_control_number_lower_bound)
pulumi.set(__self__, "interchange_control_number_upper_bound", interchange_control_number_upper_bound)
pulumi.set(__self__, "is_test_interchange", is_test_interchange)
pulumi.set(__self__, "overwrite_existing_transaction_set_control_number", overwrite_existing_transaction_set_control_number)
pulumi.set(__self__, "rollover_group_control_number", rollover_group_control_number)
pulumi.set(__self__, "rollover_interchange_control_number", rollover_interchange_control_number)
pulumi.set(__self__, "rollover_transaction_set_control_number", rollover_transaction_set_control_number)
pulumi.set(__self__, "transaction_set_control_number_lower_bound", transaction_set_control_number_lower_bound)
pulumi.set(__self__, "transaction_set_control_number_upper_bound", transaction_set_control_number_upper_bound)
if application_reference_id is not None:
pulumi.set(__self__, "application_reference_id", application_reference_id)
if communication_agreement_id is not None:
pulumi.set(__self__, "communication_agreement_id", communication_agreement_id)
if functional_group_id is not None:
pulumi.set(__self__, "functional_group_id", functional_group_id)
if group_application_password is not None:
pulumi.set(__self__, "group_application_password", group_application_password)
if group_application_receiver_id is not None:
pulumi.set(__self__, "group_application_receiver_id", group_application_receiver_id)
if group_application_receiver_qualifier is not None:
pulumi.set(__self__, "group_application_receiver_qualifier", group_application_receiver_qualifier)
if group_application_sender_id is not None:
pulumi.set(__self__, "group_application_sender_id", group_application_sender_id)
if group_application_sender_qualifier is not None:
pulumi.set(__self__, "group_application_sender_qualifier", group_application_sender_qualifier)
if group_association_assigned_code is not None:
pulumi.set(__self__, "group_association_assigned_code", group_association_assigned_code)
if group_control_number_prefix is not None:
pulumi.set(__self__, "group_control_number_prefix", group_control_number_prefix)
if group_control_number_suffix is not None:
pulumi.set(__self__, "group_control_number_suffix", group_control_number_suffix)
if group_controlling_agency_code is not None:
pulumi.set(__self__, "group_controlling_agency_code", group_controlling_agency_code)
if group_message_release is not None:
pulumi.set(__self__, "group_message_release", group_message_release)
if group_message_version is not None:
pulumi.set(__self__, "group_message_version", group_message_version)
if interchange_control_number_prefix is not None:
pulumi.set(__self__, "interchange_control_number_prefix", interchange_control_number_prefix)
if interchange_control_number_suffix is not None:
pulumi.set(__self__, "interchange_control_number_suffix", interchange_control_number_suffix)
if processing_priority_code is not None:
pulumi.set(__self__, "processing_priority_code", processing_priority_code)
if receiver_internal_identification is not None:
pulumi.set(__self__, "receiver_internal_identification", receiver_internal_identification)
if receiver_internal_sub_identification is not None:
pulumi.set(__self__, "receiver_internal_sub_identification", receiver_internal_sub_identification)
if receiver_reverse_routing_address is not None:
pulumi.set(__self__, "receiver_reverse_routing_address", receiver_reverse_routing_address)
if recipient_reference_password_qualifier is not None:
pulumi.set(__self__, "recipient_reference_password_qualifier", recipient_reference_password_qualifier)
if recipient_reference_password_value is not None:
pulumi.set(__self__, "recipient_reference_password_value", recipient_reference_password_value)
if sender_internal_identification is not None:
pulumi.set(__self__, "sender_internal_identification", sender_internal_identification)
if sender_internal_sub_identification is not None:
pulumi.set(__self__, "sender_internal_sub_identification", sender_internal_sub_identification)
if sender_reverse_routing_address is not None:
pulumi.set(__self__, "sender_reverse_routing_address", sender_reverse_routing_address)
if transaction_set_control_number_prefix is not None:
pulumi.set(__self__, "transaction_set_control_number_prefix", transaction_set_control_number_prefix)
if transaction_set_control_number_suffix is not None:
pulumi.set(__self__, "transaction_set_control_number_suffix", transaction_set_control_number_suffix)
@property
@pulumi.getter(name="applyDelimiterStringAdvice")
def apply_delimiter_string_advice(self) -> bool:
"""
The value indicating whether to apply delimiter string advice.
"""
return pulumi.get(self, "apply_delimiter_string_advice")
@property
@pulumi.getter(name="createGroupingSegments")
def create_grouping_segments(self) -> bool:
"""
The value indicating whether to create grouping segments.
"""
return pulumi.get(self, "create_grouping_segments")
@property
@pulumi.getter(name="enableDefaultGroupHeaders")
def enable_default_group_headers(self) -> bool:
"""
The value indicating whether to enable default group headers.
"""
return pulumi.get(self, "enable_default_group_headers")
@property
@pulumi.getter(name="groupControlNumberLowerBound")
def group_control_number_lower_bound(self) -> float:
"""
The group control number lower bound.
"""
return pulumi.get(self, "group_control_number_lower_bound")
@property
@pulumi.getter(name="groupControlNumberUpperBound")
def group_control_number_upper_bound(self) -> float:
"""
The group control number upper bound.
"""
return pulumi.get(self, "group_control_number_upper_bound")
@property
@pulumi.getter(name="interchangeControlNumberLowerBound")
def interchange_control_number_lower_bound(self) -> float:
"""
The interchange control number lower bound.
"""
return pulumi.get(self, "interchange_control_number_lower_bound")
@property
@pulumi.getter(name="interchangeControlNumberUpperBound")
def interchange_control_number_upper_bound(self) -> float:
"""
The interchange control number upper bound.
"""
return pulumi.get(self, "interchange_control_number_upper_bound")
@property
@pulumi.getter(name="isTestInterchange")
def is_test_interchange(self) -> bool:
"""
The value indicating whether the message is a test interchange.
"""
return pulumi.get(self, "is_test_interchange")
@property
@pulumi.getter(name="overwriteExistingTransactionSetControlNumber")
def overwrite_existing_transaction_set_control_number(self) -> bool:
"""
The value indicating whether to overwrite the existing transaction set control number.
"""
return pulumi.get(self, "overwrite_existing_transaction_set_control_number")
@property
@pulumi.getter(name="rolloverGroupControlNumber")
def rollover_group_control_number(self) -> bool:
"""
The value indicating whether to roll over the group control number.
"""
return pulumi.get(self, "rollover_group_control_number")
@property
@pulumi.getter(name="rolloverInterchangeControlNumber")
def rollover_interchange_control_number(self) -> bool:
"""
The value indicating whether to roll over the interchange control number.
"""
return pulumi.get(self, "rollover_interchange_control_number")
@property
@pulumi.getter(name="rolloverTransactionSetControlNumber")
def rollover_transaction_set_control_number(self) -> bool:
"""
The value indicating whether to roll over the transaction set control number.
"""
return pulumi.get(self, "rollover_transaction_set_control_number")
@property
@pulumi.getter(name="transactionSetControlNumberLowerBound")
def transaction_set_control_number_lower_bound(self) -> float:
"""
The transaction set control number lower bound.
"""
return pulumi.get(self, "transaction_set_control_number_lower_bound")
@property
@pulumi.getter(name="transactionSetControlNumberUpperBound")
def transaction_set_control_number_upper_bound(self) -> float:
"""
The transaction set control number upper bound.
"""
return pulumi.get(self, "transaction_set_control_number_upper_bound")
@property
@pulumi.getter(name="applicationReferenceId")
def application_reference_id(self) -> Optional[str]:
"""
The application reference id.
"""
return pulumi.get(self, "application_reference_id")
@property
@pulumi.getter(name="communicationAgreementId")
def communication_agreement_id(self) -> Optional[str]:
"""
The communication agreement id.
"""
return pulumi.get(self, "communication_agreement_id")
@property
@pulumi.getter(name="functionalGroupId")
def functional_group_id(self) -> Optional[str]:
"""
The functional group id.
"""
return pulumi.get(self, "functional_group_id")
@property
@pulumi.getter(name="groupApplicationPassword")
def group_application_password(self) -> Optional[str]:
"""
The group application password.
"""
return pulumi.get(self, "group_application_password")
@property
@pulumi.getter(name="groupApplicationReceiverId")
def group_application_receiver_id(self) -> Optional[str]:
"""
The group application receiver id.
"""
return pulumi.get(self, "group_application_receiver_id")
@property
@pulumi.getter(name="groupApplicationReceiverQualifier")
def group_application_receiver_qualifier(self) -> Optional[str]:
"""
The group application receiver qualifier.
"""
return pulumi.get(self, "group_application_receiver_qualifier")
@property
@pulumi.getter(name="groupApplicationSenderId")
def group_application_sender_id(self) -> Optional[str]:
"""
The group application sender id.
"""
return pulumi.get(self, "group_application_sender_id")
@property
@pulumi.getter(name="groupApplicationSenderQualifier")
def group_application_sender_qualifier(self) -> Optional[str]:
"""
The group application sender qualifier.
"""
return pulumi.get(self, "group_application_sender_qualifier")
@property
@pulumi.getter(name="groupAssociationAssignedCode")
def group_association_assigned_code(self) -> Optional[str]:
"""
The group association assigned code.
"""
return pulumi.get(self, "group_association_assigned_code")
@property
@pulumi.getter(name="groupControlNumberPrefix")
def group_control_number_prefix(self) -> Optional[str]:
"""
The group control number prefix.
"""
return pulumi.get(self, "group_control_number_prefix")
@property
@pulumi.getter(name="groupControlNumberSuffix")
def group_control_number_suffix(self) -> Optional[str]:
"""
The group control number suffix.
"""
return pulumi.get(self, "group_control_number_suffix")
@property
@pulumi.getter(name="groupControllingAgencyCode")
def group_controlling_agency_code(self) -> Optional[str]:
"""
The group controlling agency code.
"""
return pulumi.get(self, "group_controlling_agency_code")
@property
@pulumi.getter(name="groupMessageRelease")
def group_message_release(self) -> Optional[str]:
"""
The group message release.
"""
return pulumi.get(self, "group_message_release")
@property
@pulumi.getter(name="groupMessageVersion")
def group_message_version(self) -> Optional[str]:
"""
The group message version.
"""
return pulumi.get(self, "group_message_version")
@property
@pulumi.getter(name="interchangeControlNumberPrefix")
def interchange_control_number_prefix(self) -> Optional[str]:
"""
The interchange control number prefix.
"""
return pulumi.get(self, "interchange_control_number_prefix")
@property
@pulumi.getter(name="interchangeControlNumberSuffix")
def interchange_control_number_suffix(self) -> Optional[str]:
"""
The interchange control number suffix.
"""
return pulumi.get(self, "interchange_control_number_suffix")
@property
@pulumi.getter(name="processingPriorityCode")
def processing_priority_code(self) -> Optional[str]:
"""
The processing priority code.
"""
return pulumi.get(self, "processing_priority_code")
@property
@pulumi.getter(name="receiverInternalIdentification")
def receiver_internal_identification(self) -> Optional[str]:
"""
The receiver internal identification.
"""
return pulumi.get(self, "receiver_internal_identification")
@property
@pulumi.getter(name="receiverInternalSubIdentification")
def receiver_internal_sub_identification(self) -> Optional[str]:
"""
The receiver internal sub identification.
"""
return pulumi.get(self, "receiver_internal_sub_identification")
@property
@pulumi.getter(name="receiverReverseRoutingAddress")
def receiver_reverse_routing_address(self) -> Optional[str]:
"""
The receiver reverse routing address.
"""
return pulumi.get(self, "receiver_reverse_routing_address")
@property
@pulumi.getter(name="recipientReferencePasswordQualifier")
def recipient_reference_password_qualifier(self) -> Optional[str]:
"""
The recipient reference password qualifier.
"""
return pulumi.get(self, "recipient_reference_password_qualifier")
@property
@pulumi.getter(name="recipientReferencePasswordValue")
def recipient_reference_password_value(self) -> Optional[str]:
"""
The recipient reference password value.
"""
return pulumi.get(self, "recipient_reference_password_value")
@property
@pulumi.getter(name="senderInternalIdentification")
def sender_internal_identification(self) -> Optional[str]:
"""
The sender internal identification.
"""
return pulumi.get(self, "sender_internal_identification")
@property
@pulumi.getter(name="senderInternalSubIdentification")
def sender_internal_sub_identification(self) -> Optional[str]:
"""
The sender internal sub identification.
"""
return pulumi.get(self, "sender_internal_sub_identification")
@property
@pulumi.getter(name="senderReverseRoutingAddress")
def sender_reverse_routing_address(self) -> Optional[str]:
"""
The sender reverse routing address.
"""
return pulumi.get(self, "sender_reverse_routing_address")
@property
@pulumi.getter(name="transactionSetControlNumberPrefix")
def transaction_set_control_number_prefix(self) -> Optional[str]:
"""
The transaction set control number prefix.
"""
return pulumi.get(self, "transaction_set_control_number_prefix")
@property
@pulumi.getter(name="transactionSetControlNumberSuffix")
def transaction_set_control_number_suffix(self) -> Optional[str]:
"""
The transaction set control number suffix.
"""
return pulumi.get(self, "transaction_set_control_number_suffix")
@pulumi.output_type
class EdifactFramingSettingsResponse(dict):
"""
The Edifact agreement framing settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "characterSet":
suggest = "character_set"
elif key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "decimalPointIndicator":
suggest = "decimal_point_indicator"
elif key == "protocolVersion":
suggest = "protocol_version"
elif key == "releaseIndicator":
suggest = "release_indicator"
elif key == "repetitionSeparator":
suggest = "repetition_separator"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
elif key == "characterEncoding":
suggest = "character_encoding"
elif key == "serviceCodeListDirectoryVersion":
suggest = "service_code_list_directory_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactFramingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactFramingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactFramingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
character_set: str,
component_separator: int,
data_element_separator: int,
decimal_point_indicator: str,
protocol_version: int,
release_indicator: int,
repetition_separator: int,
segment_terminator: int,
segment_terminator_suffix: str,
character_encoding: Optional[str] = None,
service_code_list_directory_version: Optional[str] = None):
"""
The Edifact agreement framing settings.
:param str character_set: The EDIFACT framing setting character set.
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
:param str decimal_point_indicator: The EDIFACT framing setting decimal point indicator.
:param int protocol_version: The protocol version.
:param int release_indicator: The release indicator.
:param int repetition_separator: The repetition separator.
:param int segment_terminator: The segment terminator.
:param str segment_terminator_suffix: The EDIFACT framing setting segment terminator suffix.
:param str character_encoding: The character encoding.
:param str service_code_list_directory_version: The service code list directory version.
"""
pulumi.set(__self__, "character_set", character_set)
pulumi.set(__self__, "component_separator", component_separator)
pulumi.set(__self__, "data_element_separator", data_element_separator)
pulumi.set(__self__, "decimal_point_indicator", decimal_point_indicator)
pulumi.set(__self__, "protocol_version", protocol_version)
pulumi.set(__self__, "release_indicator", release_indicator)
pulumi.set(__self__, "repetition_separator", repetition_separator)
pulumi.set(__self__, "segment_terminator", segment_terminator)
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
if character_encoding is not None:
pulumi.set(__self__, "character_encoding", character_encoding)
if service_code_list_directory_version is not None:
pulumi.set(__self__, "service_code_list_directory_version", service_code_list_directory_version)
@property
@pulumi.getter(name="characterSet")
def character_set(self) -> str:
"""
The EDIFACT framing setting character set.
"""
return pulumi.get(self, "character_set")
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> int:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> int:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="decimalPointIndicator")
def decimal_point_indicator(self) -> str:
"""
The EDIFACT framing setting decimal point indicator.
"""
return pulumi.get(self, "decimal_point_indicator")
@property
@pulumi.getter(name="protocolVersion")
def protocol_version(self) -> int:
"""
The protocol version.
"""
return pulumi.get(self, "protocol_version")
@property
@pulumi.getter(name="releaseIndicator")
def release_indicator(self) -> int:
"""
The release indicator.
"""
return pulumi.get(self, "release_indicator")
@property
@pulumi.getter(name="repetitionSeparator")
def repetition_separator(self) -> int:
"""
The repetition separator.
"""
return pulumi.get(self, "repetition_separator")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> int:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> str:
"""
The EDIFACT framing setting segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@property
@pulumi.getter(name="characterEncoding")
def character_encoding(self) -> Optional[str]:
"""
The character encoding.
"""
return pulumi.get(self, "character_encoding")
@property
@pulumi.getter(name="serviceCodeListDirectoryVersion")
def service_code_list_directory_version(self) -> Optional[str]:
"""
The service code list directory version.
"""
return pulumi.get(self, "service_code_list_directory_version")
@pulumi.output_type
class EdifactMessageFilterResponse(dict):
"""
The Edifact message filter for OData queries.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageFilterType":
suggest = "message_filter_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactMessageFilterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactMessageFilterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactMessageFilterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_filter_type: str):
"""
The Edifact message filter for OData queries.
:param str message_filter_type: The message filter type.
"""
pulumi.set(__self__, "message_filter_type", message_filter_type)
@property
@pulumi.getter(name="messageFilterType")
def message_filter_type(self) -> str:
"""
The message filter type.
"""
return pulumi.get(self, "message_filter_type")
@pulumi.output_type
class EdifactMessageIdentifierResponse(dict):
"""
The Edifact message identifier.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageId":
suggest = "message_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactMessageIdentifierResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactMessageIdentifierResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactMessageIdentifierResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_id: str):
"""
The Edifact message identifier.
:param str message_id: The message id to which these envelope settings apply.
"""
pulumi.set(__self__, "message_id", message_id)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> str:
"""
The message id to which these envelope settings apply.
"""
return pulumi.get(self, "message_id")
@pulumi.output_type
class EdifactOneWayAgreementResponse(dict):
"""
The Edifact one way agreement.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "protocolSettings":
suggest = "protocol_settings"
elif key == "receiverBusinessIdentity":
suggest = "receiver_business_identity"
elif key == "senderBusinessIdentity":
suggest = "sender_business_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactOneWayAgreementResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactOneWayAgreementResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactOneWayAgreementResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
protocol_settings: 'outputs.EdifactProtocolSettingsResponse',
receiver_business_identity: 'outputs.BusinessIdentityResponse',
sender_business_identity: 'outputs.BusinessIdentityResponse'):
"""
The Edifact one way agreement.
:param 'EdifactProtocolSettingsResponse' protocol_settings: The EDIFACT protocol settings.
:param 'BusinessIdentityResponse' receiver_business_identity: The receiver business identity.
:param 'BusinessIdentityResponse' sender_business_identity: The sender business identity.
"""
pulumi.set(__self__, "protocol_settings", protocol_settings)
pulumi.set(__self__, "receiver_business_identity", receiver_business_identity)
pulumi.set(__self__, "sender_business_identity", sender_business_identity)
@property
@pulumi.getter(name="protocolSettings")
def protocol_settings(self) -> 'outputs.EdifactProtocolSettingsResponse':
"""
The EDIFACT protocol settings.
"""
return pulumi.get(self, "protocol_settings")
@property
@pulumi.getter(name="receiverBusinessIdentity")
def receiver_business_identity(self) -> 'outputs.BusinessIdentityResponse':
"""
The receiver business identity.
"""
return pulumi.get(self, "receiver_business_identity")
@property
@pulumi.getter(name="senderBusinessIdentity")
def sender_business_identity(self) -> 'outputs.BusinessIdentityResponse':
"""
The sender business identity.
"""
return pulumi.get(self, "sender_business_identity")
@pulumi.output_type
class EdifactProcessingSettingsResponse(dict):
"""
The Edifact agreement processing settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createEmptyXmlTagsForTrailingSeparators":
suggest = "create_empty_xml_tags_for_trailing_separators"
elif key == "maskSecurityInfo":
suggest = "mask_security_info"
elif key == "preserveInterchange":
suggest = "preserve_interchange"
elif key == "suspendInterchangeOnError":
suggest = "suspend_interchange_on_error"
elif key == "useDotAsDecimalSeparator":
suggest = "use_dot_as_decimal_separator"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactProcessingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactProcessingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactProcessingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
create_empty_xml_tags_for_trailing_separators: bool,
mask_security_info: bool,
preserve_interchange: bool,
suspend_interchange_on_error: bool,
use_dot_as_decimal_separator: bool):
"""
The Edifact agreement processing settings.
:param bool create_empty_xml_tags_for_trailing_separators: The value indicating whether to create empty XML tags for trailing separators.
:param bool mask_security_info: The value indicating whether to mask security information.
:param bool preserve_interchange: The value indicating whether to preserve interchange.
:param bool suspend_interchange_on_error: The value indicating whether to suspend interchange on error.
:param bool use_dot_as_decimal_separator: The value indicating whether to use a dot as the decimal separator.
"""
pulumi.set(__self__, "create_empty_xml_tags_for_trailing_separators", create_empty_xml_tags_for_trailing_separators)
pulumi.set(__self__, "mask_security_info", mask_security_info)
pulumi.set(__self__, "preserve_interchange", preserve_interchange)
pulumi.set(__self__, "suspend_interchange_on_error", suspend_interchange_on_error)
pulumi.set(__self__, "use_dot_as_decimal_separator", use_dot_as_decimal_separator)
@property
@pulumi.getter(name="createEmptyXmlTagsForTrailingSeparators")
def create_empty_xml_tags_for_trailing_separators(self) -> bool:
"""
The value indicating whether to create empty XML tags for trailing separators.
"""
return pulumi.get(self, "create_empty_xml_tags_for_trailing_separators")
@property
@pulumi.getter(name="maskSecurityInfo")
def mask_security_info(self) -> bool:
"""
The value indicating whether to mask security information.
"""
return pulumi.get(self, "mask_security_info")
@property
@pulumi.getter(name="preserveInterchange")
def preserve_interchange(self) -> bool:
"""
The value indicating whether to preserve interchange.
"""
return pulumi.get(self, "preserve_interchange")
@property
@pulumi.getter(name="suspendInterchangeOnError")
def suspend_interchange_on_error(self) -> bool:
"""
The value indicating whether to suspend interchange on error.
"""
return pulumi.get(self, "suspend_interchange_on_error")
@property
@pulumi.getter(name="useDotAsDecimalSeparator")
def use_dot_as_decimal_separator(self) -> bool:
"""
The value indicating whether to use a dot as the decimal separator.
"""
return pulumi.get(self, "use_dot_as_decimal_separator")
@pulumi.output_type
class EdifactProtocolSettingsResponse(dict):
"""
The Edifact agreement protocol settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementSettings":
suggest = "acknowledgement_settings"
elif key == "envelopeSettings":
suggest = "envelope_settings"
elif key == "framingSettings":
suggest = "framing_settings"
elif key == "messageFilter":
suggest = "message_filter"
elif key == "processingSettings":
suggest = "processing_settings"
elif key == "schemaReferences":
suggest = "schema_references"
elif key == "validationSettings":
suggest = "validation_settings"
elif key == "edifactDelimiterOverrides":
suggest = "edifact_delimiter_overrides"
elif key == "envelopeOverrides":
suggest = "envelope_overrides"
elif key == "messageFilterList":
suggest = "message_filter_list"
elif key == "validationOverrides":
suggest = "validation_overrides"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactProtocolSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactProtocolSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactProtocolSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_settings: 'outputs.EdifactAcknowledgementSettingsResponse',
envelope_settings: 'outputs.EdifactEnvelopeSettingsResponse',
framing_settings: 'outputs.EdifactFramingSettingsResponse',
message_filter: 'outputs.EdifactMessageFilterResponse',
processing_settings: 'outputs.EdifactProcessingSettingsResponse',
schema_references: Sequence['outputs.EdifactSchemaReferenceResponse'],
validation_settings: 'outputs.EdifactValidationSettingsResponse',
edifact_delimiter_overrides: Optional[Sequence['outputs.EdifactDelimiterOverrideResponse']] = None,
envelope_overrides: Optional[Sequence['outputs.EdifactEnvelopeOverrideResponse']] = None,
message_filter_list: Optional[Sequence['outputs.EdifactMessageIdentifierResponse']] = None,
validation_overrides: Optional[Sequence['outputs.EdifactValidationOverrideResponse']] = None):
"""
The Edifact agreement protocol settings.
:param 'EdifactAcknowledgementSettingsResponse' acknowledgement_settings: The EDIFACT acknowledgement settings.
:param 'EdifactEnvelopeSettingsResponse' envelope_settings: The EDIFACT envelope settings.
:param 'EdifactFramingSettingsResponse' framing_settings: The EDIFACT framing settings.
:param 'EdifactMessageFilterResponse' message_filter: The EDIFACT message filter.
:param 'EdifactProcessingSettingsResponse' processing_settings: The EDIFACT processing settings.
:param Sequence['EdifactSchemaReferenceResponse'] schema_references: The EDIFACT schema references.
:param 'EdifactValidationSettingsResponse' validation_settings: The EDIFACT validation settings.
:param Sequence['EdifactDelimiterOverrideResponse'] edifact_delimiter_overrides: The EDIFACT delimiter override settings.
:param Sequence['EdifactEnvelopeOverrideResponse'] envelope_overrides: The EDIFACT envelope override settings.
:param Sequence['EdifactMessageIdentifierResponse'] message_filter_list: The EDIFACT message filter list.
:param Sequence['EdifactValidationOverrideResponse'] validation_overrides: The EDIFACT validation override settings.
"""
pulumi.set(__self__, "acknowledgement_settings", acknowledgement_settings)
pulumi.set(__self__, "envelope_settings", envelope_settings)
pulumi.set(__self__, "framing_settings", framing_settings)
pulumi.set(__self__, "message_filter", message_filter)
pulumi.set(__self__, "processing_settings", processing_settings)
pulumi.set(__self__, "schema_references", schema_references)
pulumi.set(__self__, "validation_settings", validation_settings)
if edifact_delimiter_overrides is not None:
pulumi.set(__self__, "edifact_delimiter_overrides", edifact_delimiter_overrides)
if envelope_overrides is not None:
pulumi.set(__self__, "envelope_overrides", envelope_overrides)
if message_filter_list is not None:
pulumi.set(__self__, "message_filter_list", message_filter_list)
if validation_overrides is not None:
pulumi.set(__self__, "validation_overrides", validation_overrides)
@property
@pulumi.getter(name="acknowledgementSettings")
def acknowledgement_settings(self) -> 'outputs.EdifactAcknowledgementSettingsResponse':
"""
The EDIFACT acknowledgement settings.
"""
return pulumi.get(self, "acknowledgement_settings")
@property
@pulumi.getter(name="envelopeSettings")
def envelope_settings(self) -> 'outputs.EdifactEnvelopeSettingsResponse':
"""
The EDIFACT envelope settings.
"""
return pulumi.get(self, "envelope_settings")
@property
@pulumi.getter(name="framingSettings")
def framing_settings(self) -> 'outputs.EdifactFramingSettingsResponse':
"""
The EDIFACT framing settings.
"""
return pulumi.get(self, "framing_settings")
@property
@pulumi.getter(name="messageFilter")
def message_filter(self) -> 'outputs.EdifactMessageFilterResponse':
"""
The EDIFACT message filter.
"""
return pulumi.get(self, "message_filter")
@property
@pulumi.getter(name="processingSettings")
def processing_settings(self) -> 'outputs.EdifactProcessingSettingsResponse':
"""
The EDIFACT processing settings.
"""
return pulumi.get(self, "processing_settings")
@property
@pulumi.getter(name="schemaReferences")
def schema_references(self) -> Sequence['outputs.EdifactSchemaReferenceResponse']:
"""
The EDIFACT schema references.
"""
return pulumi.get(self, "schema_references")
@property
@pulumi.getter(name="validationSettings")
def validation_settings(self) -> 'outputs.EdifactValidationSettingsResponse':
"""
The EDIFACT validation settings.
"""
return pulumi.get(self, "validation_settings")
@property
@pulumi.getter(name="edifactDelimiterOverrides")
def edifact_delimiter_overrides(self) -> Optional[Sequence['outputs.EdifactDelimiterOverrideResponse']]:
"""
The EDIFACT delimiter override settings.
"""
return pulumi.get(self, "edifact_delimiter_overrides")
@property
@pulumi.getter(name="envelopeOverrides")
def envelope_overrides(self) -> Optional[Sequence['outputs.EdifactEnvelopeOverrideResponse']]:
"""
The EDIFACT envelope override settings.
"""
return pulumi.get(self, "envelope_overrides")
@property
@pulumi.getter(name="messageFilterList")
def message_filter_list(self) -> Optional[Sequence['outputs.EdifactMessageIdentifierResponse']]:
"""
The EDIFACT message filter list.
"""
return pulumi.get(self, "message_filter_list")
@property
@pulumi.getter(name="validationOverrides")
def validation_overrides(self) -> Optional[Sequence['outputs.EdifactValidationOverrideResponse']]:
"""
The EDIFACT validation override settings.
"""
return pulumi.get(self, "validation_overrides")
@pulumi.output_type
class EdifactSchemaReferenceResponse(dict):
"""
The Edifact schema reference.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageId":
suggest = "message_id"
elif key == "messageRelease":
suggest = "message_release"
elif key == "messageVersion":
suggest = "message_version"
elif key == "schemaName":
suggest = "schema_name"
elif key == "associationAssignedCode":
suggest = "association_assigned_code"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "senderApplicationQualifier":
suggest = "sender_application_qualifier"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactSchemaReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactSchemaReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactSchemaReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_id: str,
message_release: str,
message_version: str,
schema_name: str,
association_assigned_code: Optional[str] = None,
sender_application_id: Optional[str] = None,
sender_application_qualifier: Optional[str] = None):
"""
The Edifact schema reference.
:param str message_id: The message id.
:param str message_release: The message release version.
:param str message_version: The message version.
:param str schema_name: The schema name.
:param str association_assigned_code: The association assigned code.
:param str sender_application_id: The sender application id.
:param str sender_application_qualifier: The sender application qualifier.
"""
pulumi.set(__self__, "message_id", message_id)
pulumi.set(__self__, "message_release", message_release)
pulumi.set(__self__, "message_version", message_version)
pulumi.set(__self__, "schema_name", schema_name)
if association_assigned_code is not None:
pulumi.set(__self__, "association_assigned_code", association_assigned_code)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
if sender_application_qualifier is not None:
pulumi.set(__self__, "sender_application_qualifier", sender_application_qualifier)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> str:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="messageRelease")
def message_release(self) -> str:
"""
The message release version.
"""
return pulumi.get(self, "message_release")
@property
@pulumi.getter(name="messageVersion")
def message_version(self) -> str:
"""
The message version.
"""
return pulumi.get(self, "message_version")
@property
@pulumi.getter(name="schemaName")
def schema_name(self) -> str:
"""
The schema name.
"""
return pulumi.get(self, "schema_name")
@property
@pulumi.getter(name="associationAssignedCode")
def association_assigned_code(self) -> Optional[str]:
"""
The association assigned code.
"""
return pulumi.get(self, "association_assigned_code")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="senderApplicationQualifier")
def sender_application_qualifier(self) -> Optional[str]:
"""
The sender application qualifier.
"""
return pulumi.get(self, "sender_application_qualifier")
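# --- Illustrative usage sketch (not part of the generated API surface) ---
# These @pulumi.output_type classes are dict subclasses: values can be read via
# the snake_case property getters, or via the original camelCase keys, in which
# case __getitem__ above logs a warning suggesting the property getter instead.
# Direct construction below is for demonstration only (assuming Pulumi's
# output-type machinery keys the underlying dict by the wire-format names, per
# the __key_warning helpers above); real instances are deserialized from
# provider responses.
def _example_schema_reference_access():
    ref = EdifactSchemaReferenceResponse(
        message_id="ORDERS",
        message_release="00A",
        message_version="D",
        schema_name="EdifactOrders",
    )
    preferred = ref.schema_name  # snake_case property getter
    legacy = ref["messageId"]    # works, but logs the key warning above
    return preferred, legacy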
@pulumi.output_type
class EdifactValidationOverrideResponse(dict):
"""
The Edifact validation override settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "enforceCharacterSet":
suggest = "enforce_character_set"
elif key == "messageId":
suggest = "message_id"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactValidationOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactValidationOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactValidationOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: bool,
enforce_character_set: bool,
message_id: str,
trailing_separator_policy: str,
trim_leading_and_trailing_spaces_and_zeroes: bool,
validate_edi_types: bool,
validate_xsd_types: bool):
"""
The Edifact validation override settings.
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
:param bool enforce_character_set: The value indicating whether to validate the character set.
:param str message_id: The message id on which the validation settings have to be applied.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
:param bool validate_edi_types: The value indicating whether to validate EDI types.
:param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "enforce_character_set", enforce_character_set)
pulumi.set(__self__, "message_id", message_id)
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="enforceCharacterSet")
def enforce_character_set(self) -> bool:
"""
The value indicating whether to validate the character set.
"""
return pulumi.get(self, "enforce_character_set")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> str:
"""
The message id on which the validation settings have to be applied.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> str:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> bool:
"""
The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> bool:
"""
The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
@pulumi.output_type
class EdifactValidationSettingsResponse(dict):
"""
The Edifact agreement validation settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "checkDuplicateGroupControlNumber":
suggest = "check_duplicate_group_control_number"
elif key == "checkDuplicateInterchangeControlNumber":
suggest = "check_duplicate_interchange_control_number"
elif key == "checkDuplicateTransactionSetControlNumber":
suggest = "check_duplicate_transaction_set_control_number"
elif key == "interchangeControlNumberValidityDays":
suggest = "interchange_control_number_validity_days"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateCharacterSet":
suggest = "validate_character_set"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EdifactValidationSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EdifactValidationSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EdifactValidationSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: bool,
check_duplicate_group_control_number: bool,
check_duplicate_interchange_control_number: bool,
check_duplicate_transaction_set_control_number: bool,
interchange_control_number_validity_days: int,
trailing_separator_policy: str,
trim_leading_and_trailing_spaces_and_zeroes: bool,
validate_character_set: bool,
validate_edi_types: bool,
validate_xsd_types: bool):
"""
The Edifact agreement validation settings.
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
:param bool check_duplicate_group_control_number: The value indicating whether to check for duplicate group control number.
:param bool check_duplicate_interchange_control_number: The value indicating whether to check for duplicate interchange control number.
:param bool check_duplicate_transaction_set_control_number: The value indicating whether to check for duplicate transaction set control number.
:param int interchange_control_number_validity_days: The validity period of interchange control number.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
:param bool validate_character_set: The value indicating whether to validate character set in the message.
:param bool validate_edi_types: The value indicating whether to validate EDI types.
:param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "check_duplicate_group_control_number", check_duplicate_group_control_number)
pulumi.set(__self__, "check_duplicate_interchange_control_number", check_duplicate_interchange_control_number)
pulumi.set(__self__, "check_duplicate_transaction_set_control_number", check_duplicate_transaction_set_control_number)
pulumi.set(__self__, "interchange_control_number_validity_days", interchange_control_number_validity_days)
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "validate_character_set", validate_character_set)
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="checkDuplicateGroupControlNumber")
def check_duplicate_group_control_number(self) -> bool:
"""
The value indicating whether to check for duplicate group control number.
"""
return pulumi.get(self, "check_duplicate_group_control_number")
@property
@pulumi.getter(name="checkDuplicateInterchangeControlNumber")
def check_duplicate_interchange_control_number(self) -> bool:
"""
The value indicating whether to check for duplicate interchange control number.
"""
return pulumi.get(self, "check_duplicate_interchange_control_number")
@property
@pulumi.getter(name="checkDuplicateTransactionSetControlNumber")
def check_duplicate_transaction_set_control_number(self) -> bool:
"""
The value indicating whether to check for duplicate transaction set control number.
"""
return pulumi.get(self, "check_duplicate_transaction_set_control_number")
@property
@pulumi.getter(name="interchangeControlNumberValidityDays")
def interchange_control_number_validity_days(self) -> int:
"""
The validity period of interchange control number.
"""
return pulumi.get(self, "interchange_control_number_validity_days")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> str:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateCharacterSet")
def validate_character_set(self) -> bool:
"""
The value indicating whether to validate character set in the message.
"""
return pulumi.get(self, "validate_character_set")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> bool:
"""
The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> bool:
"""
The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
@pulumi.output_type
class ExpressionResponse(dict):
def __init__(__self__, *,
error: Optional['outputs.AzureResourceErrorInfoResponse'] = None,
subexpressions: Optional[Sequence['outputs.ExpressionResponse']] = None,
text: Optional[str] = None,
value: Optional[Any] = None):
"""
:param 'AzureResourceErrorInfoResponse' error: The azure resource error info.
"""
if error is not None:
pulumi.set(__self__, "error", error)
if subexpressions is not None:
pulumi.set(__self__, "subexpressions", subexpressions)
if text is not None:
pulumi.set(__self__, "text", text)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def error(self) -> Optional['outputs.AzureResourceErrorInfoResponse']:
"""
The azure resource error info.
"""
return pulumi.get(self, "error")
@property
@pulumi.getter
def subexpressions(self) -> Optional[Sequence['outputs.ExpressionResponse']]:
return pulumi.get(self, "subexpressions")
@property
@pulumi.getter
def text(self) -> Optional[str]:
return pulumi.get(self, "text")
@property
@pulumi.getter
def value(self) -> Optional[Any]:
return pulumi.get(self, "value")
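# Minimal sketch: ExpressionResponse nests arbitrarily deep through
# `subexpressions`, so gathering every error in an expression tree is a simple
# depth-first walk. This helper is illustrative and not part of the generated
# module.
def _collect_expression_errors(expr: 'ExpressionResponse') -> list:
    errors = []
    if expr.error is not None:
        errors.append(expr.error)
    for sub in expr.subexpressions or []:
        errors.extend(_collect_expression_errors(sub))
    return errors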
@pulumi.output_type
class ExpressionRootResponse(dict):
def __init__(__self__, *,
error: Optional['outputs.AzureResourceErrorInfoResponse'] = None,
path: Optional[str] = None,
subexpressions: Optional[Sequence['outputs.ExpressionResponse']] = None,
text: Optional[str] = None,
value: Optional[Any] = None):
"""
:param 'AzureResourceErrorInfoResponse' error: The azure resource error info.
:param str path: The path.
"""
if error is not None:
pulumi.set(__self__, "error", error)
if path is not None:
pulumi.set(__self__, "path", path)
if subexpressions is not None:
pulumi.set(__self__, "subexpressions", subexpressions)
if text is not None:
pulumi.set(__self__, "text", text)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def error(self) -> Optional['outputs.AzureResourceErrorInfoResponse']:
"""
The azure resource error info.
"""
return pulumi.get(self, "error")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
The path.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def subexpressions(self) -> Optional[Sequence['outputs.ExpressionResponse']]:
return pulumi.get(self, "subexpressions")
@property
@pulumi.getter
def text(self) -> Optional[str]:
return pulumi.get(self, "text")
@property
@pulumi.getter
def value(self) -> Optional[Any]:
return pulumi.get(self, "value")
@pulumi.output_type
class IntegrationAccountMapPropertiesResponseParametersSchema(dict):
"""
The parameters schema of integration account map.
"""
def __init__(__self__, *,
ref: Optional[str] = None):
"""
The parameters schema of integration account map.
:param str ref: The reference name.
"""
if ref is not None:
pulumi.set(__self__, "ref", ref)
@property
@pulumi.getter
def ref(self) -> Optional[str]:
"""
The reference name.
"""
return pulumi.get(self, "ref")
@pulumi.output_type
class IntegrationAccountSkuResponse(dict):
"""
The integration account sku.
"""
def __init__(__self__, *,
name: str):
"""
The integration account sku.
:param str name: The sku name.
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> str:
"""
The sku name.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class KeyVaultKeyReferenceResponse(dict):
"""
The reference to the key vault key.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "keyName":
suggest = "key_name"
elif key == "keyVault":
suggest = "key_vault"
elif key == "keyVersion":
suggest = "key_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KeyVaultKeyReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KeyVaultKeyReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KeyVaultKeyReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
key_name: str,
key_vault: 'outputs.KeyVaultKeyReferenceResponseKeyVault',
key_version: Optional[str] = None):
"""
The reference to the key vault key.
:param str key_name: The private key name in key vault.
:param 'KeyVaultKeyReferenceResponseKeyVault' key_vault: The key vault reference.
:param str key_version: The private key version in key vault.
"""
pulumi.set(__self__, "key_name", key_name)
pulumi.set(__self__, "key_vault", key_vault)
if key_version is not None:
pulumi.set(__self__, "key_version", key_version)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
The private key name in key vault.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="keyVault")
def key_vault(self) -> 'outputs.KeyVaultKeyReferenceResponseKeyVault':
"""
The key vault reference.
"""
return pulumi.get(self, "key_vault")
@property
@pulumi.getter(name="keyVersion")
def key_version(self) -> Optional[str]:
"""
The private key version in key vault.
"""
return pulumi.get(self, "key_version")
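# Illustrative sketch: a key reference pairs the key name (and optional
# version) with a key vault sub-object. Hand construction and the resource id
# below are demonstration placeholders; real instances come back from the
# provider.
def _example_key_vault_key_reference():
    vault = KeyVaultKeyReferenceResponseKeyVault(
        name="my-vault",
        type="Microsoft.KeyVault/vaults",
        id="/subscriptions/.../resourceGroups/rg/providers/Microsoft.KeyVault/vaults/my-vault",
    )
    # key_version omitted so the reference tracks the latest key version.
    return KeyVaultKeyReferenceResponse(key_name="signing-key", key_vault=vault)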
@pulumi.output_type
class KeyVaultKeyReferenceResponseKeyVault(dict):
"""
The key vault reference.
"""
def __init__(__self__, *,
name: str,
type: str,
id: Optional[str] = None):
"""
The key vault reference.
:param str name: The resource name.
:param str type: The resource type.
:param str id: The resource id.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The resource id.
"""
return pulumi.get(self, "id")
@pulumi.output_type
class KeyVaultKeyResponse(dict):
"""
The key vault key.
"""
def __init__(__self__, *,
attributes: Optional['outputs.KeyVaultKeyResponseAttributes'] = None,
kid: Optional[str] = None):
"""
The key vault key.
:param 'KeyVaultKeyResponseAttributes' attributes: The key attributes.
:param str kid: The key id.
"""
if attributes is not None:
pulumi.set(__self__, "attributes", attributes)
if kid is not None:
pulumi.set(__self__, "kid", kid)
@property
@pulumi.getter
def attributes(self) -> Optional['outputs.KeyVaultKeyResponseAttributes']:
"""
The key attributes.
"""
return pulumi.get(self, "attributes")
@property
@pulumi.getter
def kid(self) -> Optional[str]:
"""
The key id.
"""
return pulumi.get(self, "kid")
@pulumi.output_type
class KeyVaultKeyResponseAttributes(dict):
"""
The key attributes.
"""
def __init__(__self__, *,
created: Optional[float] = None,
enabled: Optional[bool] = None,
updated: Optional[float] = None):
"""
The key attributes.
:param float created: When the key was created.
:param bool enabled: Whether the key is enabled or not.
:param float updated: When the key was updated.
"""
if created is not None:
pulumi.set(__self__, "created", created)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if updated is not None:
pulumi.set(__self__, "updated", updated)
@property
@pulumi.getter
def created(self) -> Optional[float]:
"""
When the key was created.
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Whether the key is enabled or not.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def updated(self) -> Optional[float]:
"""
When the key was updated.
"""
return pulumi.get(self, "updated")
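# Minimal sketch: `created` and `updated` are plain floats. Interpreting them
# as Unix epoch seconds is an assumption (the generated code does not document
# the unit); under that assumption they convert to datetimes as shown.
def _example_key_created_at(attrs: 'KeyVaultKeyResponseAttributes'):
    import datetime
    if attrs.created is None:
        return None
    return datetime.datetime.fromtimestamp(attrs.created, tz=datetime.timezone.utc)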
@pulumi.output_type
class PartnerContentResponse(dict):
"""
The integration account partner content.
"""
def __init__(__self__, *,
b2b: Optional['outputs.B2BPartnerContentResponse'] = None):
"""
The integration account partner content.
:param 'B2BPartnerContentResponse' b2b: The B2B partner content.
"""
if b2b is not None:
pulumi.set(__self__, "b2b", b2b)
@property
@pulumi.getter
def b2b(self) -> Optional['outputs.B2BPartnerContentResponse']:
"""
The B2B partner content.
"""
return pulumi.get(self, "b2b")
@pulumi.output_type
class RecurrenceScheduleOccurrenceResponse(dict):
"""
The recurrence schedule occurrence.
"""
def __init__(__self__, *,
day: Optional[str] = None,
occurrence: Optional[int] = None):
"""
The recurrence schedule occurrence.
:param str day: The day of the week.
:param int occurrence: The occurrence.
"""
if day is not None:
pulumi.set(__self__, "day", day)
if occurrence is not None:
pulumi.set(__self__, "occurrence", occurrence)
@property
@pulumi.getter
def day(self) -> Optional[str]:
"""
The day of the week.
"""
return pulumi.get(self, "day")
@property
@pulumi.getter
def occurrence(self) -> Optional[int]:
"""
The occurrence.
"""
return pulumi.get(self, "occurrence")
@pulumi.output_type
class RecurrenceScheduleResponse(dict):
"""
The recurrence schedule.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "monthDays":
suggest = "month_days"
elif key == "monthlyOccurrences":
suggest = "monthly_occurrences"
elif key == "weekDays":
suggest = "week_days"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RecurrenceScheduleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RecurrenceScheduleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RecurrenceScheduleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
hours: Optional[Sequence[int]] = None,
minutes: Optional[Sequence[int]] = None,
month_days: Optional[Sequence[int]] = None,
monthly_occurrences: Optional[Sequence['outputs.RecurrenceScheduleOccurrenceResponse']] = None,
week_days: Optional[Sequence[str]] = None):
"""
The recurrence schedule.
:param Sequence[int] hours: The hours.
:param Sequence[int] minutes: The minutes.
:param Sequence[int] month_days: The month days.
:param Sequence['RecurrenceScheduleOccurrenceResponse'] monthly_occurrences: The monthly occurrences.
:param Sequence[str] week_days: The days of the week.
"""
if hours is not None:
pulumi.set(__self__, "hours", hours)
if minutes is not None:
pulumi.set(__self__, "minutes", minutes)
if month_days is not None:
pulumi.set(__self__, "month_days", month_days)
if monthly_occurrences is not None:
pulumi.set(__self__, "monthly_occurrences", monthly_occurrences)
if week_days is not None:
pulumi.set(__self__, "week_days", week_days)
@property
@pulumi.getter
def hours(self) -> Optional[Sequence[int]]:
"""
The hours.
"""
return pulumi.get(self, "hours")
@property
@pulumi.getter
def minutes(self) -> Optional[Sequence[int]]:
"""
The minutes.
"""
return pulumi.get(self, "minutes")
@property
@pulumi.getter(name="monthDays")
def month_days(self) -> Optional[Sequence[int]]:
"""
The month days.
"""
return pulumi.get(self, "month_days")
@property
@pulumi.getter(name="monthlyOccurrences")
def monthly_occurrences(self) -> Optional[Sequence['outputs.RecurrenceScheduleOccurrenceResponse']]:
"""
The monthly occurrences.
"""
return pulumi.get(self, "monthly_occurrences")
@property
@pulumi.getter(name="weekDays")
def week_days(self) -> Optional[Sequence[str]]:
"""
The days of the week.
"""
return pulumi.get(self, "week_days")
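# Illustrative sketch: a schedule that fires at 09:30 on the third Monday of
# each month. The day/occurrence values are hand-written to show the shape of
# the nested types; nothing here validates them.
def _example_recurrence_schedule():
    return RecurrenceScheduleResponse(
        hours=[9],
        minutes=[30],
        monthly_occurrences=[
            RecurrenceScheduleOccurrenceResponse(day="Monday", occurrence=3),
        ],
    )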
@pulumi.output_type
class ResourceReferenceResponse(dict):
"""
The resource reference.
"""
def __init__(__self__, *,
name: str,
type: str,
id: Optional[str] = None):
"""
The resource reference.
:param str name: Gets the resource name.
:param str type: Gets the resource type.
:param str id: The resource id.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def name(self) -> str:
"""
Gets the resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Gets the resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The resource id.
"""
return pulumi.get(self, "id")
@pulumi.output_type
class SkuResponse(dict):
"""
The sku type.
"""
def __init__(__self__, *,
name: str,
plan: Optional['outputs.ResourceReferenceResponse'] = None):
"""
The sku type.
:param str name: The name.
:param 'ResourceReferenceResponse' plan: The reference to plan.
"""
pulumi.set(__self__, "name", name)
if plan is not None:
pulumi.set(__self__, "plan", plan)
@property
@pulumi.getter
def name(self) -> str:
"""
The name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def plan(self) -> Optional['outputs.ResourceReferenceResponse']:
"""
The reference to plan.
"""
return pulumi.get(self, "plan")
@pulumi.output_type
class WorkflowParameterResponse(dict):
"""
The workflow parameters.
"""
def __init__(__self__, *,
description: Optional[str] = None,
metadata: Optional[Any] = None,
type: Optional[str] = None,
value: Optional[Any] = None):
"""
The workflow parameters.
:param str description: The description.
:param Any metadata: The metadata.
:param str type: The type.
:param Any value: The value.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> Optional[Any]:
"""
The value.
"""
return pulumi.get(self, "value")
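# Minimal sketch of a typed workflow parameter. The "String" type value is an
# assumption based on common Logic Apps parameter types; this file does not
# enumerate the allowed strings.
def _example_workflow_parameter():
    return WorkflowParameterResponse(
        description="Target environment name",
        type="String",
        value="staging",
    )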
@pulumi.output_type
class WorkflowTriggerListCallbackUrlQueriesResponse(dict):
"""
Gets the workflow trigger callback URL query parameters.
"""
def __init__(__self__, *,
api_version: Optional[str] = None,
se: Optional[str] = None,
sig: Optional[str] = None,
sp: Optional[str] = None,
sv: Optional[str] = None):
"""
Gets the workflow trigger callback URL query parameters.
:param str api_version: The api version.
:param str se: The SAS timestamp.
:param str sig: The SAS signature.
:param str sp: The SAS permissions.
:param str sv: The SAS version.
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if se is not None:
pulumi.set(__self__, "se", se)
if sig is not None:
pulumi.set(__self__, "sig", sig)
if sp is not None:
pulumi.set(__self__, "sp", sp)
if sv is not None:
pulumi.set(__self__, "sv", sv)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[str]:
"""
The api version.
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter
def se(self) -> Optional[str]:
"""
The SAS timestamp.
"""
return pulumi.get(self, "se")
@property
@pulumi.getter
def sig(self) -> Optional[str]:
"""
The SAS signature.
"""
return pulumi.get(self, "sig")
@property
@pulumi.getter
def sp(self) -> Optional[str]:
"""
The SAS permissions.
"""
return pulumi.get(self, "sp")
@property
@pulumi.getter
def sv(self) -> Optional[str]:
"""
The SAS version.
"""
return pulumi.get(self, "sv")
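# Minimal sketch: the SAS fields above map onto URL query parameters, so a
# trigger callback query string can be reassembled with urllib. Mapping
# `api_version` to the "api-version" wire name is an assumption consistent
# with Azure REST conventions.
def _example_callback_query_string(queries: 'WorkflowTriggerListCallbackUrlQueriesResponse') -> str:
    import urllib.parse
    params = {
        "api-version": queries.api_version,
        "se": queries.se,
        "sig": queries.sig,
        "sp": queries.sp,
        "sv": queries.sv,
    }
    # Drop parameters the service did not return.
    return urllib.parse.urlencode({k: v for k, v in params.items() if v is not None})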
@pulumi.output_type
class WorkflowTriggerRecurrenceResponse(dict):
"""
The workflow trigger recurrence.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endTime":
suggest = "end_time"
elif key == "startTime":
suggest = "start_time"
elif key == "timeZone":
suggest = "time_zone"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WorkflowTriggerRecurrenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WorkflowTriggerRecurrenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WorkflowTriggerRecurrenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
end_time: Optional[str] = None,
frequency: Optional[str] = None,
interval: Optional[int] = None,
schedule: Optional['outputs.RecurrenceScheduleResponse'] = None,
start_time: Optional[str] = None,
time_zone: Optional[str] = None):
"""
The workflow trigger recurrence.
:param str end_time: The end time.
:param str frequency: The frequency.
:param int interval: The interval.
:param 'RecurrenceScheduleResponse' schedule: The recurrence schedule.
:param str start_time: The start time.
:param str time_zone: The time zone.
"""
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if frequency is not None:
pulumi.set(__self__, "frequency", frequency)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if schedule is not None:
pulumi.set(__self__, "schedule", schedule)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
if time_zone is not None:
pulumi.set(__self__, "time_zone", time_zone)
@property
@pulumi.getter(name="endTime")
def end_time(self) -> Optional[str]:
"""
The end time.
"""
return pulumi.get(self, "end_time")
@property
@pulumi.getter
def frequency(self) -> Optional[str]:
"""
The frequency.
"""
return pulumi.get(self, "frequency")
@property
@pulumi.getter
def interval(self) -> Optional[int]:
"""
The interval.
"""
return pulumi.get(self, "interval")
@property
@pulumi.getter
def schedule(self) -> Optional['outputs.RecurrenceScheduleResponse']:
"""
The recurrence schedule.
"""
return pulumi.get(self, "schedule")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[str]:
"""
The start time.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> Optional[str]:
"""
The time zone.
"""
return pulumi.get(self, "time_zone")
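# Illustrative sketch tying the two types together: a weekly trigger that
# reuses the schedule from _example_recurrence_schedule above. The "Week"
# frequency string is an assumption based on typical Logic Apps recurrence
# values.
def _example_trigger_recurrence():
    return WorkflowTriggerRecurrenceResponse(
        frequency="Week",
        interval=1,
        schedule=_example_recurrence_schedule(),
        time_zone="UTC",
    )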
@pulumi.output_type
class X12AcknowledgementSettingsResponse(dict):
"""
The X12 agreement acknowledgement settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementControlNumberLowerBound":
suggest = "acknowledgement_control_number_lower_bound"
elif key == "acknowledgementControlNumberUpperBound":
suggest = "acknowledgement_control_number_upper_bound"
elif key == "batchFunctionalAcknowledgements":
suggest = "batch_functional_acknowledgements"
elif key == "batchImplementationAcknowledgements":
suggest = "batch_implementation_acknowledgements"
elif key == "batchTechnicalAcknowledgements":
suggest = "batch_technical_acknowledgements"
elif key == "needFunctionalAcknowledgement":
suggest = "need_functional_acknowledgement"
elif key == "needImplementationAcknowledgement":
suggest = "need_implementation_acknowledgement"
elif key == "needLoopForValidMessages":
suggest = "need_loop_for_valid_messages"
elif key == "needTechnicalAcknowledgement":
suggest = "need_technical_acknowledgement"
elif key == "rolloverAcknowledgementControlNumber":
suggest = "rollover_acknowledgement_control_number"
elif key == "sendSynchronousAcknowledgement":
suggest = "send_synchronous_acknowledgement"
elif key == "acknowledgementControlNumberPrefix":
suggest = "acknowledgement_control_number_prefix"
elif key == "acknowledgementControlNumberSuffix":
suggest = "acknowledgement_control_number_suffix"
elif key == "functionalAcknowledgementVersion":
suggest = "functional_acknowledgement_version"
elif key == "implementationAcknowledgementVersion":
suggest = "implementation_acknowledgement_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12AcknowledgementSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12AcknowledgementSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12AcknowledgementSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_control_number_lower_bound: int,
acknowledgement_control_number_upper_bound: int,
batch_functional_acknowledgements: bool,
batch_implementation_acknowledgements: bool,
batch_technical_acknowledgements: bool,
need_functional_acknowledgement: bool,
need_implementation_acknowledgement: bool,
need_loop_for_valid_messages: bool,
need_technical_acknowledgement: bool,
rollover_acknowledgement_control_number: bool,
send_synchronous_acknowledgement: bool,
acknowledgement_control_number_prefix: Optional[str] = None,
acknowledgement_control_number_suffix: Optional[str] = None,
functional_acknowledgement_version: Optional[str] = None,
implementation_acknowledgement_version: Optional[str] = None):
"""
The X12 agreement acknowledgement settings.
:param int acknowledgement_control_number_lower_bound: The acknowledgement control number lower bound.
:param int acknowledgement_control_number_upper_bound: The acknowledgement control number upper bound.
:param bool batch_functional_acknowledgements: The value indicating whether to batch functional acknowledgements.
:param bool batch_implementation_acknowledgements: The value indicating whether to batch implementation acknowledgements.
:param bool batch_technical_acknowledgements: The value indicating whether to batch the technical acknowledgements.
:param bool need_functional_acknowledgement: The value indicating whether functional acknowledgement is needed.
:param bool need_implementation_acknowledgement: The value indicating whether implementation acknowledgement is needed.
:param bool need_loop_for_valid_messages: The value indicating whether a loop is needed for valid messages.
:param bool need_technical_acknowledgement: The value indicating whether technical acknowledgement is needed.
:param bool rollover_acknowledgement_control_number: The value indicating whether to rollover acknowledgement control number.
:param bool send_synchronous_acknowledgement: The value indicating whether to send synchronous acknowledgement.
:param str acknowledgement_control_number_prefix: The acknowledgement control number prefix.
:param str acknowledgement_control_number_suffix: The acknowledgement control number suffix.
:param str functional_acknowledgement_version: The functional acknowledgement version.
:param str implementation_acknowledgement_version: The implementation acknowledgement version.
"""
pulumi.set(__self__, "acknowledgement_control_number_lower_bound", acknowledgement_control_number_lower_bound)
pulumi.set(__self__, "acknowledgement_control_number_upper_bound", acknowledgement_control_number_upper_bound)
pulumi.set(__self__, "batch_functional_acknowledgements", batch_functional_acknowledgements)
pulumi.set(__self__, "batch_implementation_acknowledgements", batch_implementation_acknowledgements)
pulumi.set(__self__, "batch_technical_acknowledgements", batch_technical_acknowledgements)
pulumi.set(__self__, "need_functional_acknowledgement", need_functional_acknowledgement)
pulumi.set(__self__, "need_implementation_acknowledgement", need_implementation_acknowledgement)
pulumi.set(__self__, "need_loop_for_valid_messages", need_loop_for_valid_messages)
pulumi.set(__self__, "need_technical_acknowledgement", need_technical_acknowledgement)
pulumi.set(__self__, "rollover_acknowledgement_control_number", rollover_acknowledgement_control_number)
pulumi.set(__self__, "send_synchronous_acknowledgement", send_synchronous_acknowledgement)
if acknowledgement_control_number_prefix is not None:
pulumi.set(__self__, "acknowledgement_control_number_prefix", acknowledgement_control_number_prefix)
if acknowledgement_control_number_suffix is not None:
pulumi.set(__self__, "acknowledgement_control_number_suffix", acknowledgement_control_number_suffix)
if functional_acknowledgement_version is not None:
pulumi.set(__self__, "functional_acknowledgement_version", functional_acknowledgement_version)
if implementation_acknowledgement_version is not None:
pulumi.set(__self__, "implementation_acknowledgement_version", implementation_acknowledgement_version)
@property
@pulumi.getter(name="acknowledgementControlNumberLowerBound")
def acknowledgement_control_number_lower_bound(self) -> int:
"""
The acknowledgement control number lower bound.
"""
return pulumi.get(self, "acknowledgement_control_number_lower_bound")
@property
@pulumi.getter(name="acknowledgementControlNumberUpperBound")
def acknowledgement_control_number_upper_bound(self) -> int:
"""
The acknowledgement control number upper bound.
"""
return pulumi.get(self, "acknowledgement_control_number_upper_bound")
@property
@pulumi.getter(name="batchFunctionalAcknowledgements")
def batch_functional_acknowledgements(self) -> bool:
"""
The value indicating whether to batch functional acknowledgements.
"""
return pulumi.get(self, "batch_functional_acknowledgements")
@property
@pulumi.getter(name="batchImplementationAcknowledgements")
def batch_implementation_acknowledgements(self) -> bool:
"""
The value indicating whether to batch implementation acknowledgements.
"""
return pulumi.get(self, "batch_implementation_acknowledgements")
@property
@pulumi.getter(name="batchTechnicalAcknowledgements")
def batch_technical_acknowledgements(self) -> bool:
"""
The value indicating whether to batch the technical acknowledgements.
"""
return pulumi.get(self, "batch_technical_acknowledgements")
@property
@pulumi.getter(name="needFunctionalAcknowledgement")
def need_functional_acknowledgement(self) -> bool:
"""
The value indicating whether functional acknowledgement is needed.
"""
return pulumi.get(self, "need_functional_acknowledgement")
@property
@pulumi.getter(name="needImplementationAcknowledgement")
def need_implementation_acknowledgement(self) -> bool:
"""
The value indicating whether implementation acknowledgement is needed.
"""
return pulumi.get(self, "need_implementation_acknowledgement")
@property
@pulumi.getter(name="needLoopForValidMessages")
def need_loop_for_valid_messages(self) -> bool:
"""
The value indicating whether a loop is needed for valid messages.
"""
return pulumi.get(self, "need_loop_for_valid_messages")
@property
@pulumi.getter(name="needTechnicalAcknowledgement")
def need_technical_acknowledgement(self) -> bool:
"""
The value indicating whether technical acknowledgement is needed.
"""
return pulumi.get(self, "need_technical_acknowledgement")
@property
@pulumi.getter(name="rolloverAcknowledgementControlNumber")
def rollover_acknowledgement_control_number(self) -> bool:
"""
The value indicating whether to rollover acknowledgement control number.
"""
return pulumi.get(self, "rollover_acknowledgement_control_number")
@property
@pulumi.getter(name="sendSynchronousAcknowledgement")
def send_synchronous_acknowledgement(self) -> bool:
"""
The value indicating whether to send synchronous acknowledgement.
"""
return pulumi.get(self, "send_synchronous_acknowledgement")
@property
@pulumi.getter(name="acknowledgementControlNumberPrefix")
def acknowledgement_control_number_prefix(self) -> Optional[str]:
"""
The acknowledgement control number prefix.
"""
return pulumi.get(self, "acknowledgement_control_number_prefix")
@property
@pulumi.getter(name="acknowledgementControlNumberSuffix")
def acknowledgement_control_number_suffix(self) -> Optional[str]:
"""
The acknowledgement control number suffix.
"""
return pulumi.get(self, "acknowledgement_control_number_suffix")
@property
@pulumi.getter(name="functionalAcknowledgementVersion")
def functional_acknowledgement_version(self) -> Optional[str]:
"""
The functional acknowledgement version.
"""
return pulumi.get(self, "functional_acknowledgement_version")
@property
@pulumi.getter(name="implementationAcknowledgementVersion")
def implementation_acknowledgement_version(self) -> Optional[str]:
"""
The implementation acknowledgement version.
"""
return pulumi.get(self, "implementation_acknowledgement_version")
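# Minimal sketch of how the control-number fields interact: numbers advance
# from the lower bound toward the upper bound and, when the rollover flag is
# set, wrap back to the lower bound. The real increment logic lives in the
# Logic Apps service; this helper only illustrates the documented semantics.
def _next_ack_control_number(settings: 'X12AcknowledgementSettingsResponse', current: int) -> int:
    if current < settings.acknowledgement_control_number_upper_bound:
        return current + 1
    if settings.rollover_acknowledgement_control_number:
        return settings.acknowledgement_control_number_lower_bound
    raise ValueError("acknowledgement control number range exhausted")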
@pulumi.output_type
class X12AgreementContentResponse(dict):
"""
The X12 agreement content.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "receiveAgreement":
suggest = "receive_agreement"
elif key == "sendAgreement":
suggest = "send_agreement"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12AgreementContentResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12AgreementContentResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12AgreementContentResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
receive_agreement: 'outputs.X12OneWayAgreementResponse',
send_agreement: 'outputs.X12OneWayAgreementResponse'):
"""
The X12 agreement content.
:param 'X12OneWayAgreementResponse' receive_agreement: The X12 one-way receive agreement.
:param 'X12OneWayAgreementResponse' send_agreement: The X12 one-way send agreement.
"""
pulumi.set(__self__, "receive_agreement", receive_agreement)
pulumi.set(__self__, "send_agreement", send_agreement)
@property
@pulumi.getter(name="receiveAgreement")
def receive_agreement(self) -> 'outputs.X12OneWayAgreementResponse':
"""
The X12 one-way receive agreement.
"""
return pulumi.get(self, "receive_agreement")
@property
@pulumi.getter(name="sendAgreement")
def send_agreement(self) -> 'outputs.X12OneWayAgreementResponse':
"""
The X12 one-way send agreement.
"""
return pulumi.get(self, "send_agreement")
@pulumi.output_type
class X12DelimiterOverridesResponse(dict):
"""
The X12 delimiter override settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "replaceCharacter":
suggest = "replace_character"
elif key == "replaceSeparatorsInPayload":
suggest = "replace_separators_in_payload"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
elif key == "messageId":
suggest = "message_id"
elif key == "protocolVersion":
suggest = "protocol_version"
elif key == "targetNamespace":
suggest = "target_namespace"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12DelimiterOverridesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12DelimiterOverridesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12DelimiterOverridesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
component_separator: int,
data_element_separator: int,
replace_character: int,
replace_separators_in_payload: bool,
segment_terminator: int,
segment_terminator_suffix: str,
message_id: Optional[str] = None,
protocol_version: Optional[str] = None,
target_namespace: Optional[str] = None):
"""
The X12 delimiter override settings.
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
:param int replace_character: The replacement character.
:param bool replace_separators_in_payload: The value indicating whether to replace separators in payload.
:param int segment_terminator: The segment terminator.
:param str segment_terminator_suffix: The segment terminator suffix.
:param str message_id: The message id.
:param str protocol_version: The protocol version.
:param str target_namespace: The target namespace on which these delimiter settings have to be applied.
"""
pulumi.set(__self__, "component_separator", component_separator)
pulumi.set(__self__, "data_element_separator", data_element_separator)
pulumi.set(__self__, "replace_character", replace_character)
pulumi.set(__self__, "replace_separators_in_payload", replace_separators_in_payload)
pulumi.set(__self__, "segment_terminator", segment_terminator)
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
if protocol_version is not None:
pulumi.set(__self__, "protocol_version", protocol_version)
if target_namespace is not None:
pulumi.set(__self__, "target_namespace", target_namespace)
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> int:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> int:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="replaceCharacter")
def replace_character(self) -> int:
"""
The replacement character.
"""
return pulumi.get(self, "replace_character")
@property
@pulumi.getter(name="replaceSeparatorsInPayload")
def replace_separators_in_payload(self) -> bool:
"""
The value indicating whether to replace separators in payload.
"""
return pulumi.get(self, "replace_separators_in_payload")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> int:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> str:
"""
The segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="protocolVersion")
def protocol_version(self) -> Optional[str]:
"""
The protocol version.
"""
return pulumi.get(self, "protocol_version")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> Optional[str]:
"""
The target namespace on which these delimiter settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
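# Minimal sketch: the separator and terminator fields above are ints, which
# this example treats as character codes (an assumption consistent with common
# X12 configurations, e.g. 58 for ':' and 126 for '~').
def _example_delimiters_as_chars(overrides: 'X12DelimiterOverridesResponse') -> dict:
    return {
        "component_separator": chr(overrides.component_separator),
        "data_element_separator": chr(overrides.data_element_separator),
        "segment_terminator": chr(overrides.segment_terminator),
    }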
@pulumi.output_type
class X12EnvelopeOverrideResponse(dict):
"""
The X12 envelope override settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dateFormat":
suggest = "date_format"
elif key == "headerVersion":
suggest = "header_version"
elif key == "messageId":
suggest = "message_id"
elif key == "protocolVersion":
suggest = "protocol_version"
elif key == "receiverApplicationId":
suggest = "receiver_application_id"
elif key == "responsibleAgencyCode":
suggest = "responsible_agency_code"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "targetNamespace":
suggest = "target_namespace"
elif key == "timeFormat":
suggest = "time_format"
elif key == "functionalIdentifierCode":
suggest = "functional_identifier_code"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12EnvelopeOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12EnvelopeOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12EnvelopeOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
date_format: str,
header_version: str,
message_id: str,
protocol_version: str,
receiver_application_id: str,
responsible_agency_code: str,
sender_application_id: str,
target_namespace: str,
time_format: str,
functional_identifier_code: Optional[str] = None):
"""
The X12 envelope override settings.
:param str date_format: The date format.
:param str header_version: The header version.
:param str message_id: The message id on which these envelope settings have to be applied.
:param str protocol_version: The protocol version on which these envelope settings have to be applied.
:param str receiver_application_id: The receiver application id.
:param str responsible_agency_code: The responsible agency code.
:param str sender_application_id: The sender application id.
:param str target_namespace: The target namespace on which these envelope settings have to be applied.
:param str time_format: The time format.
:param str functional_identifier_code: The functional identifier code.
"""
pulumi.set(__self__, "date_format", date_format)
pulumi.set(__self__, "header_version", header_version)
pulumi.set(__self__, "message_id", message_id)
pulumi.set(__self__, "protocol_version", protocol_version)
pulumi.set(__self__, "receiver_application_id", receiver_application_id)
pulumi.set(__self__, "responsible_agency_code", responsible_agency_code)
pulumi.set(__self__, "sender_application_id", sender_application_id)
pulumi.set(__self__, "target_namespace", target_namespace)
pulumi.set(__self__, "time_format", time_format)
if functional_identifier_code is not None:
pulumi.set(__self__, "functional_identifier_code", functional_identifier_code)
@property
@pulumi.getter(name="dateFormat")
def date_format(self) -> str:
"""
The date format.
"""
return pulumi.get(self, "date_format")
@property
@pulumi.getter(name="headerVersion")
def header_version(self) -> str:
"""
The header version.
"""
return pulumi.get(self, "header_version")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> str:
"""
The message id on which these envelope settings have to be applied.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="protocolVersion")
def protocol_version(self) -> str:
"""
The protocol version on which these envelope settings have to be applied.
"""
return pulumi.get(self, "protocol_version")
@property
@pulumi.getter(name="receiverApplicationId")
def receiver_application_id(self) -> str:
"""
The receiver application id.
"""
return pulumi.get(self, "receiver_application_id")
@property
@pulumi.getter(name="responsibleAgencyCode")
def responsible_agency_code(self) -> str:
"""
The responsible agency code.
"""
return pulumi.get(self, "responsible_agency_code")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> str:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> str:
"""
The target namespace on which these envelope settings have to be applied.
"""
return pulumi.get(self, "target_namespace")
@property
@pulumi.getter(name="timeFormat")
def time_format(self) -> str:
"""
The time format.
"""
return pulumi.get(self, "time_format")
@property
@pulumi.getter(name="functionalIdentifierCode")
def functional_identifier_code(self) -> Optional[str]:
"""
The functional identifier code.
"""
return pulumi.get(self, "functional_identifier_code")
@pulumi.output_type
class X12EnvelopeSettingsResponse(dict):
"""
The X12 agreement envelope settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "controlStandardsId":
suggest = "control_standards_id"
elif key == "controlVersionNumber":
suggest = "control_version_number"
elif key == "enableDefaultGroupHeaders":
suggest = "enable_default_group_headers"
elif key == "groupControlNumberLowerBound":
suggest = "group_control_number_lower_bound"
elif key == "groupControlNumberUpperBound":
suggest = "group_control_number_upper_bound"
elif key == "groupHeaderAgencyCode":
suggest = "group_header_agency_code"
elif key == "groupHeaderDateFormat":
suggest = "group_header_date_format"
elif key == "groupHeaderTimeFormat":
suggest = "group_header_time_format"
elif key == "groupHeaderVersion":
suggest = "group_header_version"
elif key == "interchangeControlNumberLowerBound":
suggest = "interchange_control_number_lower_bound"
elif key == "interchangeControlNumberUpperBound":
suggest = "interchange_control_number_upper_bound"
elif key == "overwriteExistingTransactionSetControlNumber":
suggest = "overwrite_existing_transaction_set_control_number"
elif key == "receiverApplicationId":
suggest = "receiver_application_id"
elif key == "rolloverGroupControlNumber":
suggest = "rollover_group_control_number"
elif key == "rolloverInterchangeControlNumber":
suggest = "rollover_interchange_control_number"
elif key == "rolloverTransactionSetControlNumber":
suggest = "rollover_transaction_set_control_number"
elif key == "senderApplicationId":
suggest = "sender_application_id"
elif key == "transactionSetControlNumberLowerBound":
suggest = "transaction_set_control_number_lower_bound"
elif key == "transactionSetControlNumberUpperBound":
suggest = "transaction_set_control_number_upper_bound"
elif key == "usageIndicator":
suggest = "usage_indicator"
elif key == "useControlStandardsIdAsRepetitionCharacter":
suggest = "use_control_standards_id_as_repetition_character"
elif key == "functionalGroupId":
suggest = "functional_group_id"
elif key == "transactionSetControlNumberPrefix":
suggest = "transaction_set_control_number_prefix"
elif key == "transactionSetControlNumberSuffix":
suggest = "transaction_set_control_number_suffix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12EnvelopeSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12EnvelopeSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12EnvelopeSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
control_standards_id: int,
control_version_number: str,
enable_default_group_headers: bool,
group_control_number_lower_bound: int,
group_control_number_upper_bound: int,
group_header_agency_code: str,
group_header_date_format: str,
group_header_time_format: str,
group_header_version: str,
interchange_control_number_lower_bound: int,
interchange_control_number_upper_bound: int,
overwrite_existing_transaction_set_control_number: bool,
receiver_application_id: str,
rollover_group_control_number: bool,
rollover_interchange_control_number: bool,
rollover_transaction_set_control_number: bool,
sender_application_id: str,
transaction_set_control_number_lower_bound: int,
transaction_set_control_number_upper_bound: int,
usage_indicator: str,
use_control_standards_id_as_repetition_character: bool,
functional_group_id: Optional[str] = None,
transaction_set_control_number_prefix: Optional[str] = None,
transaction_set_control_number_suffix: Optional[str] = None):
"""
The X12 agreement envelope settings.
        :param int control_standards_id: The control standards id.
:param str control_version_number: The control version number.
:param bool enable_default_group_headers: The value indicating whether to enable default group headers.
:param int group_control_number_lower_bound: The group control number lower bound.
:param int group_control_number_upper_bound: The group control number upper bound.
:param str group_header_agency_code: The group header agency code.
:param str group_header_date_format: The group header date format.
:param str group_header_time_format: The group header time format.
:param str group_header_version: The group header version.
:param int interchange_control_number_lower_bound: The interchange control number lower bound.
:param int interchange_control_number_upper_bound: The interchange control number upper bound.
:param bool overwrite_existing_transaction_set_control_number: The value indicating whether to overwrite existing transaction set control number.
:param str receiver_application_id: The receiver application id.
:param bool rollover_group_control_number: The value indicating whether to rollover group control number.
:param bool rollover_interchange_control_number: The value indicating whether to rollover interchange control number.
:param bool rollover_transaction_set_control_number: The value indicating whether to rollover transaction set control number.
:param str sender_application_id: The sender application id.
:param int transaction_set_control_number_lower_bound: The transaction set control number lower bound.
:param int transaction_set_control_number_upper_bound: The transaction set control number upper bound.
:param str usage_indicator: The usage indicator.
:param bool use_control_standards_id_as_repetition_character: The value indicating whether to use control standards id as repetition character.
:param str functional_group_id: The functional group id.
:param str transaction_set_control_number_prefix: The transaction set control number prefix.
:param str transaction_set_control_number_suffix: The transaction set control number suffix.
"""
pulumi.set(__self__, "control_standards_id", control_standards_id)
pulumi.set(__self__, "control_version_number", control_version_number)
pulumi.set(__self__, "enable_default_group_headers", enable_default_group_headers)
pulumi.set(__self__, "group_control_number_lower_bound", group_control_number_lower_bound)
pulumi.set(__self__, "group_control_number_upper_bound", group_control_number_upper_bound)
pulumi.set(__self__, "group_header_agency_code", group_header_agency_code)
pulumi.set(__self__, "group_header_date_format", group_header_date_format)
pulumi.set(__self__, "group_header_time_format", group_header_time_format)
pulumi.set(__self__, "group_header_version", group_header_version)
pulumi.set(__self__, "interchange_control_number_lower_bound", interchange_control_number_lower_bound)
pulumi.set(__self__, "interchange_control_number_upper_bound", interchange_control_number_upper_bound)
pulumi.set(__self__, "overwrite_existing_transaction_set_control_number", overwrite_existing_transaction_set_control_number)
pulumi.set(__self__, "receiver_application_id", receiver_application_id)
pulumi.set(__self__, "rollover_group_control_number", rollover_group_control_number)
pulumi.set(__self__, "rollover_interchange_control_number", rollover_interchange_control_number)
pulumi.set(__self__, "rollover_transaction_set_control_number", rollover_transaction_set_control_number)
pulumi.set(__self__, "sender_application_id", sender_application_id)
pulumi.set(__self__, "transaction_set_control_number_lower_bound", transaction_set_control_number_lower_bound)
pulumi.set(__self__, "transaction_set_control_number_upper_bound", transaction_set_control_number_upper_bound)
pulumi.set(__self__, "usage_indicator", usage_indicator)
pulumi.set(__self__, "use_control_standards_id_as_repetition_character", use_control_standards_id_as_repetition_character)
if functional_group_id is not None:
pulumi.set(__self__, "functional_group_id", functional_group_id)
if transaction_set_control_number_prefix is not None:
pulumi.set(__self__, "transaction_set_control_number_prefix", transaction_set_control_number_prefix)
if transaction_set_control_number_suffix is not None:
pulumi.set(__self__, "transaction_set_control_number_suffix", transaction_set_control_number_suffix)
@property
@pulumi.getter(name="controlStandardsId")
def control_standards_id(self) -> int:
"""
        The control standards id.
"""
return pulumi.get(self, "control_standards_id")
@property
@pulumi.getter(name="controlVersionNumber")
def control_version_number(self) -> str:
"""
The control version number.
"""
return pulumi.get(self, "control_version_number")
@property
@pulumi.getter(name="enableDefaultGroupHeaders")
def enable_default_group_headers(self) -> bool:
"""
The value indicating whether to enable default group headers.
"""
return pulumi.get(self, "enable_default_group_headers")
@property
@pulumi.getter(name="groupControlNumberLowerBound")
def group_control_number_lower_bound(self) -> int:
"""
The group control number lower bound.
"""
return pulumi.get(self, "group_control_number_lower_bound")
@property
@pulumi.getter(name="groupControlNumberUpperBound")
def group_control_number_upper_bound(self) -> int:
"""
The group control number upper bound.
"""
return pulumi.get(self, "group_control_number_upper_bound")
@property
@pulumi.getter(name="groupHeaderAgencyCode")
def group_header_agency_code(self) -> str:
"""
The group header agency code.
"""
return pulumi.get(self, "group_header_agency_code")
@property
@pulumi.getter(name="groupHeaderDateFormat")
def group_header_date_format(self) -> str:
"""
The group header date format.
"""
return pulumi.get(self, "group_header_date_format")
@property
@pulumi.getter(name="groupHeaderTimeFormat")
def group_header_time_format(self) -> str:
"""
The group header time format.
"""
return pulumi.get(self, "group_header_time_format")
@property
@pulumi.getter(name="groupHeaderVersion")
def group_header_version(self) -> str:
"""
The group header version.
"""
return pulumi.get(self, "group_header_version")
@property
@pulumi.getter(name="interchangeControlNumberLowerBound")
def interchange_control_number_lower_bound(self) -> int:
"""
The interchange control number lower bound.
"""
return pulumi.get(self, "interchange_control_number_lower_bound")
@property
@pulumi.getter(name="interchangeControlNumberUpperBound")
def interchange_control_number_upper_bound(self) -> int:
"""
The interchange control number upper bound.
"""
return pulumi.get(self, "interchange_control_number_upper_bound")
@property
@pulumi.getter(name="overwriteExistingTransactionSetControlNumber")
def overwrite_existing_transaction_set_control_number(self) -> bool:
"""
The value indicating whether to overwrite existing transaction set control number.
"""
return pulumi.get(self, "overwrite_existing_transaction_set_control_number")
@property
@pulumi.getter(name="receiverApplicationId")
def receiver_application_id(self) -> str:
"""
The receiver application id.
"""
return pulumi.get(self, "receiver_application_id")
@property
@pulumi.getter(name="rolloverGroupControlNumber")
def rollover_group_control_number(self) -> bool:
"""
The value indicating whether to rollover group control number.
"""
return pulumi.get(self, "rollover_group_control_number")
@property
@pulumi.getter(name="rolloverInterchangeControlNumber")
def rollover_interchange_control_number(self) -> bool:
"""
The value indicating whether to rollover interchange control number.
"""
return pulumi.get(self, "rollover_interchange_control_number")
@property
@pulumi.getter(name="rolloverTransactionSetControlNumber")
def rollover_transaction_set_control_number(self) -> bool:
"""
The value indicating whether to rollover transaction set control number.
"""
return pulumi.get(self, "rollover_transaction_set_control_number")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> str:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@property
@pulumi.getter(name="transactionSetControlNumberLowerBound")
def transaction_set_control_number_lower_bound(self) -> int:
"""
The transaction set control number lower bound.
"""
return pulumi.get(self, "transaction_set_control_number_lower_bound")
@property
@pulumi.getter(name="transactionSetControlNumberUpperBound")
def transaction_set_control_number_upper_bound(self) -> int:
"""
The transaction set control number upper bound.
"""
return pulumi.get(self, "transaction_set_control_number_upper_bound")
@property
@pulumi.getter(name="usageIndicator")
def usage_indicator(self) -> str:
"""
The usage indicator.
"""
return pulumi.get(self, "usage_indicator")
@property
@pulumi.getter(name="useControlStandardsIdAsRepetitionCharacter")
def use_control_standards_id_as_repetition_character(self) -> bool:
"""
The value indicating whether to use control standards id as repetition character.
"""
return pulumi.get(self, "use_control_standards_id_as_repetition_character")
@property
@pulumi.getter(name="functionalGroupId")
def functional_group_id(self) -> Optional[str]:
"""
The functional group id.
"""
return pulumi.get(self, "functional_group_id")
@property
@pulumi.getter(name="transactionSetControlNumberPrefix")
def transaction_set_control_number_prefix(self) -> Optional[str]:
"""
The transaction set control number prefix.
"""
return pulumi.get(self, "transaction_set_control_number_prefix")
@property
@pulumi.getter(name="transactionSetControlNumberSuffix")
def transaction_set_control_number_suffix(self) -> Optional[str]:
"""
The transaction set control number suffix.
"""
return pulumi.get(self, "transaction_set_control_number_suffix")
@pulumi.output_type
class X12FramingSettingsResponse(dict):
"""
The X12 agreement framing settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "characterSet":
suggest = "character_set"
elif key == "componentSeparator":
suggest = "component_separator"
elif key == "dataElementSeparator":
suggest = "data_element_separator"
elif key == "replaceCharacter":
suggest = "replace_character"
elif key == "replaceSeparatorsInPayload":
suggest = "replace_separators_in_payload"
elif key == "segmentTerminator":
suggest = "segment_terminator"
elif key == "segmentTerminatorSuffix":
suggest = "segment_terminator_suffix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12FramingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12FramingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12FramingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
character_set: str,
component_separator: int,
data_element_separator: int,
replace_character: int,
replace_separators_in_payload: bool,
segment_terminator: int,
segment_terminator_suffix: str):
"""
The X12 agreement framing settings.
:param str character_set: The X12 character set.
:param int component_separator: The component separator.
:param int data_element_separator: The data element separator.
:param int replace_character: The replacement character.
:param bool replace_separators_in_payload: The value indicating whether to replace separators in payload.
:param int segment_terminator: The segment terminator.
:param str segment_terminator_suffix: The segment terminator suffix.
"""
pulumi.set(__self__, "character_set", character_set)
pulumi.set(__self__, "component_separator", component_separator)
pulumi.set(__self__, "data_element_separator", data_element_separator)
pulumi.set(__self__, "replace_character", replace_character)
pulumi.set(__self__, "replace_separators_in_payload", replace_separators_in_payload)
pulumi.set(__self__, "segment_terminator", segment_terminator)
pulumi.set(__self__, "segment_terminator_suffix", segment_terminator_suffix)
@property
@pulumi.getter(name="characterSet")
def character_set(self) -> str:
"""
The X12 character set.
"""
return pulumi.get(self, "character_set")
@property
@pulumi.getter(name="componentSeparator")
def component_separator(self) -> int:
"""
The component separator.
"""
return pulumi.get(self, "component_separator")
@property
@pulumi.getter(name="dataElementSeparator")
def data_element_separator(self) -> int:
"""
The data element separator.
"""
return pulumi.get(self, "data_element_separator")
@property
@pulumi.getter(name="replaceCharacter")
def replace_character(self) -> int:
"""
The replacement character.
"""
return pulumi.get(self, "replace_character")
@property
@pulumi.getter(name="replaceSeparatorsInPayload")
def replace_separators_in_payload(self) -> bool:
"""
The value indicating whether to replace separators in payload.
"""
return pulumi.get(self, "replace_separators_in_payload")
@property
@pulumi.getter(name="segmentTerminator")
def segment_terminator(self) -> int:
"""
The segment terminator.
"""
return pulumi.get(self, "segment_terminator")
@property
@pulumi.getter(name="segmentTerminatorSuffix")
def segment_terminator_suffix(self) -> str:
"""
The segment terminator suffix.
"""
return pulumi.get(self, "segment_terminator_suffix")
@pulumi.output_type
class X12MessageFilterResponse(dict):
"""
The X12 message filter for odata query.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageFilterType":
suggest = "message_filter_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12MessageFilterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12MessageFilterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12MessageFilterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_filter_type: str):
"""
The X12 message filter for odata query.
:param str message_filter_type: The message filter type.
"""
pulumi.set(__self__, "message_filter_type", message_filter_type)
@property
@pulumi.getter(name="messageFilterType")
def message_filter_type(self) -> str:
"""
The message filter type.
"""
return pulumi.get(self, "message_filter_type")
@pulumi.output_type
class X12MessageIdentifierResponse(dict):
"""
The X12 message identifier.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageId":
suggest = "message_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12MessageIdentifierResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12MessageIdentifierResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12MessageIdentifierResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_id: str):
"""
The X12 message identifier.
:param str message_id: The message id.
"""
pulumi.set(__self__, "message_id", message_id)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> str:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@pulumi.output_type
class X12OneWayAgreementResponse(dict):
"""
The X12 one-way agreement.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "protocolSettings":
suggest = "protocol_settings"
elif key == "receiverBusinessIdentity":
suggest = "receiver_business_identity"
elif key == "senderBusinessIdentity":
suggest = "sender_business_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12OneWayAgreementResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12OneWayAgreementResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12OneWayAgreementResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
protocol_settings: 'outputs.X12ProtocolSettingsResponse',
receiver_business_identity: 'outputs.BusinessIdentityResponse',
sender_business_identity: 'outputs.BusinessIdentityResponse'):
"""
The X12 one-way agreement.
:param 'X12ProtocolSettingsResponse' protocol_settings: The X12 protocol settings.
        :param 'BusinessIdentityResponse' receiver_business_identity: The receiver business identity.
        :param 'BusinessIdentityResponse' sender_business_identity: The sender business identity.
"""
pulumi.set(__self__, "protocol_settings", protocol_settings)
pulumi.set(__self__, "receiver_business_identity", receiver_business_identity)
pulumi.set(__self__, "sender_business_identity", sender_business_identity)
@property
@pulumi.getter(name="protocolSettings")
def protocol_settings(self) -> 'outputs.X12ProtocolSettingsResponse':
"""
The X12 protocol settings.
"""
return pulumi.get(self, "protocol_settings")
@property
@pulumi.getter(name="receiverBusinessIdentity")
def receiver_business_identity(self) -> 'outputs.BusinessIdentityResponse':
"""
        The receiver business identity.
"""
return pulumi.get(self, "receiver_business_identity")
@property
@pulumi.getter(name="senderBusinessIdentity")
def sender_business_identity(self) -> 'outputs.BusinessIdentityResponse':
"""
        The sender business identity.
"""
return pulumi.get(self, "sender_business_identity")
@pulumi.output_type
class X12ProcessingSettingsResponse(dict):
"""
The X12 processing settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "convertImpliedDecimal":
suggest = "convert_implied_decimal"
elif key == "createEmptyXmlTagsForTrailingSeparators":
suggest = "create_empty_xml_tags_for_trailing_separators"
elif key == "maskSecurityInfo":
suggest = "mask_security_info"
elif key == "preserveInterchange":
suggest = "preserve_interchange"
elif key == "suspendInterchangeOnError":
suggest = "suspend_interchange_on_error"
elif key == "useDotAsDecimalSeparator":
suggest = "use_dot_as_decimal_separator"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ProcessingSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ProcessingSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ProcessingSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
convert_implied_decimal: bool,
create_empty_xml_tags_for_trailing_separators: bool,
mask_security_info: bool,
preserve_interchange: bool,
suspend_interchange_on_error: bool,
use_dot_as_decimal_separator: bool):
"""
The X12 processing settings.
:param bool convert_implied_decimal: The value indicating whether to convert numerical type to implied decimal.
:param bool create_empty_xml_tags_for_trailing_separators: The value indicating whether to create empty xml tags for trailing separators.
:param bool mask_security_info: The value indicating whether to mask security information.
:param bool preserve_interchange: The value indicating whether to preserve interchange.
:param bool suspend_interchange_on_error: The value indicating whether to suspend interchange on error.
:param bool use_dot_as_decimal_separator: The value indicating whether to use dot as decimal separator.
"""
pulumi.set(__self__, "convert_implied_decimal", convert_implied_decimal)
pulumi.set(__self__, "create_empty_xml_tags_for_trailing_separators", create_empty_xml_tags_for_trailing_separators)
pulumi.set(__self__, "mask_security_info", mask_security_info)
pulumi.set(__self__, "preserve_interchange", preserve_interchange)
pulumi.set(__self__, "suspend_interchange_on_error", suspend_interchange_on_error)
pulumi.set(__self__, "use_dot_as_decimal_separator", use_dot_as_decimal_separator)
@property
@pulumi.getter(name="convertImpliedDecimal")
def convert_implied_decimal(self) -> bool:
"""
The value indicating whether to convert numerical type to implied decimal.
"""
return pulumi.get(self, "convert_implied_decimal")
@property
@pulumi.getter(name="createEmptyXmlTagsForTrailingSeparators")
def create_empty_xml_tags_for_trailing_separators(self) -> bool:
"""
The value indicating whether to create empty xml tags for trailing separators.
"""
return pulumi.get(self, "create_empty_xml_tags_for_trailing_separators")
@property
@pulumi.getter(name="maskSecurityInfo")
def mask_security_info(self) -> bool:
"""
The value indicating whether to mask security information.
"""
return pulumi.get(self, "mask_security_info")
@property
@pulumi.getter(name="preserveInterchange")
def preserve_interchange(self) -> bool:
"""
The value indicating whether to preserve interchange.
"""
return pulumi.get(self, "preserve_interchange")
@property
@pulumi.getter(name="suspendInterchangeOnError")
def suspend_interchange_on_error(self) -> bool:
"""
The value indicating whether to suspend interchange on error.
"""
return pulumi.get(self, "suspend_interchange_on_error")
@property
@pulumi.getter(name="useDotAsDecimalSeparator")
def use_dot_as_decimal_separator(self) -> bool:
"""
The value indicating whether to use dot as decimal separator.
"""
return pulumi.get(self, "use_dot_as_decimal_separator")
@pulumi.output_type
class X12ProtocolSettingsResponse(dict):
"""
The X12 agreement protocol settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acknowledgementSettings":
suggest = "acknowledgement_settings"
elif key == "envelopeSettings":
suggest = "envelope_settings"
elif key == "framingSettings":
suggest = "framing_settings"
elif key == "messageFilter":
suggest = "message_filter"
elif key == "processingSettings":
suggest = "processing_settings"
elif key == "schemaReferences":
suggest = "schema_references"
elif key == "securitySettings":
suggest = "security_settings"
elif key == "validationSettings":
suggest = "validation_settings"
elif key == "envelopeOverrides":
suggest = "envelope_overrides"
elif key == "messageFilterList":
suggest = "message_filter_list"
elif key == "validationOverrides":
suggest = "validation_overrides"
elif key == "x12DelimiterOverrides":
suggest = "x12_delimiter_overrides"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ProtocolSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ProtocolSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ProtocolSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acknowledgement_settings: 'outputs.X12AcknowledgementSettingsResponse',
envelope_settings: 'outputs.X12EnvelopeSettingsResponse',
framing_settings: 'outputs.X12FramingSettingsResponse',
message_filter: 'outputs.X12MessageFilterResponse',
processing_settings: 'outputs.X12ProcessingSettingsResponse',
schema_references: Sequence['outputs.X12SchemaReferenceResponse'],
security_settings: 'outputs.X12SecuritySettingsResponse',
validation_settings: 'outputs.X12ValidationSettingsResponse',
envelope_overrides: Optional[Sequence['outputs.X12EnvelopeOverrideResponse']] = None,
message_filter_list: Optional[Sequence['outputs.X12MessageIdentifierResponse']] = None,
validation_overrides: Optional[Sequence['outputs.X12ValidationOverrideResponse']] = None,
x12_delimiter_overrides: Optional[Sequence['outputs.X12DelimiterOverridesResponse']] = None):
"""
The X12 agreement protocol settings.
:param 'X12AcknowledgementSettingsResponse' acknowledgement_settings: The X12 acknowledgment settings.
:param 'X12EnvelopeSettingsResponse' envelope_settings: The X12 envelope settings.
:param 'X12FramingSettingsResponse' framing_settings: The X12 framing settings.
:param 'X12MessageFilterResponse' message_filter: The X12 message filter.
:param 'X12ProcessingSettingsResponse' processing_settings: The X12 processing settings.
:param Sequence['X12SchemaReferenceResponse'] schema_references: The X12 schema references.
:param 'X12SecuritySettingsResponse' security_settings: The X12 security settings.
:param 'X12ValidationSettingsResponse' validation_settings: The X12 validation settings.
:param Sequence['X12EnvelopeOverrideResponse'] envelope_overrides: The X12 envelope override settings.
:param Sequence['X12MessageIdentifierResponse'] message_filter_list: The X12 message filter list.
:param Sequence['X12ValidationOverrideResponse'] validation_overrides: The X12 validation override settings.
:param Sequence['X12DelimiterOverridesResponse'] x12_delimiter_overrides: The X12 delimiter override settings.
"""
pulumi.set(__self__, "acknowledgement_settings", acknowledgement_settings)
pulumi.set(__self__, "envelope_settings", envelope_settings)
pulumi.set(__self__, "framing_settings", framing_settings)
pulumi.set(__self__, "message_filter", message_filter)
pulumi.set(__self__, "processing_settings", processing_settings)
pulumi.set(__self__, "schema_references", schema_references)
pulumi.set(__self__, "security_settings", security_settings)
pulumi.set(__self__, "validation_settings", validation_settings)
if envelope_overrides is not None:
pulumi.set(__self__, "envelope_overrides", envelope_overrides)
if message_filter_list is not None:
pulumi.set(__self__, "message_filter_list", message_filter_list)
if validation_overrides is not None:
pulumi.set(__self__, "validation_overrides", validation_overrides)
if x12_delimiter_overrides is not None:
pulumi.set(__self__, "x12_delimiter_overrides", x12_delimiter_overrides)
@property
@pulumi.getter(name="acknowledgementSettings")
def acknowledgement_settings(self) -> 'outputs.X12AcknowledgementSettingsResponse':
"""
The X12 acknowledgment settings.
"""
return pulumi.get(self, "acknowledgement_settings")
@property
@pulumi.getter(name="envelopeSettings")
def envelope_settings(self) -> 'outputs.X12EnvelopeSettingsResponse':
"""
The X12 envelope settings.
"""
return pulumi.get(self, "envelope_settings")
@property
@pulumi.getter(name="framingSettings")
def framing_settings(self) -> 'outputs.X12FramingSettingsResponse':
"""
The X12 framing settings.
"""
return pulumi.get(self, "framing_settings")
@property
@pulumi.getter(name="messageFilter")
def message_filter(self) -> 'outputs.X12MessageFilterResponse':
"""
The X12 message filter.
"""
return pulumi.get(self, "message_filter")
@property
@pulumi.getter(name="processingSettings")
def processing_settings(self) -> 'outputs.X12ProcessingSettingsResponse':
"""
The X12 processing settings.
"""
return pulumi.get(self, "processing_settings")
@property
@pulumi.getter(name="schemaReferences")
def schema_references(self) -> Sequence['outputs.X12SchemaReferenceResponse']:
"""
The X12 schema references.
"""
return pulumi.get(self, "schema_references")
@property
@pulumi.getter(name="securitySettings")
def security_settings(self) -> 'outputs.X12SecuritySettingsResponse':
"""
The X12 security settings.
"""
return pulumi.get(self, "security_settings")
@property
@pulumi.getter(name="validationSettings")
def validation_settings(self) -> 'outputs.X12ValidationSettingsResponse':
"""
The X12 validation settings.
"""
return pulumi.get(self, "validation_settings")
@property
@pulumi.getter(name="envelopeOverrides")
def envelope_overrides(self) -> Optional[Sequence['outputs.X12EnvelopeOverrideResponse']]:
"""
The X12 envelope override settings.
"""
return pulumi.get(self, "envelope_overrides")
@property
@pulumi.getter(name="messageFilterList")
def message_filter_list(self) -> Optional[Sequence['outputs.X12MessageIdentifierResponse']]:
"""
The X12 message filter list.
"""
return pulumi.get(self, "message_filter_list")
@property
@pulumi.getter(name="validationOverrides")
def validation_overrides(self) -> Optional[Sequence['outputs.X12ValidationOverrideResponse']]:
"""
The X12 validation override settings.
"""
return pulumi.get(self, "validation_overrides")
@property
@pulumi.getter(name="x12DelimiterOverrides")
def x12_delimiter_overrides(self) -> Optional[Sequence['outputs.X12DelimiterOverridesResponse']]:
"""
The X12 delimiter override settings.
"""
return pulumi.get(self, "x12_delimiter_overrides")
@pulumi.output_type
class X12SchemaReferenceResponse(dict):
"""
The X12 schema reference.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "messageId":
suggest = "message_id"
elif key == "schemaName":
suggest = "schema_name"
elif key == "schemaVersion":
suggest = "schema_version"
elif key == "senderApplicationId":
suggest = "sender_application_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12SchemaReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12SchemaReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12SchemaReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
message_id: str,
schema_name: str,
schema_version: str,
sender_application_id: Optional[str] = None):
"""
The X12 schema reference.
:param str message_id: The message id.
:param str schema_name: The schema name.
:param str schema_version: The schema version.
:param str sender_application_id: The sender application id.
"""
pulumi.set(__self__, "message_id", message_id)
pulumi.set(__self__, "schema_name", schema_name)
pulumi.set(__self__, "schema_version", schema_version)
if sender_application_id is not None:
pulumi.set(__self__, "sender_application_id", sender_application_id)
@property
@pulumi.getter(name="messageId")
def message_id(self) -> str:
"""
The message id.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="schemaName")
def schema_name(self) -> str:
"""
The schema name.
"""
return pulumi.get(self, "schema_name")
@property
@pulumi.getter(name="schemaVersion")
def schema_version(self) -> str:
"""
The schema version.
"""
return pulumi.get(self, "schema_version")
@property
@pulumi.getter(name="senderApplicationId")
def sender_application_id(self) -> Optional[str]:
"""
The sender application id.
"""
return pulumi.get(self, "sender_application_id")
@pulumi.output_type
class X12SecuritySettingsResponse(dict):
"""
The X12 agreement security settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authorizationQualifier":
suggest = "authorization_qualifier"
elif key == "securityQualifier":
suggest = "security_qualifier"
elif key == "authorizationValue":
suggest = "authorization_value"
elif key == "passwordValue":
suggest = "password_value"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12SecuritySettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12SecuritySettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12SecuritySettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
authorization_qualifier: str,
security_qualifier: str,
authorization_value: Optional[str] = None,
password_value: Optional[str] = None):
"""
The X12 agreement security settings.
:param str authorization_qualifier: The authorization qualifier.
:param str security_qualifier: The security qualifier.
:param str authorization_value: The authorization value.
:param str password_value: The password value.
"""
pulumi.set(__self__, "authorization_qualifier", authorization_qualifier)
pulumi.set(__self__, "security_qualifier", security_qualifier)
if authorization_value is not None:
pulumi.set(__self__, "authorization_value", authorization_value)
if password_value is not None:
pulumi.set(__self__, "password_value", password_value)
@property
@pulumi.getter(name="authorizationQualifier")
def authorization_qualifier(self) -> str:
"""
The authorization qualifier.
"""
return pulumi.get(self, "authorization_qualifier")
@property
@pulumi.getter(name="securityQualifier")
def security_qualifier(self) -> str:
"""
The security qualifier.
"""
return pulumi.get(self, "security_qualifier")
@property
@pulumi.getter(name="authorizationValue")
def authorization_value(self) -> Optional[str]:
"""
The authorization value.
"""
return pulumi.get(self, "authorization_value")
@property
@pulumi.getter(name="passwordValue")
def password_value(self) -> Optional[str]:
"""
The password value.
"""
return pulumi.get(self, "password_value")
@pulumi.output_type
class X12ValidationOverrideResponse(dict):
"""
The X12 validation override settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "messageId":
suggest = "message_id"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateCharacterSet":
suggest = "validate_character_set"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ValidationOverrideResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ValidationOverrideResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ValidationOverrideResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: bool,
message_id: str,
trailing_separator_policy: str,
trim_leading_and_trailing_spaces_and_zeroes: bool,
validate_character_set: bool,
validate_edi_types: bool,
validate_xsd_types: bool):
"""
The X12 validation override settings.
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
        :param str message_id: The message id on which the validation settings have to be applied.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
        :param bool validate_character_set: The value indicating whether to validate character set.
:param bool validate_edi_types: The value indicating whether to validate EDI types.
:param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "message_id", message_id)
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "validate_character_set", validate_character_set)
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> str:
"""
        The message id on which the validation settings have to be applied.
"""
return pulumi.get(self, "message_id")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> str:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateCharacterSet")
def validate_character_set(self) -> bool:
"""
        The value indicating whether to validate character set.
"""
return pulumi.get(self, "validate_character_set")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> bool:
"""
The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> bool:
"""
The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
@pulumi.output_type
class X12ValidationSettingsResponse(dict):
"""
The X12 agreement validation settings.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowLeadingAndTrailingSpacesAndZeroes":
suggest = "allow_leading_and_trailing_spaces_and_zeroes"
elif key == "checkDuplicateGroupControlNumber":
suggest = "check_duplicate_group_control_number"
elif key == "checkDuplicateInterchangeControlNumber":
suggest = "check_duplicate_interchange_control_number"
elif key == "checkDuplicateTransactionSetControlNumber":
suggest = "check_duplicate_transaction_set_control_number"
elif key == "interchangeControlNumberValidityDays":
suggest = "interchange_control_number_validity_days"
elif key == "trailingSeparatorPolicy":
suggest = "trailing_separator_policy"
elif key == "trimLeadingAndTrailingSpacesAndZeroes":
suggest = "trim_leading_and_trailing_spaces_and_zeroes"
elif key == "validateCharacterSet":
suggest = "validate_character_set"
elif key == "validateEDITypes":
suggest = "validate_edi_types"
elif key == "validateXSDTypes":
suggest = "validate_xsd_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X12ValidationSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X12ValidationSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X12ValidationSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_leading_and_trailing_spaces_and_zeroes: bool,
check_duplicate_group_control_number: bool,
check_duplicate_interchange_control_number: bool,
check_duplicate_transaction_set_control_number: bool,
interchange_control_number_validity_days: int,
trailing_separator_policy: str,
trim_leading_and_trailing_spaces_and_zeroes: bool,
validate_character_set: bool,
validate_edi_types: bool,
validate_xsd_types: bool):
"""
The X12 agreement validation settings.
:param bool allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes.
:param bool check_duplicate_group_control_number: The value indicating whether to check for duplicate group control number.
:param bool check_duplicate_interchange_control_number: The value indicating whether to check for duplicate interchange control number.
:param bool check_duplicate_transaction_set_control_number: The value indicating whether to check for duplicate transaction set control number.
:param int interchange_control_number_validity_days: The validity period of interchange control number.
:param str trailing_separator_policy: The trailing separator policy.
:param bool trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes.
:param bool validate_character_set: The value indicating whether to validate character set in the message.
        :param bool validate_edi_types: The value indicating whether to validate EDI types.
        :param bool validate_xsd_types: The value indicating whether to validate XSD types.
"""
pulumi.set(__self__, "allow_leading_and_trailing_spaces_and_zeroes", allow_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "check_duplicate_group_control_number", check_duplicate_group_control_number)
pulumi.set(__self__, "check_duplicate_interchange_control_number", check_duplicate_interchange_control_number)
pulumi.set(__self__, "check_duplicate_transaction_set_control_number", check_duplicate_transaction_set_control_number)
pulumi.set(__self__, "interchange_control_number_validity_days", interchange_control_number_validity_days)
pulumi.set(__self__, "trailing_separator_policy", trailing_separator_policy)
pulumi.set(__self__, "trim_leading_and_trailing_spaces_and_zeroes", trim_leading_and_trailing_spaces_and_zeroes)
pulumi.set(__self__, "validate_character_set", validate_character_set)
pulumi.set(__self__, "validate_edi_types", validate_edi_types)
pulumi.set(__self__, "validate_xsd_types", validate_xsd_types)
@property
@pulumi.getter(name="allowLeadingAndTrailingSpacesAndZeroes")
def allow_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to allow leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "allow_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="checkDuplicateGroupControlNumber")
def check_duplicate_group_control_number(self) -> bool:
"""
The value indicating whether to check for duplicate group control number.
"""
return pulumi.get(self, "check_duplicate_group_control_number")
@property
@pulumi.getter(name="checkDuplicateInterchangeControlNumber")
def check_duplicate_interchange_control_number(self) -> bool:
"""
The value indicating whether to check for duplicate interchange control number.
"""
return pulumi.get(self, "check_duplicate_interchange_control_number")
@property
@pulumi.getter(name="checkDuplicateTransactionSetControlNumber")
def check_duplicate_transaction_set_control_number(self) -> bool:
"""
The value indicating whether to check for duplicate transaction set control number.
"""
return pulumi.get(self, "check_duplicate_transaction_set_control_number")
@property
@pulumi.getter(name="interchangeControlNumberValidityDays")
def interchange_control_number_validity_days(self) -> int:
"""
The validity period of interchange control number.
"""
return pulumi.get(self, "interchange_control_number_validity_days")
@property
@pulumi.getter(name="trailingSeparatorPolicy")
def trailing_separator_policy(self) -> str:
"""
The trailing separator policy.
"""
return pulumi.get(self, "trailing_separator_policy")
@property
@pulumi.getter(name="trimLeadingAndTrailingSpacesAndZeroes")
def trim_leading_and_trailing_spaces_and_zeroes(self) -> bool:
"""
The value indicating whether to trim leading and trailing spaces and zeroes.
"""
return pulumi.get(self, "trim_leading_and_trailing_spaces_and_zeroes")
@property
@pulumi.getter(name="validateCharacterSet")
def validate_character_set(self) -> bool:
"""
The value indicating whether to validate character set in the message.
"""
return pulumi.get(self, "validate_character_set")
@property
@pulumi.getter(name="validateEDITypes")
def validate_edi_types(self) -> bool:
"""
        The value indicating whether to validate EDI types.
"""
return pulumi.get(self, "validate_edi_types")
@property
@pulumi.getter(name="validateXSDTypes")
def validate_xsd_types(self) -> bool:
"""
        The value indicating whether to validate XSD types.
"""
return pulumi.get(self, "validate_xsd_types")
|
py | 1a4814c976b6cc1e21524834c79db2f28d462176 | # Generated by Django 3.2.7 on 2021-10-05 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('completed', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
|
py | 1a481622aef4798f8a1afa1d4c137cdafed9025c | import doctest
import k3etcd
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(k3etcd))
return tests
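# `load_tests` is the standard unittest protocol hook: running this module
# under `python -m unittest` will also collect and execute the doctests
# embedded in k3etcd's docstrings.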
|
py | 1a48163c9e2b546a688d8f5e242aa6ba9e4e6dd2 | #!/usr/bin/python
# ---------------------------------------------------------------------------
# File: indefqpex1.py
# Version 12.8.0
# ---------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2009, 2017. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with
# IBM Corp.
# ---------------------------------------------------------------------------
"""
Entering and optimizing an indefinite quadratic programming problem.
To run from the command line, use
python indefqpex1.py
"""
from __future__ import print_function
import cplex
def solve_and_display(p):
p.solve()
# solution.get_status() returns an integer code
print("Solution status = ", p.solution.get_status(), ":", end=' ')
# the following line prints the corresponding string
print(p.solution.status[p.solution.get_status()])
print("Solution value = ", p.solution.get_objective_value())
numrows = p.linear_constraints.get_num()
for i in range(numrows):
print("Row ", i, ": ", end=' ')
print("Slack = %10f " % p.solution.get_linear_slacks(i), end=' ')
print("Pi = %10f" % p.solution.get_dual_values(i))
numcols = p.variables.get_num()
for j in range(numcols):
print("Column ", j, ": ", end=' ')
print("Value = %10f " % p.solution.get_values(j), end=' ')
print("Reduced Cost = %10f" % p.solution.get_reduced_costs(j))
def indefqpex1():
# This example solves the non-convex quadratic programming problem
#
    # minimize (-3 * pow(x, 2) - 3 * pow(y, 2) - 1 * x * y) / 2
#
# subject to:
# x + y >= 0
# -x + y >= 0
# -1 <= x <= 1
# 0 <= y <= 1
#
    # This model has local optima at (1, 1), (-1, 1), (-0.1666667, 1) and
    # (0, 0) with objective values -3.5, -2.5, -1.4583333333 and 0.0
# respectively.
#
# After the initial solve, constraints are added to the model to
# force CPLEX to converge to some of these local optima in turn
p = cplex.Cplex()
p.variables.add(lb=[-1.0, 0.0], ub=[1.0, 1.0])
p.linear_constraints.add(lin_expr=[[[0, 1], [1.0, 1.0]],
[[0, 1], [-1.0, 1.0]]],
rhs=[0.0, 0.0],
senses=['G', 'G'])
p.objective.set_quadratic([[[0, 1], [-3.0, -0.5]],
[[0, 1], [-0.5, -3.0]]])
# When a non-convex objective function is present, CPLEX will
# raise an exception unless the optimalitytarget parameter is set to
# accept first-order optimal solutions
p.parameters.optimalitytarget.set(2)
# CPLEX may converge to either local optimum
solve_and_display(p)
# Add a constraint that cuts off the solution at (-1, 1)
p.linear_constraints.add(lin_expr=[[[0], [1.0]]],
rhs=[0.0],
senses='G',
names=["new_constraint"])
solve_and_display(p)
# Reverse the sense of the newly added constraint to cut off the
# solution at (1, 1)
p.linear_constraints.set_senses("new_constraint", 'L')
solve_and_display(p)
if __name__ == "__main__":
indefqpex1()
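# Hand check of the local optima listed above (a manual computation, not
# CPLEX output): with f(x, y) = (-3*x**2 - 3*y**2 - x*y) / 2,
#   f(1, 1)    = (-3 - 3 - 1) / 2       = -3.5
#   f(-1, 1)   = (-3 - 3 + 1) / 2       = -2.5
#   f(-1/6, 1) = (-1/12 - 3 + 1/6) / 2  ~ -1.4583333333
#   f(0, 0)    = 0.0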
|
py | 1a4816deefc86bb57c8ea47293d140f8cec6a400 | import logging
from os.path import join
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import PlainTextResponse, RedirectResponse
from starlette.staticfiles import StaticFiles
import opsi
from opsi.util.networking import get_server_url
from opsi.util.templating import TemplateFolder
from .api import Api
from .test import WebserverTest
LOGGER = logging.getLogger(__name__)
class WebServer:
def __init__(self, program, frontend: str, port: int = 80, prefix="/"):
self.program = program
self.app = Starlette(debug=True)
self.url = get_server_url(program.lifespan, port, prefix)
self.template = TemplateFolder(join(frontend, "templates"))
self.app.add_route("/", self.template("nodetree.html"))
self.app.add_route(
"/settings",
self.template(
"settings.html",
persist=self.program.lifespan.persist,
daemon=self.program.lifespan.using_systemd,
nt=self.program.lifespan.NT_AVAIL,
netconf=self.program.lifespan.netconf_writable,
version=opsi.__version__,
),
)
self.testclient = WebserverTest(self.app)
self.api = Api(self.app, self.program)
self.make_hooks()
self.app.mount(
"/", CacheControlMiddleware(StaticFiles(directory=join(frontend, "www")))
)
def make_hooks(self):
PREFIX = "/hooks"
HOOKS = {
x[0]: x[1] for x in self.program.manager.hooks.items() if x[1].visible
} # {package: app}
self.app.add_route(
PREFIX, self.template("hooks.html", prefix=PREFIX, packages=HOOKS.keys())
)
# This is required because "/hooks/package/{path}"" and "/hooks/package/"" trigger the mounted app,
# but "/hooks/package" doesn't
self.app.add_route(PREFIX + "/{path}", self.trailingslash_redirect)
for package, hook in HOOKS.items():
path = PREFIX + "/" + package
hook.url = self.url + path.lstrip("/")
self.app.mount(path, hook.app)
def trailingslash_redirect(self, request):
return RedirectResponse(request.url.path + "/")
# These test functions go through the entire http pipeline
def get_funcs(self) -> str:
return self.testclient.get("/api/funcs")
def get_nodes(self) -> str:
return self.testclient.get("/api/nodes")
def set_nodes(self, data: str) -> str:
return self.testclient.post("/api/nodes", data)
class CacheControlMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
response = await call_next(request)
response.headers["Cache-Control"] = "no-cache public max-age=0 must-validate"
return response
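# Behavioural sketch (illustrative, not from the original project): because
# StaticFiles is wrapped in CacheControlMiddleware above, every static
# response carries the header, e.g.
#
#   from starlette.testclient import TestClient
#   client = TestClient(webserver.app)   # `webserver` is a WebServer instance
#   client.get("/index.html").headers["Cache-Control"]  # -> value set in dispatch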
|
py | 1a4817e65c837d897f8d1e0e64d880a5c4a48d84 | import numpy as np
import torch
from torch.autograd import Variable
from networks.losses import CombinedLoss
from torch.optim import lr_scheduler
import os
def per_class_dice(y_pred, y_true, num_class):
avg_dice = 0
y_pred = y_pred.data.cpu().numpy()
y_true = y_true.data.cpu().numpy()
for i in range(num_class):
GT = y_true == (i + 1)
Pred = y_pred == (i + 1)
        inter = np.sum(GT * Pred) + 0.0001  # element-wise intersection (np.matmul would take a matrix product)
union = np.sum(GT) + np.sum(Pred) + 0.0001
t = 2 * inter / union
avg_dice = avg_dice + (t / num_class)
return avg_dice
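# The score above is the standard Dice coefficient
# 2 * |intersection| / (|GT| + |Pred|), with a small smoothing constant,
# averaged over `num_class` labels; classes are compared as i + 1, so
# label 0 (background) is skipped.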
def create_exp_directory(exp_dir_name):
if not os.path.exists('models/' + exp_dir_name):
os.makedirs('models/' + exp_dir_name)
class Solver(object):
# global optimiser parameters
default_optim_args = {"lr": 1e-2,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 0.0001}
gamma = 0.5
step_size = 5
NumClass = 9
def __init__(self, optim=torch.optim.Adam, optim_args={},
loss_func=CombinedLoss()):
optim_args_merged = self.default_optim_args.copy()
optim_args_merged.update(optim_args)
self.optim_args = optim_args_merged
self.optim = optim
self.loss_func = loss_func
self._reset_histories()
def _reset_histories(self):
"""
Resets train and val histories for the accuracy and the loss.
"""
self.train_loss_history = []
self.train_acc_history = []
self.val_acc_history = []
self.val_loss_history = []
def train(self, model, train_loader, val_loader, num_epochs=10, log_nth=5, exp_dir_name='exp_default'):
"""
Train a given model with the provided data.
Inputs:
- model: model object initialized from a torch.nn.Module
- train_loader: train data in torch.utils.data.DataLoader
- val_loader: val data in torch.utils.data.DataLoader
- num_epochs: total number of training epochs
- log_nth: log training accuracy and loss every nth iteration
"""
optim = self.optim(model.parameters(), **self.optim_args)
scheduler = lr_scheduler.StepLR(optim, step_size=self.step_size,
gamma=self.gamma) # decay LR by a factor of 0.5 every 5 epochs
self._reset_histories()
iter_per_epoch = 1
# iter_per_epoch = len(train_loader)
if torch.cuda.is_available():
model.cuda()
print('START TRAIN.')
curr_iter = 0
create_exp_directory(exp_dir_name)
for epoch in range(num_epochs):
scheduler.step()
for i_batch, sample_batched in enumerate(train_loader):
X = Variable(sample_batched[0])
y = Variable(sample_batched[1])
w = Variable(sample_batched[2])
if model.is_cuda:
X, y, w = X.cuda(), y.cuda(), w.cuda()
for iter in range(iter_per_epoch):
curr_iter += iter
optim.zero_grad()
output = model(X)
loss = self.loss_func(output, y, w)
loss.backward()
optim.step()
if iter % log_nth == 0:
self.train_loss_history.append(loss.data[0])
print('[Iteration : ' + str(iter) + '/' + str(iter_per_epoch * num_epochs) + '] : ' + str(
loss.data[0]))
#_, batch_output = torch.max(F.softmax(model(X),dim=1), dim=1)
#avg_dice = per_class_dice(batch_output, y, self.NumClass)
#print('Per class average dice score is ' + str(avg_dice))
# self.train_acc_history.append(train_accuracy)
#
# val_output = torch.max(model(Variable(torch.from_numpy(val_loader.dataset.X))), dim= 1)
# val_accuracy = self.accuracy(val_output[1], Variable(torch.from_numpy(val_loader.dataset.y)))
# self.val_acc_history.append(val_accuracy)
print('[Epoch : ' + str(epoch) + '/' + str(num_epochs) + '] : ' + str(loss.data[0]))
model.save('models/' + exp_dir_name + '/relaynet_epoch' + str(epoch + 1) + '.model')
print('FINISH.')
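# Hypothetical usage sketch (the model and loaders are assumptions; the
# loaders must yield (image, label, weight) batches, as the loop above
# expects):
#
#   solver = Solver(optim_args={"lr": 1e-3})
#   solver.train(relaynet_model, train_loader, val_loader,
#                num_epochs=20, log_nth=5, exp_dir_name='exp1')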
|
py | 1a4817f999e94b8565bb04ce4398068c9d43d5c7 | import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import cv2
import numpy as np
from PIL import Image
from math import ceil
class VOCDataset(Dataset):
def __init__(self, root_path="data/VOCdevkit", dataset="voc2012", image_size=321, is_training=True):
self.dataset = dataset
if self.dataset == "voc2007":
self.data_path = os.path.join(root_path, "VOC2007")
if is_training:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/trainval.txt")
else:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/test.txt")
elif self.dataset == "voc2012":
self.data_path = os.path.join(root_path, "VOC2012")
if is_training:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/train.txt")
else:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/val.txt")
elif self.dataset == "augmentedvoc":
self.data_path = os.path.join(root_path, "VOCaugmented")
if is_training:
id_list_path = os.path.join(self.data_path, "list/train_aug.txt")
else:
id_list_path = os.path.join(self.data_path, "list/val.txt")
self.ids = [id.strip() for id in open(id_list_path)]
self.classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
self.image_size = image_size
self.num_classes = len(self.classes)
self.num_images = len(self.ids)
self.is_training = is_training
def __len__(self):
return self.num_images
def __getitem__(self, item):
id = self.ids[item]
if self.dataset in ["voc2007", "voc2012"]:
image_path = os.path.join(self.data_path, "JPEGImages", "{}.jpg".format(id))
gt_image_path = os.path.join(self.data_path, "SegmentationClass", "{}.png".format(id))
elif self.dataset == "augmentedvoc":
image_path = os.path.join(self.data_path, "img", "{}.jpg".format(id))
gt_image_path = os.path.join(self.data_path, "gt", "{}.png".format(id))
image = cv2.imread(image_path).astype(np.float32)
image[:, :, 0] -= 104.008
image[:, :, 1] -= 116.669
image[:, :, 2] -= 122.675
gt_image = Image.open(gt_image_path).convert('P')
gt_image = np.asarray(gt_image, np.int32)
gt_image[gt_image == 255] = 0
image = cv2.resize(image, (self.image_size, self.image_size), interpolation=cv2.INTER_LINEAR)
gt_image = cv2.resize(gt_image, (self.image_size, self.image_size), interpolation=cv2.INTER_NEAREST)
gt_torch = torch.Tensor(torch.from_numpy(gt_image[None, None, :, :]).float())
gt1_size = ceil(self.image_size / 8.)
interp = nn.Upsample(size=(gt1_size, gt1_size), mode='bilinear', align_corners=True)
gt1 = interp(gt_torch).data.numpy()[0, 0, :, :]
gt2_size = ceil(self.image_size / 16.)
interp = nn.Upsample(size=(gt2_size, gt2_size), mode='bilinear', align_corners=True)
gt2 = interp(gt_torch).data.numpy()[0, 0, :, :]
return np.transpose(np.array(image, dtype=np.float32), (2, 0, 1)), np.array(gt1, dtype=np.float32), np.array(
gt2, dtype=np.float32)
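# Hypothetical usage sketch (paths are assumptions about the local layout):
#
#   dataset = VOCDataset(root_path="data/VOCdevkit", dataset="voc2012",
#                        image_size=321, is_training=True)
#   image, gt1, gt2 = dataset[0]
#   # image: (3, 321, 321) mean-subtracted BGR; gt1/gt2: labels at 1/8 and
#   # 1/16 of the input resolution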
|
py | 1a48194e6fb1ffe3b10763405676f573906327be | import bisect
import json
import logging
import random
from pathlib import Path
import discord
from discord.ext import commands
from bot.constants import Colours
log = logging.getLogger(__name__)
with Path("bot/resources/halloween/spooky_rating.json").open() as file:
SPOOKY_DATA = json.load(file)
SPOOKY_DATA = sorted((int(key), value) for key, value in SPOOKY_DATA.items())
class SpookyRating(commands.Cog):
"""A cog for calculating one's spooky rating."""
def __init__(self, bot: commands.Bot):
self.bot = bot
self.local_random = random.Random()
@commands.command()
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def spookyrating(self, ctx: commands.Context, who: discord.Member = None) -> None:
"""
Calculates the spooky rating of someone.
Any user will always yield the same result, no matter who calls the command
"""
if who is None:
who = ctx.author
        # Seeding with the user's ID makes the rating deterministic per user
self.local_random.seed(who.id)
spooky_percent = self.local_random.randint(1, 101)
        # Subtract 1 because bisect returns the insertion point, i.e. the index
        # *after* the bracket that matches; see
        # https://docs.python.org/3/library/bisect.html#bisect.bisect
        index = bisect.bisect(SPOOKY_DATA, (spooky_percent,)) - 1
_, data = SPOOKY_DATA[index]
embed = discord.Embed(
title=data['title'],
description=f'{who} scored {spooky_percent}%!',
color=Colours.orange
)
embed.add_field(
name='A whisper from Satan',
value=data['text']
)
embed.set_thumbnail(
url=data['image']
)
await ctx.send(embed=embed)
def setup(bot: commands.Bot) -> None:
"""Spooky Rating Cog load."""
bot.add_cog(SpookyRating(bot))
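# A standalone, hedged illustration of the bisect lookup above, using a toy
# threshold table instead of the real spooky_rating.json contents. (Running
# this file directly still requires the bot package and resource file that are
# loaded at import time.)
if __name__ == "__main__":
    toy_data = [(0, "not spooky"), (34, "mildly spooky"), (67, "very spooky")]
    for percent in (10, 50, 99):
        idx = bisect.bisect(toy_data, (percent,)) - 1
        print(f"{percent}% -> {toy_data[idx][1]}")
    # 10% -> not spooky, 50% -> mildly spooky, 99% -> very spooky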
|
py | 1a481959681bd31047bdda92c16475d202ac6d87 | '''
Created on Mar 8, 2013
@author: Gary
'''
import unittest
from housemonitor.outputs.zigbee.zigbeeoutput import ZigBeeOutput
from housemonitor.outputs.zigbee.zigbeecontrol import ZigBeeControl
from housemonitor.outputs.zigbee.zigbeeoutputstep import ZigBeeOutputStep
from housemonitor.outputs.zigbee.zigbeeoutputthread import ZigBeeOutputThread
from housemonitor.inputs.zigbeeinput.windowsxbeecommunications import WindowsXbeeCommunications
from housemonitor.inputs.zigbeeinput.beaglebonexbeecommunications import BeagleboneXbeeCommunications
from housemonitor.inputs.zigbeeinput.xbeecommunications import XBeeCommunications
from housemonitor.lib.hmqueue import HMQueue
from housemonitor.lib.constants import Constants
from mock import Mock, MagicMock, patch
from housemonitor.lib.common import Common
import logging.config
from xbee import ZigBee
class Test( unittest.TestCase ):
logger = logging.getLogger( 'UnitTest' )
def setUp( self ):
logging.config.fileConfig( "unittest_logging.conf" )
def tearDown( self ):
pass
def test_logger_name( self ):
out = ZigBeeOutput()
self.assertEqual( out.logger_name, Constants.LogKeys.outputsZigBee )
# @patch('housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications)
# def test_setLow(self, win):
# destination_address = 0x13a20040902a02
# port = 'DIO-1'
# ot = ZigBeeOutput()
# # ot.communication_module['nt'] = MagicMock()
# ot.startCorrectZigbee(os_name='nt')
# ot.setLow(destination_address, port)
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_0_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-0'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D0',
parameter='\x04',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_1_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-1'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D1',
parameter='\x04',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_2_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-2'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D2',
parameter='\x04',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_3_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-3'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D3',
parameter='\x04',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_4_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-4'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D4',
parameter='\x04',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_5_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-5'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D5',
parameter='\x04',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_6_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-6'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D6',
parameter='\x04',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_7_Low( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-7'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = False
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( dest_addr_long='\x00\x13\xa2\x00@\x90*\x02',
parameter='\x04',
command='D7',
frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_0_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-0'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D0',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_1_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-1'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D1',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_2_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-2'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D2',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_3_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-3'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D3',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_4_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-4'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D4',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_5_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-5'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D5',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_6_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-6'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D6',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02', frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_set_DIO_7_High( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-7'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
ot.zigbee.remote_at.assert_called_once_with( command='D7',
parameter='\x05',
dest_addr_long='\x00\x13\xa2\x00@\x90*\x02',
frame_id='\xaa' )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_no_device( self, win ):
data = {}
data[Constants.DataPacket.port] = 'DIO-7'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
with self.assertRaises( KeyError ):
ot.sendCommand( **data )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_no_port( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
with self.assertRaises( KeyError ):
ot.sendCommand( **data )
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_invalid_device( self, win ):
data = {}
data[Constants.DataPacket.device] = 'xxxxxxxxxx'
data[Constants.DataPacket.port] = 'DIO-7'
data[Constants.DataPacket.value] = True
data[Constants.DataPacket.ID] = 0xff
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
with self.assertRaisesRegexp( ValueError, "invalid literal for int.. with base 16:.*" ):
ot.sendCommand( **data )
# TODO work more on this
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_invalid_port( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-9'
data[Constants.DataPacket.ID] = 0xAA
data[Constants.DataPacket.value] = True
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
with self.assertRaisesRegexp( KeyError, "DIO-9" ):
ot.sendCommand( **data )
# TODO work more on this
# @patch('housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications)
# def test_invalid_value(self, win):
# data = {}
# data[Constants.DataPacket.device] = '0x13a20040902a02'
# data[Constants.DataPacket.port] = 'DIO-7'
# ot = ZigBeeOutput()
# ot.zigbee = MagicMock()
# ot.zigbee.remote_at = MagicMock()
# with self.assertRaisesRegexp(KeyError, ""):
# ot.sendCommand('', data)
# TODO work more on this
@patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
def test_pack_and_unpack( self, win ):
data = {}
data[Constants.DataPacket.device] = '0x13a20040902a02'
data[Constants.DataPacket.port] = 'DIO-7'
data[Constants.DataPacket.ID] = 0xAA
        data[Constants.DataPacket.value] = 1
ot = ZigBeeOutput()
ot.zigbee = MagicMock()
ot.zigbee.remote_at = MagicMock()
ot.sendCommand( **data )
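    # A hedged, table-driven sketch: the sixteen near-identical DIO tests above
    # could be collapsed with unittest's subTest (Python 3.4+), assuming the
    # same ZigBeeOutput / remote_at behaviour they each exercise individually.
    @patch( 'housemonitor.outputs.zigbee.zigbeeoutput.WindowsXbeeCommunications', spec=WindowsXbeeCommunications )
    def test_set_all_DIO_pins( self, win ):
        for pin in range( 8 ):
            for value, parameter in ( ( False, '\x04' ), ( True, '\x05' ) ):
                with self.subTest( pin=pin, value=value ):
                    data = {
                        Constants.DataPacket.device: '0x13a20040902a02',
                        Constants.DataPacket.port: 'DIO-{}'.format( pin ),
                        Constants.DataPacket.ID: 0xAA,
                        Constants.DataPacket.value: value,
                    }
                    ot = ZigBeeOutput()
                    ot.zigbee = MagicMock()
                    ot.zigbee.remote_at = MagicMock()
                    ot.sendCommand( **data )
                    ot.zigbee.remote_at.assert_called_once_with(
                        command='D{}'.format( pin ),
                        parameter=parameter,
                        dest_addr_long='\x00\x13\xa2\x00@\x90*\x02',
                        frame_id='\xaa' )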
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
py | 1a4819a691d4e4407bc37ad48fef4dd9785b99af | # Import python libs
import sys
import yaml
# Import salt libs
from saltunittest import TestLoader, TextTestRunner
import integration
from integration import TestDaemon
class MatchTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
'''
Test salt matchers
'''
_call_binary_ = 'salt'
def test_list(self):
'''
test salt -L matcher
'''
data = self.run_salt('-L minion test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-L minion,sub_minion test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
def test_compound(self):
'''
test salt compound matcher
'''
data = self.run_salt('-C "min* and G@test_grain:cheese" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-C "min* and not G@test_grain:foo" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-C "min* not G@test_grain:foo" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
match = 'P@test_grain:^cheese$ and * and G@test_grain:cheese'
data = self.run_salt('-t 1 -C \'{0}\' test.ping'.format(match))
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
match = 'L@sub_minion and E@.*'
data = self.run_salt('-t 1 -C "{0}" test.ping'.format(match))
data = '\n'.join(data)
self.assertIn('sub_minion', data)
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
def test_glob(self):
'''
test salt glob matcher
'''
data = self.run_salt('minion test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('"*" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
def test_regex(self):
'''
test salt regex matcher
'''
data = self.run_salt('-E "^minion$" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-E ".*" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
def test_grain(self):
'''
test salt grain matcher
'''
# First-level grain (string value)
data = self.run_salt('-t 1 -G "test_grain:cheese" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-G "test_grain:spam" test.ping')
data = '\n'.join(data)
self.assertIn('sub_minion', data)
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
# First-level grain (list member)
data = self.run_salt('-t 1 -G "planets:earth" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-G "planets:saturn" test.ping')
data = '\n'.join(data)
self.assertIn('sub_minion', data)
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
data = self.run_salt('-G "planets:pluto" test.ping')
self.assertEqual(
''.join(data),
'No minions matched the target. No command was sent, no jid was '
'assigned.'
)
# Nested grain (string value)
data = self.run_salt('-t 1 -G "level1:level2:foo" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-G "level1:level2:bar" test.ping')
data = '\n'.join(data)
self.assertIn('sub_minion', data)
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
# Nested grain (list member)
data = self.run_salt('-t 1 -G "companions:one:ian" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('-G "companions:two:jamie" test.ping')
data = '\n'.join(data)
self.assertIn('sub_minion', data)
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
def test_regrain(self):
'''
test salt grain matcher
'''
data = self.run_salt(
'-t 1 --grain-pcre "test_grain:^cheese$" test.ping'
)
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertNotIn('sub_minion', data)
data = self.run_salt('--grain-pcre "test_grain:.*am$" test.ping')
data = '\n'.join(data)
self.assertIn('sub_minion', data)
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
def test_pillar(self):
'''
test pillar matcher
'''
# First-level pillar (string value)
data = self.run_salt('-I "monty:python" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
# First-level pillar (string value, only in sub_minion)
data = self.run_salt('-I "sub:sub_minion" test.ping')
data = '\n'.join(data)
self.assertIn('sub_minion', data)
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
# First-level pillar (list member)
data = self.run_salt('-I "knights:Bedevere" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
# Nested pillar (string value)
data = self.run_salt('-I "level1:level2:foo" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
# Nested pillar (list member)
data = self.run_salt('-I "companions:three:sarah jane" test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
def test_exsel(self):
data = self.run_salt('-X test.ping test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
def test_ipcidr(self):
subnets_data = self.run_salt('--out yaml \'*\' network.subnets')
        yaml_data = yaml.safe_load('\n'.join(subnets_data))
# We're just after the first defined subnet from 'minion'
subnet = yaml_data['minion'][0]
data = self.run_salt('-S {0} test.ping'.format(subnet))
data = '\n'.join(data)
self.assertIn('minion', data)
self.assertIn('sub_minion', data)
def test_static(self):
'''
test salt static call
'''
data = self.run_salt('minion test.ping --static')
data = '\n'.join(data)
self.assertIn('minion', data)
def test_salt_documentation(self):
'''
Test to see if we're supporting --doc
'''
data = self.run_salt('-d \* user')
self.assertIn('user.add:', data)
def test_salt_documentation_arguments_not_assumed(self):
'''
Test to see if we're not auto-adding '*' and 'sys.doc' to the call
'''
data = self.run_salt('-d')
self.assertIn('user.add:', data)
data = self.run_salt('\'*\' -d')
self.assertIn('user.add:', data)
data = self.run_salt('\'*\' -d user')
self.assertIn('user.add:', data)
data = self.run_salt('\'*\' sys.doc -d user')
self.assertIn('user.add:', data)
data = self.run_salt('\'*\' sys.doc user')
self.assertIn('user.add:', data)
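# For quick reference, the matcher flags exercised above (assuming standard
# salt CLI semantics of this era):
#   -L name1,name2      list matcher (exact minion ids)
#   -E 'regex'          PCRE match on the minion id
#   -G 'grain:value'    grain matcher; ':' descends into nested keys/lists
#   --grain-pcre        like -G but the value part is a PCRE
#   -I 'pillar:value'   pillar matcher, same nesting rules as -G
#   -C 'A and not B'    compound matcher combining the letter-prefixed forms
#   -S subnet/cidr      ip/cidr matcher
#   -X exsel            external selector (removed in later salt releases)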
if __name__ == "__main__":
loader = TestLoader()
tests = loader.loadTestsFromTestCase(MatchTest)
print('Setting up Salt daemons to execute tests')
with TestDaemon():
runner = TextTestRunner(verbosity=1).run(tests)
sys.exit(runner.wasSuccessful())
|
py | 1a481a2e9d34883a9f5811288d76d73cfd608548 | from ctypes import byref, windll
from ctypes.wintypes import DWORD, HANDLE
from typing import Any, Optional, TextIO
from prompt_toolkit.data_structures import Size
from prompt_toolkit.renderer import Output
from prompt_toolkit.utils import is_windows
from prompt_toolkit.win32_types import STD_OUTPUT_HANDLE
from .color_depth import ColorDepth
from .vt100 import Vt100_Output
from .win32 import Win32Output
__all__ = [
"Windows10_Output",
]
# See: https://msdn.microsoft.com/pl-pl/library/windows/desktop/ms686033(v=vs.85).aspx
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
class Windows10_Output:
"""
Windows 10 output abstraction. This enables and uses vt100 escape sequences.
"""
def __init__(
self, stdout: TextIO, default_color_depth: Optional[ColorDepth] = None
) -> None:
self.win32_output = Win32Output(stdout, default_color_depth=default_color_depth)
self.vt100_output = Vt100_Output(
stdout, lambda: Size(0, 0), default_color_depth=default_color_depth
)
self._hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
def flush(self) -> None:
"""
Write to output stream and flush.
"""
original_mode = DWORD(0)
# Remember the previous console mode.
windll.kernel32.GetConsoleMode(self._hconsole, byref(original_mode))
# Enable processing of vt100 sequences.
windll.kernel32.SetConsoleMode(
self._hconsole,
DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING),
)
try:
self.vt100_output.flush()
finally:
# Restore console mode.
windll.kernel32.SetConsoleMode(self._hconsole, original_mode)
@property
def responds_to_cpr(self) -> bool:
return False # We don't need this on Windows.
def __getattr__(self, name: str) -> Any:
if name in (
"get_size",
"get_rows_below_cursor_position",
"enable_mouse_support",
"disable_mouse_support",
"scroll_buffer_to_prompt",
"get_win32_screen_buffer_info",
"enable_bracketed_paste",
"disable_bracketed_paste",
"get_default_color_depth",
):
return getattr(self.win32_output, name)
else:
return getattr(self.vt100_output, name)
Output.register(Windows10_Output)
def is_win_vt100_enabled() -> bool:
"""
Returns True when we're running Windows and VT100 escape sequences are
supported.
"""
if not is_windows():
return False
hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
# Get original console mode.
original_mode = DWORD(0)
windll.kernel32.GetConsoleMode(hconsole, byref(original_mode))
try:
# Try to enable VT100 sequences.
result = windll.kernel32.SetConsoleMode(
hconsole, DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
)
return result == 1
finally:
windll.kernel32.SetConsoleMode(hconsole, original_mode)
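# A minimal selection sketch (not part of prompt_toolkit's public API): prefer
# the vt100-capable wrapper when the console supports it, else plain Win32.
def _create_windows_output(stdout: TextIO) -> Output:
    if is_win_vt100_enabled():
        return Windows10_Output(stdout)
    return Win32Output(stdout)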
|
py | 1a481ba37131f51f52c604053d473fb2840372bf | """
Django settings for apiref project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os, sys
from apiref.db_secret import database_setting
RUNNING_MODE = os.environ.get("APIREF_RUNNING_MODE", "devel")
DEBUG_LOG_MODE = os.environ.get("DEBUG_LOG_MODE", "FALSE")
SITE_NAME = os.environ.get("SITE_NAME", "API Ref")
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if RUNNING_MODE == "production":
SECRET_KEY = os.environ["SECRET_KEY"]
else:
SECRET_KEY = '^qae)-)isd=9t+u!)y*23)(rhi+3((7v$mnglv488iw*3f!b)4'
# SECURITY WARNING: don't run with debug turned on in production!
if RUNNING_MODE == "production":
DEBUG = False
else:
DEBUG = True
if DEBUG_LOG_MODE == "TRUE":
DEBUG = True
# Both running modes currently accept any host.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'setup.apps.SetupConfig',
'account.apps.AccountConfig',
'apib.apps.ApibConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apiblueprint_view',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'apiref.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ["shell"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'apiref.context_processors.site_common_text'
],
},
},
]
WSGI_APPLICATION = 'apiref.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
if RUNNING_MODE == "production":
DATABASES = database_setting
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
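# `database_setting` comes from apiref/db_secret.py, which is kept out of
# version control; a typical shape, purely illustrative:
#
#     database_setting = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql',
#             'NAME': 'apiref', 'USER': '...', 'PASSWORD': '...',
#             'HOST': '...', 'PORT': '5432',
#         }
#     }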
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
# Email Settings
if RUNNING_MODE == "production":
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = os.environ["SENDGRID_USERNAME"]
EMAIL_HOST_PASSWORD = os.environ["SENDGRID_PASSWORD"]
EMAIL_FROM_ADDRESS = os.environ["SENDGRID_USERNAME"]
EMAIL_PORT = 587
EMAIL_USE_TLS = True
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.host'
EMAIL_HOST_USER = 'username'
EMAIL_HOST_PASSWORD = 'password'
EMAIL_FROM_ADDRESS = '[email protected]'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
|
py | 1a481c3b9e65751d570c2af33fa076a0c02459fa | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import six
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
@tf_export("reshape", v1=["reshape", "manip.reshape"])
def reshape(tensor, shape, name=None): # pylint: disable=redefined-outer-name
r"""Reshapes a tensor.
Given `tensor`, this operation returns a tensor that has the same values
as `tensor` with shape `shape`.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total size remains constant. In particular,
a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can
be -1.
If `shape` is 1-D or higher, then the operation returns a tensor with shape
`shape` filled with the values of `tensor`. In this case, the number of
elements implied by `shape` must be the same as the number of elements in
`tensor`.
For example:
```
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
# tensor 't' is [[[1, 1], [2, 2]],
# [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
[3, 3, 4, 4]]
# tensor 't' is [[[1, 1, 1],
# [2, 2, 2]],
# [[3, 3, 3],
# [4, 4, 4]],
# [[5, 5, 5],
# [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
# -1 can also be used to infer the shape
# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
[2, 2, 2],
[3, 3, 3]],
[[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]]
# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
Args:
tensor: A `Tensor`.
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Defines the shape of the output tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
result = gen_array_ops.reshape(tensor, shape, name)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("fill")
def fill(dims, value, name=None):
r"""Creates a tensor filled with a scalar value.
This operation creates a tensor of shape `dims` and fills it with `value`.
For example:
```
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
[9, 9, 9]]
```
`tf.fill` differs from `tf.constant` in a few ways:
* `tf.fill` only supports scalar contents, whereas `tf.constant` supports
Tensor values.
* `tf.fill` creates an Op in the computation graph that constructs the
actual
Tensor value at runtime. This is in contrast to `tf.constant` which embeds
the entire Tensor into the graph with a `Const` node.
* Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
based on other runtime Tensors, unlike `tf.constant`.
Args:
dims: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.
Represents the shape of the output tensor.
value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.
@compatibility(numpy) Equivalent to np.full @end_compatibility
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
result = gen_array_ops.fill(dims, value, name=name)
tensor_util.maybe_set_static_shape(result, dims)
return result
@tf_export("identity")
@dispatch.add_dispatch_support
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
For example:
```python
import tensorflow as tf
val0 = tf.ones((1,), dtype=tf.float32)
a = tf.atan2(val0, val0)
a_identity = tf.identity(a)
print(a.numpy()) #[0.7853982]
print(a_identity.numpy()) #[0.7853982]
```
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if context.executing_eagerly() and not hasattr(input, "graph"):
input = ops.convert_to_tensor(input)
in_device = input.backing_device
# TODO(ashankar): Does 'identity' need to invoke execution callbacks?
context_device = context.context().device_name
if not context_device:
context_device = "/job:localhost/replica:0/task:0/device:CPU:0"
if context_device == in_device:
return input
else:
copied = input._copy() # pylint: disable=protected-access
if hasattr(copied, "_handle_data"):
copied._handle_data = input._handle_data # pylint: disable=protected-access
return copied
else:
ret = gen_array_ops.identity(input, name=name)
# Propagate handle data for happier shape inference for resource variables.
if hasattr(input, "_handle_data"):
ret._handle_data = input._handle_data # pylint: disable=protected-access
return ret
# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if either both or neither of `dim` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
raise ValueError("Must specify an axis argument to tf.expand_dims()")
return expand_dims_v2(input, axis, name)
@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
"""
return gen_array_ops.expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated("2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops.list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@deprecation.deprecated("2018-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops.list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
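# Illustrative use of the deprecated API (tf.sets.difference is the successor):
#   x = [1, 2, 3, 4, 5, 6]; y = [1, 3, 5]
#   out, idx = tf.compat.v1.setdiff1d(x, y)  # out == [2, 4, 6], idx == [1, 3, 5]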
@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given symbolic shapes.
When shape_x and shape_y are Tensors representing shapes (i.e. the result of
calling tf.shape on another Tensor) this computes a Tensor which is the shape
of the result of a broadcasting op applied in tensors of shapes shape_x and
shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
Tensor whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors do not have statically known shapes.
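  Restating the example above in code:

  ```python
  shape_x = tf.constant([1, 2, 3])
  shape_y = tf.constant([5, 1, 3])
  tf.broadcast_dynamic_shape(shape_x, shape_y)  # [5, 2, 3]
  ```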
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops.broadcast_args(shape_x, shape_y)
@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given known shapes.
When shape_x and shape_y are fully known TensorShapes this computes a
TensorShape which is the shape of the result of a broadcasting op applied in
tensors of shapes shape_x and shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
TensorShape whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors have statically known shapes.
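  Restating the example above in code:

  ```python
  shape_x = tf.TensorShape([1, 2, 3])
  shape_y = tf.TensorShape([5, 1, 3])
  tf.broadcast_static_shape(shape_x, shape_y)  # TensorShape([5, 2, 3])
  ```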
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
@tf_export("shape", v1=[])
def shape_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
return shape(input, name, out_type)
@tf_export(v1=["shape"])
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if not context.executing_eagerly():
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns shape of tensors.
Args:
input: A list of at least 1 `Tensor` object with the same type.
out_type: The specified output type of the operation (`int32` or `int64`).
Defaults to `tf.int32`(optional).
name: A name for the operation (optional).
Returns:
A list with the same length as `input` of `Tensor` objects with
type `out_type`.
"""
return gen_array_ops.shape_n(input, out_type=out_type, name=name)
@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
  """Returns the number of elements in a tensor; see `tf.compat.v1.size`."""
  return size(input, name, out_type)
@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
"""
if (context.executing_eagerly()
and not hasattr(input, "graph")
and not isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))
):
input = ops.convert_to_tensor(input)
np_out_type = out_type.as_numpy_dtype
num_elements = np.prod(input._shape_tuple(), dtype=np_out_type) # pylint: disable=protected-access
return ops.convert_to_tensor(num_elements, dtype=out_type)
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize:
if input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
return constant(0, out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
_SLICE_TYPE_ERROR = (
"Only integers, slices (`:`), ellipsis (`...`), "
"tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
"indices")
_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,
dtypes.int64_ref)
def _check_index(idx):
"""Check if a given value is a valid index into a tensor."""
if isinstance(idx, (six.integer_types, tensor_shape.Dimension)):
return
# Optimistic check. Assumptions:
# * any object with a dtype is supported
# * any object with a dtype has a sizeable shape attribute.
dtype = getattr(idx, "dtype", None)
if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
idx.shape and len(idx.shape) == 1):
# TODO(slebedev): IndexError seems more appropriate here, but it
# will break `_slice_helper` contract.
raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
def _slice_helper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
currently only support basic indexing. That means that
using a non-scalar tensor as input is not currently allowed.
Some useful examples:
```python
# Strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
# Skip every other row and reverse the order of the columns
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Use scalar tensors as indices on both dimensions
print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
[[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
# Masks
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[foo > 2].eval()) # => [3, 4, 5, 6, 7, 8, 9]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable object to slice
(i.e. tensor is the read-only view of this variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, ellipsis,
tf.newaxis or scalar int32/int64 tensors.
"""
if isinstance(slice_spec, bool) or \
(isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
(isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
return boolean_mask(tensor=tensor, mask=slice_spec)
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _BaseSlice):
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and (isinstance(s.start, ops.Tensor) or
s.start != sys.maxsize):
_check_index(s.start)
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and (isinstance(s.stop, ops.Tensor) or
s.stop != sys.maxsize):
_check_index(s.stop)
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
_check_index(s.step)
strides.append(s.step)
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
_check_index(s)
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
# stack possibly involves no tensors, so we must use op_scope correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input_` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input_` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input_`. In other
words, `begin[i]` is the offset into the i'th dimension of `input_` that you
want to slice from.
Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input_.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input_`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
from the given `input_` tensor. Starting at the location specified by `begin`
the slice continues by adding `stride` to the index until all dimensions are
not less than `end`.
Note that a stride can be negative, which causes a reverse slice.
Given a Python slice `input[spec0, spec1, ..., specn]`,
this function will be called as follows.
`begin`, `end`, and `strides` will be vectors of length n.
n in general is not equal to the rank of the `input_` tensor.
In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
`new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
the ith spec.
If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is set, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
specification shrinks the dimensionality by 1, taking on the value at index
`begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
equal to 2.
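For example, here is one way the masks encode `foo[:, 3]` (an illustrative
sketch; `foo` is assumed to be a rank-2 tensor):
```python
foo = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
# Bit 0 of begin_mask/end_mask keeps dimension 0 whole; bit 1 of
# shrink_axis_mask drops dimension 1, taking the value at index 3.
tf.strided_slice(foo, [0, 3], [0, 4], [1, 1],
begin_mask=1, end_mask=1, shrink_axis_mask=2)  # [4, 8]
```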
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input_`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
if not (var is None and isinstance(op, ops.EagerTensor)):
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
else:
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
op.assign = assign
return op
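# Illustrative of the sliced-assignment path enabled above; it is usually
# reached through variable slicing (see `_SliceHelperVar` below), e.g.:
#   v = tf.Variable([1., 2., 3., 4.])
#   update = v[1:3].assign([20., 30.])  # `assign` comes from the closure above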
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See `tf.Tensor.__getitem__` for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
As an operator. The operator also has an `assign()` method
that can be used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice,
ellipsis, tf.newaxis or int32/int64 tensors.
"""
return _slice_helper(var.value(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
available, in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
```python
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops.parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.stack([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access
if value_shape is not None:
expanded_num_dims = len(value_shape) + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -expanded_num_dims, expanded_num_dims))
return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
if context.executing_eagerly():
# NOTE: Fast path when all the items are tensors, this doesn't do any type
# checking.
if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
return gen_array_ops.pack(list_or_tuple, name=name)
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" %
(elem.dtype, dtype, elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops.pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be converted
to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _cast_nested_seqs_to_dtype(dtype):
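"""Returns a function that casts dense tensor-like elements to `dtype`."""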
def _maybe_cast(elem):
if ops.is_dense_tensor_like(elem):
if dtype != elem.dtype.base_dtype:
elem = gen_math_ops.cast(elem, dtype)
return elem
return _maybe_cast
_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)
_NON_AUTOPACKABLE_TYPES.add(np.ndarray)
def _should_not_autopack(v):
# The condition we really want is
# ops.is_dense_tensor_like(...)
# but it is >5x slower due to abc.ABCMeta.__instancecheck__.
# pylint: disable=unidiomatic-typecheck
# TODO(slebedev): add nest.all?
return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))
# pylint: enable=unidiomatic-typecheck
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref or _should_not_autopack(v):
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is None:
dtype = inferred_dtype
elif dtype != inferred_dtype:
v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
return _autopacking_helper(v, dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
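# For illustration: after this registration, a nested list that contains a
# `Tensor` is packed automatically on conversion, e.g.:
#   x = constant_op.constant(1.0)
#   ops.convert_to_tensor([[x, 2.0], [3.0, 4.0]])  # a 2x2 float32 Tensor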
@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack.
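For example:
```python
x = tf.constant([[1, 4], [2, 5], [3, 6]])  # shape (3, 2)
a, b, c = tf.unstack(x)       # a == [1, 4], b == [2, 5], c == [3, 6]
d, e = tf.unstack(x, axis=1)  # d == [1, 2, 3], e == [4, 5, 6]
```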
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred if
`None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape.dims[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) # [4, 3]
tf.shape(tf.concat([t3, t4], 1)) # [2, 6]
```
As in Python, the `axis` could also be negative numbers. Negative `axis`
are interpreted as counting from the end of the rank, i.e.,
`axis + rank(values)`-th dimension.
For example:
```python
t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
tf.concat([t1, t2], -1)
```
would produce:
```python
[[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing for
axis is 0-based. A positive axis in the range `[0, rank(values))` refers to
the `axis`-th dimension, and a negative axis refers to the `axis +
rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_has_rank(0)
return identity(values[0], name=name)
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
@tf_export(v1=["boolean_mask"])
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
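# 2-D example with `axis` (illustrative): mask along the second dimension
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False])
boolean_mask(tensor, mask, axis=1)  # [[1], [3], [5]]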
```
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), axis=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
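# Flatten the `ndims_mask` masked dimensions of `tensor` into a single
# dimension so that the flattened 1-D mask can be applied with `gather`.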
leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(
tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
[first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
return boolean_mask(tensor, mask, name, axis)
@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse.mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
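# Illustrative usage (the semantics come from `gen_array_ops.unique`):
#   y, idx = tf.unique([1, 1, 2, 4, 4, 4, 7, 8, 8])
#   # y   ==> [1, 2, 4, 7, 8]
#   # idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]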
@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
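# Illustrative usage (the semantics come from `gen_array_ops.unique_with_counts`):
#   y, idx, count = tf.unique_with_counts([1, 1, 2, 4, 4, 4, 7, 8, 8])
#   # y     ==> [1, 2, 4, 7, 8]
#   # idx   ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
#   # count ==> [2, 1, 3, 1, 2]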
@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer, then `value` is split along dimension
`axis` into `num_split` smaller tensors. This requires that `num_split` evenly
divides `value.shape[axis]`.
If `num_or_size_splits` is a 1-D Tensor (or list), we call it `size_splits`
and `value` is split into `len(size_splits)` elements. The shape of the `i`-th
element has the same size as the `value` except along dimension `axis` where
the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either an integer indicating the number of splits along
split_dim or a 1-D integer `Tensor` or Python list containing the sizes of
each output tensor along split_dim. If a scalar then it must evenly divide
`value.shape[axis]`; otherwise the sum of sizes along the split dimension
must match that of the `value`.
axis: An integer or scalar `int32` `Tensor`. The dimension along which to
split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape()[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if isinstance(num_or_size_splits,
six.integer_types + (tensor_shape.Dimension,)):
return gen_array_ops.split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
if size_splits._rank() == 0:
raise ValueError(
"Rank-0 tensors are not supported as the num_or_size_splits argument "
"to split. Argument provided: %s" % (num_or_size_splits,))
if num is None:
size_splits_shape = size_splits._shape_tuple()
if size_splits_shape:
num = size_splits_shape[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops.split_v(
value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
@tf_export("transpose", v1=[])
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
return transpose(a=a, perm=perm, name=name, conjugate=conjugate)
@tf_export(v1=["transpose"])
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
transpose_fn = (
gen_array_ops.conjugate_transpose if
(conjugate and a.dtype.is_complex) else gen_array_ops.transpose)
if perm is None:
a = ops.convert_to_tensor(a, name="a")
if not a.get_shape().ndims:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
else:
rank = a.get_shape().ndims
perm = (rank - 1) - np.arange(rank)
ret = transpose_fn(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
if not context.executing_eagerly():
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = transpose_fn(a, perm, name=name)
return ret
# pylint: disable=invalid-name
@tf_export(
"linalg.matrix_transpose",
v1=["linalg.transpose", "linalg.matrix_transpose", "matrix_transpose"])
@deprecation.deprecated_endpoints("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.linalg.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.linalg.matrix_transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, `linalg.matrix_transpose` returns a new
tensor with the items permuted.
@end_compatibility
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.linalg.matrix_transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat(
(gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm, conjugate=conjugate)
@tf_export("linalg.diag", v1=["linalg.diag", "matrix_diag"])
@deprecation.deprecated_endpoints("matrix_diag")
def matrix_diag(diagonal,
name="diag",
k=0,
num_rows=-1,
num_cols=-1,
padding_value=0):
"""Returns a batched diagonal tensor with given batched diagonal values.
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
diagonals of a matrix, with everything else padded with `padding`. `num_rows`
and `num_cols` specify the dimension of the innermost matrix of the output. If
both are not specified, the op assumes the innermost matrix is square and
infers its size from `k` and the innermost dimension of `diagonal`. If only
one of them is specified, the op assumes the unspecified value is the smallest
possible based on other criteria.
Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor
has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only
one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has
rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
The second innermost dimension of `diagonal` has double meaning. When `k` is
scalar or `k[0] == k[1]`, `M` is part of the batch size `[I, J, ..., L, M]`, and
the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
padding_value ; otherwise
```
Otherwise, `M` is treated as the number of diagonals for the matrix in the
same batch (`M = k[1]-k[0]+1`), and the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, k[1]-d, n-max(d, 0)] ; if d_lower <= d <= d_upper
padding_value ; otherwise
```
where `d = n - m`
For example:
```
# The main diagonal.
diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
[5, 6, 7, 8]])
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]],
[[5, 0, 0, 0],
[0, 6, 0, 0],
[0, 0, 7, 0],
[0, 0, 0, 8]]]
# A superdiagonal (per batch).
diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
[4, 5, 6]])
tf.matrix_diag(diagonal, k = 1)
==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
[0, 0, 2, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]],
[[0, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 6],
[0, 0, 0, 0]]]
# A band of diagonals.
diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3)
[4, 5, 0]],
[[6, 7, 9],
[9, 1, 0]]])
tf.matrix_diag(diagonals, k = (-1, 0))
==> [[[1, 0, 0], # Output shape: (2, 3, 3)
[4, 2, 0],
[0, 5, 3]],
[[6, 0, 0],
[9, 7, 0],
[0, 1, 9]]]
# Rectangular matrix.
diagonal = np.array([1, 2]) # Input shape: (2)
tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
==> [[0, 0, 0, 0], # Output shape: (3, 4)
[1, 0, 0, 0],
[0, 2, 0, 0]]
# Rectangular matrix with inferred num_cols and padding_value = 9.
tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
==> [[9, 9], # Output shape: (3, 2)
[1, 9],
[9, 2]]
```
Args:
diagonal: A `Tensor` with `rank k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
num_rows: The number of rows of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
num_cols: The number of columns of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
Returns:
A Tensor. Has the same type as `diagonal`.
"""
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/kernel_tests/diag_op_test.py)
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(diagonal, "dtype") and diagonal.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_v2(
diagonal=diagonal,
k=k,
num_rows=num_rows,
num_cols=num_cols,
padding_value=padding_value,
name=name)
# Call v1 to maintain forward compatibility.
return gen_array_ops.matrix_diag(diagonal=diagonal, name=name)
@tf_export("linalg.diag_part", v1=["linalg.diag_part", "matrix_diag_part"])
@deprecation.deprecated_endpoints("matrix_diag_part")
@dispatch.add_dispatch_support
def matrix_diag_part(
input, # pylint:disable=redefined-builtin
name="diag_part",
k=0,
padding_value=0):
"""Returns the batched diagonal part of a batched tensor.
Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
`input`.
Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
Let `max_diag_len` be the maximum length among all diagonals to be extracted,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
Let `num_diags` be the number of diagonals to extract,
`num_diags = k[1] - k[0] + 1`.
If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
`[I, J, ..., L, max_diag_len]` and values:
```
diagonal[i, j, ..., l, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
padding_value ; otherwise.
```
where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
Otherwise, the output tensor has rank `r` with dimensions
`[I, J, ..., L, num_diags, max_diag_len]` with values:
```
diagonal[i, j, ..., l, m, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
padding_value ; otherwise.
```
where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.
The input must be at least a matrix.
For example:
```
input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
[5, 6, 7, 8],
[9, 8, 7, 6]],
[[5, 4, 3, 2],
[1, 2, 3, 4],
[5, 6, 7, 8]]])
# A main diagonal from each batch.
tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
[5, 2, 7]]
# A superdiagonal from each batch.
tf.matrix_diag_part(input, k = 1)
==> [[2, 7, 6], # Output shape: (2, 3)
[4, 3, 8]]
# A tridiagonal band from each batch.
tf.matrix_diag_part(input, k = (-1, 1))
==> [[[2, 7, 6], # Output shape: (2, 3, 3)
[1, 6, 7],
[5, 8, 0]],
[[4, 3, 8],
[5, 2, 7],
[1, 6, 0]]]
# Padding value = 9
tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
==> [[[4, 9, 9], # Output shape: (2, 3, 3)
[3, 8, 9],
[2, 7, 6]],
[[2, 9, 9],
[3, 4, 9],
[4, 3, 8]]]
```
Args:
input: A `Tensor` with `rank k >= 2`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
Returns:
A Tensor containing diagonals of `input`. Has the same type as `input`.
"""
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/kernel_tests/diag_op_test.py)
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(input, "dtype") and input.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_part_v2(
input=input, k=k, padding_value=padding_value, name=name)
# Call v1 to maintain forward compatibility.
return gen_array_ops.matrix_diag_part(input=input, name=name)
@tf_export("linalg.set_diag", v1=["linalg.set_diag", "matrix_set_diag"])
@deprecation.deprecated_endpoints("matrix_set_diag")
def matrix_set_diag(
input, # pylint:disable=redefined-builtin
diagonal,
name="set_diag",
k=0):
"""Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the
same shape and values as `input`, except for the specified diagonals of the
innermost matrices. These will be overwritten by the values in `diagonal`.
`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
If `k` is scalar or `k[0] == k[1]`:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
input[i, j, ..., l, m, n] ; otherwise
```
Otherwise,
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, k[1]-d, n-max(d, 0)] ; if d_lower <= d <= d_upper
input[i, j, ..., l, m, n] ; otherwise
```
where `d = n - m`
For example:
```
# The main diagonal.
input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]])
diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
[4, 5, 6]])
tf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
[7, 2, 7, 7],
[7, 7, 3, 7]],
[[4, 7, 7, 7],
[7, 5, 7, 7],
[7, 7, 6, 7]]]
# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
[7, 7, 2, 7],
[7, 7, 7, 3]],
[[7, 4, 7, 7],
[7, 7, 5, 7],
[7, 7, 7, 6]]]
# A band of diagonals.
diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3)
[4, 5, 0]],
[[6, 1, 2],
[3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 0))
==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
[4, 2, 7, 7],
[0, 5, 3, 7]],
[[6, 7, 7, 7],
[3, 1, 7, 7],
[7, 4, 2, 7]]]
```
Args:
input: A `Tensor` with rank `k + 1`, where `k >= 1`.
diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,
otherwise. `k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
Returns:
A `Tensor`. Has the same type as `input`.
"""
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/kernel_tests/diag_op_test.py)
return gen_array_ops.matrix_set_diag_v2(
input=input, diagonal=diagonal, k=k, name=name)
# Call v1 to maintain forward compatibility.
return gen_array_ops.matrix_set_diag(
input=input, diagonal=diagonal, name=name)
# pylint: enable=invalid-name
def _constant_if_small(value, shape, dtype, name):
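"""Returns a constant for `value` if it has fewer than 1000 elements, else None."""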
try:
if np.prod(shape) < 1000:
return constant(value, shape=shape, dtype=dtype, name=name)
except TypeError:
# Happens when shape is a Tensor, list with Tensor elements, etc.
pass
return None
@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(zero, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["zeros_like"])
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(tensor, dtype, name, optimize)
@tf_export("zeros_like", v1=[])
@dispatch.add_dispatch_support
def zeros_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]] with dtype=int32
# If the dtype of the input `tensor` is `float32`, the output is also `float32`.
tensor = tf.constant([[1.0, 2.0, 3.0], [4, 5, 6]])
tf.zeros_like(tensor) # [[0., 0., 0.], [0., 0., 0.]] with dtype=float32
# To specify the desired dtype of the output, pass `dtype` explicitly.
tensor = tf.constant([[1.0, 2.0, 3.0], [4, 5, 6]])
tf.zeros_like(tensor, dtype=tf.int32) # [[0, 0, 0], [0, 0, 0]] with dtype=int32
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(input, dtype, name, optimize=True)
def zeros_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 zeros_like API calls."""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if context.executing_eagerly():
if dtype is not None and dtype != tensor.dtype:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
with ops.device(tensor.device):
return gen_array_ops.zeros_like(tensor, name=name)
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor.shape.is_fully_defined() and
tensor.dtype != dtypes.variant):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops.zeros_like(tensor, name=name)
@tf_export(v1=["ones_like"])
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,
`complex128` or `bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(tensor, dtype, name, optimize)
@tf_export("ones_like", v1=[])
@dispatch.add_dispatch_support
def ones_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to 1. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(input, dtype, name, optimize=True)
def ones_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 ones_like API calls."""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if not context.executing_eagerly():
ret.set_shape(tensor.get_shape())
return ret
@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(one, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
@tf_export(v1=["placeholder_with_default"])
def placeholder_with_default(input, shape, name=None): # pylint: disable=redefined-builtin
"""A placeholder op that passes through `input` when its output is not fed.
Args:
input: A `Tensor`. The default value to produce when output is not fed.
shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of
the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
return gen_array_ops.placeholder_with_default(input, shape, name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.sparse.placeholder(tf.float32)
y = tf.sparse.reduce_sum(x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.compat.v1.SparseTensorValue(indices, values, shape)}))  # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64,
shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
@tf_export("pad", v1=[])
def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
return pad(tensor, paddings, mode, name, constant_values)
@tf_export(v1=["pad"])
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) has passed,
# remove the "Pad" fallback here.
if not tensor_util.is_tensor(constant_values) and constant_values == 0:
result = gen_array_ops.pad(tensor, paddings, name=name)
else:
result = gen_array_ops.pad_v2(
tensor, paddings, constant_values, name=name)
elif mode == "REFLECT":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if not context.executing_eagerly():
paddings_constant = tensor_util.constant_value(
result.op.inputs[1], partial=True)
input_shape = result.op.inputs[0].shape
if (input_shape.ndims is not None and
not result.shape.is_fully_defined() and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or any((x is None for x in padding)):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
@tf_export("meshgrid")
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
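  With `indexing='ij'` the first two outputs are swapped relative to the
  default 'xy' convention (an illustrative sketch; the values follow from the
  broadcasting rule described above):
  ```python
  X, Y = tf.meshgrid(x, y, indexing='ij')
  # X = [[1, 1, 1],
  #      [2, 2, 2],
  #      [3, 3, 3]]
  # Y = [[4, 5, 6],
  #      [4, 5, 6],
  #      [4, 5, 6]]
  ```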
Args:
*args: `Tensor`s with rank 1.
**kwargs:
- indexing: Either 'xy' or 'ij' (optional, default: 'xy').
- name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
Raises:
TypeError: When no keyword arguments (kwargs) are passed.
ValueError: When indexing keyword argument is not one of `xy` or `ij`.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO(nolivia): improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
  unknown = None  # Sentinel: the dimension size cannot be inferred statically.
  use_full_range = None  # Sentinel: a slice bound was omitted; use the full range.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
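# Worked example for _compute_size_of_strided_dim (an illustrative sketch):
#   spec = slice(1, 7, 2), size = 10, shrink = False
#   begin = 1, end = 7, interval_length = 6, stride = 2
#   => 6 // 2 + 0 = 3 elements (indices 1, 3, 5).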
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@tf_export("edit_distance")
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(
hypothesis,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(
truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
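  For example (an illustrative case; the values follow from the constraints
  above):
  ```python
  paddings, crops = tf.required_space_to_batch_paddings(
      input_shape=[5, 7], block_shape=[2, 3])
  # paddings == [[0, 1], [0, 2]]  (pads 5 -> 6 and 7 -> 9)
  # crops    == [[0, 1], [0, 2]]
  ```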
  Raises:
    ValueError: If called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape().dims[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],
name="crops")
return result_paddings, result_crops
@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch( # pylint: disable=missing-docstring
input, paddings, block_size=None, name=None, block_shape=None): # pylint: disable=redefined-builtin
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
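# Example usage (a hedged sketch; block_size=2 halves each spatial dimension
# and multiplies the batch dimension by block_size**2):
#   x = tf.ones([1, 4, 4, 1])
#   tf.compat.v1.space_to_batch(x, paddings=[[0, 0], [0, 0]], block_size=2)
#   # => shape [4, 2, 2, 1]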
@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
def space_to_batch_v2(input, block_shape, paddings, name=None): # pylint: disable=redefined-builtin
return space_to_batch_nd(input, block_shape, paddings, name)
space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("nn.space_to_depth", v1=[])
def space_to_depth_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("nn.depth_to_space", v1=[])
def depth_to_space_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export(v1=["batch_to_space"])
def batch_to_space(input, crops, block_size, name=None, block_shape=None): # pylint: disable=redefined-builtin,missing-docstring
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("batch_to_space", v1=[])
def batch_to_space_v2(input, block_shape, crops, name=None): # pylint: disable=redefined-builtin
"""BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
shape `block_shape + [batch]`, interleaves these blocks back into the grid
defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
same rank as the input. The spatial dimensions of this intermediate result
are then optionally cropped according to `crops` to produce the output. This
is the reverse of SpaceToBatch. See below for a precise description.
Args:
input: A `Tensor`. N-D with shape `input_shape = [batch] + spatial_shape +
remaining_shape`, where spatial_shape has M dimensions.
block_shape: A `Tensor`. Must be one of the following types: `int32`,
`int64`. 1-D with shape `[M]`, all values must be >= 1. For backwards
compatibility with TF 1.0, this parameter may be an int, in which case it
is converted to `numpy.array([block_shape, block_shape],
dtype=numpy.int64)`.
crops: A `Tensor`. Must be one of the following types: `int32`, `int64`. 2-D
with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start,
crop_end]` specifies the amount to crop from input dimension `i + 1`,
which corresponds to spatial dimension `i`. It is required that
`crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
    This operation is equivalent to the following steps:
    1. Reshape `input` to `reshaped` of shape:
       [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape),
        input_shape[1], ..., input_shape[N-1]]
    2. Permute dimensions of `reshaped` to produce `permuted` of shape:
       [batch / prod(block_shape), input_shape[1], block_shape[0], ...,
        input_shape[M], block_shape[M-1], input_shape[M+1], ...,
        input_shape[N-1]]
    3. Reshape `permuted` to produce `reshaped_permuted` of shape:
       [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,
        input_shape[M] * block_shape[M-1], input_shape[M+1], ...,
        input_shape[N-1]]
    4. Crop the start and end of dimensions `[1, ..., M]` of
       `reshaped_permuted` according to `crops` to produce the output of
       shape:
       [batch / prod(block_shape), input_shape[1] * block_shape[0] -
        crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] -
        crops[M-1,0] - crops[M-1,1], input_shape[M+1], ..., input_shape[N-1]]
    Some examples:
    (1) For the following input of shape `[4, 1, 1, 1]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
        ```
        [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
        ```
        The output tensor has shape `[1, 2, 2, 1]` and value:
        ```
        x = [[[[1], [2]], [[3], [4]]]]
        ```
    (2) For the following input of shape `[4, 1, 1, 3]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
        ```
        [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
        ```
        The output tensor has shape `[1, 2, 2, 3]` and value:
        ```
        x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
        ```
    (3) For the following input of shape `[4, 2, 2, 1]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
        ```
        x = [[[[1], [3]], [[9], [11]]],
             [[[2], [4]], [[10], [12]]],
             [[[5], [7]], [[13], [15]]],
             [[[6], [8]], [[14], [16]]]]
        ```
        The output tensor has shape `[1, 4, 4, 1]` and value:
        ```
        x = [[[[1], [2], [3], [4]],
              [[5], [6], [7], [8]],
              [[9], [10], [11], [12]],
              [[13], [14], [15], [16]]]]
        ```
    (4) For the following input of shape `[8, 1, 3, 1]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:
        ```
        x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
             [[[0], [2], [4]]], [[[0], [10], [12]]],
             [[[0], [5], [7]]], [[[0], [13], [15]]],
             [[[0], [6], [8]]], [[[0], [14], [16]]]]
        ```
        The output tensor has shape `[2, 2, 4, 1]` and value:
        ```
        x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
             [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
        ```
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if isinstance(block_shape, int):
block_shape = np.array([block_shape, block_shape], dtype=np.int64)
return batch_to_space_nd(
input=input, block_shape=block_shape, crops=crops, name=name)
@tf_export("one_hot")
@dispatch.add_dispatch_support
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
If `on_value` is not provided, it will default to the value `1` with type
`dtype`
If `off_value` is not provided, it will default to the value `0` with type
`dtype`
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer
to a non-ragged axis. The output will be equivalent to applying 'one_hot' on
the values of the RaggedTensor, and creating a new RaggedTensor from the
result.
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
indices = tf.ragged.constant([[0, 1], [2]])
depth = 3
tf.one_hot(indices, depth) # output: [2 x None x 3]
# [[[1., 0., 0.],
# [0., 1., 0.]],
# [[0., 0., 1.]]]
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If the dtype of either `on_value` or `off_value` doesn't match
      `dtype`.
    TypeError: If the dtypes of `on_value` and `off_value` don't match one
      another.
"""
with ops.name_scope(
name, "one_hot",
[indices, depth, on_value, off_value, axis, dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = (
ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists else None)
off_dtype = (
ops.convert_to_tensor(off_value).dtype.base_dtype
if off_exists else None)
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if on_exists and on_dtype != dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype parameter {1}".format(on_dtype, dtype))
if off_exists and off_dtype != dtype:
raise TypeError("dtype {0} of off_value does not match "
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
name)
def _all_dimensions(x):
"""Returns a 1D-tensor listing all dimensions in x."""
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
r = x.dense_shape.get_shape().dims[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(r), dtype=dtypes.int32)
# Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Returns a mask tensor representing the first N positions of each cell.
If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
```
mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
```
Examples:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
```
Args:
lengths: integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in `lengths`.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
Raises:
ValueError: if `maxlen` is not a scalar.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if maxlen is None:
maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
    # authoritative cast type. Whenever maxlen fits into tf.int32, so do the
    # lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
@tf_export(v1=["squeeze"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`.
Must be specified if `input` is a `RaggedTensor`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "squeeze_dims",
squeeze_dims)
if np.isscalar(axis):
axis = [axis]
return gen_array_ops.squeeze(input, axis, name)
@tf_export("squeeze", v1=[])
@dispatch.add_dispatch_support
def squeeze_v2(input, axis=None, name=None):
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a
deprecated `squeeze_dims` argument.
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`. Must be specified if `input` is a
`RaggedTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: The input cannot be converted to a tensor, or the specified
axis cannot be squeezed.
"""
# pylint: disable=redefined-builtin
return squeeze(input, axis, name)
@tf_export(v1=["where"])
@deprecation.deprecated(
date=None,
instructions="Use tf.where in 2.0, "
"which has the same broadcast rule as np.where")
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `x` and `y` must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
If `x` and `y` are tensors of higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
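  For example (an illustrative sketch):
  ```python
  tf.where([True, False, False, True])          # [[0], [3]]
  tf.where([True, False], x=[1, 2], y=[3, 4])   # [1, 4]
  ```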
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
    name: A name for the operation (optional).
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
@tf_export("where", v1=["where_v2"])
def where_v2(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `condition`, `x` and `y` must be broadcastable to the same
shape.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
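  For example (an illustrative sketch; scalar `x` and `y` are broadcast
  against `condition`, following the np.where broadcast rule):
  ```python
  tf.where([True, False, False, True], x=1, y=0)  # [1, 0, 0, 1]
  ```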
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which is of the same type as `y`, and may be broadcastable with
`condition` and `y`.
y: A Tensor which is of the same type as `x`, and may be broadcastable with
`condition` and `x`.
    name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `x` and `y`, and shape that
is broadcast from `condition`, `x`, and `y`, if `x`, `y` are non-None.
    Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(None,
"seq_dim is deprecated, use seq_axis instead",
"seq_dim")
@deprecation.deprecated_args(None,
"batch_dim is deprecated, use batch_axis instead",
"batch_dim")
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
@tf_export("reverse_sequence", v1=[])
def reverse_sequence_v2(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None):
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
# pylint: enable=redefined-builtin
@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
def gather(params,
indices,
validate_indices=None,
name=None,
axis=None,
batch_dims=0): # pylint: disable=g-doc-args
r"""Gather slices from params axis axis according to indices.
Gather slices from params axis `axis` according to `indices`. `indices` must
be an integer tensor of any dimension (usually 0-D or 1-D).
For 0-D (scalar) `indices`:
> `output`$$[p_0, ..., p_{axis-1}, \hspace{5.1em}
> p_{axis + 1}, ..., p_{N-1}]$$ =\
> `params`$$[p_0, ..., p_{axis-1}, \hspace{1em}
> indices, \hspace{1em}
> p_{axis + 1}, ..., p_{N-1}]$$.
For 1-D (vector) `indices` with `batch_dims=0`:
> `output`$$[p_0, ..., p_{axis-1}, \hspace{2.6em}
> i, \hspace{2.6em}
> p_{axis + 1}, ..., p_{N-1}]$$ =\
> `params`$$[p_0, ..., p_{axis-1}, \hspace{1em}
> indices[i], \hspace{1em}
> p_{axis + 1}, ..., p_{N-1}]$$.
In the general case, produces an output tensor where:
$$\begin{align*}
output[p_0, &..., p_{axis-1}, &
&i_{B}, ..., i_{M-1}, &
p_{axis + 1}, &..., p_{N-1}] = \\
params[p_0, &..., p_{axis-1}, &
indices[p_0, ..., p_{B-1}, &i_{B}, ..., i_{M-1}], &
p_{axis + 1}, &..., p_{N-1}]
\end{align*}$$
Where $$N$$=`ndims(params)`, $$M$$=`ndims(indices)`, and $$B$$=`batch_dims`.
Note that params.shape[:batch_dims] must be identical to
indices.shape[:batch_dims].
The shape of the output tensor is:
> `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +
> params.shape[axis + 1:]`.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the corresponding
output value.
See also `tf.gather_nd`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png"
alt>
</div>
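  For example (an illustrative sketch):
  ```python
  params = tf.constant(['p0', 'p1', 'p2', 'p3', 'p4', 'p5'])
  tf.gather(params, [2, 0, 2, 5])      # ['p2', 'p0', 'p2', 'p5']
  tf.gather(params, [[0, 1], [1, 5]])  # [['p0', 'p1'], ['p1', 'p5']]
  ```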
Args:
params: The `Tensor` from which to gather values. Must be at least rank
`axis + 1`.
indices: The index `Tensor`. Must be one of the following types: `int32`,
`int64`. Must be in range `[0, params.shape[axis])`.
validate_indices: Deprecated, does nothing.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
batch_dims: An `integer`. The number of batch dimensions. Must be less
than `rank(indices)`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
"""
del validate_indices
if compat.forward_compatible(2019, 9, 10):
if axis is None:
axis = batch_dims
if axis != 0:
return gen_array_ops.gather_v2(
params, indices, axis, batch_dims=batch_dims, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(
params, indices, axis, name=name)
if batch_dims != 0:
with ops.name_scope(name, "Gather", [params, indices, axis]):
return _batch_gather(params, indices, batch_dims, axis)
if axis is None:
axis = batch_dims
if axis != 0:
# Note that we do a sparse_read here to avoid snapshotting the entire
# resource variable and doing a gather, which can be inefficient and lead to
# subtle race conditions. TODO(apassos) implement axis != 0 on sparse_read
return gen_array_ops.gather_v2(params, indices, axis, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables without
# introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params,
indices,
validate_indices=None,
axis=None,
batch_dims=0,
name=None):
return gather(
params,
indices,
validate_indices=validate_indices,
name=name,
axis=axis,
batch_dims=batch_dims)
gather_v2.__doc__ = gather.__doc__
@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
"2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
"with `batch_dims=-1` instead.") # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
"""Gather slices from params according to indices with leading batch dims."""
with ops.name_scope(name, "BatchGather", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if indices.shape.ndims is None:
raise ValueError(
"batch_gather does not allow indices with unknown shape.")
return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)
def _batch_gather(params, indices, batch_dims, axis=None):
r"""Gather slices from params according to indices with leading batch dims.
This operation assumes that the leading `batch_dims` dimensions of `indices`
and `params` are batch dimensions; and performs a `tf.gather` operation within
each batch. (If `batch_dims` is not specified, then it defaults to
`rank(indices)-1`.) In the case in which `batch_dims==0`, this operation
is equivalent to `tf.gather`.
Args:
params: A Tensor. The tensor from which to gather values.
indices: A Tensor. Must be one of the following types: int32, int64. Index
tensor. Must be in range `[0, params.shape[batch_dims]]`.
batch_dims: An integer or none. The number of batch dimensions. Must be
less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
Returns:
A Tensor. Has the same type as `params`.
Raises:
ValueError: if `indices` has an unknown shape.
"""
if batch_dims is not None and not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
indices_ndims = indices.shape.ndims
if indices_ndims is None:
raise ValueError("tf.gather does not allow indices with unknown "
"rank when batch_dims is specified.")
if batch_dims is None:
batch_dims = indices_ndims - 1
if batch_dims < 0:
batch_dims += indices_ndims
if batch_dims < 0 or batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params.shape.ndims))
# Handle axis by transposing the axis dimension to be the first non-batch
# dimension, recursively calling batch_gather with axis=0, and then
# transposing the result to put the pre-axis dimensions before the indices
# dimensions.
if axis is not None and axis != batch_dims:
# Adjust axis to be positive.
if not isinstance(axis, int):
      axis = where_v2(axis < 0, axis + rank(params), axis)
elif axis < 0 and params.shape.ndims is None:
      axis = axis + rank(params)
else:
if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
raise ValueError("axis (%d) out of range [%d, %d)" %
(axis, -params.shape.ndims, params.shape.ndims))
if axis < 0:
axis += params.shape.ndims
if axis < batch_dims:
raise ValueError("batch_dims = %d must be less than or equal to "
"axis = %d" % (batch_dims, axis))
# Move params[axis] up to params[batch_dims].
perm = [
list(range(batch_dims)), [axis],
gen_math_ops._range(batch_dims, axis, 1),
gen_math_ops._range(axis + 1, rank(params), 1)
]
params = transpose(params, concat(perm, axis=0))
result = _batch_gather(params, indices, batch_dims=batch_dims)
# Move the result dimensions corresponding to params[batch_dims:axis]
# to just before the dimensions corresponding to indices[batch_dims:].
params_start = indices_ndims + axis - batch_dims
perm = [
list(range(batch_dims)),
gen_math_ops._range(indices_ndims, params_start, 1),
list(range(batch_dims, indices_ndims)),
gen_math_ops._range(params_start, rank(result), 1)
]
return transpose(result, perm=concat(perm, axis=0))
indices_shape = shape(indices)
params_shape = shape(params)
batch_indices = indices
indices_dtype = indices.dtype.base_dtype
accum_dim_value = ones((), dtype=indices_dtype)
# Use correct type for offset index computation
casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
for dim in range(batch_dims, 0, -1):
dim_value = casted_params_shape[dim - 1]
accum_dim_value *= casted_params_shape[dim]
start = zeros((), dtype=indices_dtype)
step = ones((), dtype=indices_dtype)
dim_indices = gen_math_ops._range(start, dim_value, step)
dim_indices *= accum_dim_value
dim_shape = stack(
[1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
batch_indices += reshape(dim_indices, dim_shape)
flat_indices = reshape(batch_indices, [-1])
outer_shape = params_shape[batch_dims + 1:]
flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
False)
flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
axis=0))
flat_result = gather(flat_params, flat_indices)
result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
final_shape = indices.get_shape()[:batch_dims].merge_with(
params.get_shape()[:batch_dims])
final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
result.set_shape(final_shape)
return result
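# Illustrative sketch of the offset trick above (assumed shapes):
#   params: [2, 3, 4], indices: [2, 5] with values in [0, 3), batch_dims=1.
# Row i of `indices` is offset by i * 3, so a single flat gather over
# params reshaped to [6, 4] reads rows from the correct batch element.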
@tf_export(v1=["gather_nd", "manip.gather_nd"])
@dispatch.add_dispatch_support
@deprecated_endpoints("manip.gather_nd")
def gather_nd(params, indices, name=None, batch_dims=0):
r"""Gather slices from `params` into a Tensor with shape specified by `indices`.
  `indices` is a K-dimensional integer tensor, best thought of as a
(K-1)-dimensional tensor of indices into `params`, where each element defines
a slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
The last dimension of `indices` can be at most the rank of
`params`:
indices.shape[-1] <= params.rank
The last dimension of `indices` corresponds to elements
(if `indices.shape[-1] == params.rank`) or slices
(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
of `params`. The output tensor has shape
indices.shape[:-1] + params.shape[indices.shape[-1]:]
  Additionally, both `params` and `indices` can have M leading batch
  dimensions that exactly match. In this case `batch_dims` must be M.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the
corresponding output value.
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]
indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
  The examples below are for the case when only `indices` have leading extra
  dimensions. If both `params` and `indices` have leading batch dimensions,
  use the `batch_dims` parameter to run `gather_nd` in batch mode.
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
[[['a0', 'b0'], ['c0', 'd0']]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
[['a0', 'b0'], ['c1', 'd1']]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
Examples with batched 'params' and 'indices':
```python
batch_dims = 1
indices = [[1], [0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
batch_dims = 1
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0']], [['a1', 'b1']]]
batch_dims = 1
indices = [[[1, 0]], [[0, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0'], ['b1']]
```
See also `tf.gather`.
Args:
params: A `Tensor`. The tensor from which to gather values.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Index tensor.
name: A name for the operation (optional).
batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.
Returns:
A `Tensor`. Has the same type as `params`.
"""
batch_dims_ = tensor_util.constant_value(batch_dims)
if batch_dims_ is not None:
batch_dims = int(batch_dims_)
if batch_dims == 0:
if compat.forward_compatible(2019, 4, 29):
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.gather_nd(indices, name=name)
except AttributeError:
return gen_array_ops.gather_nd(params, indices, name=name)
else:
return gen_array_ops.gather_nd(params, indices, name=name)
else:
return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)
@tf_export("gather_nd", v1=[])
@dispatch.add_dispatch_support
def gather_nd_v2(params, indices, batch_dims=0, name=None):
return gather_nd(params, indices, name=name, batch_dims=batch_dims)
gather_nd_v2.__doc__ = gather_nd.__doc__
def batch_gather_nd(params, indices, batch_dims, name=None):
"""gather_nd implementation with batch support."""
with ops.name_scope(name, "BatchGatherND", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
if batch_dims < 0:
raise ValueError("tf.gather_nd does not allow negative batch_dims.")
params_ndims = params.shape.ndims
indices_ndims = indices.shape.ndims
if indices_ndims is not None and batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params_ndims is not None and batch_dims >= params_ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params_ndims))
expand = batch_dims == 0
if expand:
# Normally gather_nd will be called when batch_dims == 0.
# But if this function is called with batch_dims = 0, e.g. for testing
# purposes, this adds a dummy batch dimension to make batch_dims = 1.
params = expand_dims(params, axis=0)
indices = expand_dims(indices, axis=0)
batch_dims = 1
params_shape = shape(params)
indices_shape = shape(indices)
batch_shape = params_shape[:batch_dims]
batch_size = gen_math_ops.prod(batch_shape, [0])
index_internal_ndims = rank(indices) - batch_dims - 1
indices_internal_shape = indices_shape[batch_dims:-1]
# Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
# with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
    # 'indices' such that it has shape [i1, ..., iK, D], where D <= M + N and
    # slices into the entire 'params' tensor.
# Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
# grid of size B1 x B2.
batch_dim_list = unstack(batch_shape, axis=0)
dim_ranges = [
gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
for x in batch_dim_list
]
mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
# Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
index_grid = transpose(stack(flat_list, axis=0))
# We need to concatenate these batch coordinates with the internal indices.
# concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
# So we reshape them both to [(B1.B2), i1, ..., iK, *]
index_grid_shape = shape(index_grid)
index_grid = reshape(
index_grid,
concat([
index_grid_shape[:1],
ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]
],
axis=0))
tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
index_grid = tile(index_grid, multiples=tile_shape)
# index_grid now has shape [(B1.B2), i1, ..., iK, 2]
flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
flat_indices = reshape(indices, shape=flat_shape)
# flat_indices now has shape [(B1.B2), i1, ..., iK, C]
indices = concat((index_grid, flat_indices), axis=-1)
# indices has shape [(B1.B2), i1, ..., iK, 2+C]
out = gen_array_ops.gather_nd(params, indices)
# out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
# its original form.
out_shape = shape(out)
out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
if expand:
out = squeeze(out, axis=0)
return out
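# Example usage (a hedged sketch; mirrors the batched example in the
# tf.gather_nd docstring above):
#   params  = [[['a0', 'b0'], ['c0', 'd0']],
#              [['a1', 'b1'], ['c1', 'd1']]]
#   indices = [[1], [0]]
#   batch_gather_nd(params, indices, batch_dims=1)
#   # => [['c0', 'd0'], ['a1', 'b1']]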
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
@tf_export(v1=["quantize_v2"])
@deprecation.deprecated(
"2017-10-25",
"`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
"instead.") # pylint: disable=missing-docstring
def quantize_v2(
input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
name=None,
round_mode="HALF_AWAY_FROM_ZERO"):
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode)
quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
# We want to expose tf.quantization.quantize instead of the deprecated
# tf.quantize endpoint; tf.quantize can be removed in a future version of
# TensorFlow.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@deprecation.deprecated_endpoints("quantize")
def quantize(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
round_mode="HALF_AWAY_FROM_ZERO",
name=None):
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name)
@tf_export("quantization.quantize_and_dequantize")
def quantize_and_dequantize(input, # pylint: disable=redefined-builtin
input_min,
input_max,
signed_input=True,
num_bits=8,
range_given=False,
round_mode="HALF_TO_EVEN",
name=None,
narrow_range=False):
"""Quantizes then dequantizes a tensor.
Args:
input: A `Tensor` to quantize and dequantize.
input_min: If range_given=True, the minimum input value that needs to be
represented in the quantized representation.
input_max: If range_given=True, the maximum input value that needs to be
represented in the quantized representation.
signed_input: True if the quantization is signed or unsigned.
num_bits: The bitwidth of the quantization.
range_given: If true use `input_min` and `input_max` for the range of the
input, otherwise determine min and max from the input `Tensor`.
round_mode: Rounding mode when rounding from float values to quantized ones.
name: Optional name for the operation.
narrow_range: If true, then the absolute value of the quantized minimum
value is the same as the quantized maximum value, instead of 1 greater.
i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
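  Example (illustrative usage sketch; output values are only approximate
  because they round-trip through an 8-bit representation):

  ```python
  x = tf.constant([-1.0, 0.0, 1.0, 2.0])
  y = tf.quantization.quantize_and_dequantize(
      x, input_min=-1.0, input_max=2.0, range_given=True)
  ```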
Returns:
A `Tensor`. Each element is the result of quantizing and dequantizing the
corresponding element of `input`.
"""
return gen_array_ops.quantize_and_dequantize_v2(
input,
input_min=input_min,
input_max=input_max,
signed_input=signed_input,
num_bits=num_bits,
range_given=range_given,
round_mode=round_mode,
narrow_range=narrow_range,
name=name)
@tf_export("searchsorted")
def searchsorted(sorted_sequence,
values,
side="left",
out_type=dtypes.int32,
name=None):
"""Searches input tensor for values on the innermost dimension.
A 2-D example:
```
sorted_sequence = [[0, 3, 9, 9, 10],
[1, 2, 3, 4, 5]]
values = [[2, 4, 9],
[0, 2, 6]]
result = searchsorted(sorted_sequence, values, side="left")
result == [[1, 2, 2],
[0, 1, 5]]
result = searchsorted(sorted_sequence, values, side="right")
result == [[1, 2, 4],
[0, 2, 5]]
```
Args:
sorted_sequence: N-D `Tensor` containing a sorted sequence.
values: N-D `Tensor` containing the search values.
side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
upper_bound.
out_type: The output type (`int32` or `int64`). Default is `tf.int32`.
name: Optional name for the operation.
Returns:
An N-D `Tensor` the size of values containing the result of applying either
lower_bound or upper_bound (depending on side) to each value. The result
is not a global index to the entire `Tensor`, but the index in the last
dimension.
Raises:
    ValueError: If the last dimension of `sorted_sequence` has more than
      `2^31-1` elements; if the total size of `values` exceeds `2^31 - 1`
      elements; or if the first `N-1` dimensions of the two tensors don't
      match.
"""
sequence_size = shape_internal(sorted_sequence)[-1]
values_size = shape_internal(values)[-1]
sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])
values_2d = reshape(values, [-1, values_size])
if side == "right":
output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,
name)
elif side == "left":
output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,
name)
else:
raise ValueError("side must be either 'right' or 'left'. Saw: %s." % side)
return reshape(output, shape_internal(values))
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
@tf_export("image.extract_patches")
def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):
r"""Extract `patches` from `images`.
This op collects patches from the input image, as if applying a
convolution. All extracted patches are stacked in the depth (last) dimension
of the output.
Specifically, the op extracts patches of shape `sizes` which are `strides`
apart in the input image. The output is subsampled using the `rates` argument,
in the same manner as "atrous" or "dilated" convolutions.
The result is a 4D tensor which is indexed by batch, row, and column.
`output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`
which is taken from the input starting at
`images[i, x*strides[1], y*strides[2]]`.
Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where
`depth` is `images.shape[3]`.
The output elements are taken from the input at intervals given by the `rate`
argument, as in dilated convolutions.
The `padding` argument has no effect on the size of each patch, it determines
how many patches are extracted. If `VALID`, only patches which are fully
contained in the input image are included. If `SAME`, all patches whose
starting point is inside the input are included, and areas outside the input
default to zero.
Example:
```
n = 10
# images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100
images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]
# We generate two outputs as follows:
# 1. 3x3 patches with stride length 5
# 2. Same as above, but the rate is increased to 2
tf.extract_image_patches(images=images,
                           sizes=[1, 3, 3, 1],
strides=[1, 5, 5, 1],
rates=[1, 1, 1, 1],
padding='VALID')
# Yields:
[[[[ 1 2 3 11 12 13 21 22 23]
[ 6 7 8 16 17 18 26 27 28]]
[[51 52 53 61 62 63 71 72 73]
[56 57 58 66 67 68 76 77 78]]]]
```
If we mark the pixels in the input image which are taken for the output with
`*`, we see the pattern:
```
* * * 4 5 * * * 9 10
* * * 14 15 * * * 19 20
* * * 24 25 * * * 29 30
31 32 33 34 35 36 37 38 39 40
41 42 43 44 45 46 47 48 49 50
* * * 54 55 * * * 59 60
* * * 64 65 * * * 69 70
* * * 74 75 * * * 79 80
81 82 83 84 85 86 87 88 89 90
91 92 93 94 95 96 97 98 99 100
```
```
tf.extract_image_patches(images=images,
sizes=[1, 3, 3, 1],
strides=[1, 5, 5, 1],
rates=[1, 2, 2, 1],
padding='VALID')
# Yields:
[[[[ 1 3 5 21 23 25 41 43 45]
[ 6 8 10 26 28 30 46 48 50]]
[[ 51 53 55 71 73 75 91 93 95]
[ 56 58 60 76 78 80 96 98 100]]]]
```
We can again draw the effect, this time using the symbols `*`, `x`, `+` and
`o` to distinguish the patches:
```
* 2 * 4 * x 7 x 9 x
11 12 13 14 15 16 17 18 19 20
* 22 * 24 * x 27 x 29 x
31 32 33 34 35 36 37 38 39 40
* 42 * 44 * x 47 x 49 x
+ 52 + 54 + o 57 o 59 o
61 62 63 64 65 66 67 68 69 70
+ 72 + 74 + o 77 o 79 o
81 82 83 84 85 86 87 88 89 90
+ 92 + 94 + o 97 o 99 o
```
Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
sizes: The size of the extracted patches. Must
be [1, size_rows, size_cols, 1].
strides: A 1-D Tensor of length 4. How far the centers of two consecutive
patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
This is the input stride, specifying how far two consecutive patch samples
are in the input. Equivalent to extracting patches with `patch_sizes_eff =
patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling
them spatially by a factor of `rates`. This is equivalent to `rate` in
dilated (a.k.a. Atrous) convolutions.
padding: The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A 4-D Tensor of the same type as the input.
"""
return gen_array_ops.extract_image_patches(images, sizes, strides, rates,
padding, name)
@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@deprecation.deprecated_args(None, "ksizes is deprecated, use sizes instead",
"ksizes")
def extract_image_patches( # pylint: disable=missing-docstring
images,
ksizes=None,
strides=None,
rates=None,
padding=None,
name=None,
sizes=None):
ksizes = deprecation.deprecated_argument_lookup("sizes", sizes, "ksizes",
ksizes)
return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,
padding, name)
extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
@tf_export("fingerprint")
def fingerprint(data, method="farmhash64", name=None):
r"""Generates fingerprint values.
Generates fingerprint values of `data`.
Fingerprint op considers the first dimension of `data` as the batch dimension,
and `output[i]` contains the fingerprint value generated from contents in
`data[i, ...]` for all `i`.
Fingerprint op writes fingerprint values as byte arrays. For example, the
default method `farmhash64` generates a 64-bit fingerprint value at a time.
This 8-byte value is written out as an `tf.uint8` array of size 8, in
little-endian order.
For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),
and that the fingerprint method is `farmhash64`. In this case, the output
shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the
size of each fingerprint value in bytes. `output[0, :]` is generated from
12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from
other 12 integers in `data[1, :, :]`.
Note that this op fingerprints the raw underlying buffer, and it does not
fingerprint Tensor's metadata such as data type and/or shape. For example, the
fingerprint values are invariant under reshapes and bitcasts as long as the
batch dimension remain the same:
```python
tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))
tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))
```
For string data, one should expect `tf.fingerprint(data) !=
tf.fingerprint(tf.string.reduce_join(data))` in general.
Args:
data: A `Tensor`. Must have rank 1 or higher.
method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.
Currently available method is `farmhash64`.
name: A name for the operation (optional).
Returns:
A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to
`data`'s first dimension, and the second dimension size depends on the
fingerprint algorithm.
"""
return gen_array_ops.fingerprint(data, method, name)
|
py | 1a481c84f0e0aae65a996b0f9ae9a91df347936b | import random
import json
from sqlalchemy import Boolean
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql.expression import cast
from dallinger.models import Info
from dallinger.models import Transformation
from dallinger.nodes import Agent
from dallinger.nodes import Source
class MCMCPAgent(Agent):
__mapper_args__ = {"polymorphic_identity": "MCMCP_agent"}
def update(self, infos):
info = infos[0]
self.replicate(info)
new_info = AnimalInfo(origin=self, contents=info.perturbed_contents())
Perturbation(info_in=info, info_out=new_info)
def _what(self):
infos = self.infos()
return [i for i in infos if i.chosen][0]
class AnimalSource(Source):
"""A source that transmits animal shapes."""
__mapper_args__ = {"polymorphic_identity": "animal_source"}
def create_information(self):
"""Create a new Info.
transmit() -> _what() -> create_information().
"""
return AnimalInfo(origin=self, contents=None)
class AnimalInfo(Info):
"""An Info that can be chosen."""
__mapper_args__ = {"polymorphic_identity": "vector_info"}
@hybrid_property
def chosen(self):
"""Use property1 to store whether an info was chosen."""
try:
return bool(self.property1)
except TypeError:
return None
@chosen.setter
def chosen(self, chosen):
"""Assign chosen to property1."""
self.property1 = repr(chosen)
@chosen.expression
def chosen(self):
"""Retrieve chosen via property1."""
return cast(self.property1, Boolean)
properties = {
"foot_spread": [0, 1],
"body_height": [0.1, 1.5],
"body_tilt": [-15, 45],
"tail_length": [0.05, 1.2],
"tail_angle": [-45, 190],
"neck_length": [0, 2.5],
"neck_angle": [90, 180],
"head_length": [0.05, 0.75],
"head_angle": [5, 80],
}
def __init__(self, origin, contents=None, **kwargs):
if contents is None:
data = {}
for prop, prop_range in self.properties.items():
data[prop] = random.uniform(prop_range[0], prop_range[1])
contents = json.dumps(data)
super(AnimalInfo, self).__init__(origin, contents, **kwargs)
    def perturbed_contents(self):
        """Perturb the given animal.

        Each property is jittered with Gaussian noise whose standard
        deviation is 10% of the property's allowed range, then clamped
        back into that range.
        """
        animal = json.loads(self.contents)
        for prop, prop_range in self.properties.items():
            span = prop_range[1] - prop_range[0]
            jittered = animal[prop] + random.gauss(0, 0.1 * span)
            animal[prop] = max(min(jittered, prop_range[1]), prop_range[0])
        return json.dumps(animal)
class Perturbation(Transformation):
"""A perturbation is a transformation that perturbs the contents."""
__mapper_args__ = {"polymorphic_identity": "perturbation"}
|
py | 1a481d8eb4400119930f974f4abba27288528080 | from __future__ import absolute_import
from __future__ import unicode_literals
try:
from collections import MutableSequence, MutableMapping
except ImportError:
from collections.abc import MutableSequence, MutableMapping
from collections import OrderedDict
import ast
from functools import reduce
from inflection import underscore
def reshape(schema, data):
reshaped = [] if isinstance(schema, MutableSequence) else OrderedDict()
def _reshape(schema, data, new_data):
if isinstance(schema, MutableMapping):
for idx, (key, value) in enumerate(schema.items()):
try:
d = data[key]
except KeyError:
continue
new_data[key] = (
[] if isinstance(value, MutableSequence) else {}
)
if not value:
new_data[key] = data[key]
else:
_reshape(value, d, new_data[key])
elif isinstance(schema, MutableSequence):
schema = schema[0]
for idx, datum in enumerate(data):
try:
new_data[idx]
except IndexError:
new_data.append({})
_reshape(schema, datum, new_data[idx])
else:
new_data[schema] = data[schema]
_reshape(schema, data, reshaped)
return reshaped
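# Illustrative example (sketch): the schema keeps only the listed keys while
# preserving the nesting of `data`.
#   schema = {'user': {'name': None}}
#   data = {'user': {'name': 'ada', 'age': 36}, 'extra': 1}
#   reshape(schema, data)  # -> {'user': {'name': 'ada'}}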
def nargs_to_dict(nargs):
args = zip(nargs[0::2], nargs[1::2])
d = reduce(rec_nargs_to_dict, args, {})
return {'fields': d}
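# Illustrative example (sketch): alternating key/value arguments become a
# nested dict, with dotted keys expanded and values parsed by eval_value().
#   nargs_to_dict(['a.b', '1', 'c', 'x'])
#   # -> {'fields': {'a': {'b': 1}, 'c': 'x'}}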
def rec_nargs_to_dict(accum, kv):
    k, v = kv
    keys = k.split('.')
    if len(keys) > 1:
        # Recurse on the remainder of the dotted path so that keys such as
        # 'a.b.c' nest correctly instead of dropping everything after 'b'.
        accum[keys[0]] = rec_nargs_to_dict({}, ('.'.join(keys[1:]), v))
    else:
        accum[keys[0]] = eval_value(v)
    return accum
def eval_value(value):
try:
return ast.literal_eval(value)
except (SyntaxError, ValueError):
return value
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = (
underscore(parent_key) +
sep +
underscore(k) if parent_key else underscore(k)
)
if isinstance(v, MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return OrderedDict(sorted(items))
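# Illustrative example (sketch): nested keys are joined with '_' and
# CamelCase segments are underscored via inflection.underscore().
#   flatten({'userInfo': {'firstName': 'Ada'}})
#   # -> OrderedDict([('user_info_first_name', 'Ada')])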
|
py | 1a481d9e1540bf1cb657ef776f66526e3cd5b92d | # Copyright (c) 2018-2020, Manfred Moitzi
# License: MIT License
import pytest
from math import radians
import ezdxf
from ezdxf.math import Vector, BoundingBox
from ezdxf.render.forms import cube
from ezdxf.render.mesh import MeshVertexMerger, MeshBuilder, MeshTransformer, MeshAverageVertexMerger
from ezdxf.addons import SierpinskyPyramid
def test_vertex_merger_indices():
merger = MeshVertexMerger()
indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert indices == indices2
def test_vertex_merger_vertices():
merger = MeshVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.vertices == [(1, 2, 3), (4, 5, 6)]
def test_vertex_merger_index_of():
merger = MeshVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.index((1, 2, 3)) == 0
assert merger.index((4, 5, 6)) == 1
with pytest.raises(IndexError):
merger.index((7, 8, 9))
def test_average_vertex_merger_indices():
merger = MeshAverageVertexMerger()
indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert indices == indices2
def test_average_vertex_merger_vertices():
merger = MeshAverageVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.vertices == [(1, 2, 3), (4, 5, 6)]
def test_average_vertex_merger_index_of():
merger = MeshAverageVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.index((1, 2, 3)) == 0
assert merger.index((4, 5, 6)) == 1
with pytest.raises(IndexError):
merger.index((7, 8, 9))
def test_mesh_builder():
dwg = ezdxf.new('R2000')
pyramid = SierpinskyPyramid(level=4, sides=3)
pyramid.render(dwg.modelspace(), merge=False)
meshes = dwg.modelspace().query('MESH')
assert len(meshes) == 256
def test_vertex_merger():
pyramid = SierpinskyPyramid(level=4, sides=3)
faces = pyramid.faces()
mesh = MeshVertexMerger()
for vertices in pyramid:
mesh.add_mesh(vertices=vertices, faces=faces)
assert len(mesh.vertices) == 514
assert len(mesh.faces) == 1024
def test_average_vertex_merger():
pyramid = SierpinskyPyramid(level=4, sides=3)
faces = pyramid.faces()
mesh = MeshAverageVertexMerger()
for vertices in pyramid:
mesh.add_mesh(vertices=vertices, faces=faces)
assert len(mesh.vertices) == 514
assert len(mesh.faces) == 1024
REGULAR_FACE = Vector.list([(0, 0, 0), (1, 0, 1), (1, 1, 1), (0, 1, 0)])
IRREGULAR_FACE = Vector.list([(0, 0, 0), (1, 0, 1), (1, 1, 0), (0, 1, 0)])
def test_has_none_planar_faces():
mesh = MeshBuilder()
mesh.add_face(REGULAR_FACE)
assert mesh.has_none_planar_faces() is False
mesh.add_face(IRREGULAR_FACE)
assert mesh.has_none_planar_faces() is True
def test_scale_mesh():
mesh = cube(center=False)
mesh.scale(2, 3, 4)
bbox = BoundingBox(mesh.vertices)
assert bbox.extmin.isclose((0, 0, 0))
assert bbox.extmax.isclose((2, 3, 4))
def test_rotate_x():
mesh = cube(center=False)
mesh.rotate_x(radians(90))
bbox = BoundingBox(mesh.vertices)
assert bbox.extmin.isclose((0, -1, 0))
assert bbox.extmax.isclose((1, 0, 1))
@pytest.fixture(scope='module')
def msp():
doc = ezdxf.new()
return doc.modelspace()
@pytest.fixture(scope='module')
def cube_polyface(msp):
p = msp.add_polyface()
p.append_faces(cube().faces_as_vertices())
return p
def test_from_empty_polyface(msp):
empty_polyface = msp.add_polyface()
b = MeshBuilder.from_polyface(empty_polyface)
assert len(b.vertices) == 0
assert len(b.faces) == 0
def test_from_cube_polyface(cube_polyface):
b = MeshBuilder.from_polyface(cube_polyface)
assert len(b.vertices) == 24 # unoptimized mesh builder
assert len(b.faces) == 6
def test_render_polyface(cube_polyface):
doc = ezdxf.new()
msp = doc.modelspace()
t = MeshTransformer.from_polyface(cube_polyface)
assert len(t.vertices) == 24 # unoptimized mesh builder
assert len(t.faces) == 6
t.render_polyface(msp)
new_polyface = msp[-1]
assert new_polyface.dxftype() == 'POLYLINE'
assert new_polyface.is_poly_face_mesh is True
assert len(new_polyface.vertices) == 8 + 6
assert new_polyface.vertices[0] is not cube_polyface.vertices[0]
def test_from_polymesh(msp):
polymesh = msp.add_polymesh(size=(4, 4))
b = MeshBuilder.from_polyface(polymesh)
n = polymesh.dxf.n_count
m = polymesh.dxf.m_count
nfaces = (n - 1) * (m - 1)
assert len(b.vertices) == nfaces * 4 # unoptimized mesh builder
assert len(b.faces) == nfaces
def test_from_polyface_type_error(msp):
polyline = msp.add_polyline3d([(0, 0, 0), (1, 0, 0)])
with pytest.raises(TypeError):
MeshBuilder.from_polyface(polyline)
line = msp.add_line(start=(0, 0, 0), end=(1, 0, 0))
with pytest.raises(TypeError):
MeshBuilder.from_polyface(line)
|
py | 1a481edee63dd6a77c01d712465fee2523799568 | import datetime
from unittest import TestCase
from tarentsocialwall.WordpressConnector import WordpressConnector
class TestWordpressConnector(TestCase):
service = None
def setUp(self):
self.service = WordpressConnector()
    def test_convert_to_socialpost_from_event_empty(self):
        events = []
        social_posts = []
        self.service.convert_to_socialpost(events, social_posts)
        self.assertTrue(len(social_posts) == 0)
def test_convert_to_socialpost_from_event_corectly(self):
self.service.access_token = "i have access"
event = {}
event['id'] = '123456'
event['title'] = {}
event['title']['rendered'] = 'test'
event['content'] = {}
event['content']['rendered'] = 'test'
event['date'] = datetime.datetime.now().strftime(self.service.wordpressDateFormat)
events = [event]
social_posts = []
self.service.convert_to_socialpost(events, social_posts)
self.assertTrue(len(social_posts) == 1)
social_post = social_posts[0]
externalId = social_post.externalId
self.assertTrue(externalId == '123456') |
py | 1a481f2596fca8884855c2eebe73e716b1e41793 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class MSDeploy(ProxyOnlyResource):
"""MSDeploy ARM PUT information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param package_uri: Package URI
:type package_uri: str
:param connection_string: SQL Connection String
:type connection_string: str
:param db_type: Database Type
:type db_type: str
:param set_parameters_xml_file_uri: URI of MSDeploy Parameters file. Must
not be set if SetParameters is used.
:type set_parameters_xml_file_uri: str
:param set_parameters: MSDeploy Parameters. Must not be set if
SetParametersXmlFileUri is used.
:type set_parameters: dict[str, str]
:param skip_app_data: Controls whether the MSDeploy operation skips the
App_Data directory.
If set to <code>true</code>, the existing App_Data directory on the
destination
will not be deleted, and any App_Data directory in the source will be
ignored.
Setting is <code>false</code> by default.
:type skip_app_data: bool
:param app_offline: Sets the AppOffline rule while the MSDeploy operation
executes.
Setting is <code>false</code> by default.
:type app_offline: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'package_uri': {'key': 'properties.packageUri', 'type': 'str'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'db_type': {'key': 'properties.dbType', 'type': 'str'},
'set_parameters_xml_file_uri': {'key': 'properties.setParametersXmlFileUri', 'type': 'str'},
'set_parameters': {'key': 'properties.setParameters', 'type': '{str}'},
'skip_app_data': {'key': 'properties.skipAppData', 'type': 'bool'},
'app_offline': {'key': 'properties.appOffline', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(MSDeploy, self).__init__(**kwargs)
self.package_uri = kwargs.get('package_uri', None)
self.connection_string = kwargs.get('connection_string', None)
self.db_type = kwargs.get('db_type', None)
self.set_parameters_xml_file_uri = kwargs.get('set_parameters_xml_file_uri', None)
self.set_parameters = kwargs.get('set_parameters', None)
self.skip_app_data = kwargs.get('skip_app_data', None)
self.app_offline = kwargs.get('app_offline', None)
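# Illustrative construction (sketch; all values are hypothetical):
#   msdeploy = MSDeploy(
#       package_uri='https://example.com/site.zip',
#       set_parameters={'IIS Web Application Name': 'mysite'},
#       app_offline=True,
#   )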
|
py | 1a481facb091b3630f46f57fc13d4c2f10af1de9 | # SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# -*- mode: python; tab-width: 4 -*-
#
# Copyright (C) 2001 Michael Teo <[email protected]>
# nmb.py - NetBIOS library
#
# This software is provided 'as-is', without any express or implied warranty.
# In no event will the author be held liable for any damages arising from the
# use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice cannot be removed or altered from any source distribution.
#
# Altered source done by Alberto Solino (@agsolino)
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import errno
import re
import select
import socket
import string
import time
import random
from struct import pack, unpack
from six import byte2int
from .structure import Structure
# Our random number generator
try:
rand = random.SystemRandom()
except NotImplementedError:
rand = random
pass
################################################################################
# CONSTANTS
################################################################################
# Taken from socket module reference
INADDR_ANY = '0.0.0.0'
BROADCAST_ADDR = '<broadcast>'
# Default port for NetBIOS name service
NETBIOS_NS_PORT = 137
# Default port for NetBIOS session service
NETBIOS_SESSION_PORT = 139
# Default port for SMB session service
SMB_SESSION_PORT = 445
# Owner Node Type Constants
NODE_B = 0x0000
NODE_P = 0x2000
NODE_M = 0x4000
NODE_RESERVED = 0x6000
NODE_GROUP = 0x8000
NODE_UNIQUE = 0x0
# Name Type Constants
TYPE_UNKNOWN = 0x01
TYPE_WORKSTATION = 0x00
TYPE_CLIENT = 0x03
TYPE_SERVER = 0x20
TYPE_DOMAIN_MASTER = 0x1B
TYPE_DOMAIN_CONTROLLER = 0x1C
TYPE_MASTER_BROWSER = 0x1D
TYPE_BROWSER = 0x1E
TYPE_NETDDE = 0x1F
TYPE_STATUS = 0x21
# Opcodes values
OPCODE_QUERY = 0
OPCODE_REGISTRATION = 0x5 << 11
OPCODE_RELEASE = 0x6 << 11
OPCODE_WACK = 0x7 << 11
OPCODE_REFRESH = 0x8 << 11
OPCODE_REQUEST = 0 << 11
OPCODE_RESPONSE = 0x10 << 11
# NM_FLAGS
NM_FLAGS_BROADCAST = 0x1 << 4
NM_FLAGS_UNICAST = 0 << 4
NM_FLAGS_RA = 0x8 << 4
NM_FLAGS_RD = 0x10 << 4
NM_FLAGS_TC = 0x20 << 4
NM_FLAGS_AA = 0x40 << 4
# QUESTION_TYPE
QUESTION_TYPE_NB = 0x20 # NetBIOS general Name Service Resource Record
QUESTION_TYPE_NBSTAT = 0x21 # NetBIOS NODE STATUS Resource Record
# QUESTION_CLASS
QUESTION_CLASS_IN = 0x1 # Internet class
# RESOURCE RECORD RR_TYPE field definitions
RR_TYPE_A = 0x1 # IP address Resource Record
RR_TYPE_NS = 0x2 # Name Server Resource Record
RR_TYPE_NULL = 0xA # NULL Resource Record
RR_TYPE_NB = 0x20 # NetBIOS general Name Service Resource Record
RR_TYPE_NBSTAT = 0x21 # NetBIOS NODE STATUS Resource Record
# RESOURCE RECORD RR_CLASS field definitions
RR_CLASS_IN = 1 # Internet class
# RCODE values
RCODE_FMT_ERR = 0x1 # Format Error. Request was invalidly formatted.
RCODE_SRV_ERR = 0x2 # Server failure. Problem with NBNS, cannot process name.
RCODE_IMP_ERR = 0x4 # Unsupported request error. Allowable only for challenging NBNS when gets an Update type
# registration request.
RCODE_RFS_ERR = 0x5 # Refused error. For policy reasons server will not register this name from this host.
RCODE_ACT_ERR = 0x6 # Active error. Name is owned by another node.
RCODE_CFT_ERR = 0x7 # Name in conflict error. A UNIQUE name is owned by more than one node.
# NAME_FLAGS
NAME_FLAGS_PRM = 0x0200 # Permanent Name Flag. If one (1) then entry is for the permanent node name. Flag is zero
# (0) for all other names.
NAME_FLAGS_ACT = 0x0400 # Active Name Flag. All entries have this flag set to one (1).
NAME_FLAG_CNF = 0x0800 # Conflict Flag. If one (1) then name on this node is in conflict.
NAME_FLAG_DRG = 0x1000 # Deregister Flag. If one (1) then this name is in the process of being deleted.
# NB_FLAGS
NB_FLAGS_ONT_B = 0
NB_FLAGS_ONT_P = 1 << 13
NB_FLAGS_ONT_M = 2 << 13
NB_FLAGS_G = 1 << 15
NAME_TYPES = {TYPE_UNKNOWN: 'Unknown', TYPE_WORKSTATION: 'Workstation', TYPE_CLIENT: 'Client',
TYPE_SERVER: 'Server', TYPE_DOMAIN_MASTER: 'Domain Master', TYPE_DOMAIN_CONTROLLER: 'Domain Controller',
TYPE_MASTER_BROWSER: 'Master Browser', TYPE_BROWSER: 'Browser Server', TYPE_NETDDE: 'NetDDE Server',
TYPE_STATUS: 'Status'}
# NetBIOS Session Types
NETBIOS_SESSION_MESSAGE = 0x0
NETBIOS_SESSION_REQUEST = 0x81
NETBIOS_SESSION_POSITIVE_RESPONSE = 0x82
NETBIOS_SESSION_NEGATIVE_RESPONSE = 0x83
NETBIOS_SESSION_RETARGET_RESPONSE = 0x84
NETBIOS_SESSION_KEEP_ALIVE = 0x85
################################################################################
# HELPERS
################################################################################
def encode_name(name, type, scope):
# ToDo: Rewrite this simpler, we're using less than written
"""
Perform first and second level encoding of name as specified in RFC 1001 (Section 4)
:param string name: the name to encode
:param integer type: the name type constants
:param string scope: the name's scope
:return string: the encoded name.
"""
if name == '*':
name += '\0' * 15
elif len(name) > 15:
name = name[:15] + chr(type)
else:
name = name.ljust(15) + chr(type)
encoded_name = chr(len(name) * 2) + re.sub('.', _do_first_level_encoding, name)
if scope:
encoded_scope = ''
        for s in scope.split('.'):
encoded_scope = encoded_scope + chr(len(s)) + s
return (encoded_name + encoded_scope) + '\0'
else:
return encoded_name + '\0'
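# Illustrative example (sketch): the wildcard name used for node status
# queries encodes to the well-known 'CK...' label (length byte, 32
# first-level-encoded characters, terminating NUL):
#   encode_name('*', TYPE_WORKSTATION, None)
#   # -> ' CK' + 'A' * 30 + '\x00'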
# Internal method for use in encode_name()
def _do_first_level_encoding(m):
s = ord(m.group(0))
return string.ascii_uppercase[s >> 4] + string.ascii_uppercase[s & 0x0f]
def decode_name(name):
# ToDo: Rewrite this simpler, we're using less than written
"""
Perform first and second level decoding of name as specified in RFC 1001 (Section 4)
    :param string name: the name to decode
:return string: the decoded name.
"""
name_length = ord(name[0])
assert name_length == 32
decoded_name = re.sub('..', _do_first_level_decoding, name[1:33])
if name[33] == '\0':
return 34, decoded_name, ''
else:
decoded_domain = ''
offset = 34
while 1:
domain_length = byte2int(name[offset:offset+1])
if domain_length == 0:
break
decoded_domain = '.' + name[offset:offset + domain_length]
offset += domain_length
return offset + 1, decoded_name, decoded_domain
def _do_first_level_decoding(m):
s = m.group(0)
return chr(((ord(s[0]) - ord('A')) << 4) | (ord(s[1]) - ord('A')))
ERRCLASS_QUERY = 0x00
ERRCLASS_SESSION = 0xf0
ERRCLASS_OS = 0xff
QUERY_ERRORS = {0x01: 'Format Error. Request was invalidly formatted',
0x02: 'Server failure. Problem with NBNS, cannot process name.',
0x03: 'Name does not exist',
0x04: 'Unsupported request error. Allowable only for challenging NBNS when gets an Update type registration request.',
0x05: 'Refused error. For policy reasons server will not register this name from this host.',
0x06: 'Active error. Name is owned by another node.',
0x07: 'Name in conflict error. A UNIQUE name is owned by more than one node.',
}
SESSION_ERRORS = {0x80: 'Not listening on called name',
0x81: 'Not listening for calling name',
0x82: 'Called name not present',
0x83: 'Sufficient resources',
0x8f: 'Unspecified error'
}
class NetBIOSError(Exception):
def __init__(self, error_message='', error_class=None, error_code=None):
self.error_class = error_class
self.error_code = error_code
self.error_msg = error_message
def get_error_code(self):
return self.error
def getErrorCode(self):
return self.get_error_code()
def get_error_string(self):
return str(self)
def getErrorString(self):
return str(self)
def __str__(self):
if self.error_code is not None:
if self.error_code in QUERY_ERRORS:
return '%s-%s(%s)' % (self.error_msg, QUERY_ERRORS[self.error_code], self.error_code)
elif self.error_code in SESSION_ERRORS:
return '%s-%s(%s)' % (self.error_msg, SESSION_ERRORS[self.error_code], self.error_code)
else:
return '%s(%s)' % (self.error_msg, self.error_code)
else:
return '%s' % self.error_msg
class NetBIOSTimeout(Exception):
def __init__(self, message = 'The NETBIOS connection with the remote host timed out.'):
Exception.__init__(self, message)
################################################################################
# 4.2 NAME SERVER PACKETS
################################################################################
class NBNSResourceRecord(Structure):
structure = (
('RR_NAME','z=\x00'),
('RR_TYPE','>H=0'),
('RR_CLASS','>H=0'),
('TTL','>L=0'),
('RDLENGTH','>H-RDATA'),
('RDATA',':=""'),
)
class NBNodeStatusResponse(NBNSResourceRecord):
def __init__(self, data = 0):
NBNSResourceRecord.__init__(self, data)
self.mac = b'00-00-00-00-00-00'
self.num_names = unpack('B', self['RDATA'][:1])[0]
self.entries = list()
data = self['RDATA'][1:]
for _ in range(self.num_names):
entry = NODE_NAME_ENTRY(data)
data = data[len(entry):]
self.entries.append(entry)
self.statistics = STATISTICS(data)
self.set_mac_in_hexa(self.statistics['UNIT_ID'])
def set_mac_in_hexa(self, data):
data_aux = u''
for d in bytearray(data):
if data_aux == '':
data_aux = '%02x' % d
else:
data_aux += '-%02x' % d
self.mac = data_aux.upper()
def get_mac(self):
return self.mac
    def rawData(self):
        # Serialize the name entries followed by the statistics block that
        # __init__() parsed out of RDATA.
        res = pack('!B', self.num_names)
        for i in range(0, self.num_names):
            res += self.entries[i].getData()
        return res + self.statistics.getData()
class NBPositiveNameQueryResponse(NBNSResourceRecord):
def __init__(self, data = 0):
NBNSResourceRecord.__init__(self, data)
self.entries = [ ]
rdata = self['RDATA']
while len(rdata) > 0:
entry = ADDR_ENTRY(rdata)
rdata = rdata[len(entry):]
self.entries.append(socket.inet_ntoa(entry['NB_ADDRESS']))
# 4.2.1. GENERAL FORMAT OF NAME SERVICE PACKETS
class NAME_SERVICE_PACKET(Structure):
commonHdr = (
('NAME_TRN_ID','>H=0'),
('FLAGS','>H=0'),
('QDCOUNT','>H=0'),
('ANCOUNT','>H=0'),
('NSCOUNT','>H=0'),
('ARCOUNT','>H=0'),
)
structure = (
('ANSWERS',':'),
)
# 4.2.1.2. QUESTION SECTION
class QUESTION_ENTRY(Structure):
commonHdr = (
('QUESTION_NAME','z'),
('QUESTION_TYPE','>H=0'),
('QUESTION_CLASS','>H=0'),
)
# 4.2.1.3. RESOURCE RECORD
class RESOURCE_RECORD(Structure):
structure = (
('RR_NAME','z=\x00'),
('RR_TYPE','>H=0'),
('RR_CLASS','>H=0'),
('TTL','>L=0'),
('RDLENGTH','>H-RDATA'),
('RDATA',':=""'),
)
# 4.2.2. NAME REGISTRATION REQUEST
class NAME_REGISTRATION_REQUEST(NAME_SERVICE_PACKET):
structure = (
('QUESTION_NAME', 'z'),
('QUESTION_TYPE', '>H=0'),
('QUESTION_CLASS', '>H=0'),
('RR_NAME','z', ),
('RR_TYPE', '>H=0'),
('RR_CLASS','>H=0'),
('TTL', '>L=0'),
('RDLENGTH', '>H=6'),
('NB_FLAGS', '>H=0'),
('NB_ADDRESS', '4s=""'),
)
def __init__(self, data=None):
NAME_SERVICE_PACKET.__init__(self,data)
self['FLAGS'] = OPCODE_REQUEST | NM_FLAGS_RD | OPCODE_REGISTRATION
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 1
self['QUESTION_TYPE'] = QUESTION_TYPE_NB
self['QUESTION_CLASS'] = QUESTION_CLASS_IN
self['RR_TYPE'] = RR_TYPE_NB
self['RR_CLASS'] = RR_CLASS_IN
# 4.2.3. NAME OVERWRITE REQUEST & DEMAND
class NAME_OVERWRITE_REQUEST(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
self['FLAGS'] = OPCODE_REQUEST | OPCODE_REGISTRATION
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 1
# 4.2.4. NAME REFRESH REQUEST
class NAME_REFRESH_REQUEST(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
self['FLAGS'] = OPCODE_REFRESH | 0x1
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 1
# 4.2.5. POSITIVE NAME REGISTRATION RESPONSE
# 4.2.6. NEGATIVE NAME REGISTRATION RESPONSE
# 4.2.7. END-NODE CHALLENGE REGISTRATION RESPONSE
class NAME_REGISTRATION_RESPONSE(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
# 4.2.8. NAME CONFLICT DEMAND
class NAME_CONFLICT_DEMAND(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
# ToDo: 4.2.9. NAME RELEASE REQUEST & DEMAND
# ToDo: 4.2.10. POSITIVE NAME RELEASE RESPONSE
# ToDo: 4.2.11. NEGATIVE NAME RELEASE RESPONSE
# 4.2.12. NAME QUERY REQUEST
class NAME_QUERY_REQUEST(NAME_SERVICE_PACKET):
structure = (
('QUESTION_NAME', 'z'),
('QUESTION_TYPE', '>H=0'),
('QUESTION_CLASS', '>H=0'),
)
def __init__(self, data=None):
NAME_SERVICE_PACKET.__init__(self,data)
self['FLAGS'] = OPCODE_REQUEST | OPCODE_REGISTRATION | NM_FLAGS_RD
self['RCODE'] = 0
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 0
self['QUESTION_TYPE'] = QUESTION_TYPE_NB
self['QUESTION_CLASS'] = QUESTION_CLASS_IN
# 4.2.13. POSITIVE NAME QUERY RESPONSE
class ADDR_ENTRY(Structure):
structure = (
('NB_FLAGS', '>H=0'),
('NB_ADDRESS', '4s=""'),
)
# ToDo: 4.2.15. REDIRECT NAME QUERY RESPONSE
# ToDo: 4.2.16. WAIT FOR ACKNOWLEDGEMENT (WACK) RESPONSE
# 4.2.17. NODE STATUS REQUEST
class NODE_STATUS_REQUEST(NAME_QUERY_REQUEST):
def __init__(self, data=None):
NAME_QUERY_REQUEST.__init__(self,data)
self['FLAGS'] = 0
self['QUESTION_TYPE'] = QUESTION_TYPE_NBSTAT
# 4.2.18. NODE STATUS RESPONSE
class NODE_NAME_ENTRY(Structure):
structure = (
('NAME','15s=""'),
('TYPE','B=0'),
('NAME_FLAGS','>H'),
)
class STATISTICS(Structure):
structure = (
('UNIT_ID','6s=""'),
('JUMPERS','B'),
('TEST_RESULT','B'),
('VERSION_NUMBER','>H'),
('PERIOD_OF_STATISTICS','>H'),
('NUMBER_OF_CRCs','>H'),
('NUMBER_ALIGNMENT_ERRORS','>H'),
('NUMBER_OF_COLLISIONS','>H'),
('NUMBER_SEND_ABORTS','>H'),
('NUMBER_GOOD_SENDS','>L'),
('NUMBER_GOOD_RECEIVES','>L'),
('NUMBER_RETRANSMITS','>H'),
('NUMBER_NO_RESOURCE_CONDITIONS','>H'),
('NUMBER_FREE_COMMAND_BLOCKS','>H'),
('TOTAL_NUMBER_COMMAND_BLOCKS','>H'),
('MAX_TOTAL_NUMBER_COMMAND_BLOCKS','>H'),
('NUMBER_PENDING_SESSIONS','>H'),
('MAX_NUMBER_PENDING_SESSIONS','>H'),
('MAX_TOTAL_SESSIONS_POSSIBLE','>H'),
('SESSION_DATA_PACKET_SIZE','>H'),
)
class NetBIOS:
# Creates a NetBIOS instance without specifying any default NetBIOS domain nameserver.
# All queries will be sent through the servport.
def __init__(self, servport = NETBIOS_NS_PORT):
self.__servport = NETBIOS_NS_PORT
self.__nameserver = None
self.__broadcastaddr = BROADCAST_ADDR
self.mac = b'00-00-00-00-00-00'
def _setup_connection(self, dstaddr, timeout=None):
port = rand.randint(10000, 60000)
af, socktype, proto, _canonname, _sa = socket.getaddrinfo(dstaddr, port, socket.AF_INET, socket.SOCK_DGRAM)[0]
s = socket.socket(af, socktype, proto)
        has_bind = 0
        for _i in range(0, 10):
            # Try to bind to a random UDP port, giving up after 10 attempts.
            try:
                s.bind((INADDR_ANY, rand.randint(10000, 60000)))
                s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                has_bind = 1
                break
            except socket.error:
                pass
        if not has_bind:
            raise NetBIOSError('Cannot bind to a good UDP port', ERRCLASS_OS, errno.EAGAIN)
self.__sock = s
def send(self, request, destaddr, timeout):
self._setup_connection(destaddr)
tries = 3
while 1:
try:
self.__sock.sendto(request.getData(), 0, (destaddr, self.__servport))
ready, _, _ = select.select([self.__sock.fileno()], [], [], timeout)
if not ready:
if tries:
# Retry again until tries == 0
tries -= 1
else:
raise NetBIOSTimeout
else:
try:
data, _ = self.__sock.recvfrom(65536, 0)
except Exception as e:
raise NetBIOSError("recvfrom error: %s" % str(e))
self.__sock.close()
res = NAME_SERVICE_PACKET(data)
if res['NAME_TRN_ID'] == request['NAME_TRN_ID']:
if (res['FLAGS'] & 0xf) > 0:
raise NetBIOSError('Negative response', ERRCLASS_QUERY, res['FLAGS'] & 0xf)
return res
except select.error as ex:
if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
raise NetBIOSError('Error occurs while waiting for response', ERRCLASS_OS, ex[0])
except socket.error as ex:
raise NetBIOSError('Connection error: %s' % str(ex))
# Set the default NetBIOS domain nameserver.
def set_nameserver(self, nameserver):
self.__nameserver = nameserver
# Return the default NetBIOS domain nameserver, or None if none is specified.
def get_nameserver(self):
return self.__nameserver
# Set the broadcast address to be used for query.
def set_broadcastaddr(self, broadcastaddr):
self.__broadcastaddr = broadcastaddr
# Return the broadcast address to be used, or BROADCAST_ADDR if default broadcast address is used.
def get_broadcastaddr(self):
return self.__broadcastaddr
# Returns a NBPositiveNameQueryResponse instance containing the host information for nbname.
# If a NetBIOS domain nameserver has been specified, it will be used for the query.
# Otherwise, the query is broadcasted on the broadcast address.
def gethostbyname(self, nbname, qtype = TYPE_WORKSTATION, scope = None, timeout = 1):
resp = self.name_query_request(nbname, self.__nameserver, qtype, scope, timeout)
return resp
# Returns a list of NBNodeEntry instances containing node status information for nbname.
# If destaddr contains an IP address, then this will become an unicast query on the destaddr.
# Raises NetBIOSTimeout if timeout (in secs) is reached.
# Raises NetBIOSError for other errors
def getnodestatus(self, nbname, destaddr = None, type = TYPE_WORKSTATION, scope = None, timeout = 1):
if destaddr:
return self.node_status_request(nbname, destaddr, type, scope, timeout)
else:
return self.node_status_request(nbname, self.__nameserver, type, scope, timeout)
def getnetbiosname(self, ip):
entries = self.getnodestatus('*',ip)
entries = [x for x in entries if x['TYPE'] == TYPE_SERVER]
return entries[0]['NAME'].strip().decode('latin-1')
def getmacaddress(self):
return self.mac
def name_registration_request(self, nbname, destaddr, qtype, scope, nb_flags=0, nb_address='0.0.0.0'):
netbios_name = nbname.upper()
qn_label = encode_name(netbios_name, qtype, scope)
p = NAME_REGISTRATION_REQUEST()
p['NAME_TRN_ID'] = rand.randint(1, 32000)
p['QUESTION_NAME'] = qn_label[:-1]
p['RR_NAME'] = qn_label[:-1]
p['TTL'] = 0xffff
p['NB_FLAGS'] = nb_flags
p['NB_ADDRESS'] = socket.inet_aton(nb_address)
if not destaddr:
p['FLAGS'] |= NM_FLAGS_BROADCAST
destaddr = self.__broadcastaddr
res = self.send(p, destaddr, 1)
return res
def name_query_request(self, nbname, destaddr = None, qtype = TYPE_SERVER, scope = None, timeout = 1):
netbios_name = nbname.upper()
qn_label = encode_name(netbios_name, qtype, scope)
p = NAME_QUERY_REQUEST()
p['NAME_TRN_ID'] = rand.randint(1, 32000)
p['QUESTION_NAME'] = qn_label[:-1]
p['FLAGS'] = NM_FLAGS_RD
if not destaddr:
p['FLAGS'] |= NM_FLAGS_BROADCAST
destaddr = self.__broadcastaddr
res = self.send(p, destaddr, timeout)
return NBPositiveNameQueryResponse(res['ANSWERS'])
def node_status_request(self, nbname, destaddr, type, scope, timeout):
netbios_name = nbname.upper()
qn_label = encode_name(netbios_name, type, scope)
p = NODE_STATUS_REQUEST()
p['NAME_TRN_ID'] = rand.randint(1, 32000)
p['QUESTION_NAME'] = qn_label[:-1]
if not destaddr:
p['FLAGS'] = NM_FLAGS_BROADCAST
destaddr = self.__broadcastaddr
res = self.send(p, destaddr, timeout)
answ = NBNodeStatusResponse(res['ANSWERS'])
self.mac = answ.get_mac()
return answ.entries
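# Illustrative usage of the NetBIOS name service client above (a sketch; the
# address is hypothetical and a reachable host is assumed):
#   nb = NetBIOS()
#   server_name = nb.getnetbiosname('192.168.1.10')
#   entries = nb.getnodestatus('*', '192.168.1.10')
#   mac = nb.getmacaddress()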
################################################################################
# 4.2 SESSION SERVICE PACKETS
################################################################################
class NetBIOSSessionPacket:
def __init__(self, data=0):
self.type = 0x0
self.flags = 0x0
self.length = 0x0
if data == 0:
self._trailer = b''
else:
try:
self.type = ord(data[0])
if self.type == NETBIOS_SESSION_MESSAGE:
self.length = ord(data[1]) << 16 | (unpack('!H', data[2:4])[0])
else:
self.flags = ord(data[1])
self.length = unpack('!H', data[2:4])[0]
self._trailer = data[4:]
except:
raise NetBIOSError('Wrong packet format ')
def set_type(self, type):
self.type = type
def get_type(self):
return self.type
def rawData(self):
if self.type == NETBIOS_SESSION_MESSAGE:
data = pack('!BBH', self.type, self.length >> 16, self.length & 0xFFFF) + self._trailer
else:
data = pack('!BBH', self.type, self.flags, self.length) + self._trailer
return data
def set_trailer(self, data):
self._trailer = data
self.length = len(data)
def get_length(self):
return self.length
def get_trailer(self):
return self._trailer
class NetBIOSSession:
def __init__(self, myname, remote_name, remote_host, remote_type=TYPE_SERVER, sess_port=NETBIOS_SESSION_PORT,
timeout=None, local_type=TYPE_WORKSTATION, sock=None):
"""
:param unicode myname: My local NetBIOS name
:param unicode remote_name: Remote NetBIOS name
:param unicode remote_host: Remote IP Address
:param integer remote_type: NetBIOS Host type
:param integer sess_port: Session port to connect (139,445)
:param integer timeout: Timeout for connection
:param integer local_type: My Local Host Type
:param socket sock: Socket for already established connection
"""
        if len(myname) > 15:
            self.__myname = myname[:15].upper()
        else:
            self.__myname = myname.upper()
self.__local_type = local_type
assert remote_name
# if destination port SMB_SESSION_PORT and remote name *SMBSERVER, we're changing it to its IP address
# helping solving the client mistake ;)
if remote_name == '*SMBSERVER' and sess_port == SMB_SESSION_PORT:
remote_name = remote_host
# If remote name is *SMBSERVER let's try to query its name.. if can't be guessed, continue and hope for the best
if remote_name == '*SMBSERVER':
nb = NetBIOS()
try:
res = nb.getnetbiosname(remote_host)
except:
res = None
pass
if res is not None:
remote_name = res
        if len(remote_name) > 15:
            self.__remote_name = remote_name[:15].upper()
        else:
            self.__remote_name = remote_name.upper()
self.__remote_type = remote_type
self.__remote_host = remote_host
if sock is not None:
# We are acting as a server
self._sock = sock
else:
self._sock = self._setup_connection((remote_host, sess_port), timeout)
if sess_port == NETBIOS_SESSION_PORT:
self._request_session(remote_type, local_type, timeout)
def _request_session(self, remote_type, local_type, timeout):
raise NotImplementedError('Not Implemented!')
def _setup_connection(self, peer, timeout=None):
raise NotImplementedError('Not Implemented!')
def get_myname(self):
return self.__myname
def get_mytype(self):
return self.__local_type
def get_remote_host(self):
return self.__remote_host
def get_remote_name(self):
return self.__remote_name
def get_remote_type(self):
return self.__remote_type
def close(self):
self._sock.close()
def get_socket(self):
return self._sock
class NetBIOSUDPSessionPacket(Structure):
TYPE_DIRECT_UNIQUE = 16
TYPE_DIRECT_GROUP = 17
FLAGS_MORE_FRAGMENTS = 1
FLAGS_FIRST_FRAGMENT = 2
FLAGS_B_NODE = 0
structure = (
('Type','B=16'), # Direct Unique Datagram
('Flags','B=2'), # FLAGS_FIRST_FRAGMENT
('ID','<H'),
('_SourceIP','>L'),
('SourceIP','"'),
('SourcePort','>H=138'),
('DataLegth','>H-Data'),
('Offset','>H=0'),
('SourceName','z'),
('DestinationName','z'),
('Data',':'),
)
def getData(self):
addr = self['SourceIP'].split('.')
addr = [int(x) for x in addr]
        # Pack the dotted-quad address into a single 32-bit big-endian int.
        addr = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]
self['_SourceIP'] = addr
return Structure.getData(self)
def get_trailer(self):
return self['Data']
class NetBIOSUDPSession(NetBIOSSession):
def _setup_connection(self, peer, timeout=None):
af, socktype, proto, canonname, sa = socket.getaddrinfo(peer[0], peer[1], 0, socket.SOCK_DGRAM)[0]
sock = socket.socket(af, socktype, proto)
sock.connect(sa)
sock = socket.socket(af, socktype, proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((INADDR_ANY, 138))
self.peer = peer
return sock
def _request_session(self, remote_type, local_type, timeout = None):
pass
def next_id(self):
if hasattr(self, '__dgram_id'):
answer = self.__dgram_id
else:
self.__dgram_id = rand.randint(1,65535)
answer = self.__dgram_id
self.__dgram_id += 1
return answer
def send_packet(self, data):
# Yes... I know...
self._sock.connect(self.peer)
p = NetBIOSUDPSessionPacket()
p['ID'] = self.next_id()
p['SourceIP'] = self._sock.getsockname()[0]
p['SourceName'] = encode_name(self.get_myname(), self.get_mytype(), '')[:-1]
p['DestinationName'] = encode_name(self.get_remote_name(), self.get_remote_type(), '')[:-1]
p['Data'] = data
        self._sock.sendto(p.getData(), self.peer)
self._sock.close()
self._sock = self._setup_connection(self.peer)
def recv_packet(self, timeout = None):
# The next loop is a workaround for a bigger problem:
# When data reaches higher layers, the lower headers are lost,
# and with them, for example, the source IP. Hence, SMB users
# can't know where packets are coming from... we need a better
# solution, right now, we will filter everything except packets
# coming from the remote_host specified in __init__()
while 1:
data, peer = self._sock.recvfrom(8192)
# print "peer: %r self.peer: %r" % (peer, self.peer)
if peer == self.peer: break
return NetBIOSUDPSessionPacket(data)
class NetBIOSTCPSession(NetBIOSSession):
def __init__(self, myname, remote_name, remote_host, remote_type=TYPE_SERVER, sess_port=NETBIOS_SESSION_PORT,
timeout=None, local_type=TYPE_WORKSTATION, sock=None, select_poll=False):
"""
:param unicode myname: My local NetBIOS name
:param unicode remote_name: Remote NetBIOS name
:param unicode remote_host: Remote IP Address
:param integer remote_type: NetBIOS Host type
:param integer sess_port: Session port to connect (139,445)
:param integer timeout: Timeout for connection
:param integer local_type: My Local Host Type
:param socket sock: Socket for already established connection
:param boolean select_poll: Type of polling mechanism
"""
self.__select_poll = select_poll
if self.__select_poll:
self.read_function = self.polling_read
else:
self.read_function = self.non_polling_read
NetBIOSSession.__init__(self, myname, remote_name, remote_host, remote_type=remote_type, sess_port=sess_port,
timeout=timeout, local_type=local_type, sock=sock)
def _setup_connection(self, peer, timeout=None):
try:
af, socktype, proto, canonname, sa = socket.getaddrinfo(peer[0], peer[1], 0, socket.SOCK_STREAM)[0]
sock = socket.socket(af, socktype, proto)
oldtimeout = sock.gettimeout()
sock.settimeout(timeout)
sock.connect(sa)
sock.settimeout(oldtimeout)
except socket.error as e:
raise socket.error("Connection error (%s:%s)" % (peer[0], peer[1]), e)
return sock
def send_packet(self, data):
p = NetBIOSSessionPacket()
p.set_type(NETBIOS_SESSION_MESSAGE)
p.set_trailer(data)
self._sock.sendall(p.rawData())
def recv_packet(self, timeout = None):
data = self.__read(timeout)
return NetBIOSSessionPacket(data)
def _request_session(self, remote_type, local_type, timeout = None):
p = NetBIOSSessionPacket()
remote_name = encode_name(self.get_remote_name(), remote_type, '')
myname = encode_name(self.get_myname(), local_type, '')
p.set_type(NETBIOS_SESSION_REQUEST)
p.set_trailer(remote_name.encode('latin-1') + myname.encode('latin-1'))
self._sock.sendall(p.rawData())
while 1:
p = self.recv_packet(timeout)
if p.get_type() == NETBIOS_SESSION_NEGATIVE_RESPONSE:
raise NetBIOSError('Cannot request session (Called Name:%s)' % self.get_remote_name())
elif p.get_type() == NETBIOS_SESSION_POSITIVE_RESPONSE:
break
else:
# Ignore all other messages, most probably keepalive messages
pass
def polling_read(self, read_length, timeout):
data = b''
if timeout is None:
timeout = 3600
time_left = timeout
CHUNK_TIME = 0.025
bytes_left = read_length
while bytes_left > 0:
try:
ready, _, _ = select.select([self._sock.fileno()], [], [], 0)
if not ready:
if time_left <= 0:
raise NetBIOSTimeout
else:
time.sleep(CHUNK_TIME)
time_left -= CHUNK_TIME
continue
received = self._sock.recv(bytes_left)
if len(received) == 0:
raise NetBIOSError('Error while reading from remote', ERRCLASS_OS, None)
data = data + received
bytes_left = read_length - len(data)
except select.error as ex:
if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
raise NetBIOSError('Error occurs while reading from remote', ERRCLASS_OS, ex[0])
return data
def non_polling_read(self, read_length, timeout):
data = b''
bytes_left = read_length
while bytes_left > 0:
try:
ready, _, _ = select.select([self._sock.fileno()], [], [], timeout)
if not ready:
raise NetBIOSTimeout
received = self._sock.recv(bytes_left)
if len(received) == 0:
raise NetBIOSError('Error while reading from remote', ERRCLASS_OS, None)
data = data + received
bytes_left = read_length - len(data)
except select.error as ex:
if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
raise NetBIOSError('Error occurs while reading from remote', ERRCLASS_OS, ex[0])
return data
def __read(self, timeout = None):
data = self.read_function(4, timeout)
type, flags, length = unpack('>ccH', data)
if ord(type) == NETBIOS_SESSION_MESSAGE:
length |= ord(flags) << 16
else:
if ord(flags) & 0x01:
length |= 0x10000
data2 = self.read_function(length, timeout)
return data + data2
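# Illustrative usage (sketch; host and names are hypothetical). On port 445
# no NetBIOS session request is exchanged, so the session is usable
# immediately:
#   session = NetBIOSTCPSession('CLIENT', 'FILESERVER', '192.168.1.10',
#                               sess_port=SMB_SESSION_PORT)
#   session.send_packet(b'...')   # payload framing handled by the class
#   packet = session.recv_packet()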
|
py | 1a4822ac411fa529407d24940c64b3fd4271ee98 | # -*- coding: utf-8 -*-
"""
Doc serving from Python.
In production there are two modes,
* Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)
* Serving from private symlinks in Python (readthedocs.com only)
In development, we have two modes:
* Serving from public symlinks in Python
* Serving from private symlinks in Python
This means we should only serve from public symlinks in dev,
and generally default to serving from private symlinks in Python only.
Privacy
-------
These views will take into account the version privacy level.
Settings
--------
PYTHON_MEDIA (False) - Set this to True to serve docs & media from Python
SERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import logging
import mimetypes
import os
from functools import wraps
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.static import serve
from readthedocs.builds.models import Version
from readthedocs.core.permissions import AdminPermission
from readthedocs.core.resolver import resolve, resolve_path
from readthedocs.core.symlink import PrivateSymlink, PublicSymlink
from readthedocs.projects import constants
from readthedocs.projects.models import Project, ProjectRelationship
log = logging.getLogger(__name__)
def map_subproject_slug(view_func):
"""
A decorator that maps a ``subproject_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa
if subproject is None and subproject_slug:
try:
subproject = Project.objects.get(slug=subproject_slug)
except Project.DoesNotExist:
try:
# Depends on a project passed into kwargs
rel = ProjectRelationship.objects.get(
parent=kwargs['project'],
alias=subproject_slug,
)
subproject = rel.child
except (ProjectRelationship.DoesNotExist, KeyError):
raise Http404
return view_func(request, subproject=subproject, *args, **kwargs)
return inner_view
def map_project_slug(view_func):
"""
A decorator that maps a ``project_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa
if project is None:
if not project_slug:
project_slug = request.slug
try:
project = Project.objects.get(slug=project_slug)
except Project.DoesNotExist:
raise Http404('Project does not exist.')
return view_func(request, project=project, *args, **kwargs)
return inner_view
@map_project_slug
@map_subproject_slug
def redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument
"""Handle / -> /en/latest/ directs on subdomains."""
return HttpResponseRedirect(resolve(subproject or project))
@map_project_slug
@map_subproject_slug
def redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa
"""Redirect /page/file.html to /en/latest/file.html."""
return HttpResponseRedirect(
resolve(subproject or project, filename=filename))
def _serve_401(request, project):
res = render(request, '401.html')
res.status_code = 401
log.error('Unauthorized access to {0} documentation'.format(project.slug))
return res
def _serve_file(request, filename, basepath):
# Serve the file from the proper location
if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):
# Serve from Python
return serve(request, filename, basepath)
else:
# Serve from Nginx
content_type, encoding = mimetypes.guess_type(
os.path.join(basepath, filename))
content_type = content_type or 'application/octet-stream'
response = HttpResponse(content_type=content_type)
if encoding:
response['Content-Encoding'] = encoding
try:
response['X-Accel-Redirect'] = os.path.join(
basepath[len(settings.SITE_ROOT):],
filename,
)
except UnicodeEncodeError:
raise Http404
return response
@map_project_slug
@map_subproject_slug
def serve_docs(
request, project, subproject, lang_slug=None, version_slug=None,
filename=''):
"""Exists to map existing proj, lang, version, filename views to the file format."""
if not version_slug:
version_slug = project.get_default_version()
try:
version = project.versions.public(request.user).get(slug=version_slug)
except Version.DoesNotExist:
# Properly raise a 404 if the version doesn't exist & a 401 if it does
if project.versions.filter(slug=version_slug).exists():
return _serve_401(request, project)
raise Http404('Version does not exist.')
filename = resolve_path(
subproject or project, # Resolve the subproject if it exists
version_slug=version_slug,
language=lang_slug,
filename=filename,
subdomain=True, # subdomain will make it a "full" path without a URL prefix
)
if (version.privacy_level == constants.PRIVATE and
not AdminPermission.is_member(user=request.user, obj=project)):
return _serve_401(request, project)
return _serve_symlink_docs(
request,
filename=filename,
project=project,
privacy_level=version.privacy_level,
)
@map_project_slug
def _serve_symlink_docs(request, project, privacy_level, filename=''):
"""Serve a file by symlink, or a 404 if not found."""
# Handle indexes
if filename == '' or filename[-1] == '/':
filename += 'index.html'
# This breaks path joining, by ignoring the root when given an "absolute" path
if filename[0] == '/':
filename = filename[1:]
log.info('Serving %s for %s', filename, project)
files_tried = []
serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])
if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa
public_symlink = PublicSymlink(project)
basepath = public_symlink.project_root
if os.path.exists(os.path.join(basepath, filename)):
return _serve_file(request, filename, basepath)
else:
files_tried.append(os.path.join(basepath, filename))
if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa
# Handle private
private_symlink = PrivateSymlink(project)
basepath = private_symlink.project_root
if os.path.exists(os.path.join(basepath, filename)):
return _serve_file(request, filename, basepath)
else:
files_tried.append(os.path.join(basepath, filename))
raise Http404(
'File not found. Tried these files: %s' % ','.join(files_tried))
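# Illustrative URLconf wiring for the views above (hypothetical -- the
# project's real routing lives in its own urls modules and may differ):
#
#     from django.conf.urls import url
#
#     urlpatterns = [
#         url(r'^$', redirect_project_slug),
#         url(r'^page/(?P<filename>.*)$', redirect_page_with_filename),
#         url(r'^docs/(?P<project_slug>[-\w]+)/page/(?P<filename>.*)$', serve_docs),
#     ]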
|
py | 1a48234339dd714d6ef139e286e842aee708769b | import re
def get_message(fault):
match = re.match(r"<class 'Exception'>:([\w\s]+)", fault.faultString)
return 'Server error: "{0}"'.format(match.group(1))
def ask_user(message):
while True:
print(message)
answer = input()
if re.match(r'\w', answer):
return answer
print('Invalid input')
def format_posts(posts):
def format_post(post):
mask = 'Subject: {0}\nCreation: {1}\nTitle: {2}\nBody: {3}'
return mask.format(
post['subject'],
post['creation'],
post['title'],
post['body']
)
return '\n\n'.join(map(format_post, posts))
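# Minimal usage sketch: the fault stub below is hypothetical and only mimics
# the `faultString` attribute that get_message() actually inspects.
if __name__ == '__main__':
    class _FakeFault(object):
        faultString = "<class 'Exception'>:something broke"

    print(get_message(_FakeFault()))
    print(format_posts([{
        'subject': 'general',
        'creation': '2020-01-01',
        'title': 'hello',
        'body': 'first post',
    }]))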
|
py | 1a4824ae671c13419f731f4306b4dda10ff90703 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pack relocations in a library (or copy unchanged).
If --enable-packing and --configuration-name=='Release', invoke the
relocation_packer tool to pack the .rel.dyn or .rela.dyn section in the given
library files. This step is inserted after the libraries are stripped.
If --enable-packing is zero, the script copies files verbatim, with no
attempt to pack relocations.
Any library listed in --exclude-packing-list is also copied verbatim,
irrespective of any --enable-packing setting. Typically this would be
'libchromium_android_linker.so'.
"""
import optparse
import os
import shutil
import sys
import tempfile
from util import build_utils
def PackLibraryRelocations(android_pack_relocations, library_path, output_path):
shutil.copy(library_path, output_path)
pack_command = [android_pack_relocations, output_path]
build_utils.CheckOutput(pack_command)
def CopyLibraryUnchanged(library_path, output_path):
shutil.copy(library_path, output_path)
def main(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--clear-dir', action='store_true',
help='If set, the destination directory will be deleted '
'before copying files to it. This is highly recommended to '
'ensure that no stale files are left in the directory.')
parser.add_option('--configuration-name',
default='Release',
help='Gyp configuration name (i.e. Debug, Release)')
parser.add_option('--enable-packing',
choices=['0', '1'],
help=('Pack relocations if 1 and configuration name is \'Release\','
' otherwise plain file copy'))
parser.add_option('--exclude-packing-list',
default='',
help='Names of any libraries explicitly not packed')
parser.add_option('--android-pack-relocations',
help='Path to the relocations packer binary')
parser.add_option('--stripped-libraries-dir',
help='Directory for stripped libraries')
parser.add_option('--packed-libraries-dir',
help='Directory for packed libraries')
parser.add_option('--libraries', action='append',
help='List of libraries')
parser.add_option('--stamp', help='Path to touch on success')
parser.add_option('--filelistjson',
help='Output path of filelist.json to write')
options, _ = parser.parse_args(args)
enable_packing = (options.enable_packing == '1' and
options.configuration_name == 'Release')
exclude_packing_set = set(build_utils.ParseGypList(
options.exclude_packing_list))
libraries = []
for libs_arg in options.libraries:
libraries += build_utils.ParseGypList(libs_arg)
if options.clear_dir:
build_utils.DeleteDirectory(options.packed_libraries_dir)
build_utils.MakeDirectory(options.packed_libraries_dir)
output_paths = []
for library in libraries:
library_path = os.path.join(options.stripped_libraries_dir, library)
output_path = os.path.join(
options.packed_libraries_dir, os.path.basename(library))
output_paths.append(output_path)
if enable_packing and library not in exclude_packing_set:
PackLibraryRelocations(options.android_pack_relocations,
library_path,
output_path)
else:
CopyLibraryUnchanged(library_path, output_path)
if options.filelistjson:
build_utils.WriteJson({ 'files': output_paths }, options.filelistjson)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
libraries + build_utils.GetPythonDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
py | 1a48261ff356629886a3f2c139c167bed43caa21 | # -*- coding: utf-8 -*-
import datetime
from iemlav import logger
import multiprocessing
import netfilterqueue
from iemlav.lib.firewall.packet_filter import PacketFilter
from iemlav.lib.firewall.firewall_monitor import FirewallMonitor
from iemlav.lib.firewall import utils
class FirewallEngine(object):
def __init__(self, cred, debug=False):
"""Initialize FirewallEngine."""
self.cred = cred
self.logger = logger.IemlAVLogger(
__name__,
debug
)
# Parse and setup rules and actions
(self.ip_inbound,
self.action_inbound_IPRule) = self.parse_inbound_IPRule()
(self.ip_outbound,
self.action_outbound_IPRule) = self.parse_outbound_IPRule()
(self.protocols,
self.action_protocolRule) = self.parse_protocolRule()
(self.sports,
self.action_source_portRule) = self.parse_source_portRule()
(self.dports,
self.action_dest_portRule) = self.parse_dest_portRule()
(self.dns,
self.action_DNSRule) = self.parse_DNSRule()
(self.extensions,
self.action_scanLoad) = self.parse_scanLoad()
self.action_HTTPRequest = self.parse_HTTPRequest()
self.action_HTTPResponse = self.parse_HTTPResponse()
# Interface
self.interface = str(self.cred['interface'])
if self.interface == "":
self.interface = utils.get_interface()
# Setup PacketFilter object
self.packetFilterObj = PacketFilter(interface=self.interface,
debug=debug,
ip_inbound=self.ip_inbound,
ip_outbound=self.ip_outbound,
protocols=self.protocols,
dns=self.dns,
dports=self.dports,
sports=self.sports,
extensions=self.extensions,
action_inbound_IPRule=self.action_inbound_IPRule,
action_outbound_IPRule=self.action_outbound_IPRule,
action_DNSRule=self.action_DNSRule,
action_source_portRule=self.action_source_portRule,
action_dest_portRule=self.action_dest_portRule,
action_HTTPResponse=self.action_HTTPResponse,
action_HTTPRequest=self.action_HTTPRequest,
action_protocolRule=self.action_protocolRule,
action_scanLoad=self.action_scanLoad)
# Setup Montior object
self.monitorObj = FirewallMonitor(interface=self.interface,
debug=debug)
# Integrations
self.integrations = ['Firewall',
'Monitor']
    def restore_state(self):
        """Flush all iptables rules to restore the pre-firewall state."""
        resp = utils.excecute_command('iptables --flush')
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
def parse_inbound_IPRule(self):
try:
action = int(self.cred['inbound_IPRule']['action'])
temp_ip_inbound = []
if len(self.cred['inbound_IPRule']['ip_inbound']):
list_of_IPs = str(self.cred['inbound_IPRule']['ip_inbound'])
list_of_IPs = list_of_IPs.split(',')
for IP in list_of_IPs:
if '-' in IP:
for new_ip in utils.generate_IPs(IP):
if (new_ip not in temp_ip_inbound and
utils.check_ip(new_ip)):
temp_ip_inbound.append(str(new_ip).strip())
elif (utils.check_ip(IP)):
if IP not in temp_ip_inbound:
temp_ip_inbound.append(str(IP).strip())
return temp_ip_inbound, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_outbound_IPRule(self):
try:
action = int(self.cred['outbound_IPRule']['action'])
temp_ip_outbound = []
if len(self.cred['outbound_IPRule']['ip_outbound']):
list_of_IPs = str(self.cred['outbound_IPRule']['ip_outbound'])
list_of_IPs = list_of_IPs.split(',')
for IP in list_of_IPs:
if '-' in IP:
for new_ip in utils.generate_IPs(IP):
if (new_ip not in temp_ip_outbound and
utils.check_ip(new_ip)):
temp_ip_outbound.append(str(new_ip).strip())
elif (utils.check_ip(IP)):
if IP not in temp_ip_outbound:
temp_ip_outbound.append(str(IP).strip())
return temp_ip_outbound, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_protocolRule(self):
try:
temp_protocol = []
action = int(self.cred['protocolRule']['action'])
if len(self.cred['protocolRule']['protocols']):
protocols = str(self.cred['protocolRule']['protocols'])
protocols = protocols.split(',')
protocols = map(utils.map_protocol, protocols)
protocols = list(protocols)
for protocol in protocols:
if (protocol and
protocol not in temp_protocol):
temp_protocol.append(protocol)
return temp_protocol, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_DNSRule(self):
try:
temp_DNS = []
action = int(self.cred['DNSRule']['action'])
if len(self.cred['DNSRule']['dns']):
dns = str(self.cred['DNSRule']['dns'])
dns = dns.split(',')
for single_dns in dns:
if single_dns not in temp_DNS:
temp_DNS.append(str(single_dns).strip())
return temp_DNS, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_source_portRule(self):
try:
temp_sports = []
action = int(self.cred['source_portRule']['action'])
if len(self.cred['source_portRule']['sports']):
sports = str(self.cred['source_portRule']['sports'])
sports = sports.split(',')
for port in sports:
if '-' in port:
for new_port in utils.generate_ports(port):
if (new_port not in temp_sports and
utils.check_port(new_port)):
temp_sports.append(str(new_port).strip())
elif utils.check_port(port):
if port not in temp_sports:
temp_sports.append(str(port).strip())
return temp_sports, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_dest_portRule(self):
try:
temp_dports = []
action = int(self.cred['dest_portRule']['action'])
if len(self.cred['dest_portRule']['dports']):
dports = str(self.cred['dest_portRule']['dports'])
dports = dports.split(',')
for port in dports:
if '-' in port:
for new_port in utils.generate_ports(port):
if (new_port not in temp_dports and
utils.check_port(new_port)):
temp_dports.append(str(new_port).strip())
elif utils.check_port(port):
if port not in temp_dports:
temp_dports.append(str(port).strip())
return temp_dports, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_HTTPResponse(self):
"""
Parse HTTPResponse configurations.
Args:
None
Raises:
None
Returns:
action (int): 0 or 1
"""
try:
action = int(self.cred['HTTPResponse']['action'])
return action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Allow HTTPResponse
return 1
def parse_HTTPRequest(self):
"""
Parse HTTPRequest configurations.
Args:
None
Raises:
None
Returns:
action (int): 0 or 1
"""
try:
action = int(self.cred['HTTPRequest']['action'])
return action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Allow HTTPRequest
return 1
def parse_scanLoad(self):
try:
temp_extension = []
action = int(self.cred['scanLoad']['action'])
if len(self.cred['scanLoad']['extensions']):
extensions = str(self.cred['scanLoad']['extensions'])
extensions = extensions.split(',')
for extension in extensions:
if extension not in temp_extension:
temp_extension.append(str(extension).strip())
return temp_extension, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_time(self):
try:
current_time = datetime.datetime.now()
time_lb = self.cred['time']['time_lb']
time_ub = self.cred['time']['time_ub']
datetime_lb = current_time.replace(hour=int((time_lb).split(':')[0]),
minute=int((time_lb).split(':')[1]))
datetime_ub = current_time.replace(hour=int((time_ub).split(':')[0]),
minute=int((time_ub).split(':')[1]))
if (current_time > datetime_lb and
current_time < datetime_ub):
return True
else:
return False
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
def process_packet(self, pkt):
if (self.packetFilterObj.process(pkt) and
                self.parse_time()):
pkt.accept()
else:
pkt.drop()
def startFirewall(self):
input_command = 'iptables -I INPUT -j NFQUEUE --queue-num 0'
output_command = 'iptables -I OUTPUT -j NFQUEUE --queue-num 0'
resp = utils.excecute_command(input_command)
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
resp = utils.excecute_command(output_command)
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
try:
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, self.process_packet)
queue.run()
except KeyboardInterrupt:
# Restore iptables state
self.restore_state()
def startMonitor(self):
self.monitorObj.startMonitoring()
def startEngine(self):
processes = []
firewallProcess = multiprocessing.Process(target=self.startFirewall)
monitorProcess = multiprocessing.Process(target=self.startMonitor)
firewallProcess.start()
monitorProcess.start()
processes.append(firewallProcess)
processes.append(monitorProcess)
self.logger.log(
"Integrations: " + str(self.integrations),
logtype="info"
)
for process in processes:
process.join()
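# Minimal usage sketch. The credential dict layout below is inferred from the
# parse_* methods above and the concrete values are placeholders; running the
# engine requires root privileges and a working netfilterqueue setup.
if __name__ == '__main__':
    cred = {
        'interface': '',
        'inbound_IPRule': {'action': 0, 'ip_inbound': ''},
        'outbound_IPRule': {'action': 0, 'ip_outbound': ''},
        'protocolRule': {'action': 0, 'protocols': 'ICMP'},
        'DNSRule': {'action': 0, 'dns': ''},
        'source_portRule': {'action': 0, 'sports': ''},
        'dest_portRule': {'action': 0, 'dports': ''},
        'HTTPRequest': {'action': 1},
        'HTTPResponse': {'action': 1},
        'scanLoad': {'action': 0, 'extensions': '.exe'},
        'time': {'time_lb': '00:00', 'time_ub': '23:59'},
    }
    FirewallEngine(cred, debug=True).startEngine()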
|
py | 1a4826aaed2e996ada87fbc7a1dbaaad1c58546d | from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
class Link(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __unicode__(self):
return "Link to %s id=%s" % (self.content_type, self.object_id)
class Place(models.Model):
name = models.CharField(max_length=100)
links = generic.GenericRelation(Link)
def __unicode__(self):
return "Place: %s" % self.name
class Restaurant(Place):
def __unicode__(self):
return "Restaurant: %s" % self.name |
py | 1a48273fcffbd81bf82e527381a62c49f570d938 | from collections import defaultdict, Counter, deque
from functools import cache
from itertools import product, pairwise
from multiprocessing import Pool
import math
import re
non_digits = re.compile('[^0-9]+')
def sign(a, b, step=1):
return int(math.copysign(step, b-a))
def autorange(a,b, step=1):
if a == b:return (a,)
s = sign(a, b, step)
return range(a, b+s, s)
def get_ints(line, strip_line=False):
if strip_line:
line = line.strip()
return [*map(int, non_digits.split(line))]
grid_char = {'.': '.', (0,1): 'v', (1,0):'>'}
def d25(inp, sample=False):
p1, p2 = None, None
grid = {}
max_x, max_y = 0, 0
for y, line in enumerate(inp.split()):
max_y = max(y+1, max_y)
for x, char in enumerate(line):
max_x = max(x+1, max_x)
if char == '>':
grid[x,y] = (1,0)
elif char == 'v':
grid[x,y] = (0,1)
turn = 0
moved = True
n_grid = {}
while moved:
# if turn in (0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 55, 56, 57, 58):
# print(f"After {turn} steps:")
# for y in range(max_y):
# for x in range(max_x):
# print(grid_char[grid.get((x,y), '.')], end='')
# print()
turn += 1
moved = False
for (x,y), (dx, dy) in grid.items():
if dy:
n_grid[x,y] = grid[x,y]
continue
nt = nx, ny = (x+dx)%max_x, (y+dy)%max_y
if grid.get(nt, None) is None:
n_grid[nt] = dx,dy
moved = True
else:
n_grid[x,y] = dx,dy
grid = n_grid
n_grid = {}
for (x,y), (dx, dy) in grid.items():
if dx:
n_grid[x,y] = grid[x,y]
continue
nt = nx, ny = (x+dx)%max_x, (y+dy)%max_y
if grid.get(nt, None) is None:
n_grid[nt] = dx,dy
moved = True
else:
n_grid[x,y] = dx,dy
grid = n_grid
n_grid = {}
p1 = turn
return p1, p2
def validate_test(case_id, inp=None, want_p1=None, want_p2=None):
do_p1, do_p2 = False, False
#print(f"validate_test({case_id}, {inp}, {want_p1}, {want_p2})")
got_p1, got_p2 = d25(inp, sample=True)
if want_p1 is not None:
assert want_p1 == got_p1, f"{case_id=} p1:\n\t{want_p1=}\n\t{got_p1=}"
do_p1 = True
if want_p2 is not None:
assert want_p2 == got_p2, f"{case_id=} p2:\n\t{want_p2=}\n\t{got_p2=}"
do_p2 = True
return True, do_p1, do_p2
def main():
with open('../inputs/d25.txt') as f:
inp = f.read().strip()
return d25(inp)
if __name__ == '__main__':
cases = [
#(id, inp, p1, p2),
(0, """v...>>.vv>
.vv>>.vv..
>>.>v>...v
>>v>>.>.v.
v>v.vv.v..
>.>>..v...
.vv..>.>v.
v.v..>>v.v
....v..v.>""", 58, None),
]
"""
# Non multiprocessing version
for case in cases:
validate_test(*case)
p1, p2 = main()
print(f"p1 = {p1}\np2 = {p2}")
"""
with Pool(processes=min(8, len(cases) + 1)) as pool:
main_res = pool.apply_async(main)
test_res = [pool.apply_async(validate_test, case) for case in cases]
        test_pass, do_p1, do_p2 = True, False, False
        for test in test_res:
            tp, dp1, dp2 = test.get(30)
            test_pass &= tp
            do_p1 |= dp1
            do_p2 |= dp2
        if test_pass:
            p1, p2 = main_res.get(60)
        assert do_p1 or do_p2, "Didn't run any tests"
        assert p1 is None or do_p1 == True, "Got P1 value without 'do_p1' set"
        assert p2 is None or do_p2 == True, "Got P2 value without 'do_p2' set"
        print(f"p1 = {p1}\np2 = {p2}")
|
py | 1a4828d8608636eb2fa97f41d96f8c3558c6a4f3 | import numpy as np
import matplotlib.pyplot as plt
from math import ceil
from random import randint
import pickle,glob, cv2,os
def get_input_shape(dataPath, segmentationScheme):
imgLoc = "{}/segmentation/{}/training/images/".format(dataPath,segmentationScheme)
labelLoc = "{}/segmentation/{}/labels.pickle".format(dataPath,segmentationScheme)
with open(labelLoc, 'rb') as f:
data = pickle.load(f)
data = data['training']
imageName = data[0]
img = cv2.imread("{}{}.jpg".format(imgLoc,imageName))
return img.shape
def read_n_images(data, start, end, dataPath):
"""
Read images (should be jpg) from a dataset (from indexes start to end).
:param data: list - image names
:param start: int - start index
:param end: int - end index
:param loc: str - directory location of the images
:return: numpy - numpy array of (BGR) image
"""
assert glob.glob(dataPath), "Check directory."
assert glob.glob("{}/*.jpg".format(dataPath)), "Check file extension (should be 'jpg')."
images_list = data[start:end]
images = [cv2.imread("{}/{}.jpg".format(dataPath, image)) for image in images_list]
return np.array(images)
def generate_image_segmentation_labels(method,segmentationScheme ,batchSize, dataDir='', squashOutput=True):
imagePath = "{}/segmentation/{}/{}/images".format(dataDir,segmentationScheme,method)
segmentsPath = "{}/segmentation/{}/{}/labels".format(dataDir,segmentationScheme,method)
labelPath = "{}/segmentation/{}/labels.pickle".format(dataDir,segmentationScheme)
while True:
with open(labelPath, 'rb') as f:
data = pickle.load(f)
methods = list(data.keys())
assert method in methods, "'{}' not a valid mode (must be one of {})".format(method, str(methods))
data = data[method]
for idx in range(0, len(data), batchSize):
start = idx
end = idx + batchSize
images = read_n_images(data, start, end, imagePath)
segmentations = read_n_images(data, start, end, segmentsPath)
if squashOutput == True:
segmentations = segmentations[:,:,:,0]+segmentations[:,:,:,1]+segmentations[:,:,:,2]
sShape = segmentations.shape
segmentations = segmentations.reshape((sShape[0],sShape[1],sShape[2],1))
yield (images / 255, segmentations / 255)
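# Minimal usage sketch (scheme name, batch size and data directory are
# hypothetical); pulls one (images, masks) batch to inspect shapes:
#
#     gen = generate_image_segmentation_labels('training', 'binary',
#                                              batchSize=8, dataDir='data')
#     images, masks = next(gen)
#     print(images.shape, masks.shape)  # e.g. (8, H, W, 3) and (8, H, W, 1)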
def get_num_images(method, segmentationScheme, dataDir):
labelPath = "{}/segmentation/{}/labels.pickle".format(dataDir, segmentationScheme)
with open(labelPath, 'rb') as f:
data = pickle.load(f)
data = data[method]
return len(data) |
py | 1a482b2e970419a9087358942d464514060db49a | from django.db import models
class Almacen(models.Model):
nombre = models.CharField(max_length=30, help_text="nombre del almacen")
direccion = models.CharField(max_length=60, help_text="direccion del almacen")
telefono = models.CharField(null=True, max_length=15,
help_text="telefono de comunicacion")
def __str__(self):
return self.nombre
class Estante(models.Model):
nombre = models.CharField(max_length=30, help_text="nombre del almacen")
almacen = models.ForeignKey(Almacen, on_delete=models.CASCADE,
help_text="Almacen al que pertenece",
related_name="estantes")
capacidad_total = models.FloatField(default=0, help_text="capacidad del almacen")
capacidad_restante = models.FloatField(help_text="capacidad disponible en el almacen") |
py | 1a482bc7ec37ee3ef8b384f3024373b745b16639 | import GCRCatalogs
from GCRCatalogs import GCRQuery
import pandas as pd
import numpy as np
# We load the catalog with addons
cat = GCRCatalogs.load_catalog('dc2_object_run2.2i_dr6_with_addons')
columns_to_get0 = ["objectId", "Ixx_pixel", "Iyy_pixel", "Ixy_pixel", "IxxPSF_pixel", "IyyPSF_pixel", 'IxyPSF_pixel']
#columns_to_get0 = ["objectId"]
columns_to_get2 = ["match_objectId", "cosmodc2_id_truth"]
DF0 = cat.catalogs[0].get_quantities(columns_to_get0)
DF0 = pd.DataFrame(DF0)
print(DF0.head())
DF2 = cat.catalogs[2].get_quantities(columns_to_get2)
DF2 = pd.DataFrame(DF2)
print(DF2.head())
# rename match_objectid in DF2
DF2.rename(columns={"match_objectId":"objectId"}, inplace=True)
DF_merged = pd.merge(DF0, DF2, on=["objectId"])
print(DF_merged.head())
DF_merged.rename(columns={"cosmodc2_id_truth":"cosmoDC2_ID"}, inplace=True)
#get a sense for the ranges of IDs
print(np.sort(DF_merged.loc[DF_merged['cosmoDC2_ID'] > 0, 'cosmoDC2_ID']))
print("Number of nans in Ixx_pixel: ", np.sum(np.isnan(DF_merged['Ixx_pixel'])))
DF_merged = DF_merged.loc[np.logical_not(np.isnan(DF_merged['Ixx_pixel']))] # remove the nans
DF_merged['RSQ_pixel_gal'] = (DF_merged['Ixx_pixel']+DF_merged['Iyy_pixel']) - (DF_merged['IxxPSF_pixel']+DF_merged['IyyPSF_pixel'])
print("saving file.")
print(np.sort(DF_merged.loc[DF_merged['cosmoDC2_ID'] > 0, 'cosmoDC2_ID']))
DF_merged.to_csv("/global/cscratch1/sd/mlokken/sn_hostenv/FullImageMomentsCatalog.tar.gz")
print("Done.") |
py | 1a482bddc36b5aebea5040359d6f0941eac147e2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Bruce Smith <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: nictagadm
short_description: Manage nic tags on SmartOS systems
description:
- Create or delete nic tags on SmartOS systems.
version_added: '2.8'
author:
- Bruce Smith (@SmithX10)
options:
name:
description:
- Name of the nic tag.
required: true
type: str
mac:
description:
- Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
- Parameters I(mac) and I(etherstub) are mutually exclusive.
type: str
etherstub:
description:
- Specifies that the nic tag will be attached to a created I(etherstub).
- Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
type: bool
default: no
mtu:
description:
- Specifies the size of the I(mtu) of the desired nic tag.
- Parameters I(mtu) and I(etherstub) are mutually exclusive.
type: int
force:
description:
- When I(state) is absent set this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
type: bool
default: no
state:
description:
- Create or delete a SmartOS nic tag.
type: str
choices: [ absent, present ]
default: present
'''
EXAMPLES = r'''
- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
nictagadm:
name: storage0
mac: 00:1b:21:a3:f5:4d
mtu: 9000
state: present
- name: Remove 'storage0' nic tag
nictagadm:
name: storage0
state: absent
'''
RETURN = r'''
name:
description: nic tag name
returned: always
type: str
sample: storage0
mac:
description: MAC Address that the nic tag was attached to.
returned: always
type: str
sample: 00:1b:21:a3:f5:4d
etherstub:
description: specifies if the nic tag will create and attach to an etherstub.
returned: always
type: bool
sample: False
mtu:
description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive.
returned: always
type: int
sample: 1500
force:
description: Shows if -f was used during the deletion of a nic tag
returned: always
type: bool
sample: False
state:
description: state of the target
returned: always
type: str
sample: present
'''
from ansible.module_utils.basic import AnsibleModule
import re
class NicTag(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.mac = module.params['mac']
self.etherstub = module.params['etherstub']
self.mtu = module.params['mtu']
self.force = module.params['force']
self.state = module.params['state']
self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
def is_valid_mac(self):
if re.match("[0-9a-f]{2}([:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", self.mac.lower()):
return True
return False
def nictag_exists(self):
cmd = [self.nictagadm_bin]
cmd.append('exists')
cmd.append(self.name)
(rc, dummy, dummy) = self.module.run_command(cmd)
return rc == 0
def add_nictag(self):
cmd = [self.nictagadm_bin]
cmd.append('-v')
cmd.append('add')
if self.etherstub:
cmd.append('-l')
if self.mtu:
cmd.append('-p')
cmd.append('mtu=' + str(self.mtu))
if self.mac:
cmd.append('-p')
cmd.append('mac=' + str(self.mac))
cmd.append(self.name)
return self.module.run_command(cmd)
def delete_nictag(self):
cmd = [self.nictagadm_bin]
cmd.append('-v')
cmd.append('delete')
if self.force:
cmd.append('-f')
cmd.append(self.name)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
mac=dict(type='str'),
etherstub=dict(type='bool', default=False),
mtu=dict(type='int'),
force=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
mutually_exclusive=[
['etherstub', 'mac'],
['etherstub', 'mtu'],
],
required_if=[
['etherstub', False, ['name', 'mac']],
['state', 'absent', ['name', 'force']],
],
supports_check_mode=True
)
nictag = NicTag(module)
rc = None
out = ''
err = ''
result = dict(
changed=False,
etherstub=nictag.etherstub,
force=nictag.force,
name=nictag.name,
mac=nictag.mac,
mtu=nictag.mtu,
state=nictag.state,
)
    if nictag.mac and not nictag.is_valid_mac():
module.fail_json(msg='Invalid MAC Address Value',
name=nictag.name,
mac=nictag.mac,
etherstub=nictag.etherstub)
if nictag.state == 'absent':
if nictag.nictag_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = nictag.delete_nictag()
if rc != 0:
module.fail_json(name=nictag.name, msg=err, rc=rc)
elif nictag.state == 'present':
if not nictag.nictag_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = nictag.add_nictag()
if rc is not None and rc != 0:
module.fail_json(name=nictag.name, msg=err, rc=rc)
if rc is not None:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
|
py | 1a482be81a1d34d63fca13d11aee9b5842d12e12 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Omar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DownloadSalesInvoice(Document):
def get_company_info(self):
doc = frappe.get_single("Company Info")
return doc.company_name, doc.address, doc.mobile, doc.tax_id
def validate(self):
doc = frappe.get_single("Company Info")
self.company_name = doc.company_name
self.company_address = doc.address
self.company_mobile = doc.mobile
self.tax_id = doc.tax_id
|
py | 1a482c6b780fc4178a82434cf64436f5e044ac97 | import sdp.scripts.load_nstx_exp_ref as nstx_exp
import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp
import sdp.plasma.analysis as ana
import matplotlib.pyplot as plt
import pickle
import numpy as np
with open('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/ref_pos.pck','r') as f:
ref_pos = pickle.load(f)
dne_ana = ana.XGC_Density_Loader('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/dne_file.sav.npz')
n_channel = 16
#create the distance matrix, dx[i,j] is the absolute distance between the reflection points of i-th and j-th channel
dx = np.absolute(np.zeros((n_channel,n_channel))+ref_pos[np.newaxis,:]-ref_pos[:,np.newaxis])
#calculate cross-correlation matrix from synthetic signals
cc_fwr = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref2d_out)
cc_fwr2 = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref2d_amp2_out)
cc_fwr01 = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref2d_amp01_out)
cc_3d = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref3d_out)
cs_fwr = fwrpp.pp.Self_Correlation(fwrpp.ref2d_out)
cs_fwr2 = fwrpp.pp.Self_Correlation(fwrpp.ref2d_amp2_out)
cs_fwr01 = fwrpp.pp.Self_Correlation(fwrpp.ref2d_amp01_out)
cs_3d = fwrpp.pp.Self_Correlation(fwrpp.ref3d_out)
print 'FWR data loaded'
#calculate cross-correlation matrix from experimental signals, note that for our case, the simulated time slice is at t=0.632s, so we choose corresponding experimental data from 0.632-0.640, the total sample number is chosen to be 2000 because larger sample doesn't bring in any difference, since the increased samples are not statistical independent.
cc_exp = nstx_exp.analyser.Cross_Correlation_by_fft(0.632,0.640,8000)
#cc_exp_short = nstx_exp.analyser.Cross_Correlation_by_fft(0.634,0.6348,8000)
#calculate coherent signal for all channels from NSTX. The result is an 2D array containing time series of coherent signal from all the channels.
cs_exp = nstx_exp.analyser.Coherent_over_time(0.632,0.640,2e-5,1e-4)
print 'nstx data loaded'
#choose the channel ranges representing top/bottom part of pedestal, and center channels for each region.
top_center = 11
top_range = [8,12]
bottom_center = 6
bottom_range = [2,7]
#pick chosen data from whole correlation matrices
fwr_top=[]
fwr2_top = []
fwr01_top=[]
fwr3d_top=[]
exp_top = []
dx_top=[]
def pick_top():
global fwr_top,fwr2_top,exp_top,dx_top,fwr01_top,fwr3d_top
fwr_top = np.absolute(cc_fwr[top_center,top_range[0]:top_range[1]])
fwr2_top = np.absolute(cc_fwr2[top_center,top_range[0]:top_range[1]])
fwr01_top = np.absolute(cc_fwr01[top_center,top_range[0]:top_range[1]])
fwr3d_top = np.absolute(cc_3d[top_center,top_range[0]:top_range[1]])
exp_top = np.absolute(cc_exp[top_center,top_range[0]:top_range[1]])
dx_top = dx[top_center,top_range[0]:top_range[1]]
pick_top()
fwr_bot=[]
fwr2_bot=[]
fwr01_bot = []
fwr3d_bot = []
exp_bot=[]
dx_bot=[]
def pick_bottom():
global fwr_bot,fwr2_bot,fwr01_bot,exp_bot,dx_bot,fwr3d_bot
fwr_bot = np.absolute(cc_fwr[bottom_center,bottom_range[0]:bottom_range[1]])
fwr2_bot = np.absolute(cc_fwr2[bottom_center,bottom_range[0]:bottom_range[1]])
fwr01_bot = np.absolute(cc_fwr01[bottom_center,bottom_range[0]:bottom_range[1]])
fwr3d_bot = np.absolute(cc_3d[bottom_center,bottom_range[0]:bottom_range[1]])
exp_bot = np.absolute(cc_exp[bottom_center,bottom_range[0]:bottom_range[1]])
dx_bot = dx[bottom_center,bottom_range[0]:bottom_range[1]]
pick_bottom()
#fitting with gaussian(for bottom) and exponential(for top)
xmax_t = 0
xfit_t = 0
fwr_fit_t = 0
fwr2_fit_t = 0
fwr01_fit_t = 0
fwr3d_fit_t = 0
exp_fit_t = 0
fwr_t_a,fwr_t_sa = 0,0
fwr2_t_a,fwr2_t_sa = 0,0
fwr01_t_a,fwr01_t_sa = 0,0
fwr3d_t_a,fwr3d_t_sa = 0,0
exp_t_a,exp_t_sa = 0,0
xgc_fit_t = 0
xgc_t_a,xgc_t_sa = 0,0
x_t,dne_c_t = 0,0
def fit_top():
    global fwr_t_a,fwr_t_sa,fwr2_t_a,fwr2_t_sa,fwr01_t_a,fwr01_t_sa,fwr3d_t_a,fwr3d_t_sa,exp_t_a,exp_t_sa,xmax_t,xfit_t,fwr_fit_t,fwr2_fit_t,exp_fit_t,fwr01_fit_t,fwr3d_fit_t,xgc_fit_t,xgc_t_a,xgc_t_sa,x_t,dne_c_t
fwr_t_a,fwr_t_sa = fwrpp.pp.fitting_cross_correlation(fwr_top,dx_top,'exponential')
fwr2_t_a,fwr2_t_sa = fwrpp.pp.fitting_cross_correlation(fwr2_top,dx_top,'exponential')
fwr01_t_a,fwr01_t_sa = fwrpp.pp.fitting_cross_correlation(fwr01_top,dx_top,'exponential')
fwr3d_t_a,fwr3d_t_sa = fwrpp.pp.fitting_cross_correlation(fwr3d_top,dx_top,'exponential')
exp_t_a,exp_t_sa = fwrpp.pp.fitting_cross_correlation(exp_top,dx_top,'exponential')
opt_t,x_t,dne_c_t = dne_ana.density_correlation(ref_pos[top_center],width = ref_pos[top_range[0]]-ref_pos[top_center])
xgc_t_a,xgc_t_sa = opt_t
xmax_t = 2*np.max((np.abs(fwr_t_a),np.abs(fwr2_t_a),np.abs(exp_t_a)))
xfit_t = np.linspace(0,xmax_t,500)
fwr_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr_t_a)
fwr2_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr2_t_a)
fwr01_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr01_t_a)
fwr3d_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr3d_t_a)
exp_fit_t = fwrpp.pp.exponential_fit(xfit_t,exp_t_a)
xgc_fit_t = ana.gaussian_correlation_func(xfit_t,xgc_t_a)
fit_top()
xmax_b = 0
xfit_b = 0
fwr_fit_b = 0
fwr2_fit_b = 0
fwr01_fit_b = 0
fwr3d_fit_b = 0
exp_fit_b = 0
fwr_b_a,fwr_b_sa = 0,0
fwr2_b_a,fwr2_b_sa = 0,0
fwr01_b_a,fwr01_b_sa = 0,0
fwr3d_b_a,fwr3d_b_sa = 0,0
exp_b_a,exp_b_sa = 0,0
xgc_fit_b = 0
xgc_b_a,xgc_b_sa = 0,0
x_b,dne_c_b = 0,0
def fit_bot():
    global fwr_b_a,fwr_b_sa,fwr2_b_a,fwr2_b_sa,fwr01_b_a,fwr01_b_sa,fwr3d_b_a,fwr3d_b_sa,exp_b_a,exp_b_sa,xmax_b,xfit_b,fwr_fit_b,fwr2_fit_b,exp_fit_b,fwr01_fit_b,fwr3d_fit_b,xgc_fit_b,xgc_b_a,xgc_b_sa,x_b,dne_c_b
fwr_b_a,fwr_b_sa = fwrpp.pp.fitting_cross_correlation(fwr_bot,dx_bot,'gaussian')
fwr2_b_a,fwr2_b_sa = fwrpp.pp.fitting_cross_correlation(fwr2_bot,dx_bot,'gaussian')
fwr01_b_a,fwr01_b_sa = fwrpp.pp.fitting_cross_correlation(fwr01_bot,dx_bot,'gaussian')
fwr3d_b_a,fwr3d_b_sa = fwrpp.pp.fitting_cross_correlation(fwr3d_bot,dx_bot,'gaussian')
exp_b_a,exp_b_sa = fwrpp.pp.fitting_cross_correlation(exp_bot,dx_bot,'gaussian')
opt_b,x_b,dne_c_b = dne_ana.density_correlation(ref_pos[bottom_center],width = ref_pos[bottom_range[0]]-ref_pos[bottom_center])
xgc_b_a,xgc_b_sa = opt_b
xmax_b = 2*np.sqrt(np.max((np.abs(fwr_b_a),np.abs(fwr2_b_a),np.abs(exp_b_a))))
xfit_b = np.linspace(0,xmax_b,500)
fwr_fit_b = fwrpp.pp.gaussian_fit(xfit_b,fwr_b_a)
fwr2_fit_b = fwrpp.pp.gaussian_fit(xfit_b,fwr2_b_a)
fwr01_fit_b = fwrpp.pp.gaussian_fit(xfit_b,fwr01_b_a)
fwr3d_fit_b = fwrpp.pp.gaussian_fit(xfit_b,fwr3d_b_a)
exp_fit_b = fwrpp.pp.gaussian_fit(xfit_b,exp_b_a)
xgc_fit_b = ana.gaussian_correlation_func(xfit_b,xgc_b_a)
fit_bot()
print 'fitting complete'
print 'fitting curve ready. call plot() to plot. note that the default region is top, pass "bottom" as the argument to plot bottom region. '
#plot the data points and curves
total_plot = 0
#top data
def plot(region = 'top'):
global total_plot
#plt.figure()
#total_plot += 1
if(region == 'top'):
plt.title('Cross-Correlation at Upper Pedestal,center_channel at {0:.4}m'.format(ref_pos[top_center]))
plt.plot(dx_top,exp_top,'bs',label = 'exp data')
plt.plot(dx_top,fwr_top,'ro',label = 'FWR data amp=1')
plt.plot(dx_top,fwr2_top,'r^',label = 'FWR data amp=2')
plt.plot(dx_top,fwr01_top,'r+',label = 'FWR data amp=0.1')
plt.plot(xfit_t,exp_fit_t,'b-',label = 'exp exponential fit')
plt.plot(xfit_t,fwr_fit_t,'r--',label = 'FWR fit')
plt.plot(xfit_t,fwr2_fit_t,'r-.',label = 'FWR amp2 fit')
plt.plot(xfit_t,fwr01_fit_t,'r:',label = 'FWR amp0.1 fit')
plt.xlabel('distance from center channel reflection($m$)')
plt.ylabel('cross-correlation')
plt.legend(labelspacing = 0.2,prop = {'size':12})
plt.tight_layout()
elif(region == 'bottom'):
plt.title('Cross-Correlation at Lower Pedestal,center_channel at {0:.4}m'.format(ref_pos[bottom_center]))
plt.plot(dx_bot,exp_bot,'bs',label = 'exp data')
plt.plot(dx_bot,fwr_bot,'ro',label = 'FWR data amp=1')
plt.plot(dx_bot,fwr2_bot,'r^',label = 'FWR data amp=2')
plt.plot(dx_bot,fwr01_bot,'r+',label = 'FWR data amp=0.1')
plt.plot(xfit_b,exp_fit_b,'b-',label = 'exp gaussian fit')
plt.plot(xfit_b,fwr_fit_b,'r--',label = 'FWR fit')
plt.plot(xfit_b,fwr2_fit_b,'r-.',label = 'FWR amp2 fit')
plt.plot(xfit_b,fwr01_fit_b,'r:',label = 'FWR amp0.1 fit')
plt.xlabel('distance from center channel reflection($m$)')
plt.ylabel('cross-correlation')
plt.legend(labelspacing = 0.2,prop = {'size':12})
plt.tight_layout()
elif(region == '2d/3d_top'):
plt.title('Cross-Correlation at Upper Pedestal,center_channel at {0:.4}m'.format(ref_pos[top_center]))
plt.plot(dx_top,exp_top,'bs',label = 'exp data')
plt.plot(dx_top,fwr_top,'ro',label = 'FWR2D data')
plt.plot(dx_top,fwr3d_top,'r^',label = 'FWR3D data')
plt.plot(xfit_t,exp_fit_t,'b-',label = 'exp exponential fit')
plt.plot(xfit_t,fwr_fit_t,'r--',label = 'FWR2D fit')
plt.plot(xfit_t,fwr3d_fit_t,'r-.',label = 'FWR3D fit')
plt.xlabel('distance from center channel reflection($m$)')
plt.ylabel('cross-correlation')
plt.legend(labelspacing = 0.2,prop = {'size':12})
plt.tight_layout()
elif(region =='2d/3d_bot'):
#plt.title('Cross-Correlation at Lower Pedestal,center_channel at {0:.4}m'.format(ref_pos[bottom_center]))
plt.plot(dx_bot,exp_bot,'bs',label = 'exp data')
plt.plot(dx_bot,fwr_bot,'go',label = 'FWR2D data')
plt.plot(dx_bot,fwr3d_bot,'r^',label = 'FWR3D data')
plt.plot(xfit_b,exp_fit_b,'b-')
plt.plot(xfit_b,fwr_fit_b,'g--')
plt.plot(xfit_b,fwr3d_fit_b,'r-.')
plt.xlabel('$distance from center channel(mm)$')
plt.ylabel('$\gamma$')
plt.legend(labelspacing = 0.2,prop = {'size':15})
plt.tight_layout()
elif(region == '3d_bot'):
plt.title('2D/3D Cross-Correlation and XGC1 Density Correlation, Lower')
plt.plot(dx_bot,fwr_bot,'ro',label = '2D')
plt.plot(dx_bot,fwr3d_bot,'r^',label = '3D')
plt.plot(x_b,dne_c_b,'bs',label = 'XGC')
plt.plot(xfit_b,fwr_fit_b,'r-.',label = '2D fit')
plt.plot(xfit_b,fwr3d_fit_b,'r--',label = '3D fit')
plt.plot(xfit_b,xgc_fit_b,'b-',label = 'XGC fit')
plt.xlabel('distance from center channel relfection($m$)')
plt.ylabel('cross-corelation')
plt.legend(labelspacing = 0.2,prop = {'size':12})
plt.tight_layout()
elif(region == '3d_top'):
plt.title('2D/3D Cross-Correlation and XGC1 Density Correlation, Upper')
plt.plot(dx_top,fwr_top,'ro',label = '2D')
plt.plot(dx_top,fwr3d_top,'r^',label = '3D')
plt.plot(x_t,dne_c_t,'bs',label = 'XGC')
plt.plot(xfit_t,fwr_fit_t,'r-.',label = '2D fit')
plt.plot(xfit_t,fwr3d_fit_t,'r--',label = '3D fit')
plt.plot(xfit_t,xgc_fit_t,'b-',label = 'XGC fit')
plt.xlabel('distance from center channel relfection($m$)')
plt.ylabel('cross-corelation')
plt.legend(labelspacing = 0.2,prop = {'size':12})
plt.tight_layout()
def clear_all():
global total_plot
for i in range(total_plot):
plt.close()
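# Usage sketch (comment only): the data files loaded at import time live on
# the original analysis cluster, so these calls assume that environment.
#
#     plot('top')
#     plot('bottom')
#     plot('2d/3d_bot')
#     plt.show()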
# Coherent Signal comparison
|
py | 1a482c7d5f3f5bd01c35bd4b4d00a630c0cc05c9 | import mpfam.mpfam
def main():
mpfam.mpfam.launch()
if __name__ == "__main__":
main()
|
py | 1a482de85aa7c1890c6f86eadadf687a696f9b91 | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
import textwrap
# Append the src dir
sys.path.append(os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), 'src'))
import passes # noqa (E402 module level import not at top of file)
# TODO: This should not be hard coded.
PIPELINES = ["PreSpecialize", "HighLevel", "EarlyLoopOpt",
"MidLevelOpt", "Lower", "LowLevel", "LateLoopOpt"]
PASSES = [p.name for p in passes.PASSES]
DEFAULT_PRESENTS = \
"--preset=buildbot_incremental_extra_swift_args,tools=RA,stdlib=RD"
def run_build_script_with_data_file(build_script, data_file, verbose=False):
build_script_args = [
build_script,
DEFAULT_PRESENTS,
r'extra_swift_args=^Swift$;-Xfrontend\;' +
r'-external-pass-pipeline-filename\;-Xfrontend\;%s' % data_file]
sys.stdout.write("Running build script with: %s..." %
' '.join(build_script_args))
sys.stdout.flush()
if not verbose:
p = subprocess.Popen(
build_script_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stdout.readlines()
status = p.wait()
if status == 0:
sys.stdout.write(" Success!\n")
else:
sys.stdout.write(" Failure:\n")
            sys.stdout.writelines(output)
sys.stdout.flush()
else:
p = subprocess.Popen(build_script_args)
status = p.wait()
if status == 0:
sys.stdout.write(" Success!\n")
else:
sys.stdout.write(" Failure:\n")
def build_disable_slice_pipelines(**kwargs):
pipeline_range = range(len(PIPELINES))
def get_pipeline_args(script, iter):
result = [script]
for j in iter:
result.extend(['--disable-passpipeline', PIPELINES[j]])
return result
for i in pipeline_range:
pipeline_args = get_pipeline_args(
kwargs['pipeline_script'], pipeline_range[:i + 1])
data_file = os.path.join(
kwargs['output_dir'],
"pipeline-slice-%.2d-disabled-pipeline.json" % i)
with open(data_file, 'w') as f:
f.write(subprocess.check_output(pipeline_args))
run_build_script_with_data_file(
kwargs['build_script'], data_file, verbose=kwargs['verbose'])
def build_disable_individual_pass(**kwargs):
pass_name = kwargs['pass_name']
data_file = os.path.join(
kwargs['output_dir'], "%s-disabled-pass.json" % pass_name)
with open(data_file, 'w') as f:
f.write(subprocess.check_output(
[kwargs['pipeline_script'], '--disable-pass', pass_name]))
run_build_script_with_data_file(
kwargs['build_script'], data_file, verbose=kwargs['verbose'])
def build_disable_individual_passes(**kwargs):
for p in PASSES:
d = dict(kwargs)
d['pass_name'] = p
build_disable_individual_pass(**d)
def add_default_parser_args(p):
p.add_argument('pipeline_script', help=textwrap.dedent("""
The path to normal_pipeline.py. In the future could be generalized to take
other files.
"""))
p.add_argument('build_script', help=textwrap.dedent("""
The path to build-script.
"""))
p.add_argument('output_dir', help=textwrap.dedent("""
The output directory to use.
"""))
p.add_argument('-v', action='store_true', dest='verbose',
help=textwrap.dedent("""
Emit verbose output from build-script.
"""))
def main():
parser = argparse.ArgumentParser(
description="Run build-script with various passes disabled")
subparsers = parser.add_subparsers(help="The specific action to perform")
slice_pipeline_parser = subparsers.add_parser(
'disable_slice_pipelines',
description=textwrap.dedent("""
Go through all predefined pass pipelines and run build_script with only
specific slices enabled. Currently what this means is that we perform
the normal pipeline order, stopping after N pipelines have run.
"""))
slice_pipeline_parser.set_defaults(func=build_disable_slice_pipelines)
add_default_parser_args(slice_pipeline_parser)
disable_individual_passes_parser = subparsers.add_parser(
'disable_individual_passes',
description=textwrap.dedent("""
Loop over all predefines passes and run build_script once for each pass
with that pass disabled.
"""))
disable_individual_passes_parser.set_defaults(
func=build_disable_individual_passes)
add_default_parser_args(disable_individual_passes_parser)
disable_individual_pass_parser = subparsers.add_parser(
'disable_individual_pass',
description=textwrap.dedent("""
Run build-script disabling only the specified passes.
"""))
disable_individual_pass_parser.add_argument(
'pass_name',
help="The pass to disable",
choices=PASSES,
type=str)
disable_individual_pass_parser.set_defaults(
func=build_disable_individual_pass)
add_default_parser_args(disable_individual_pass_parser)
args = parser.parse_args()
args.func(**vars(args))
if __name__ == "__main__":
main()
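# Example invocation (script and checkout paths are hypothetical):
#
#   ./run_pass_experiments.py disable_slice_pipelines \
#       utils/pass-pipeline/scripts/normal_pipeline.py \
#       utils/build-script /tmp/pass-experiments -v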
|
py | 1a482e5936d14d586f4a6ad2442371aa448022aa | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Robin Rosenstock and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestExercise_Property_sports_managementu(unittest.TestCase):
pass
|
py | 1a482e821af9e832b4fa91611a47a158823e495b | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.cred_usage_type import CredUsageType # noqa: E501
from openapi_client.rest import ApiException
class TestCredUsageType(unittest.TestCase):
"""CredUsageType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test CredUsageType
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.cred_usage_type.CredUsageType() # noqa: E501
if include_optional :
return CredUsageType(
)
else :
return CredUsageType(
)
def testCredUsageType(self):
"""Test CredUsageType"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
py | 1a482ed909f928d3b0e523ea9f03afd23eb0e3f9 | from web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider
import sys
sys.path.insert(0,"W3CR")
from writer import writer
from reader import reader
import hashlib
import json
import time
import random
import string
from datetime import datetime
class W3CR():
Writer = None
Reader = None
ToReveal = None
Address = None
Password = None
lastTxHash = None
def Commit(self, _data):
key = "".join(random.choices(string.ascii_uppercase, k=9))
dataToCommit = {
"data" : _data,
"key": key,
}
dataBytes = json.dumps(dataToCommit)
self.ToReveal = dataBytes
Hash = hashlib.sha256(dataBytes.encode()).hexdigest()
try:
result = self.Writer.write(Hash)
print("--COMMIT--")
self.lastTxHash = result
except Exception as e:
result = None
print ("--WARNING: Failed To Commit--")
print(e)
print ("--TRY AGAIN--")
time.sleep(60)
            result = self.Commit(_data)
return result
def Reveal(self):
toReveal = {
"payload": json.loads(self.ToReveal),
"commitHash": self.lastTxHash,
}
dataBytes = json.dumps(toReveal)
try:
self.Writer.write(dataBytes)
print("--REVEAL--")
        except Exception as e:
print ("--WARNING: Failed To Reveal--")
print(e)
print ("--TRY AGAIN--")
time.sleep(60)
self.Reveal()
def CheckAndGetData(self, _r):
try:
Tx = self.Reader.read(_r)
            if Tx is not None:
Proven = False
if len(Tx["input"]) >135:
revealInput = json.loads(self.w3.toText(Tx["input"]))
commitTx = self.w3.eth.getTransaction(revealInput["commitHash"])
if self.w3.toText(commitTx["input"]) == hashlib.sha256(json.dumps(revealInput["payload"]).encode()).hexdigest():
Proven = True
ToReturn = {
"msg" : revealInput["payload"],
"wasCommited" : Proven,
"commitmentBlock" : commitTx["blockNumber"] ,
"txHash" : revealInput["commitHash"]
}
return ToReturn
else:
return "commitTX"
else:
return None
except Exception as e:
print("WARNING: Failed to Read--")
print(e)
return None
def __init__(self, _adr, _psw, _w3, _firstRev):
self.Writer = writer(_psw, _adr, _w3)
self.w3 = _w3
self.Reader = reader(_adr, _w3)
self.Password = _psw
self.Address = _adr
self.ToReveal = json.dumps(_firstRev)
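# Minimal usage sketch: the endpoint, account address and password below are
# placeholders, and Commit/Reveal send real transactions on a live chain.
if __name__ == "__main__":
    w3 = Web3(HTTPProvider("http://127.0.0.1:8545"))
    cr = W3CR("0xYourAccountAddress", "account-password", w3,
              {"data": "genesis", "key": "INIT"})
    cr.Commit({"message": "hello world"})
    cr.Reveal()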
|
py | 1a48306b5918795287f541bc9da9bb86f7910ece | import datetime
import hashlib
from urllib.parse import urlparse
import json
class Blockchain():
def __init__(self):
self.chain = []
self.nodes = set()
self.create_block(proof = 1, previous_hash = '0')
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'previous_hash': previous_hash,
'proof': proof
}
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while not check_proof:
problem = new_proof**2 - previous_proof**2 #problem should not be symetric
hash_operation = hashlib.sha256(str(problem).encode()).hexdigest()
if hash_operation[:4] == '0000':
#more the number of leading zeros smaller the target and harder to get the proof of work
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_valid_chain(self, chain):
previous_block = chain[0]
previous_index = 1
        while previous_index < len(chain):
#check if previous_hash is equal to previous block hash
block = chain[previous_index]
if block['previous_hash'] != self.hash(previous_block):
return False
#check if hash_operation of previous_proof and current_proof is below the target
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
#update
previous_block = block
previous_index += 1
return True
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
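# Minimal usage sketch (no networking involved): mine two blocks on a fresh
# chain and verify the result.
if __name__ == '__main__':
    bc = Blockchain()
    for _ in range(2):
        prev = bc.get_previous_block()
        proof = bc.proof_of_work(prev['proof'])
        bc.create_block(proof, bc.hash(prev))
    print('chain valid:', bc.is_valid_chain(bc.chain))
    print('chain length:', len(bc.chain))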
|
py | 1a4831053613c2bf1e3006e9a98836b5ca9ac36f | #!/usr/bin/env python
from nose.tools import *
import cynetworkx as nx
class TestFloyd:
def setUp(self):
pass
def test_floyd_warshall_predecessor_and_distance(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
assert_equal(dist['s']['v'], 9)
assert_equal(path['s']['v'], 'u')
assert_equal(dist,
{'y': {'y': 0, 'x': 12, 's': 7, 'u': 15, 'v': 6},
'x': {'y': 2, 'x': 0, 's': 9, 'u': 3, 'v': 4},
's': {'y': 7, 'x': 5, 's': 0, 'u': 8, 'v': 9},
'u': {'y': 2, 'x': 2, 's': 9, 'u': 0, 'v': 1},
'v': {'y': 1, 'x': 13, 's': 8, 'u': 16, 'v': 0}})
GG = XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight'] = 2
path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
assert_equal(dist['s']['v'], 8)
# skip this test, could be alternate path s-u-v
# assert_equal(path['s']['v'],'y')
G = nx.DiGraph() # no weights
G.add_edges_from([('s', 'u'), ('s', 'x'),
('u', 'v'), ('u', 'x'),
('v', 'y'), ('x', 'u'),
('x', 'v'), ('x', 'y'),
('y', 's'), ('y', 'v')])
path, dist = nx.floyd_warshall_predecessor_and_distance(G)
assert_equal(dist['s']['v'], 2)
# skip this test, could be alternate path s-u-v
# assert_equal(path['s']['v'],'x')
# alternate interface
dist = nx.floyd_warshall(G)
assert_equal(dist['s']['v'], 2)
@raises(KeyError)
def test_reconstruct_path(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
predecessors, _ = nx.floyd_warshall_predecessor_and_distance(XG)
path = nx.reconstruct_path('s', 'v', predecessors)
assert_equal(path, ['s', 'x', 'u', 'v'])
path = nx.reconstruct_path('s', 's', predecessors)
assert_equal(path, [])
# this part raises the keyError
nx.reconstruct_path('1', '2', predecessors)
def test_cycle(self):
path, dist = nx.floyd_warshall_predecessor_and_distance(
nx.cycle_graph(7))
assert_equal(dist[0][3], 3)
assert_equal(path[0][3], 2)
assert_equal(dist[0][4], 3)
def test_weighted(self):
XG3 = nx.Graph()
XG3.add_weighted_edges_from([[0, 1, 2], [1, 2, 12], [2, 3, 1],
[3, 4, 5], [4, 5, 1], [5, 0, 10]])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG3)
assert_equal(dist[0][3], 15)
assert_equal(path[0][3], 2)
def test_weighted2(self):
XG4 = nx.Graph()
XG4.add_weighted_edges_from([[0, 1, 2], [1, 2, 2], [2, 3, 1],
[3, 4, 1], [4, 5, 1], [5, 6, 1],
[6, 7, 1], [7, 0, 1]])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG4)
assert_equal(dist[0][2], 4)
assert_equal(path[0][2], 1)
def test_weight_parameter(self):
XG4 = nx.Graph()
XG4.add_edges_from([(0, 1, {'heavy': 2}), (1, 2, {'heavy': 2}),
(2, 3, {'heavy': 1}), (3, 4, {'heavy': 1}),
(4, 5, {'heavy': 1}), (5, 6, {'heavy': 1}),
(6, 7, {'heavy': 1}), (7, 0, {'heavy': 1})])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG4,
weight='heavy')
assert_equal(dist[0][2], 4)
assert_equal(path[0][2], 1)
def test_zero_distance(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
for u in XG:
assert_equal(dist[u][u], 0)
GG = XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight'] = 2
path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
for u in GG:
dist[u][u] = 0
def test_zero_weight(self):
G = nx.DiGraph()
edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1),
(5, 4, 0), (4, 3, -5), (2, 5, -7)]
G.add_weighted_edges_from(edges)
dist = nx.floyd_warshall(G)
assert_equal(dist[1][3], -14)
G = nx.MultiDiGraph()
edges.append((2, 5, -7))
G.add_weighted_edges_from(edges)
dist = nx.floyd_warshall(G)
assert_equal(dist[1][3], -14)
|
py | 1a4833909e6d0170d8a611b2942c89c399930b67 | import os, sys, pygame
from pygame.locals import *
from Pellet import Pellet
from Pacman import Pacman
from Box import Box
# Initialize Pygame
pygame.init()
# Initialize Clock
mainClock = pygame.time.Clock()
# Constants
WINDOWWIDTH = 448 #(16 * 28) (row numbers range from 0 - 27)
WINDOWHEIGHT = 512 #(16 * 32) (column numbers range from 0 - 31)
LIVES = 3
# Initialize window
window = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
# Initialize colours
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# Set background
background = pygame.image.load('../../sprites/pacman-level.png')
window.blit(background, (0, 0))
# Initialize movement variables
moveLeft = False
moveRight = False
moveDown = False
moveUp = False
# Pixels per loop
MOVESPEED = 16
# Boxes (for collision purposes)
# To create a Box object: Box(x, y, COLOR)
box_group = pygame.sprite.Group()
# Grid (for movement)
# Uses Box objects
grid_group = pygame.sprite.Group()
# Teleporters
l_transporter = pygame.sprite.GroupSingle(Box(0, 16 * 15, BLUE))
r_transporter = pygame.sprite.GroupSingle(Box(16 * 27, 16 * 15, BLUE))
# Goes through the entire map and outlines which 16x16 areas are black
# This identifies where Pacman and Pellets can and cannot go
x = 0
y = 16
while y < WINDOWHEIGHT:
while x < WINDOWWIDTH:
# 16x16 area used for cropping
selected_area = pygame.Rect(x, y, 16, 16)
# Creates a cropped image from the background
cropped_image = background.subsurface(selected_area)
# If the cropped image's color is BLACK
if pygame.transform.average_color(cropped_image)[:3] == BLACK:
grid_member = Box(x, y, GREEN)
grid_member.check_possible_moves(x, y)
grid_group.add(grid_member)
else:
box_group.add(Box(x, y, RED))
x += 16
y += 16
x = 0
# Initialize Pacman
pacman = Pacman(224, 384, MOVESPEED, box_group) # 16 * 14, 16 * 24
pacman_group = pygame.sprite.GroupSingle(pacman)
# Initialize movement variable
movement = 'R'
last_movement = 'R'
# Draw Pacman onto the window
pacman_group.draw(window)
# Update display
pygame.display.update()
def update_window():
"""Updates the window by redrawing the background and sprites"""
# Redraw the background and sprites
window.blit(background, (0, 0))
# box_group.draw(window)
# grid_group.draw(window)
pacman_group.draw(window)
# Update the display
pygame.display.update()
mainClock.tick(10)
def transport_right(sprite):
"""Transports sprite from the right side of the window to the left side"""
while sprite.rect.left <= WINDOWWIDTH:
sprite.rect.right += 2
update_window()
sprite.rect.right = 0
while sprite.rect.left <= 0:
sprite.rect.right += 2
update_window()
sprite.rect = pygame.Rect(16 * 1, 16 * 15, 16, 16)
def transport_left(sprite):
"""Transports sprite from the left side of the window to the right side"""
while sprite.rect.right >= 0:
sprite.rect.left -= 2
update_window()
sprite.rect.left = WINDOWWIDTH
while sprite.rect.right >= WINDOWWIDTH:
sprite.rect.left -= 2
update_window()
sprite.rect = pygame.Rect(16 * 26, 16 * 15, 16, 16)
# Main loop
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_UP:
movement = 'U'
if event.key == K_DOWN:
movement = 'D'
if event.key == K_LEFT:
movement = 'L'
if event.key == K_RIGHT:
movement = 'R'
current_grid_location = pygame.sprite.spritecollide(pacman, grid_group, False)
grid_member = current_grid_location.pop()
if movement in grid_member.valid_moves:
# Updates Pacman's movement
pacman_group.update(movement)
last_movement = movement
else:
if last_movement in grid_member.valid_moves:
pacman_group.update(last_movement)
# Transport Pacman if Pacman collides with either transporter
if pygame.sprite.spritecollide(pacman, l_transporter, False):
transport_left(pacman)
elif pygame.sprite.spritecollide(pacman, r_transporter, False):
transport_right(pacman)
# Update game
update_window() |
py | 1a483399e875aa9c29d463b2bf1fd1351ed640c8 | # All modules in utils package can be run independently and have no dependencies on other modules in the project.
# This makes it easy to reuse in other projects.
|
py | 1a4834af872d5900c3c874cfc99e106d903bc287 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-21 06:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('t3', '0002_stu_grade'),
]
operations = [
migrations.CreateModel(
name='IdCard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('num', models.CharField(max_length=20)),
('org', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='person name')),
('age', models.IntegerField(default=1, verbose_name='age')),
('id_card', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='t3.IdCard', verbose_name='ID card')),
],
),
migrations.AlterModelManagers(
name='stu',
managers=[
('my_manage', django.db.models.manager.Manager()),
],
),
]
|
py | 1a483703323ebf803bbb08192b8afadf11d70356 | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for resetting an instance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags
DETAILED_HELP = {
'brief': 'Reset a virtual machine instance.',
'DESCRIPTION':
"""\
*{command}* is used to perform a hard reset on a Google
Compute Engine virtual machine.
This will not perform a clean shutdown of the guest OS on the instance.
""",
'EXAMPLES':
"""\
To reset an instance named ``test-instance'', run:
$ {command} test-instance
"""
}
class Reset(base.SilentCommand):
"""Reset a virtual machine instance."""
@staticmethod
def Args(parser):
flags.INSTANCES_ARG.AddArgument(parser)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
instance_refs = flags.INSTANCES_ARG.ResolveAsResource(
args, holder.resources,
scope_lister=flags.GetInstanceZoneScopeLister(client))
request_list = []
for instance_ref in instance_refs:
request = client.messages.ComputeInstancesResetRequest(
instance=instance_ref.Name(),
project=instance_ref.project,
zone=instance_ref.zone)
request_list.append((client.apitools_client.instances, 'Reset', request))
return client.MakeRequests(request_list)
Reset.detailed_help = DETAILED_HELP
|
py | 1a4837589cdea2bfb2a0925bd47922c03723627d | import requests
import argparse
import sys
import os
import bs4
import csv
import re
import enum
from datetime import datetime
class LogType(enum.Enum):
crime = "crime"
noncrime = "noncrime"
def _process_html(string, output_writer, log_type):
soup = bs4.BeautifulSoup(string, features="lxml")
table = soup.find(class_="articletext")
header_row = table.find(class_="White")
data1_list = []
data2_list = []
# For some reason each table is formatted slightly differently, so we have to account for that.
if log_type == LogType.crime:
data1_list = [data1 for data1 in table.find_all("tr") if len(data1.attrs) == 1 and "bgcolor" in data1.attrs]
data2_list = [data2 for data2 in table.find_all("tr", class_="body")]
elif log_type == LogType.noncrime:
data1_list = [data1 for data1 in table.find_all("tr") if len(data1.attrs) == 2 and "id" in data1.attrs and "row" in data1.attrs["id"]]
data2_list = [data2 for data2 in table.find_all("tr", class_="body")]
for data1, data2 in zip(data1_list, data2_list):
row = [re.sub(r'[\r\n\t]', '', value.text) for value in data1.find_all("td", limit=5)]
# Format the date column into year-first format.
try:
row[1] = datetime.strptime(row[1], "%m/%d/%Y").strftime("%y/%m/%d")
except ValueError:
pass
data2str = data2.find("td").text
location_str = re.search(r'Location:(.*)\n', data2str).group(1)
nature_str = re.search(r'Nature:(.*)\n', data2str).group(1)
row.append(re.sub(r'[\r\n\t]', '', location_str).strip())
row.append(re.sub(r'[\r\n\t]', '', nature_str).strip())
output_writer.writerow(row)
total_entry_count = table.find("span", style="font-weight: bold", string="of").parent.text.split()[-1]
return int(total_entry_count)
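# _process_html writes one CSV row per log entry, matching the header
# written in scrape(): Case #, Date Reported, Occurrence Interval,
# Disposition, Status, Location, Nature.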
def scrape(scrape_output_dir="./scraped",
starting_offset=0,
max_entries=0,
max_retries=0,
local_files_path=None):
out_dir = os.path.join(scrape_output_dir, str(datetime.now())) # the unique output dir we'll use
os.makedirs(out_dir)
process_chunk = 100 # how many entries (we think) are getting retrieved per request
retry_count = 0
for log_type in LogType:
total_entries = max_entries
entries_processed = starting_offset
with open(os.path.join(out_dir, 'scrape-result-{}.csv'.format(log_type.value)), 'w') as output_file:
output_writer = csv.writer(output_file)
output_writer.writerow(["Case #", "Date Reported", "Occurrence Interval", "Disposition", "Status", "Location", "Nature"])
if local_files_path is None:
while total_entries == 0 or entries_processed < total_entries:
print("Processing entries {} through {}".format(entries_processed, entries_processed + process_chunk))
params = {"offset": str(entries_processed)}
try:
result = requests.get("http://police.gatech.edu/{}log.php".format(log_type.value), params)
result.raise_for_status()
except requests.exceptions.HTTPError:
if max_retries != 0 and retry_count >= max_retries:
print("Exceeded maximum retry count, aborting.")
exit(1)
else:
print("Request for entries starting at {} failed, retrying...".format(entries_processed))
retry_count += 1
continue
retry_count = 0
# Write fetched html to file, naming it [start-entry]-[end-entry]
with open(os.path.join(out_dir, "{}-{}-{}.html"
.format(log_type.value, entries_processed, entries_processed + process_chunk)), 'w') as result_html_file:
result_html_file.write(result.text)
reported_total_entries = _process_html(result.text, output_writer, log_type)
if total_entries == 0:
total_entries = reported_total_entries
entries_processed += process_chunk
else:
# We have a local set of previously-fetched files to use.
local_files = [file for file in os.listdir(local_files_path)
if os.path.isfile(os.path.join(local_files_path, file)) and file.startswith(log_type.value) and file.endswith(".html")]
for filename in local_files:
with open(os.path.join(local_files_path, filename), 'r') as file:
_process_html(file.read(), output_writer, log_type)
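# Usage sketch (illustrative values, not from the source):
# scrape(max_entries=200) fetches the ~200 most recent entries per log type
# in chunks of 100 and writes scrape-result-crime.csv and
# scrape-result-noncrime.csv under ./scraped/<timestamp>/.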
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description="Scrape GTPD crime/non-crime logs")
argparser.add_argument("--scrape-output-dir", help="Where to save the scraped data.", default="./scraped")
argparser.add_argument("--starting-offset", help="Collection offset from the most recent log entry.", default=0)
argparser.add_argument("--max-entries", help="Maximum number of entries to gather from starting point."
"0 means no limit.", default=0, type=int)
argparser.add_argument("--max-retries", help="Maximum number of retries. 0 means no limit.", default=5)
argparser.add_argument("--local-files", help="Directory containing a set of local html files that contain GTPD"
"crime tables.")
arg_list = sys.argv.copy()
arg_list.pop(0)
args = argparser.parse_args(arg_list)
scrape(scrape_output_dir=args.scrape_output_dir,
starting_offset=args.starting_offset,
max_entries=args.max_entries,
max_retries=args.max_retries,
local_files_path=args.local_files)
|
py | 1a4838d9a49f1b208287456056c9707681d45861 | # Copyright Brett Fraley 2017
class ShortMemory:
pass
class LongMemory:
pass
class FileMemory:
pass
class RelationalMemory:
pass
class MemoryEvent:
pass
class ProvenMemory:
pass
class UnprovedMemory:
pass
class MemorySourceEntity:
pass
class MemoryThread:
pass
class MemoryGroup:
pass
class MemoryCrowd:
pass
class MemorySlice:
pass
class MemoryBit:
pass
class MemoryPurpose:
pass
class MemoryImpact:
pass
class MemoryReflection:
pass
class MemoryQuery:
pass
class MemoryResult:
pass
class MemoryOutcome:
pass
class MemoryEmotionalRanking:
pass
class MemoryAddition:
pass
class MemorySubtraction:
pass
|
py | 1a483a237c9aa5a2aa64365d5e8af68767156aac | import json
import zlib
def compress(message):
try:
# json.dumps returns text; encode to bytes so zlib accepts it on Python 3.
return zlib.compress(
json.dumps(message,
separators=(',', ':')).encode('utf-8'),
9
)
except Exception:
return ''
def decompress(message):
try:
return json.loads(
zlib.decompress(message).decode('utf-8')
)
except Exception:
return {}
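# Round-trip sketch (illustrative values, not from the source):
# packed = compress({'a': 1}) # zlib-compressed JSON bytes
# decompress(packed) # -> {'a': 1}
# On failure compress() returns '' and decompress() returns {}.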
def create_id(ip_address):
# Stub in the source: no implementation provided, so it returns None.
return
|
py | 1a483a2a05460aa52c27b4695431d4662d0c128d | import graphgallery
import tensorflow as tf
graphgallery.set_memory_growth()
print("GraphGallery version: ", graphgallery.__version__)
print("TensorFlow version: ", tf.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
from graphgallery.gallery import Deepwalk
trainer = Deepwalk(graph).process().build()
his = trainer.train(splits.train_nodes)
results = trainer.test(splits.test_nodes)
print(f'Test accuracy {results.accuracy:.2%}')
|
py | 1a483a4e50c0dfdfa8492a3124d6d184adaf7ddb | '''
Simulation Based on Hippocampus Recordings
Copyright Nate Sutton 2015
References:
Data from CRCNS.org hc3.
Izhikevich neuron parameters from:
http://f1000research.com/articles/3-104/v1
'''
import pylab
import nest
import math as math
import numpy as np
'''
Create objects to run experiment with
'''
multimeter = nest.Create("multimeter",10)
nest.SetStatus(multimeter, {"withtime":True, "record_from":["V_m"]})
multimeter2 = nest.Create("multimeter")
nest.SetStatus(multimeter2, {"withtime":True, "record_from":["V_m"]})
spikedetector_e_c_3 = nest.Create("spike_detector", params={"withgid": True, "withtime": True})
spikedetector_e_c_5 = nest.Create("spike_detector", params={"withgid": True, "withtime": True})
spikedetector_c_a_1 = nest.Create("spike_detector", params={"withgid": True, "withtime": True})
'''noise = nest.Create("poisson_generator", 2)
nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])'''
e_c_3_layer = nest.Create("izhikevich",500,{'V_m':-70.0,'I_e':-160.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
e_c_5_layer = nest.Create("izhikevich",500,{'V_m':-70.0,'I_e':-180.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
c_a_1_layer = nest.Create("izhikevich",500,{'V_m':-70.0,'I_e':-180.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
'''
Form connections between neurons and run the sim
NOTE: the neurons may need to be split into excitatory (Ex) and
inhibitory (In) groups within layers for connections.
With a mismatched number of neurons between layers,
how are connections processed?
'''
'''
Synapses
'''
spike_time_incr = 5.0
def createSyn(input_layer, output_layer, fire_rate_ratio, syn_weight, neuron_range):
'''
neuron_range: min_index, max_index, total_neurons
Note: uneven numbers of neurons in layers could be
supported later, but for now the layers are even.
A ratio of 1.0 creates 50% ex and 50% inh
2.0 creates 66% ex and 33% inh
0.5 creates 33% ex and 66% inh
TODO: check if the ratio calculation works exactly right
TODO: for now synapses are one-to-one to control the ratio of responses.
In the future other schemes, e.g. one-to-many, should be added while
controlling activity between layers.
Note: the number of excitatory connections must fully control the
amount of firing for each subsequent layer; no original firing
occurs in any layer but the first.
'''
times_greater_ratio = math.ceil(fire_rate_ratio)
syn_dict = {"weight": syn_weight}
min_index = neuron_range[0]
max_index = neuron_range[1]
total_neurons = neuron_range[2]
total_range = range(math.floor((max_index-min_index)*total_neurons))
#print(total_range[-1])
total_range = np.array(total_range) + math.floor(min_index*total_neurons)
#print(total_range[-1])
len_in_layer = len(total_range)
#print(len_in_layer)
#len_out_layer = len(output_layer)
for time_greater in range(times_greater_ratio):
adjusted_delay = 0.1 + (spike_time_incr * time_greater)
adjusted_conn_total = len_in_layer
if (time_greater==(times_greater_ratio-1)):
adjusted_conn_total = math.floor(len_in_layer*(fire_rate_ratio-(times_greater_ratio-1)))
syn_dict = {"weight": syn_weight, "delay":adjusted_delay}
print(adjusted_conn_total)
for i in range(adjusted_conn_total):
#print(total_range[0])
#print(i)
n_i = total_range[i]
nest.Connect([input_layer[n_i]], [output_layer[n_i]], "one_to_one", syn_dict)
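# Worked example (a sketch): with fire_rate_ratio=1.5 over 100 source
# neurons, times_greater_ratio = ceil(1.5) = 2, so createSyn makes one full
# one-to-one pass (100 connections at delay 0.1) plus a partial pass over
# floor(100 * 0.5) = 50 neurons at delay 0.1 + 5.0 = 5.1.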
def eval_syn_weight(firing_ratio, initial_firing, region):
'''
Formula fitting tool used from here:
http://www.xuru.org/rt/MLR.asp
'''
if region == "ec3_to_ec5":
x1 = firing_ratio
x2 = initial_firing
w1 = 25.52143243
w2 = 1.057133539*(10**-2)
w3 = 25.75235337
y = w1 * x1 + w2 * x2 + w3
return y
elif region == "ec5_to_ca1":
x1 = firing_ratio
x2 = initial_firing
w1 = 5.128145363
w2 = 9.81585591*(10**-2)
w3 = 61.24183811
y = w1 * x1 + w2 * x2 + w3
return y
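# Worked example (a sketch): eval_syn_weight(1.5, 500, "ec3_to_ec5")
# = 25.52143243*1.5 + 0.01057133539*500 + 25.75235337 ~= 69.32.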
syn_weight = eval_syn_weight(1.4917, 582.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,1.4917, syn_weight, [0.0, 0.233, len(e_c_3_layer)])
syn_weight = eval_syn_weight(2.2081, 332.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,2.2081, syn_weight, [0.233, 0.367, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(2.2081, 500, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,2.2081, syn_weight, [0.367, 0.567, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(0.6152, 250, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,0.6152, syn_weight, [0.567, 0.667, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(0.3024, 167.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,0.3024, syn_weight, [0.667, 0.733, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(0.3024, 667.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,0.3024, syn_weight, [0.733, 1.0, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(6.8897, 865, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,6.8897, syn_weight, [0.0, 0.233, len(e_c_3_layer)])
syn_weight = eval_syn_weight(4.6546, 725, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,4.6546, syn_weight, [0.233, 0.367, len(e_c_3_layer)])
syn_weight = eval_syn_weight(1.6016, 1090, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,1.6016, syn_weight, [0.367, 0.567, len(e_c_3_layer)])
syn_weight = eval_syn_weight(5.7480, 195, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,5.7480, syn_weight, [0.567, 0.733, len(e_c_3_layer)])
syn_weight = eval_syn_weight(7.6722, 200, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,7.6722, syn_weight, [0.733, 1.0, len(e_c_3_layer)])
nest.Connect(multimeter, e_c_3_layer)
nest.Connect(multimeter2, c_a_1_layer)
nest.Connect(e_c_3_layer, spikedetector_e_c_3)
nest.Connect(e_c_5_layer, spikedetector_e_c_5)
nest.Connect(c_a_1_layer, spikedetector_c_a_1)
'''
NOTE: filtering of spike counts after a certain
time happens later and therefore only a portion
of sim time is counted.
'''
nest.Simulate(2000.0)
'''
Record activity
'''
dmm = nest.GetStatus(multimeter)[0]
Vms = dmm["events"]["V_m"]
ts = dmm["events"]["times"]
dmm2 = nest.GetStatus(multimeter2)[0]
Vms2 = dmm2["events"]["V_m"]
ts2 = dmm2["events"]["times"]
'''
Plot results
'''
#pylab.figure(1)
#pylab.plot(ts, Vms)
#pylab.figure(2)
#pylab.plot(ts2, Vms2)
dSD = nest.GetStatus(spikedetector_e_c_3,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
print ('number of spikes')
print(sum(ts>800))
#pylab.figure(2)
#pylab.plot(ts, evs, ".")
dSD = nest.GetStatus(spikedetector_e_c_5,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
print ('number of spikes')
print(sum(ts>800))
#pylab.figure(3)
#pylab.plot(ts, evs, ".")
dSD = nest.GetStatus(spikedetector_c_a_1,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
print ('number of spikes')
print(sum(ts>800))
#pylab.figure(4)
#pylab.plot(ts, evs, ".")
pylab.show() |
py | 1a483b0c3d6eff244d4b833a85e19ee0a5d2c5b2 | """
Copyright 2021 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging, kopf
from seeder_ccloud.openstack.openstack_helper import OpenstackHelper
from seeder_ccloud import utils
config = utils.Config()
@kopf.on.update(config.crd_info['plural'], annotations={'operatorVersion': config.operator_version}, field='spec.openstack.share_types')
@kopf.on.create(config.crd_info['plural'], annotations={'operatorVersion': config.operator_version}, field='spec.openstack.share_types')
def seed_share_types_handler(memo: kopf.Memo, new, old, name, annotations, **_):
logging.info('seeding {} share_types'.format(name))
if not config.is_dependency_successful(annotations):
raise kopf.TemporaryError('error seeding {}: {}'.format(name, 'dependencies error'), delay=30)
try:
changed = utils.get_changed_seeds(old, new)
Share_Types(memo['args'], memo['dry_run']).seed(changed)
except Exception as error:
raise kopf.TemporaryError('error seeding {}: {}'.format(name, error), delay=30)
class Share_Types():
def __init__(self, args, dry_run=False):
self.dry_run = dry_run
self.args = args
self.openstack = OpenstackHelper(args)
def seed(self, share_types):
logging.info('seeding manila share_types')
for share_type in share_types:
self._seed_share_types(share_type)
def _seed_share_types(self, share_type):
""" seed manila share type """
logging.debug("seeding Manila share type %s" % share_type)
# initialize manila client
try:
client = self.openstack.get_manilaclient("2.40")
manager = client.share_types
except Exception as e:
logging.error("Fail to initialize client: %s" % e)
raise
def get_type_by_name(name):
opts = {'all_tenants': 1}
for t in manager.list(search_opts=opts):
if t.name == name:
return t
return None
def validate_share_type(sharetype):
sharetype = self.openstack.sanitize(sharetype, [
'name', 'description', 'is_public', 'specs', 'extra_specs'])
specs = sharetype.pop('specs')
try:
sharetype['extra_specs'].update(specs)
except KeyError:
sharetype['extra_specs'] = specs
return sharetype
def update_type(stype, extra_specs):
to_be_unset = []
for k in list(stype.extra_specs.keys()):
if k not in list(extra_specs.keys()):
to_be_unset.append(k)
stype.unset_keys(to_be_unset)
stype.set_keys(extra_specs)
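# e.g. if stype.extra_specs is {'a': 1, 'b': 2} and the desired extra_specs
# is {'b': 2, 'c': 3}, update_type() unsets 'a' and then sets 'b' and 'c'.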
def create_type(sharetype):
extra_specs = sharetype['extra_specs']
try:
dhss = extra_specs.pop('driver_handles_share_servers')
sharetype['spec_driver_handles_share_servers'] = dhss
except KeyError:
pass
try:
snapshot_support = extra_specs.pop('snapshot_support')
sharetype['spec_snapshot_support'] = snapshot_support
except KeyError:
pass
sharetype['extra_specs'] = extra_specs
try:
manager.create(**sharetype)
except Exception:
# Retry without the description field (assumption: some Manila API
# versions may not accept it).
sharetype.pop('description')
manager.create(**sharetype)
# validate the share type
share_type = validate_share_type(share_type)
logging.debug("Validated Manila share type %s" % share_type)
# update share type if exists
stype = get_type_by_name(share_type['name'])
if stype:
try:
if not self.dry_run:
update_type(stype, share_type['extra_specs'])
except Exception as e:
logging.error("Failed to update share type %s: %s" % (share_type, e))
raise
else:
try:
if not self.dry_run:
create_type(share_type)
except Exception as e:
logging.error("Failed to create share type %s: %s" % (share_type, e))
raise |
py | 1a483b66e74602483a2ad45f506a0c7c93b1fe1e | import pipes
from galaxy import exceptions
from galaxy.util.none_like import NoneDataset
from galaxy.util import odict
from galaxy.util.object_wrapper import wrap_with_safe_string
from logging import getLogger
log = getLogger( __name__ )
# Fields in .log files corresponding to paths must have one of the following
# field names, and all such fields are assumed to be paths. This is to allow
# remote ComputeEnvironments (such as the one used by LWR) to determine what
# values to rewrite or transfer...
PATH_ATTRIBUTES = [ "path" ]
# ... by default though - don't rewrite anything (if no ComputeEnvironment
# defined or the ComputeEnvironment doesn't supply a rewriter).
DEFAULT_PATH_REWRITER = lambda x: x
class ToolParameterValueWrapper( object ):
"""
Base class for objects that wrap a Tool Parameter and Value.
"""
def __nonzero__( self ):
return bool( self.value )
def get_display_text( self, quote=True ):
"""
Returns a string containing the value that would be displayed to the user in the tool interface.
When quote is True (default), the string is escaped for e.g. command-line usage.
"""
rval = self.input.value_to_display_text( self.value, self.input.tool.app ) or ''
if quote:
return pipes.quote( rval ) or "''" # pipes.quote in Python < 2.7 returns an empty string instead of the expected quoted empty string
return rval
class RawObjectWrapper( ToolParameterValueWrapper ):
"""
Wraps an object so that __str__ returns module_name:class_name.
"""
def __init__( self, obj ):
self.obj = obj
def __nonzero__( self ):
return bool( self.obj ) # FIXME: would it be safe/backwards compatible to rename .obj to .value, so that we can just inherit this method?
def __str__( self ):
try:
return "%s:%s" % (self.obj.__module__, self.obj.__class__.__name__)
except:
#Most likely None, which lacks __module__.
return str( self.obj )
def __getattr__( self, key ):
return getattr( self.obj, key )
class LibraryDatasetValueWrapper( ToolParameterValueWrapper ):
"""
Wraps an input so that __str__ gives the "param_dict" representation.
"""
def __init__( self, input, value, other_values={} ):
self.input = input
self.value = value
self._other_values = other_values
self.counter = 0
def __str__( self ):
return self.value
def __iter__( self ):
return self
def next( self ):
if self.counter >= len(self.value):
raise StopIteration
self.counter += 1
return self.value[ self.counter - 1 ]
def __getattr__( self, key ):
return getattr( self.value, key )
class InputValueWrapper( ToolParameterValueWrapper ):
"""
Wraps an input so that __str__ gives the "param_dict" representation.
"""
def __init__( self, input, value, other_values={} ):
self.input = input
self.value = value
self._other_values = other_values
def __str__( self ):
return self.input.to_param_dict_string( self.value, self._other_values )
def __getattr__( self, key ):
return getattr( self.value, key )
class SelectToolParameterWrapper( ToolParameterValueWrapper ):
"""
Wraps a SelectToolParameter so that __str__ returns the selected value, but all other
attributes are accessible.
"""
class SelectToolParameterFieldWrapper:
"""
Provide access to any field by name or index for this particular value.
Only applicable for dynamic_options selects, which have more than simple 'options' defined (name, value, selected).
"""
def __init__( self, input, value, other_values, path_rewriter ):
self._input = input
self._value = value
self._other_values = other_values
self._fields = {}
self._path_rewriter = path_rewriter
def __getattr__( self, name ):
if name not in self._fields:
self._fields[ name ] = self._input.options.get_field_by_name_for_value( name, self._value, None, self._other_values )
values = map( str, self._fields[ name ] )
if name in PATH_ATTRIBUTES:
# If we infer this is a path, rewrite it if needed.
values = map( self._path_rewriter, values )
return self._input.separator.join( values )
def __init__( self, input, value, app, other_values={}, path_rewriter=None ):
self.input = input
self.value = value
self.input.value_label = input.value_to_display_text( value, app )
self._other_values = other_values
self._path_rewriter = path_rewriter or DEFAULT_PATH_REWRITER
self.fields = self.SelectToolParameterFieldWrapper( input, value, other_values, self._path_rewriter )
def __str__( self ):
# Assuming value is never a path - otherwise would need to pass
# along following argument value_map=self._path_rewriter.
return self.input.to_param_dict_string( self.value, other_values=self._other_values )
def __getattr__( self, key ):
return getattr( self.input, key )
class DatasetFilenameWrapper( ToolParameterValueWrapper ):
"""
Wraps a dataset so that __str__ returns the filename, but all other
attributes are accessible.
"""
class MetadataWrapper:
"""
Wraps a Metadata Collection to return MetadataParameters wrapped
according to the metadata spec. Methods implemented to match behavior
of a Metadata Collection.
"""
def __init__( self, metadata ):
self.metadata = metadata
def __getattr__( self, name ):
rval = self.metadata.get( name, None )
if name in self.metadata.spec:
if rval is None:
rval = self.metadata.spec[name].no_value
rval = self.metadata.spec[ name ].param.to_safe_string( rval )
# Store this value, so we don't need to recalculate if needed
# again
setattr( self, name, rval )
else:
# escape string value of non-defined metadata value
rval = wrap_with_safe_string( rval )
return rval
def __nonzero__( self ):
return self.metadata.__nonzero__()
def __iter__( self ):
return self.metadata.__iter__()
def get( self, key, default=None ):
try:
return getattr( self, key )
except:
return default
def items( self ):
return iter( [ ( k, self.get( k ) ) for k, v in self.metadata.items() ] )
def __init__( self, dataset, datatypes_registry=None, tool=None, name=None, dataset_path=None, identifier=None ):
if not dataset:
try:
# TODO: allow this to work when working with grouping
ext = tool.inputs[name].extensions[0]
except:
ext = 'data'
self.dataset = wrap_with_safe_string( NoneDataset( datatypes_registry=datatypes_registry, ext=ext ), no_wrap_classes=ToolParameterValueWrapper )
else:
# Tool wrappers should not normally be accessing .dataset directly,
# so we will wrap it and keep the original around for file paths
# Should we name this .value to maintain consistency with most other ToolParameterValueWrapper?
self.unsanitized = dataset
self.dataset = wrap_with_safe_string( dataset, no_wrap_classes=ToolParameterValueWrapper )
self.metadata = self.MetadataWrapper( dataset.metadata )
self.datatypes_registry = datatypes_registry
self.false_path = getattr( dataset_path, "false_path", None )
self.false_extra_files_path = getattr( dataset_path, "false_extra_files_path", None )
self._element_identifier = identifier
@property
def element_identifier( self ):
identifier = self._element_identifier
if identifier is None:
identifier = self.name
return identifier
@property
def is_collection( self ):
return False
def is_of_type( self, *exts ):
datatypes = [ self.datatypes_registry.get_datatype_by_extension( e ) for e in exts ]
return self.dataset.datatype.matches_any( datatypes )
def __str__( self ):
if self.false_path is not None:
return self.false_path
else:
return self.unsanitized.file_name
def __getattr__( self, key ):
if self.false_path is not None and key == 'file_name':
# Path to dataset was rewritten for this job.
return self.false_path
elif self.false_extra_files_path is not None and key == 'extra_files_path':
# Path to extra files was rewritten for this job.
return self.false_extra_files_path
elif key == 'extra_files_path':
try:
# Assume it is an output and that this wrapper
# will be set with correct "files_path" for this
# job.
return self.files_path
except AttributeError:
# Otherwise, we have an input - delegate to model and
# object store to find the static location of this
# directory.
try:
return self.unsanitized.extra_files_path
except exceptions.ObjectNotFound:
# NestedObjectstore raises an error here
# instead of just returning a non-existent
# path like DiskObjectStore.
raise
else:
return getattr( self.dataset, key )
def __nonzero__( self ):
return bool( self.dataset )
class HasDatasets:
def _dataset_wrapper( self, dataset, dataset_paths, **kwargs ):
wrapper_kwds = kwargs.copy()
if dataset:
real_path = dataset.file_name
if real_path in dataset_paths:
wrapper_kwds[ "dataset_path" ] = dataset_paths[ real_path ]
return DatasetFilenameWrapper( dataset, **wrapper_kwds )
class DatasetListWrapper( list, ToolParameterValueWrapper, HasDatasets ):
"""
"""
def __init__( self, datasets, dataset_paths=[], **kwargs ):
if not isinstance(datasets, list):
datasets = [datasets]
def to_wrapper( dataset ):
if hasattr(dataset, "element_identifier"):
element = dataset
dataset = element.dataset_instance
kwargs["identifier"] = element.element_identifier
return self._dataset_wrapper( dataset, dataset_paths, **kwargs )
list.__init__( self, map( to_wrapper, datasets ) )
def __str__( self ):
return ','.join( map( str, self ) )
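# Usage sketch: str() on a DatasetListWrapper joins the wrapped datasets'
# string forms (typically file paths) with commas.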
class DatasetCollectionWrapper( ToolParameterValueWrapper, HasDatasets ):
def __init__( self, has_collection, dataset_paths=[], **kwargs ):
super(DatasetCollectionWrapper, self).__init__()
if has_collection is None:
self.__input_supplied = False
return
else:
self.__input_supplied = True
if hasattr( has_collection, "name" ):
# It is a HistoryDatasetCollectionAssociation
collection = has_collection.collection
self.name = has_collection.name
elif hasattr( has_collection, "child_collection" ):
# It is a DatasetCollectionElement instance referencing another collection
collection = has_collection.child_collection
self.name = has_collection.element_identifier
else:
collection = has_collection
self.name = None
elements = collection.elements
element_instances = odict.odict()
element_instance_list = []
for dataset_collection_element in elements:
element_object = dataset_collection_element.element_object
element_identifier = dataset_collection_element.element_identifier
if dataset_collection_element.is_collection:
element_wrapper = DatasetCollectionWrapper( dataset_collection_element, dataset_paths, **kwargs )
else:
element_wrapper = self._dataset_wrapper( element_object, dataset_paths, **kwargs)
element_instances[element_identifier] = element_wrapper
element_instance_list.append( element_wrapper )
self.__element_instances = element_instances
self.__element_instance_list = element_instance_list
def keys( self ):
if not self.__input_supplied:
return []
return self.__element_instances.keys()
@property
def is_collection( self ):
return True
@property
def is_input_supplied( self ):
return self.__input_supplied
def __getitem__( self, key ):
if not self.__input_supplied:
return None
if isinstance( key, int ):
return self.__element_instance_list[ key ]
else:
return self.__element_instances[ key ]
def __getattr__( self, key ):
if not self.__input_supplied:
return None
return self.__element_instances[ key ]
def __iter__( self ):
if not self.__input_supplied:
return [].__iter__()
return self.__element_instance_list.__iter__()
def __nonzero__( self ):
# Fail `#if $param` checks in cheetah if the optional input is
# not specified or if the resulting collection is empty.
return self.__input_supplied and bool( self.__element_instance_list )
|
py | 1a483b7b9d322157ed1ccbf1b6bedf998e11a97f | # !/usr/bin/env python2.7
# File: taxii_parser.py
#
# Copyright (c) 2014-2016 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# This file contains the code to parse a STIX XML file.
import sys
import simplejson as json
import cStringIO
import libtaxii as lt
import stix_parser as sp
def parse_taxii_message(taxii_message, base_connector=None):
number_of_cbs = len(taxii_message.content_blocks)
if (not number_of_cbs):
return {'error': 'no content blocks found'}
packages = []
for i, cb in enumerate(taxii_message.content_blocks):
if (base_connector):
base_connector.send_progress("Parsing Content Block # {0}".format(i))
# Give it to the STIX parser to create the containers and artifacts.
# This is the only place where the STIX parsing is done.
stix_xml = cb.content
cstrio = cStringIO.StringIO()
cstrio.write(stix_xml)
cstrio.seek(0)
package = sp.parse_stix(cstrio, base_connector)
if (package):
# print (json.dumps(package, indent=' ' * 4))
packages.append(package)
return sp.parse_packages(packages, base_connector)
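# Usage sketch: given a parsed TAXII message, parse_taxii_message() runs each
# content block's STIX XML through the STIX parser and returns the combined
# result of sp.parse_packages() (see the __main__ block below).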
if __name__ == '__main__':
import pudb
pudb.set_trace()
results = None
with open(sys.argv[1]) as f:
# first try to parse it as a taxii message
try:
taxii_msg = lt.tm11.get_message_from_xml(f.read())
except:
# Now try it as a STIX document
try:
f.seek(0)
package = sp.parse_stix(f, None)
if (package):
packages = [package]
results = sp.parse_packages(packages, None)
except:
raise
else:
results = parse_taxii_message(taxii_msg, None)
# import pprint;pprint.pprint(results)
with open('./taxii-parsed.json', 'w') as f:
f.write(json.dumps(results, indent=' ' * 4))
|
py | 1a483c0c331bf1d38c7e63ff72e9fdef5f56fb5b | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import py3to2
from robot.variables import VariableIterator
@py3to2
class Token(object):
"""Token representing piece of Robot Framework data.
Each token has type, value, line number, column offset and end column
offset in :attr:`type`, :attr:`value`, :attr:`lineno`, :attr:`col_offset`
and :attr:`end_col_offset` attributes, respectively. Tokens representing
error also have their error message in :attr:`error` attribute.
Token types are declared as class attributes such as :attr:`SETTING_HEADER`
and :attr:`EOL`. Values of these constants have changed slightly in Robot
Framework 4.0 and they may change again in the future. It is thus safer
to use the constants, not their values, when types are needed. For example,
use ``Token(Token.EOL)`` instead of ``Token('EOL')`` and
``token.type == Token.EOL`` instead of ``token.type == 'EOL'``.
If :attr:`value` is not given when :class:`Token` is initialized and
:attr:`type` is :attr:`IF`, :attr:`ELSE_IF`, :attr:`ELSE`, :attr:`FOR`,
:attr:`END`, :attr:`WITH_NAME` or :attr:`CONTINUATION`, the value is
automatically set to the correct marker value like ``'IF'`` or ``'ELSE IF'``.
If :attr:`type` is :attr:`EOL` in this case, the value is set to ``'\\n'``.
"""
SETTING_HEADER = 'SETTING HEADER'
VARIABLE_HEADER = 'VARIABLE HEADER'
TESTCASE_HEADER = 'TESTCASE HEADER'
KEYWORD_HEADER = 'KEYWORD HEADER'
COMMENT_HEADER = 'COMMENT HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
KEYWORD_NAME = 'KEYWORD NAME'
DOCUMENTATION = 'DOCUMENTATION'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
METADATA = 'METADATA'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
FORCE_TAGS = 'FORCE TAGS'
DEFAULT_TAGS = 'DEFAULT TAGS'
LIBRARY = 'LIBRARY'
RESOURCE = 'RESOURCE'
VARIABLES = 'VARIABLES'
SETUP = 'SETUP'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TIMEOUT = 'TIMEOUT'
TAGS = 'TAGS'
ARGUMENTS = 'ARGUMENTS'
RETURN = 'RETURN'
NAME = 'NAME'
VARIABLE = 'VARIABLE'
ARGUMENT = 'ARGUMENT'
ASSIGN = 'ASSIGN'
KEYWORD = 'KEYWORD'
WITH_NAME = 'WITH NAME'
FOR = 'FOR'
FOR_SEPARATOR = 'FOR SEPARATOR'
END = 'END'
IF = 'IF'
ELSE_IF = 'ELSE IF'
ELSE = 'ELSE'
SEPARATOR = 'SEPARATOR'
COMMENT = 'COMMENT'
CONTINUATION = 'CONTINUATION'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
FATAL_ERROR = 'FATAL ERROR'
NON_DATA_TOKENS = frozenset((
SEPARATOR,
COMMENT,
CONTINUATION,
EOL,
EOS
))
SETTING_TOKENS = frozenset((
DOCUMENTATION,
SUITE_SETUP,
SUITE_TEARDOWN,
METADATA,
TEST_SETUP,
TEST_TEARDOWN,
TEST_TEMPLATE,
TEST_TIMEOUT,
FORCE_TAGS,
DEFAULT_TAGS,
LIBRARY,
RESOURCE,
VARIABLES,
SETUP,
TEARDOWN,
TEMPLATE,
TIMEOUT,
TAGS,
ARGUMENTS,
RETURN
))
HEADER_TOKENS = frozenset((
SETTING_HEADER,
VARIABLE_HEADER,
TESTCASE_HEADER,
KEYWORD_HEADER,
COMMENT_HEADER
))
ALLOW_VARIABLES = frozenset((
NAME,
ARGUMENT,
TESTCASE_NAME,
KEYWORD_NAME
))
__slots__ = ['type', 'value', 'lineno', 'col_offset', 'error']
def __init__(self, type=None, value=None, lineno=-1, col_offset=-1, error=None):
self.type = type
if value is None:
value = {
Token.IF: 'IF', Token.ELSE_IF: 'ELSE IF', Token.ELSE: 'ELSE',
Token.FOR: 'FOR', Token.END: 'END', Token.CONTINUATION: '...',
Token.EOL: '\n', Token.WITH_NAME: 'WITH NAME'
}.get(type, '')
self.value = value
self.lineno = lineno
self.col_offset = col_offset
self.error = error
@property
def end_col_offset(self):
if self.col_offset == -1:
return -1
return self.col_offset + len(self.value)
def set_error(self, error, fatal=False):
self.type = Token.ERROR if not fatal else Token.FATAL_ERROR
self.error = error
def tokenize_variables(self):
"""Tokenizes possible variables in token value.
Yields the token itself if the token does not allow variables (see
:attr:`Token.ALLOW_VARIABLES`) or its value does not contain
variables. Otherwise yields variable tokens as well as tokens
before, after, or between variables so that they have the same
type as the original token.
"""
if self.type not in Token.ALLOW_VARIABLES:
return self._tokenize_no_variables()
variables = VariableIterator(self.value)
if not variables:
return self._tokenize_no_variables()
return self._tokenize_variables(variables)
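# Example (a sketch): Token(Token.ARGUMENT, 'x-${y}-z', 1, 0).tokenize_variables()
# yields Token(ARGUMENT, 'x-'), Token(VARIABLE, '${y}'), Token(ARGUMENT, '-z'),
# with lineno preserved and col_offset advancing through the value.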
def _tokenize_no_variables(self):
yield self
def _tokenize_variables(self, variables):
lineno = self.lineno
col_offset = self.col_offset
remaining = ''
for before, variable, remaining in variables:
if before:
yield Token(self.type, before, lineno, col_offset)
col_offset += len(before)
yield Token(Token.VARIABLE, variable, lineno, col_offset)
col_offset += len(variable)
if remaining:
yield Token(self.type, remaining, lineno, col_offset)
def __str__(self):
return self.value
def __repr__(self):
type_ = self.type.replace(' ', '_') if self.type else 'None'
error = '' if not self.error else ', %r' % self.error
return 'Token(%s, %r, %s, %s%s)' % (type_, self.value, self.lineno,
self.col_offset, error)
def __eq__(self, other):
return (isinstance(other, Token)
and self.type == other.type
and self.value == other.value
and self.lineno == other.lineno
and self.col_offset == other.col_offset
and self.error == other.error)
def __ne__(self, other):
return not self == other
class EOS(Token):
"""Token representing end of a statement."""
__slots__ = []
def __init__(self, lineno=-1, col_offset=-1):
Token.__init__(self, Token.EOS, '', lineno, col_offset)
@classmethod
def from_token(cls, token):
return EOS(lineno=token.lineno, col_offset=token.end_col_offset)
|
py | 1a483cbb7faf650937e3eac9edc98485a7cdf492 | from __future__ import absolute_import
import logging
import re
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.conf.urls import patterns, include, url
from sentry.plugins.base import plugins
logger = logging.getLogger("sentry.plugins")
def ensure_url(u):
if isinstance(u, (tuple, list)):
return url(*u)
elif not isinstance(u, (RegexURLResolver, RegexURLPattern)):
raise TypeError(
"url must be RegexURLResolver or RegexURLPattern, not %r: %r" % (type(u).__name__, u)
)
return u
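# e.g. ensure_url((r'^foo/$', view)) -> url(r'^foo/$', view), while an
# existing RegexURLPattern/RegexURLResolver passes through unchanged.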
def load_plugin_urls(plugins):
urlpatterns = patterns("")
for plugin in plugins:
try:
urls = plugin.get_project_urls()
if not urls:
continue
urls = [ensure_url(u) for u in urls]
except Exception:
logger.exception("routes.failed", extra={"plugin": type(plugin).__name__})
else:
urlpatterns.append(url(r"^%s/" % re.escape(plugin.slug), include(urls)))
return urlpatterns
urlpatterns = load_plugin_urls(plugins.all())
|
py | 1a483d182768ffaa9acf9a5e52c09d95b3db14b7 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import print_function, unicode_literals
try:
from io import StringIO
except ImportError: # 2.7
from StringIO import StringIO
import os
import os.path
import sys
import unittest
from ...util import Stub, StubProxy
from testing_tools.adapter.errors import UnsupportedCommandError
from testing_tools.adapter.info import TestInfo, TestPath, ParentInfo
from testing_tools.adapter.pytest import (
discover, add_cli_subparser, TestCollector, DiscoveredTests
)
import pytest
class StubSubparsers(StubProxy):
def __init__(self, stub=None, name='subparsers'):
super(StubSubparsers, self).__init__(stub, name)
def add_parser(self, name):
self.add_call('add_parser', None, {'name': name})
return self.return_add_parser
class StubArgParser(StubProxy):
def __init__(self, stub=None):
super(StubArgParser, self).__init__(stub, 'argparser')
def add_argument(self, *args, **kwargs):
self.add_call('add_argument', args, kwargs)
class StubPyTest(StubProxy):
def __init__(self, stub=None):
super(StubPyTest, self).__init__(stub, 'pytest')
self.return_main = 0
def main(self, args, plugins):
self.add_call('main', None, {'args': args, 'plugins': plugins})
return self.return_main
class StubPlugin(StubProxy):
_started = True
def __init__(self, stub=None, tests=None):
super(StubPlugin, self).__init__(stub, 'plugin')
if tests is None:
tests = StubDiscoveredTests(self.stub)
self._tests = tests
def __getattr__(self, name):
if not name.startswith('pytest_'):
raise AttributeError(name)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
class StubDiscoveredTests(StubProxy):
NOT_FOUND = object()
def __init__(self, stub=None):
super(StubDiscoveredTests, self).__init__(stub, 'discovered')
self.return_items = []
self.return_parents = []
def __len__(self):
self.add_call('__len__', None, None)
return len(self.return_items)
def __getitem__(self, index):
self.add_call('__getitem__', (index,), None)
return self.return_items[index]
@property
def parents(self):
self.add_call('parents', None, None)
return self.return_parents
def reset(self):
self.add_call('reset', None, None)
def add_test(self, test, suiteids):
self.add_call('add_test', None, {'test': test, 'suiteids': suiteids})
class FakeFunc(object):
def __init__(self, name):
self.__name__ = name
class FakeMarker(object):
def __init__(self, name):
self.name = name
class StubPytestItem(StubProxy):
kind = 'Function'
_debugging = False
_hasfunc = True
def __init__(self, stub=None, **attrs):
super(StubPytestItem, self).__init__(stub, 'pytest.Item')
if attrs.get('function') is None:
attrs.pop('function', None)
self._hasfunc = False
attrs.setdefault('user_properties', [])
self.__dict__.update(attrs)
if 'own_markers' not in attrs:
self.own_markers = ()
def __getattr__(self, name):
if not self._debugging:
self.add_call(name + ' (attr)', None, None)
if name == 'function':
if not self._hasfunc:
raise AttributeError(name)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
class StubPytestSession(StubProxy):
def __init__(self, stub=None):
super(StubPytestSession, self).__init__(stub, 'pytest.Session')
def __getattr__(self, name):
self.add_call(name + ' (attr)', None, None)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
class StubPytestConfig(StubProxy):
def __init__(self, stub=None):
super(StubPytestConfig, self).__init__(stub, 'pytest.Config')
def __getattr__(self, name):
self.add_call(name + ' (attr)', None, None)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
##################################
# tests
class AddCLISubparserTests(unittest.TestCase):
def test_discover(self):
stub = Stub()
subparsers = StubSubparsers(stub)
parser = StubArgParser(stub)
subparsers.return_add_parser = parser
add_cli_subparser('discover', 'pytest', subparsers)
self.assertEqual(stub.calls, [
('subparsers.add_parser', None, {'name': 'pytest'}),
])
def test_unsupported_command(self):
subparsers = StubSubparsers(name=None)
subparsers.return_add_parser = None
with self.assertRaises(UnsupportedCommandError):
add_cli_subparser('run', 'pytest', subparsers)
with self.assertRaises(UnsupportedCommandError):
add_cli_subparser('debug', 'pytest', subparsers)
with self.assertRaises(UnsupportedCommandError):
add_cli_subparser('???', 'pytest', subparsers)
self.assertEqual(subparsers.calls, [
('add_parser', None, {'name': 'pytest'}),
('add_parser', None, {'name': 'pytest'}),
('add_parser', None, {'name': 'pytest'}),
])
class DiscoverTests(unittest.TestCase):
DEFAULT_ARGS = [
'--collect-only',
]
def test_basic(self):
stub = Stub()
stubpytest = StubPyTest(stub)
plugin = StubPlugin(stub)
expected = []
plugin.discovered = expected
parents, tests = discover([], _pytest_main=stubpytest.main, _plugin=plugin)
self.assertEqual(parents, [])
self.assertEqual(tests, expected)
self.assertEqual(stub.calls, [
('pytest.main', None, {'args': self.DEFAULT_ARGS,
'plugins': [plugin]}),
('discovered.parents', None, None),
('discovered.__len__', None, None),
('discovered.__getitem__', (0,), None),
])
def test_failure(self):
stub = Stub()
pytest = StubPyTest(stub)
pytest.return_main = 2
plugin = StubPlugin(stub)
with self.assertRaises(Exception):
discover([], _pytest_main=pytest.main, _plugin=plugin)
self.assertEqual(stub.calls, [
('pytest.main', None, {'args': self.DEFAULT_ARGS,
'plugins': [plugin]}),
])
def test_stdio_hidden(self):
pytest_stdout = 'spamspamspamspamspamspamspammityspam'
stub = Stub()
def fake_pytest_main(args, plugins):
stub.add_call('pytest.main', None, {'args': args,
'plugins': plugins})
print(pytest_stdout, end='')
return 0
plugin = StubPlugin(stub)
plugin.discovered = []
buf = StringIO()
sys.stdout = buf
try:
discover([], hidestdio=True,
_pytest_main=fake_pytest_main, _plugin=plugin)
finally:
sys.stdout = sys.__stdout__
captured = buf.getvalue()
self.assertEqual(captured, '')
self.assertEqual(stub.calls, [
('pytest.main', None, {'args': self.DEFAULT_ARGS,
'plugins': [plugin]}),
('discovered.parents', None, None),
('discovered.__len__', None, None),
('discovered.__getitem__', (0,), None),
])
def test_stdio_not_hidden(self):
pytest_stdout = 'spamspamspamspamspamspamspammityspam'
stub = Stub()
def fake_pytest_main(args, plugins):
stub.add_call('pytest.main', None, {'args': args,
'plugins': plugins})
print(pytest_stdout, end='')
return 0
plugin = StubPlugin(stub)
plugin.discovered = []
buf = StringIO()
sys.stdout = buf
try:
discover([], hidestdio=False,
_pytest_main=fake_pytest_main, _plugin=plugin)
finally:
sys.stdout = sys.__stdout__
captured = buf.getvalue()
self.assertEqual(captured, pytest_stdout)
self.assertEqual(stub.calls, [
('pytest.main', None, {'args': self.DEFAULT_ARGS,
'plugins': [plugin]}),
('discovered.parents', None, None),
('discovered.__len__', None, None),
('discovered.__getitem__', (0,), None),
])
class CollectorTests(unittest.TestCase):
def test_modifyitems(self):
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
config = StubPytestConfig(stub)
collector = TestCollector(tests=discovered)
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile1 = './test_spam.py'.replace('/', os.path.sep)
relfile2 = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
collector.pytest_collection_modifyitems(session, config, [
StubPytestItem(
stub,
nodeid='test_spam.py::SpamTests::test_one',
name='test_one',
location=('test_spam.py', 12, 'SpamTests.test_one'),
fspath=os.path.join(testroot, 'test_spam.py'),
function=FakeFunc('test_one'),
),
StubPytestItem(
stub,
nodeid='test_spam.py::SpamTests::test_other',
name='test_other',
location=('test_spam.py', 19, 'SpamTests.test_other'),
fspath=os.path.join(testroot, 'test_spam.py'),
function=FakeFunc('test_other'),
),
StubPytestItem(
stub,
nodeid='test_spam.py::test_all',
name='test_all',
location=('test_spam.py', 144, 'test_all'),
fspath=os.path.join(testroot, 'test_spam.py'),
function=FakeFunc('test_all'),
),
StubPytestItem(
stub,
nodeid='test_spam.py::test_each[10-10]',
name='test_each[10-10]',
location=('test_spam.py', 273, 'test_each[10-10]'),
fspath=os.path.join(testroot, 'test_spam.py'),
function=FakeFunc('test_each'),
),
StubPytestItem(
stub,
nodeid=relfile2 + '::All::BasicTests::test_first',
name='test_first',
location=(relfile2, 31, 'All.BasicTests.test_first'),
fspath=os.path.join(testroot, relfile2),
function=FakeFunc('test_first'),
),
StubPytestItem(
stub,
nodeid=relfile2 + '::All::BasicTests::test_each[1+2-3]',
name='test_each[1+2-3]',
location=(relfile2, 62, 'All.BasicTests.test_each[1+2-3]'),
fspath=os.path.join(testroot, relfile2),
function=FakeFunc('test_each'),
own_markers=[FakeMarker(v) for v in [
# supported
'skip', 'skipif', 'xfail',
# duplicate
'skip',
# ignored (pytest-supported)
'parameterize', 'usefixtures', 'filterwarnings',
# ignored (custom)
'timeout',
]],
),
])
self.maxDiff = None
self.assertEqual(stub.calls, [
('discovered.reset', None, None),
('discovered.add_test', None, dict(
suiteids=['test_spam.py::SpamTests'],
test=TestInfo(
id='test_spam.py::SpamTests::test_one',
name='test_one',
path=TestPath(
root=testroot,
relfile=relfile1,
func='SpamTests.test_one',
sub=None,
),
source='{}:{}'.format(relfile1, 12),
markers=None,
parentid='test_spam.py::SpamTests',
),
)),
('discovered.add_test', None, dict(
suiteids=['test_spam.py::SpamTests'],
test=TestInfo(
id='test_spam.py::SpamTests::test_other',
name='test_other',
path=TestPath(
root=testroot,
relfile=relfile1,
func='SpamTests.test_other',
sub=None,
),
source='{}:{}'.format(relfile1, 19),
markers=None,
parentid='test_spam.py::SpamTests',
),
)),
('discovered.add_test', None, dict(
suiteids=[],
test=TestInfo(
id='test_spam.py::test_all',
name='test_all',
path=TestPath(
root=testroot,
relfile=relfile1,
func='test_all',
sub=None,
),
source='{}:{}'.format(relfile1, 144),
markers=None,
parentid='test_spam.py',
),
)),
('discovered.add_test', None, dict(
suiteids=[],
test=TestInfo(
id='test_spam.py::test_each[10-10]',
name='test_each[10-10]',
path=TestPath(
root=testroot,
relfile=relfile1,
func='test_each',
sub=['[10-10]'],
),
source='{}:{}'.format(relfile1, 273),
markers=None,
parentid='test_spam.py::test_each',
),
)),
('discovered.add_test', None, dict(
suiteids=[relfile2 + '::All',
relfile2 + '::All::BasicTests'],
test=TestInfo(
id=relfile2 + '::All::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot,
relfile=relfile2,
func='All.BasicTests.test_first',
sub=None,
),
source='{}:{}'.format(relfile2, 31),
markers=None,
parentid=relfile2 + '::All::BasicTests',
),
)),
('discovered.add_test', None, dict(
suiteids=[relfile2 + '::All',
relfile2 + '::All::BasicTests'],
test=TestInfo(
id=relfile2 + '::All::BasicTests::test_each[1+2-3]',
name='test_each[1+2-3]',
path=TestPath(
root=testroot,
relfile=relfile2,
func='All.BasicTests.test_each',
sub=['[1+2-3]'],
),
source='{}:{}'.format(relfile2, 62),
markers=['expected-failure', 'skip', 'skip-if'],
parentid=relfile2 + '::All::BasicTests::test_each',
),
)),
])
def test_finish(self):
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
session.items = [
StubPytestItem(
stub,
nodeid=relfile + '::SpamTests::test_spam',
name='test_spam',
location=(relfile, 12, 'SpamTests.test_spam'),
fspath=os.path.join(testroot, relfile),
function=FakeFunc('test_spam'),
),
]
collector = TestCollector(tests=discovered)
collector.pytest_collection_finish(session)
self.maxDiff = None
self.assertEqual(stub.calls, [
('discovered.reset', None, None),
('discovered.add_test', None, dict(
suiteids=[relfile + '::SpamTests'],
test=TestInfo(
id=relfile + '::SpamTests::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile=relfile,
func='SpamTests.test_spam',
sub=None,
),
source='{}:{}'.format(relfile, 12),
markers=None,
parentid=relfile + '::SpamTests',
),
)),
])
def test_doctest(self):
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
testroot = '/a/b/c'.replace('/', os.path.sep)
doctestfile = 'x/test_doctest.txt'.replace('/', os.path.sep)
relfile = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
session.items = [
StubPytestItem(
stub,
nodeid=doctestfile + '::test_doctest.txt',
name='test_doctest.txt',
location=(doctestfile, 0, '[doctest] test_doctest.txt'),
fspath=os.path.join(testroot, doctestfile),
kind='DoctestItem',
),
# With --doctest-modules
StubPytestItem(
stub,
nodeid=relfile + '::test_eggs',
name='test_eggs',
location=(relfile, 0, '[doctest] test_eggs'),
fspath=os.path.join(testroot, relfile),
kind='DoctestItem',
),
StubPytestItem(
stub,
nodeid=relfile + '::test_eggs.TestSpam',
name='test_eggs.TestSpam',
location=(relfile, 12, '[doctest] test_eggs.TestSpam'),
fspath=os.path.join(testroot, relfile),
kind='DoctestItem',
),
StubPytestItem(
stub,
nodeid=relfile + '::test_eggs.TestSpam.TestEggs',
name='test_eggs.TestSpam.TestEggs',
location=(relfile, 27, '[doctest] test_eggs.TestSpam.TestEggs'),
fspath=os.path.join(testroot, relfile),
kind='DoctestItem',
),
]
collector = TestCollector(tests=discovered)
collector.pytest_collection_finish(session)
self.maxDiff = None
self.assertEqual(stub.calls, [
('discovered.reset', None, None),
('discovered.add_test', None, dict(
suiteids=[],
test=TestInfo(
id=doctestfile + '::test_doctest.txt',
name='test_doctest.txt',
path=TestPath(
root=testroot,
relfile=doctestfile,
func=None,
),
source='{}:{}'.format(doctestfile, 0),
markers=[],
parentid=doctestfile,
),
)),
('discovered.add_test', None, dict(
suiteids=[],
test=TestInfo(
id=relfile + '::test_eggs',
name='test_eggs',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 0),
markers=[],
parentid=relfile,
),
)),
('discovered.add_test', None, dict(
suiteids=[],
test=TestInfo(
id=relfile + '::test_eggs.TestSpam',
name='test_eggs.TestSpam',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 12),
markers=[],
parentid=relfile,
),
)),
('discovered.add_test', None, dict(
suiteids=[],
test=TestInfo(
id=relfile + '::test_eggs.TestSpam.TestEggs',
name='test_eggs.TestSpam.TestEggs',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 27),
markers=[],
parentid=relfile,
),
)),
])
def test_nested_brackets(self):
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
session.items = [
StubPytestItem(
stub,
nodeid=relfile + '::SpamTests::test_spam[a-[b]-c]',
name='test_spam[a-[b]-c]',
location=(relfile, 12, 'SpamTests.test_spam[a-[b]-c]'),
fspath=os.path.join(testroot, relfile),
function=FakeFunc('test_spam'),
),
]
collector = TestCollector(tests=discovered)
collector.pytest_collection_finish(session)
self.maxDiff = None
self.assertEqual(stub.calls, [
('discovered.reset', None, None),
('discovered.add_test', None, dict(
suiteids=[relfile + '::SpamTests'],
test=TestInfo(
id=relfile + '::SpamTests::test_spam[a-[b]-c]',
name='test_spam[a-[b]-c]',
path=TestPath(
root=testroot,
relfile=relfile,
func='SpamTests.test_spam',
sub=['[a-[b]-c]'],
),
source='{}:{}'.format(relfile, 12),
markers=None,
parentid=relfile + '::SpamTests::test_spam',
),
)),
])
def test_nested_suite(self):
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
session.items = [
StubPytestItem(
stub,
nodeid=relfile + '::SpamTests::Ham::Eggs::test_spam',
name='test_spam',
location=(relfile, 12, 'SpamTests.Ham.Eggs.test_spam'),
fspath=os.path.join(testroot, relfile),
function=FakeFunc('test_spam'),
),
]
collector = TestCollector(tests=discovered)
collector.pytest_collection_finish(session)
self.maxDiff = None
self.assertEqual(stub.calls, [
('discovered.reset', None, None),
('discovered.add_test', None, dict(
suiteids=[
relfile + '::SpamTests',
relfile + '::SpamTests::Ham',
relfile + '::SpamTests::Ham::Eggs',
],
test=TestInfo(
id=relfile + '::SpamTests::Ham::Eggs::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile=relfile,
func='SpamTests.Ham.Eggs.test_spam',
sub=None,
),
source='{}:{}'.format(relfile, 12),
markers=None,
parentid=relfile + '::SpamTests::Ham::Eggs',
),
)),
])
def test_windows(self):
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
testroot = r'c:\a\b\c'
relfile = r'X\Y\Z\test_eggs.py'
session.items = [
StubPytestItem(
stub,
nodeid=relfile + '::SpamTests::test_spam',
name='test_spam',
location=('x/y/z/test_eggs.py', 12, 'SpamTests.test_spam'),
fspath=testroot + '\\' + relfile,
function=FakeFunc('test_spam'),
),
]
collector = TestCollector(tests=discovered)
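        # Emulate Windows path handling so this test also passes on POSIX hosts.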
if os.name != 'nt':
def normcase(path):
path = path.lower()
return path.replace('/', '\\')
collector.NORMCASE = normcase
collector.PATHSEP = '\\'
collector.pytest_collection_finish(session)
self.maxDiff = None
self.assertEqual(stub.calls, [
('discovered.reset', None, None),
('discovered.add_test', None, dict(
suiteids=[relfile + '::SpamTests'],
test=TestInfo(
id=relfile + '::SpamTests::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile=relfile,
func='SpamTests.test_spam',
sub=None,
),
source='{}:{}'.format(relfile, 12),
markers=None,
parentid=relfile + '::SpamTests',
),
)),
])
def test_imported_test(self):
# pytest will even discover tests that were imported from
# another module!
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
srcfile = 'x/y/z/_extern.py'.replace('/', os.path.sep)
session.items = [
StubPytestItem(
stub,
nodeid=relfile + '::SpamTests::test_spam',
name='test_spam',
location=(srcfile, 12, 'SpamTests.test_spam'),
fspath=os.path.join(testroot, relfile),
function=FakeFunc('test_spam'),
),
]
collector = TestCollector(tests=discovered)
collector.pytest_collection_finish(session)
self.maxDiff = None
self.assertEqual(stub.calls, [
('discovered.reset', None, None),
('discovered.add_test', None, dict(
suiteids=[relfile + '::SpamTests'],
test=TestInfo(
id=relfile + '::SpamTests::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile=relfile,
func='SpamTests.test_spam',
sub=None,
),
source='{}:{}'.format(srcfile, 12),
markers=None,
parentid=relfile + '::SpamTests',
),
)),
])
class DiscoveredTestsTests(unittest.TestCase):
def test_list(self):
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = 'test_spam.py'
relfileid = os.path.join('.', relfile)
tests = [
TestInfo(
id=relfile + '::test_each[10-10]',
name='test_each[10-10]',
path=TestPath(
root=testroot,
relfile=relfile,
func='test_each',
sub=['[10-10]'],
),
source='{}:{}'.format(relfile, 10),
markers=None,
parentid=relfile + '::test_each',
),
TestInfo(
id=relfile + '::All::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot,
relfile=relfile,
func='All.BasicTests.test_first',
sub=None,
),
source='{}:{}'.format(relfile, 61),
markers=None,
parentid=relfile + '::All::BasicTests',
),
]
allsuiteids = [
[],
[relfile + '::All',
relfile + '::All::BasicTests',
],
]
expected = [test._replace(id=os.path.join('.', test.id),
parentid=os.path.join('.', test.parentid))
for test in tests]
discovered = DiscoveredTests()
for test, suiteids in zip(tests, allsuiteids):
discovered.add_test(test, suiteids)
size = len(discovered)
items = [discovered[0], discovered[1]]
snapshot = list(discovered)
self.maxDiff = None
self.assertEqual(size, 2)
self.assertEqual(items, expected)
self.assertEqual(snapshot, expected)
def test_reset(self):
testroot = '/a/b/c'.replace('/', os.path.sep)
discovered = DiscoveredTests()
discovered.add_test(
TestInfo(
id='test_spam.py::test_each',
name='test_each',
path=TestPath(
root=testroot,
relfile='test_spam.py',
func='test_each',
),
source='{}:{}'.format('test_spam.py', 10),
markers=[],
parentid='test_spam.py',
),
[])
before = len(discovered), len(discovered.parents)
discovered.reset()
after = len(discovered), len(discovered.parents)
self.assertEqual(before, (1, 2))
self.assertEqual(after, (0, 0))
def test_parents(self):
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = 'x/y/z/test_spam.py'.replace('/', os.path.sep)
relfileid = os.path.join('.', relfile)
tests = [
TestInfo(
id=relfile + '::test_each[10-10]',
name='test_each[10-10]',
path=TestPath(
root=testroot,
relfile=relfile,
func='test_each',
sub=['[10-10]'],
),
source='{}:{}'.format(relfile, 10),
markers=None,
parentid=relfile + '::test_each',
),
TestInfo(
id=relfile + '::All::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot,
relfile=relfile,
func='All.BasicTests.test_first',
sub=None,
),
source='{}:{}'.format(relfile, 61),
markers=None,
parentid=relfile + '::All::BasicTests',
),
]
allsuiteids = [
[],
[relfile + '::All',
relfile + '::All::BasicTests',
],
]
discovered = DiscoveredTests()
for test, suiteids in zip(tests, allsuiteids):
discovered.add_test(test, suiteids)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(parents, [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id='./x'.replace('/', os.path.sep),
kind='folder',
name='x',
root=testroot,
parentid='.',
),
ParentInfo(
id='./x/y'.replace('/', os.path.sep),
kind='folder',
name='y',
root=testroot,
parentid='./x'.replace('/', os.path.sep),
),
ParentInfo(
id='./x/y/z'.replace('/', os.path.sep),
kind='folder',
name='z',
root=testroot,
parentid='./x/y'.replace('/', os.path.sep),
),
ParentInfo(
id=relfileid,
kind='file',
name=os.path.basename(relfile),
root=testroot,
parentid=os.path.dirname(relfileid),
),
ParentInfo(
id=relfileid + '::All',
kind='suite',
name='All',
root=testroot,
parentid=relfileid,
),
ParentInfo(
id=relfileid + '::All::BasicTests',
kind='suite',
name='BasicTests',
root=testroot,
parentid=relfileid + '::All',
),
ParentInfo(
id=relfileid + '::test_each',
kind='function',
name='test_each',
root=testroot,
parentid=relfileid,
),
])
def test_add_test_simple(self):
testroot = '/a/b/c'.replace('/', os.path.sep)
test = TestInfo(
id='test_spam.py::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile='test_spam.py',
func='test_spam',
),
source='{}:{}'.format('test_spam.py', 11),
markers=[],
parentid='test_spam.py',
)
expected = test._replace(id=os.path.join('.', test.id),
parentid=os.path.join('.', test.parentid))
discovered = DiscoveredTests()
before = list(discovered), discovered.parents
discovered.add_test(test, [])
after = list(discovered), discovered.parents
self.maxDiff = None
self.assertEqual(before, ([], []))
self.assertEqual(after, ([expected], [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id=os.path.join('.', 'test_spam.py'),
kind='file',
name='test_spam.py',
root=testroot,
parentid='.',
),
]))
def test_multiroot(self):
# the first root
testroot1 = '/a/b/c'.replace('/', os.path.sep)
relfile1 = 'test_spam.py'
relfileid1 = os.path.join('.', relfile1)
alltests = [
TestInfo(
id=relfile1 + '::test_spam',
name='test_spam',
path=TestPath(
root=testroot1,
relfile=relfile1,
func='test_spam',
),
source='{}:{}'.format(relfile1, 10),
markers=[],
parentid=relfile1,
),
]
allsuiteids = [
[],
]
# the second root
testroot2 = '/x/y/z'.replace('/', os.path.sep)
relfile2 = 'w/test_eggs.py'
relfileid2 = os.path.join('.', relfile2)
alltests.extend([
TestInfo(
                id=relfile2 + '::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot2,
relfile=relfile2,
func='BasicTests.test_first',
),
source='{}:{}'.format(relfile2, 61),
markers=[],
parentid=relfile2 + '::BasicTests',
),
])
allsuiteids.extend([
[relfile2 + '::BasicTests',
],
])
discovered = DiscoveredTests()
for test, suiteids in zip(alltests, allsuiteids):
discovered.add_test(test, suiteids)
tests = list(discovered)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(tests, [
# the first root
TestInfo(
id=relfileid1 + '::test_spam',
name='test_spam',
path=TestPath(
root=testroot1,
relfile=relfile1,
func='test_spam',
),
source='{}:{}'.format(relfile1, 10),
markers=[],
parentid=relfileid1,
),
            # the second root
TestInfo(
                id=relfileid2 + '::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot2,
relfile=relfile2,
func='BasicTests.test_first',
),
source='{}:{}'.format(relfile2, 61),
markers=[],
parentid=relfileid2 + '::BasicTests',
),
])
self.assertEqual(parents, [
# the first root
ParentInfo(
id='.',
kind='folder',
name=testroot1,
),
ParentInfo(
id=relfileid1,
kind='file',
name=os.path.basename(relfile1),
root=testroot1,
parentid=os.path.dirname(relfileid1),
),
            # the second root
ParentInfo(
id='.',
kind='folder',
name=testroot2,
),
ParentInfo(
id='./w'.replace('/', os.path.sep),
kind='folder',
name='w',
root=testroot2,
parentid='.',
),
ParentInfo(
id=relfileid2,
kind='file',
name=os.path.basename(relfile2),
root=testroot2,
parentid=os.path.dirname(relfileid2),
),
ParentInfo(
id=relfileid2 + '::BasicTests',
kind='suite',
name='BasicTests',
root=testroot2,
parentid=relfileid2,
),
])
def test_doctest(self):
stub = Stub()
testroot = '/a/b/c'.replace('/', os.path.sep)
doctestfile = './x/test_doctest.txt'.replace('/', os.path.sep)
relfile = './x/y/z/test_eggs.py'.replace('/', os.path.sep)
alltests = [
TestInfo(
id=doctestfile + '::test_doctest.txt',
name='test_doctest.txt',
path=TestPath(
root=testroot,
relfile=doctestfile,
func=None,
),
source='{}:{}'.format(doctestfile, 0),
markers=[],
parentid=doctestfile,
),
# With --doctest-modules
TestInfo(
id=relfile + '::test_eggs',
name='test_eggs',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 0),
markers=[],
parentid=relfile,
),
TestInfo(
id=relfile + '::test_eggs.TestSpam',
name='test_eggs.TestSpam',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 12),
markers=[],
parentid=relfile,
),
TestInfo(
id=relfile + '::test_eggs.TestSpam.TestEggs',
name='test_eggs.TestSpam.TestEggs',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 27),
markers=[],
parentid=relfile,
),
]
discovered = DiscoveredTests()
for test in alltests:
discovered.add_test(test, [])
tests = list(discovered)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(tests, alltests)
self.assertEqual(parents, [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id='./x'.replace('/', os.path.sep),
kind='folder',
name='x',
root=testroot,
parentid='.',
),
ParentInfo(
id=doctestfile,
kind='file',
name=os.path.basename(doctestfile),
root=testroot,
parentid=os.path.dirname(doctestfile),
),
ParentInfo(
id='./x/y'.replace('/', os.path.sep),
kind='folder',
name='y',
root=testroot,
parentid='./x'.replace('/', os.path.sep),
),
ParentInfo(
id='./x/y/z'.replace('/', os.path.sep),
kind='folder',
name='z',
root=testroot,
parentid='./x/y'.replace('/', os.path.sep),
),
ParentInfo(
id=relfile,
kind='file',
name=os.path.basename(relfile),
root=testroot,
parentid=os.path.dirname(relfile),
),
])
def test_nested_suite_simple(self):
stub = Stub()
discovered = StubDiscoveredTests(stub)
session = StubPytestSession(stub)
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = './test_eggs.py'.replace('/', os.path.sep)
alltests = [
TestInfo(
id=relfile + '::TestOuter::TestInner::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile=relfile,
func='TestOuter.TestInner.test_spam',
),
source='{}:{}'.format(relfile, 10),
markers=None,
parentid=relfile + '::TestOuter::TestInner',
),
TestInfo(
id=relfile + '::TestOuter::TestInner::test_eggs',
name='test_eggs',
path=TestPath(
root=testroot,
relfile=relfile,
func='TestOuter.TestInner.test_eggs',
),
source='{}:{}'.format(relfile, 21),
markers=None,
parentid=relfile + '::TestOuter::TestInner',
),
]
allsuiteids = [
[relfile + '::TestOuter',
relfile + '::TestOuter::TestInner',
],
[relfile + '::TestOuter',
relfile + '::TestOuter::TestInner',
],
]
discovered = DiscoveredTests()
for test, suiteids in zip(alltests, allsuiteids):
discovered.add_test(test, suiteids)
tests = list(discovered)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(tests, alltests)
self.assertEqual(parents, [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id=relfile,
kind='file',
name=os.path.basename(relfile),
root=testroot,
parentid=os.path.dirname(relfile),
),
ParentInfo(
id=relfile + '::TestOuter',
kind='suite',
name='TestOuter',
root=testroot,
parentid=relfile,
),
ParentInfo(
id=relfile + '::TestOuter::TestInner',
kind='suite',
name='TestInner',
root=testroot,
parentid=relfile + '::TestOuter',
),
])
|
py | 1a483d58ba03233fcaa12b796d118a3cdb74b0b5 | """Run the tests
Assumes /test.py has been invoked on the host and everything is set up.
"""
import sys
import gws.lib.test
sys.exit(gws.lib.test.main(sys.argv[1:]) or 0)
|
py | 1a483f24758573277f7487658048edf1cff710f9 | from Benchmarker import Benchmarker
import numpy as np
def pandas_prod(df):
return df["A"].prod()
def numpy_prod(df):
return np.prod(df["A"])
def numpy_values_prod(df):
return np.prod(df["A"].values)
def numpy_values_nanprod(df):
return np.nanprod(df["A"].values)
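# Note: np.nanprod treats NaN as 1, so it only differs from np.prod when the
# column contains NaNs; the random integers generated below are NaN-free.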
params = {
"df_generator": 'pd.DataFrame(np.random.randint(1, df_size, (df_size, 2)), columns=list("AB"))',
"functions_to_evaluate": [numpy_values_prod, numpy_values_nanprod, pandas_prod, numpy_prod],
"title": "Pandas Prod vs Numpy Prod",
}
benchmark = Benchmarker(**params)
benchmark.benchmark_all()
benchmark.print_results()
benchmark.plot_results()
|
py | 1a4840dca2da4677fe6f4cd6dfdd0df03ddff7ce | class No:
def __init__(self, dadosIniti=0):
self.result = 0
self.dadosIniti = dadosIniti
self.somaResult()
def setDados(self, dadosIniti):
self.dadosIniti = dadosIniti
def getDados(self):
return self.dadosIniti
def somaResult(self):
self.result = self.result + self.dadosIniti
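# Usage sketch: No(5) leaves result == 5 after __init__; each further call to
# somaResult() adds dadosIniti again (e.g. n.somaResult() -> n.result == 10).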
|
py | 1a4841579c6091ebeca7dbec0e6c90a4e4c225a4 | """ Since we also expect the answers in the SQuAD format, we reuse its code """
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
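# Worked example (hypothetical strings): for prediction "six people died" and
# ground truth "6 people died", the normalized token overlap is
# {"people", "died"}, so precision = recall = 2/3 and F1 = 2/3.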
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for dp in dataset:
for qa in dp['qa']:
total += 1
if qa['id'] not in predictions:
message = 'Question id ' + qa['id'] + \
' not present. Will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = ['NA'] if len(qa['answers']) == 0 else qa['answers']
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluation for DuoRC')
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset = json.load(dataset_file)
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions))) |
py | 1a4843b34710ee7796d79a75635ec6f0275bef24 | #! /usr/bin/env python3
"""Simple module/script showing argparse"""
from argparse import ArgumentParser
def get_args():
"""argument parser for argparse_example"""
parser = ArgumentParser()
parser.add_argument("--datadir", required=False, type=str,
default="/logs/MECS", help="path to data directory")
parser.add_argument("--threads", "-j", required=False, type=int,
help="number of threads", default=5)
parser.add_argument("--verbose", "-v", required=False,
action="store_true",
help="used to produce more verbose output")
args = parser.parse_args()
return args
def main(args):
"""main function for argparse_example
splitting the args out separately allows this to be reused in programs"""
print("The args namespace is {}".format(args))
if __name__ == "__main__":
args = get_args()
main(args)
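    # Example invocation (module name assumed):
    #   python argparse_example.py --datadir /logs/MECS -j 8 --verbose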
|
py | 1a4843e5d2ad77db7560cf53fe30534e16566638 | #!/usr/bin/env python3
import os
# requires prompt-toolkit > 2
from prompt_toolkit import PromptSession, HTML
from prompt_toolkit.application.current import get_app
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.styles import Style
global_kb = KeyBindings()
# if bottom toolbar is visible
global_toolbar = None
# color the application
style = Style.from_dict({
# reset bottom_toolbar style
'bottom-toolbar': 'noreverse gray bg:black',
'bottom-toolbar label': 'black bg:darkcyan'
})
def get_mainbar():
""" return keybar adding missing empty keys
"""
mainbar = [
(1, 'Help'),
(10, 'Quit'),
]
# there could be 12 keys at the bottom
keysno = max(10, max(m[0] for m in mainbar))
# add missing keys to keybar
for idx in range(1, keysno+1): # 1-10 or 1-12
if len(mainbar) >= idx:
#print(len(mainbar), idx, mainbar[idx-1])
if mainbar[idx-1][0] == idx:
continue
else:
mainbar.insert(idx-1, (idx, ''))
else: # no more buttons
mainbar.append((idx, ''))
#print(mainbar)
return mainbar
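# With the default mainbar above, get_mainbar() returns
# [(1, 'Help'), (2, ''), ..., (9, ''), (10, 'Quit')].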
def bottom_toolbar():
keybar = get_mainbar()
minlabel = 6
labelled = []
for key, label in keybar:
if len(label) > minlabel: # then trim label
label = label[:minlabel]
# {:{}} means width is specified by argument
labelled.append("{}<label>{:{w}}</label>".format(
key, label, w=minlabel))
return HTML(' '.join(labelled))
@global_kb.add('c-b') # Ctrl+B
def switch_toolbar(event):
global global_toolbar, session
if not global_toolbar:
global_toolbar = bottom_toolbar
else:
global_toolbar = None
session.bottom_toolbar = global_toolbar
@global_kb.add('f10') # F10
def quit(event):
get_app().exit(exception=EOFError) # mimic Ctrl-D
session = PromptSession(os.getcwd() + ">", key_bindings=global_kb, bottom_toolbar=bottom_toolbar, style=style)
while True:
try:
text = session.prompt()
except KeyboardInterrupt: # [ ] in Far Ctrl+C does nothing
break
except EOFError: # [ ] in Far Ctrl+D does nothing
break
else:
print("[debug] " + text)
|
py | 1a4846271cc61c73c5c2a36aedf9df70ac92cc12 | from datetime import date
from datetime import timedelta
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class CovidNineteen:
def get_latest_daily_report(self):
"""
Get latest daily report(world) from:
https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports
"""
data_date = date.today()
data_date_delta = timedelta(days=1)
daily_report_url_no_date = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{}.csv"
while True:
data_date_str = date.strftime(data_date, '%m-%d-%Y')
daily_report_url = daily_report_url_no_date.format(data_date_str)
try:
print("Trying to get {} daily report.".format(data_date_str))
daily_report = pd.read_csv(daily_report_url)
print("The file exists,got {} daily report.".format(data_date_str))
break
except:
print("{} hasn't uploaded yet.".format(data_date_str))
data_date -= data_date_delta # data_date = data_date - data_date_delta
return daily_report
def get_time_series(self):
"""
Get time series data from:
https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
"""
time_series = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
return time_series
covid_19 = CovidNineteen()
daily_report = covid_19.get_latest_daily_report()
time_series = covid_19.get_time_series()
time_series.head()
idVars = ['Province/State', 'Country/Region', 'Lat', 'Long']
time_series_long = pd.melt(time_series, id_vars=idVars, var_name='Date', value_name='Confirmed')
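# melt reshapes the wide per-date columns (e.g. '1/22/20', '1/23/20', ...)
# into long-format (Date, Confirmed) rows, one row per country per day.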
time_series_long['Date'] = pd.to_datetime(time_series_long['Date'])
country_confirmed_groupby = time_series_long.groupby(['Date', 'Country/Region'])['Confirmed'].sum()
df_country_confirmed = pd.DataFrame(country_confirmed_groupby).reset_index()
country_confirmed = df_country_confirmed.sort_values('Confirmed', ascending=True)
#============================= Make a matplotlib picture =========================================
#us = country_confirmed[country_confirmed['Country/Region'].str.contains('US')]
# If we plotted the US on this figure, its much larger counts would dwarf the
# other countries' curves.
cn = country_confirmed[country_confirmed['Country/Region'].str.contains('China')]
jpn = country_confirmed[country_confirmed['Country/Region'].str.contains('Japan')]
kr = country_confirmed[country_confirmed['Country/Region'].str.contains('Korea, South')]
tw = country_confirmed[country_confirmed['Country/Region'].str.contains('Taiwan')]
plt.figure(figsize=(20, 5))
plt.title('Covid 19 Confirmed Results', fontsize=20)
plt.xlabel('Date', fontsize=20)
plt.ylabel('Population', fontsize=20)
#plt.plot(us['Date'], us['Confirmed'], label='USA')
plt.plot(cn['Date'], cn['Confirmed'], label='China')
plt.plot(jpn['Date'], jpn['Confirmed'], label='Japan')
plt.plot(kr['Date'], kr['Confirmed'], label='Korea South')
plt.plot(tw['Date'], tw['Confirmed'], label='Taiwan')
plt.legend(loc=2)
plt.show()
|
py | 1a48468a9ecb696040f1061a1e4e4113b58c93c6 | """
This is used for dynamic object completion.
Jedi tries to guess param types with a backtracking approach.
"""
def func(a, default_arg=2):
#? int()
default_arg
#? int() str()
return a
#? int()
func(1)
func
int(1) + (int(2))+ func('')
# Again the same function, but with another call.
def func(a):
#? float()
return a
func(1.0)
# Again the same function, but with no call.
def func(a):
#?
return a
def func(a):
#? float()
return a
str(func(1.0))
# -----------------
# *args, **args
# -----------------
def arg(*args):
#? tuple()
args
#? int()
args[0]
arg(1,"")
# -----------------
# decorators
# -----------------
def def_func(f):
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
@def_func
def func(c):
#? str()
return c
#? str()
func("something")
@def_func
def func(c=1):
#? float()
return c
func(1.0)
def tricky_decorator(func):
def wrapper(*args):
return func(1, *args)
return wrapper
@tricky_decorator
def func(a, b):
#? int()
a
#? float()
b
func(1.0)
# Needs to be here, because in this case func is an import -> shouldn't lead to
# exceptions.
import sys as func
func.sys
# -----------------
# classes
# -----------------
class A():
def __init__(self, a):
#? str()
a
A("s")
class A():
def __init__(self, a):
#? int()
a
self.a = a
def test(self, a):
#? float()
a
self.c = self.test2()
def test2(self):
#? int()
return self.a
def test3(self):
#? int()
self.test2()
#? int()
self.c
A(3).test(2.0)
A(3).test2()
def from_class(x):
#?
x
from UNDEFINED import from_class
class Foo(from_class(1),):
pass
# -----------------
# comprehensions
# -----------------
def from_comprehension(foo):
#? int() float()
return foo
[from_comprehension(1.0) for n in (1,)]
[from_comprehension(n) for n in (1,)]
# -----------------
# lambdas
# -----------------
#? int()
x_lambda = lambda x: x
x_lambda(1)
class X():
#? str()
x_method = lambda self, a: a
X().x_method('')
|
py | 1a4846c88799802466c078f7c52dbf5061c55749 | import re
from typing import Match, Optional
from dl_plus import ytdl
InfoExtractor = ytdl.import_from('extractor.common', 'InfoExtractor')
ExtractorError = ytdl.import_from('utils', 'ExtractorError')
class Extractor(InfoExtractor):
"""
A base class for pluggable extractors
"""
# Set by `ExtractorPlugin.register`, do not override.
IE_NAME: Optional[str] = None
@classmethod
def ie_key(cls):
return cls.IE_NAME
# dl-plus extra attributes/methods
DLP_BASE_URL: Optional[str] = None
DLP_REL_URL: Optional[str] = None
@classmethod
def dlp_match(cls, url: str) -> Optional[Match[str]]:
# a copy/paste from youtube-dl
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url)
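# A minimal plugin sketch (the class name and URL pattern are hypothetical):
#
#     class ExampleExtractor(Extractor):
#         _VALID_URL = r'https?://example\.com/video/(?P<id>\d+)'
#
#         def _real_extract(self, url):
#             video_id = self._match_id(url)
#             ...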
|
py | 1a48471fcd20a7a7143dbee4b76a6a797aec4787 | from hdmf.testing import TestCase
from hdmf.utils import LabelledDict
class MyTestClass:
def __init__(self, prop1, prop2):
self._prop1 = prop1
self._prop2 = prop2
@property
def prop1(self):
return self._prop1
@property
def prop2(self):
return self._prop2
class TestLabelledDict(TestCase):
def test_constructor(self):
"""Test that constructor sets arguments properly."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
self.assertEqual(ld.label, 'all_objects')
self.assertEqual(ld.key_attr, 'prop1')
def test_constructor_default(self):
"""Test that constructor sets default key attribute."""
ld = LabelledDict(label='all_objects')
self.assertEqual(ld.key_attr, 'name')
def test_set_key_attr(self):
"""Test that the key attribute cannot be set after initialization."""
ld = LabelledDict(label='all_objects')
with self.assertRaisesWith(AttributeError, "can't set attribute"):
ld.key_attr = 'another_name'
def test_getitem_unknown_val(self):
"""Test that dict[unknown_key] where the key unknown_key is not in the dict raises an error."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
with self.assertRaisesWith(KeyError, "'unknown_key'"):
ld['unknown_key']
def test_getitem_eqeq_unknown_val(self):
"""Test that dict[unknown_attr == val] where there are no query matches returns an empty set."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
self.assertSetEqual(ld['unknown_attr == val'], set())
def test_getitem_eqeq_other_key(self):
"""Test that dict[other_attr == val] where there are no query matches returns an empty set."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
self.assertSetEqual(ld['prop2 == val'], set())
def test_getitem_eqeq_no_key_attr(self):
"""Test that dict[key_attr == val] raises an error if key_attr is not given."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
with self.assertRaisesWith(ValueError, "An attribute name is required before '=='."):
ld[' == unknown_key']
def test_getitem_eqeq_no_val(self):
"""Test that dict[key_attr == val] raises an error if val is not given."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
with self.assertRaisesWith(ValueError, "A value is required after '=='."):
ld['prop1 == ']
def test_getitem_eqeq_no_key_attr_no_val(self):
"""Test that dict[key_attr == val] raises an error if key_attr is not given and val is not given."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
with self.assertRaisesWith(ValueError, "An attribute name is required before '=='."):
ld[' == ']
def test_add_basic(self):
"""Test add method on object with correct key_attr."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
self.assertIs(ld['a'], obj1)
def test_add_value_missing_key(self):
"""Test that add raises an error if the value being set does not have the attribute key_attr."""
ld = LabelledDict(label='all_objects', key_attr='unknown_key')
obj1 = MyTestClass('a', 'b')
err_msg = r"Cannot set value '<.*>' in LabelledDict\. Value must have attribute 'unknown_key'\."
with self.assertRaisesRegex(ValueError, err_msg):
ld.add(obj1)
def test_setitem_getitem_basic(self):
"""Test that setitem and getitem properly set and get the object."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
self.assertIs(ld['a'], obj1)
def test_setitem_value_missing_key(self):
"""Test that setitem raises an error if the value being set does not have the attribute key_attr."""
ld = LabelledDict(label='all_objects', key_attr='unknown_key')
obj1 = MyTestClass('a', 'b')
err_msg = r"Cannot set value '<.*>' in LabelledDict\. Value must have attribute 'unknown_key'\."
with self.assertRaisesRegex(ValueError, err_msg):
ld['a'] = obj1
def test_setitem_value_inconsistent_key(self):
"""Test that setitem raises an error if the value being set has an inconsistent key."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
err_msg = r"Key 'b' must equal attribute 'prop1' of '<.*>'\."
with self.assertRaisesRegex(KeyError, err_msg):
ld['b'] = obj1
def test_setitem_value_duplicate_key(self):
"""Test that setitem raises an error if the key already exists in the dict."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
obj2 = MyTestClass('a', 'c')
ld['a'] = obj1
err_msg = "Key 'a' is already in this dict. Cannot reset items in a LabelledDict."
with self.assertRaisesWith(TypeError, err_msg):
ld['a'] = obj2
def test_add_callable(self):
"""Test that add properly adds the object and calls the add_callable function."""
self.signal = None
def func(v):
self.signal = v
ld = LabelledDict(label='all_objects', key_attr='prop1', add_callable=func)
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
self.assertIs(ld['a'], obj1)
self.assertIs(self.signal, obj1)
def test_setitem_callable(self):
"""Test that setitem properly sets the object and calls the add_callable function."""
self.signal = None
def func(v):
self.signal = v
ld = LabelledDict(label='all_objects', key_attr='prop1', add_callable=func)
obj1 = MyTestClass('a', 'b')
ld['a'] = obj1
self.assertIs(ld['a'], obj1)
self.assertIs(self.signal, obj1)
def test_getitem_eqeq_nonempty(self):
"""Test that dict[key_attr == val] returns the single matching object."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
self.assertIs(ld['prop1 == a'], obj1)
def test_getitem_eqeq_nonempty_key_attr_no_match(self):
"""Test that dict[key_attr == unknown_val] where a matching value is not found raises a KeyError."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
with self.assertRaisesWith(KeyError, "'unknown_val'"):
ld['prop1 == unknown_val'] # same as ld['unknown_val']
def test_getitem_eqeq_nonempty_unknown_attr(self):
"""Test that dict[unknown_attr == val] where unknown_attr is not a field on the values raises an error."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld['a'] = obj1
self.assertSetEqual(ld['unknown_attr == unknown_val'], set())
def test_getitem_nonempty_other_key(self):
"""Test that dict[other_key == val] returns a set of matching objects."""
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
obj2 = MyTestClass('d', 'b')
obj3 = MyTestClass('f', 'e')
ld.add(obj1)
ld.add(obj2)
ld.add(obj3)
self.assertSetEqual(ld['prop2 == b'], {obj1, obj2})
def test_pop_nocallback(self):
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
ret = ld.pop('a')
self.assertEqual(ret, obj1)
self.assertEqual(ld, dict())
def test_pop_callback(self):
self.signal = None
def func(v):
self.signal = v
ld = LabelledDict(label='all_objects', key_attr='prop1', remove_callable=func)
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
ret = ld.pop('a')
self.assertEqual(ret, obj1)
self.assertEqual(self.signal, obj1)
self.assertEqual(ld, dict())
def test_popitem_nocallback(self):
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
ret = ld.popitem()
self.assertEqual(ret, ('a', obj1))
self.assertEqual(ld, dict())
def test_popitem_callback(self):
self.signal = None
def func(v):
self.signal = v
ld = LabelledDict(label='all_objects', key_attr='prop1', remove_callable=func)
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
ret = ld.popitem()
self.assertEqual(ret, ('a', obj1))
self.assertEqual(self.signal, obj1)
self.assertEqual(ld, dict())
def test_clear_nocallback(self):
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
obj2 = MyTestClass('d', 'b')
ld.add(obj1)
ld.add(obj2)
ld.clear()
self.assertEqual(ld, dict())
def test_clear_callback(self):
self.signal = set()
def func(v):
self.signal.add(v)
ld = LabelledDict(label='all_objects', key_attr='prop1', remove_callable=func)
obj1 = MyTestClass('a', 'b')
obj2 = MyTestClass('d', 'b')
ld.add(obj1)
ld.add(obj2)
ld.clear()
self.assertSetEqual(self.signal, {obj2, obj1})
self.assertEqual(ld, dict())
def test_delitem_nocallback(self):
ld = LabelledDict(label='all_objects', key_attr='prop1')
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
del ld['a']
self.assertEqual(ld, dict())
def test_delitem_callback(self):
self.signal = None
def func(v):
self.signal = v
ld = LabelledDict(label='all_objects', key_attr='prop1', remove_callable=func)
obj1 = MyTestClass('a', 'b')
ld.add(obj1)
del ld['a']
self.assertEqual(self.signal, obj1)
self.assertEqual(ld, dict())
def test_update_callback(self):
ld = LabelledDict(label='all_objects', key_attr='prop1')
with self.assertRaisesWith(TypeError, "update is not supported for LabelledDict"):
ld.update(object())
def test_setdefault_callback(self):
ld = LabelledDict(label='all_objects', key_attr='prop1')
with self.assertRaisesWith(TypeError, "setdefault is not supported for LabelledDict"):
ld.setdefault(object())
|
py | 1a484753c6bbffd3e8f6fe86fc2b801201c64445 | from sympy.core.containers import Tuple
from sympy.core.symbol import symbols
from sympy.matrices.dense import Matrix
from sympy.physics.quantum.trace import Tr
from sympy.testing.pytest import raises
def test_trace_new():
a, b, c, d, Y = symbols('a b c d Y')
A, B, C, D = symbols('A B C D', commutative=False)
assert Tr(a + b) == a + b
assert Tr(A + B) == Tr(A) + Tr(B)
#check trace args not implicitly permuted
assert Tr(C*D*A*B).args[0].args == (C, D, A, B)
# check for mul and adds
assert Tr((a*b) + ( c*d)) == (a*b) + (c*d)
# Tr(scalar*A) = scalar*Tr(A)
assert Tr(a*A) == a*Tr(A)
assert Tr(a*A*B*b) == a*b*Tr(A*B)
# since A is symbol and not commutative
assert isinstance(Tr(A), Tr)
#POW
assert Tr(pow(a, b)) == a**b
assert isinstance(Tr(pow(A, a)), Tr)
#Matrix
M = Matrix([[1, 1], [2, 2]])
assert Tr(M) == 3
##test indices in different forms
#no index
t = Tr(A)
assert t.args[1] == Tuple()
#single index
t = Tr(A, 0)
assert t.args[1] == Tuple(0)
#index in a list
t = Tr(A, [0])
assert t.args[1] == Tuple(0)
t = Tr(A, [0, 1, 2])
assert t.args[1] == Tuple(0, 1, 2)
#index is tuple
t = Tr(A, (0))
assert t.args[1] == Tuple(0)
t = Tr(A, (1, 2))
assert t.args[1] == Tuple(1, 2)
#trace indices test
t = Tr((A + B), [2])
assert t.args[0].args[1] == Tuple(2) and t.args[1].args[1] == Tuple(2)
t = Tr(a*A, [2, 3])
assert t.args[1].args[1] == Tuple(2, 3)
#class with trace method defined
#to simulate numpy objects
class Foo:
def trace(self):
return 1
assert Tr(Foo()) == 1
#argument test
# check for value error, when either/both arguments are not provided
raises(ValueError, lambda: Tr())
raises(ValueError, lambda: Tr(A, 1, 2))
def test_trace_doit():
a, b, c, d = symbols('a b c d')
A, B, C, D = symbols('A B C D', commutative=False)
#TODO: needed while testing reduced density operations, etc.
def test_permute():
A, B, C, D, E, F, G = symbols('A B C D E F G', commutative=False)
t = Tr(A*B*C*D*E*F*G)
assert t.permute(0).args[0].args == (A, B, C, D, E, F, G)
assert t.permute(2).args[0].args == (F, G, A, B, C, D, E)
assert t.permute(4).args[0].args == (D, E, F, G, A, B, C)
assert t.permute(6).args[0].args == (B, C, D, E, F, G, A)
assert t.permute(8).args[0].args == t.permute(1).args[0].args
assert t.permute(-1).args[0].args == (B, C, D, E, F, G, A)
assert t.permute(-3).args[0].args == (D, E, F, G, A, B, C)
assert t.permute(-5).args[0].args == (F, G, A, B, C, D, E)
assert t.permute(-8).args[0].args == t.permute(-1).args[0].args
t = Tr((A + B)*(B*B)*C*D)
assert t.permute(2).args[0].args == (C, D, (A + B), (B**2))
t1 = Tr(A*B)
t2 = t1.permute(1)
assert id(t1) != id(t2) and t1 == t2
|
py | 1a48489a3f7dc8cc674ce9abd0f8381b1d6f215e | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from .object import Object
from .primitives import Int, Long
class Message(Object):
ID = 0x5bb8e511 # hex(crc32(b"message msg_id:long seqno:int bytes:int body:Object = Message"))
def __init__(self, body: Object, msg_id: int, seq_no: int, length: int):
self.msg_id = msg_id
self.seq_no = seq_no
self.length = length
self.body = body
@staticmethod
def read(b: BytesIO, *args) -> "Message":
msg_id = Long.read(b)
seq_no = Int.read(b)
length = Int.read(b)
body = b.read(length)
return Message(Object.read(BytesIO(body)), msg_id, seq_no, length)
def write(self) -> bytes:
b = BytesIO()
b.write(Long(self.msg_id))
b.write(Int(self.seq_no))
b.write(Int(self.length))
b.write(self.body.write())
return b.getvalue()
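# Round-trip sketch (assuming `obj` is any TL object with a .write() method):
#     data = Message(obj, msg_id, seq_no, len(obj.write())).write()
#     parsed = Message.read(BytesIO(data))  # parsed.msg_id == msg_id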
|
py | 1a48489c4054cc00fd186b2ced937820bc8ebd81 | from models.app_config import ApplicationGroupConfig
from modules.configs.config_factory import ConfigFactory
from modules.runner.app_runner import ApplicationRunner
from pick import pick
def pick_config(app_configs: list[ApplicationGroupConfig]):
title = 'Choose which workflow config to start: '
config_options = list(map(lambda c: c.get_config_name(), app_configs))
option, index = pick(config_options, title, indicator='=> ')
return app_configs[index]
def main():
group_configs = ConfigFactory.get_default_parser().get_config()
chosen_group_config = pick_config(group_configs)
app_runner = ApplicationRunner(chosen_group_config)
app_runner.spawn_work_spaces()
input("Press enter to confirm all the windows have started (you config will be restored)")
app_runner.restore_config()
if __name__ == '__main__':
main()
|
py | 1a4848a9fd2c7c5b8e79ae16e71a07ff6e2b9934 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-10 12:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("elections", "0016_auto_20170109_1514")]
operations = [
migrations.AddField(
model_name="election",
name="election_title",
field=models.CharField(blank=True, max_length=255),
)
]
|
py | 1a4848c0bdac3ac717937b75154020ba685713e5 | # Generated by Django 3.2.12 on 2022-03-11 15:07
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Category",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("title", models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name="Component",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("title", models.CharField(max_length=100)),
("description", models.TextField()),
(
"category",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="categories", to="components.category"
),
),
],
),
]
|
py | 1a484976009cc446a63ab039be5a620628b98b5f | import pandas as pd
import matplotlib.pyplot as plt
import csv
query = "query-1"
auto_scaler = "HPA"
percentage = "80"
path_to_file = "../experiment_data_processed/full_data/" + query + "_" + auto_scaler + "_" + percentage + ".csv"
df = pd.read_csv(path_to_file)
taskmanager = df['taskmanager'].tolist()
latency = df['latency'].tolist()
previous_number_taskmanagers = taskmanager[0]
scaling_events = 0
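# A scaling event is counted whenever the sampled taskmanager count changes
# from one row to the next.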
for val in taskmanager:
if val != previous_number_taskmanagers:
scaling_events += 1
previous_number_taskmanagers = val
average_latency = sum(latency) / len(latency)
average_taskmanager = sum(taskmanager) / len(taskmanager)
with open("../experiment_data_processed/evaluation_metrics/" + query + "_" + auto_scaler + "_" + percentage + ".csv", 'w') as f:
# create the csv writer
writer = csv.writer(f)
writer.writerow(["latency", average_latency])
writer.writerow(["taskmanager", average_taskmanager])
writer.writerow(["scaling_events", scaling_events]) |
py | 1a484b0678b1f659da89297e5597a2dd09c147e8 | import discord
import asyncio
import logging
###########
# GLOBALS #
###########
logger = logging.getLogger("bullinsbot.clean")
def get_available_commands():
return {"clean": execute}
async def execute(client, message, instruction, **kwargs):
"""Searches through a number of past bot commands and replies from requested channel and deletes them."""
def bot_related(m):
return m.content.startswith(client.invocation) or m.author == client.user
deleted_messages = await client.purge_from(message.channel, limit=int(instruction[1]), check=bot_related)
logger.info("Deleted %d bot-related messages from channel.", len(deleted_messages))
|
py | 1a484b2d89ee38879594e4d53eb1a3ff7b6e8ad4 | """
Current-flow closeness centrality measures.
"""
# Copyright (C) 2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg <[email protected]>"""
__all__ = ['current_flow_closeness_centrality','information_centrality']
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import *
def current_flow_closeness_centrality(G, normalized=True, weight='weight',
dtype=float, solver='lu'):
"""Compute current-flow closeness centrality for nodes.
A variant of closeness centrality based on effective
resistance between nodes in a network. This metric
is also known as information centrality.
Parameters
----------
G : graph
A NetworkX graph
normalized : bool, optional
If True the values are normalized by 1/(n-1) where n is the
number of nodes in G.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with current flow closeness centrality as the value.
See Also
--------
closeness_centrality
Notes
-----
The algorithm is from Brandes [1]_.
See also [2]_ for the original definition of information centrality.
References
----------
.. [1] Ulrik Brandes and Daniel Fleischer,
Centrality Measures Based on Current Flow.
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] Stephenson, K. and Zelen, M.
Rethinking centrality: Methods and examples.
Social Networks. Volume 11, Issue 1, March 1989, pp. 1-37
http://dx.doi.org/10.1016/0378-8733(89)90016-6
"""
from networkx.utils import reverse_cuthill_mckee_ordering
try:
import numpy as np
except ImportError:
        raise ImportError('current_flow_closeness_centrality requires NumPy: '
                          'http://scipy.org/')
try:
import scipy
except ImportError:
        raise ImportError('current_flow_closeness_centrality requires SciPy: '
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError(
            "current_flow_closeness_centrality() not defined for digraphs.")
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
solvername={"full" :FullInverseLaplacian,
"lu": SuperLUInverseLaplacian,
"cg": CGInverseLaplacian}
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G,dict(zip(ordering,range(n))))
betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H
L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
dtype=dtype, format='csc')
C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
for v in H:
col=C2.get_row(v)
for w in H:
betweenness[v]+=col[v]-2*col[w]
betweenness[w]+=col[v]
if normalized:
nb=len(betweenness)-1.0
else:
nb=1.0
for v in H:
betweenness[v]=nb/(betweenness[v])
return dict((ordering[k],float(v)) for k,v in betweenness.items())
information_centrality=current_flow_closeness_centrality
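# Usage sketch (requires NumPy and SciPy; G must be connected and undirected):
#     G = nx.path_graph(4)
#     scores = current_flow_closeness_centrality(G)  # {node: centrality}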
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
|