code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import networkx as nx
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from PIL import Image, ImageTk
import os
import tkinter
from tkinter import messagebox
from core import Router, Event, Tools
class Gui(object):
    """Tkinter front-end for the RIP routing simulator.

    Layout (left to right): topology image plus control buttons, the
    routing-table display, and the operation log.  All routing work is
    delegated to the ``core`` package (Router/Event/Tools).
    """

    def __init__(self, master, log):
        """Build the widget tree and bind the event handlers.

        :param master: the Tk root window.
        :param log: path of the log file written when the window closes.
        """
        self.master = master
        self.log = log
        # Whole-window container.
        self.maxFrame = tkinter.Frame(master)
        self.maxFrame.pack(fill=tkinter.BOTH, expand=1, padx=1, pady=1)
        # Left column: topology image on top, controls at the bottom.
        self.fm1 = tkinter.Frame(self.maxFrame)
        self.fm1.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=tkinter.YES)
        self.leftFrame = tkinter.LabelFrame(self.fm1, font=18, padx=1, pady=1)
        self.leftFrame.pack(side=tkinter.BOTTOM, fill=tkinter.BOTH, expand=tkinter.YES)
        self.imageFrame = tkinter.LabelFrame(self.fm1, font=18, padx=1, pady=1)
        self.imageFrame.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=tkinter.YES)
        # Topology image; restored from disk if a previous run saved one.
        self.image_lable = tkinter.Label(self.imageFrame)
        self.image_lable.pack()
        if os.path.exists("ba.png"):
            # Keep a reference on self so Tk does not garbage-collect the image.
            self.tk_image = tkinter.PhotoImage(file="ba.png")
            self.image_lable.config(image=self.tk_image)
        # Topology generation / routing-table update controls.
        self.routerGenerate = tkinter.Button(self.leftFrame, text='生成拓扑网络', font=18, padx=20, pady=5)
        self.routerGenerate.grid(row=6, column=0, pady=5, columnspan=2)
        tkinter.Label(self.leftFrame, text='更新路由表', font=18).grid(row=7, column=0, sticky='W', pady=5)
        self.routerUpdate = tkinter.Button(self.leftFrame, text='更新全部路由', font=18, padx=20, pady=5)
        self.routerUpdate.grid(row=8, column=0, pady=5, columnspan=2)
        self.routerUpdateStep = tkinter.Button(self.leftFrame, text='更新下一路由', font=18, padx=20, pady=5)
        self.routerUpdateStep.grid(row=8, column=2, pady=5, columnspan=2)
        # Fault-simulation controls.
        tkinter.Label(self.leftFrame, text='模拟网络故障', font=18).grid(row=9, column=0, sticky='W', pady=5)
        tkinter.Label(self.leftFrame, text='故障网络名称:', font=16).grid(row=10, column=0, sticky='W', pady=5)
        self.routerFault = tkinter.Entry(self.leftFrame, justify=tkinter.CENTER)
        self.routerFault.grid(row=10, column=1)
        self.faultStart = tkinter.Button(self.leftFrame, text='故障', font=18, padx=20, pady=5)
        self.faultStart.grid(row=11, column=0, pady=5, columnspan=2)
        # Empty labels used as spacer rows.
        tkinter.Label(self.leftFrame, text='', font=18).grid(row=12, column=0, sticky='W', pady=5)
        tkinter.Label(self.leftFrame, text='', font=18).grid(row=13, column=0, sticky='W', pady=5)
        # Routing-table display pane (read-only Text with a scrollbar).
        self.routeInfoFrame = tkinter.LabelFrame(self.maxFrame, font=18, padx=1, pady=1)
        self.routeInfoFrame.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=tkinter.YES)
        tkinter.Label(self.routeInfoFrame, text='路由表信息', font=16).pack()
        self.routeShowArea = tkinter.Scrollbar(self.routeInfoFrame)
        self.routeShowArea.pack(side=tkinter.RIGHT, fill=tkinter.Y)
        self.routeData = tkinter.Text(self.routeInfoFrame, width=60, height=18, font=16, state=tkinter.DISABLED,
                                      yscrollcommand=self.routeShowArea.set)
        self.routeData.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
        self.routeShowArea.config(command=self.routeData.yview)
        # Operation-log display pane (read-only Text with a scrollbar).
        self.logFrame = tkinter.LabelFrame(self.maxFrame, padx=1, pady=1)
        self.logFrame.pack()
        tkinter.Label(self.logFrame, text='操作日志', font=16).pack()
        self.logShowArea = tkinter.Scrollbar(self.logFrame)
        self.logShowArea.pack(side=tkinter.RIGHT, fill=tkinter.Y)
        self.logData = tkinter.Text(self.logFrame, width=60, height=13, font=16, state=tkinter.DISABLED,
                                    yscrollcommand=self.logShowArea.set)
        self.logData.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
        self.logShowArea.config(command=self.logData.yview)
        # Wire up the button handlers.
        self.event_bound()

    def event_bound(self):
        """Bind widget events to their handler methods."""
        self.routerGenerate.bind('<ButtonRelease-1>', self.generate_network)
        self.routerUpdate.bind('<ButtonRelease-1>', self.update_button)
        self.routerUpdateStep.bind('<ButtonRelease-1>', self.update_step_button)
        self.faultStart.bind('<ButtonRelease-1>', self.fault_button)
        # Persist the log when the window is closed.
        self.master.protocol('WM_DELETE_WINDOW', self.save_log_event)

    def set_router_next(self, event):
        """Distance-combobox handler: force the next hop to '*' for distance 1.

        NOTE(review): the distance combobox (self.routerDistance) is no
        longer created in __init__, so this handler is only reachable if
        those manual-entry widgets are restored.
        """
        if self.routerDistance.get() == '1':
            # Distance 1 means a directly connected network: next hop is
            # fixed to '*' and the entry is made read-only.
            self.EntryData = tkinter.StringVar()
            self.routerNext = tkinter.Entry(self.leftFrame, justify=tkinter.CENTER,
                                            textvariable=self.EntryData, state='readonly')
            self.EntryData.set('*')
            self.routerNext.grid(row=5, column=1)
        else:
            self.routerNext = tkinter.Entry(self.leftFrame, justify=tkinter.CENTER)
            self.routerNext.grid(row=5, column=1)

    def update_button(self, event):
        """Run a full routing-table update across all routers."""
        # Nothing to do when no routing tables exist yet.
        if len(os.listdir(Router.ROOT_ROUTER_PATH)) == 0:
            messagebox.showinfo('警告', '不存在路由表,无法发送!')
            return
        Event.update_router(log_show=self.logData, router_show=self.routeData)

    def update_step_button(self, event):
        """Update only the next router (single-step update)."""
        # Nothing to do when no routing tables exist yet.
        if len(os.listdir(Router.ROOT_ROUTER_PATH)) == 0:
            messagebox.showinfo('警告', '不存在路由表,无法发送!')
            return
        Event.update_step_router(log_show=self.logData, router_show=self.routeData)

    def add_button(self, event):
        """Add a single route from the (optional) manual-entry widgets."""
        # Reject the request when any required field is blank.
        # (Fixed: the second check used to test routerTarget twice and
        # never validated routerIp.)
        if (self.routerName.get().strip() == '') or \
                (self.routerIp.get().strip() == '') or \
                (self.routerTarget.get().strip() == '') or \
                (self.routerNext.get().strip() == ''):
            return
        Event.add_router(log_show=self.logData, router_show=self.routeData,
                         name=self.routerName.get(), ip=self.routerIp.get(),
                         target=self.routerTarget.get(), distance=self.routerDistance.get(),
                         next=self.routerNext.get())

    def fault_button(self, event):
        """Simulate a fault for the network typed into the fault entry."""
        # Nothing to do when no routing tables exist yet.
        if len(os.listdir(Router.ROOT_ROUTER_PATH)) == 0:
            messagebox.showinfo('警告', '不存在路由表,无法发送!')
            return
        # Only run the test when some routing table knows the network.
        exists = False
        for router_list in Tools.get_all_router_list(Router.ROOT_ROUTER_PATH):
            if self.routerFault.get() in [router_info['target'] for router_info
                                          in router_list.get_router_info()]:
                exists = True
                break
        if not exists:
            messagebox.showinfo('警告', '此网络不存在,无法进行故障测试!')
            return
        Event.fault_test(log_show=self.logData, router_show=self.routeData,
                         fault=self.routerFault.get())

    def save_log_event(self):
        """WM_DELETE_WINDOW handler: save the operation log, then close."""
        # Save the contents of the log pane (fixed: previously the route
        # pane self.routeData was written out instead of the log).
        Tools.save_log(log_data=self.logData, log=self.log)
        self.master.destroy()

    def generate_network(self, event):
        """Generate a random topology, seed per-router tables, and show it."""
        plt.cla()  # clear the previous plot before drawing a new one
        # Random Erdos-Renyi graph: 16 routers, 15% edge probability.
        G = nx.erdos_renyi_graph(16, 0.15)
        from networkx.drawing.nx_agraph import graphviz_layout
        pos = graphviz_layout(G, prog='dot')
        ip = "127.0.0.1"
        start_port = 8080
        labels = {}
        letter = "R"
        # Assign each node a name (R0, R1, ...) and an "ip:port" address.
        for i in G.nodes:
            port = start_port + i
            name = letter + str(i)
            labels[i] = name + "/" + str(port)
            G.nodes[i]['ip_address'] = ip + ":" + str(port)
            G.nodes[i]['router_name'] = name
        # Drop any routing tables left over from a previous topology.
        Router.remove_route_file(Router.ROOT_ROUTER_PATH)
        # Create one router per node and seed its routing table.
        for i in G.nodes:
            port = start_port + i
            name = letter + str(i)
            ip_address = ip + ":" + str(port)
            router = Router.Router(name=name, ip_address=ip_address)
            # The router itself: distance 0, next hop '*'.
            router.get_router_list().add_router_info(data={'target': ip_address,
                                                           'distance': str(0),
                                                           'next': "*"})
            # Each graph neighbour: distance 1.
            for k in G[i].keys():
                router.get_router_list().add_router_info(data={'target': G.nodes[k]["ip_address"],
                                                               'distance': str(1),
                                                               'next': G.nodes[k]["router_name"]})
            router.get_router_list().save_router_list()
        nx.draw(G, pos=pos, node_color='b', edge_color='r', labels=labels, with_labels=True, font_size=10, node_size=18)
        plt.savefig("ba.png")
        self.show_image()

    def show_image(self):
        """Load ba.png from disk and display it in the image label."""
        try:
            # Keep a reference on self so Tk does not garbage-collect the image.
            self.tk_image = tkinter.PhotoImage(file="ba.png")
            self.image_lable.config(image=self.tk_image)
        except Exception as e:
            # A missing or corrupt image must not kill the GUI.
            print(e)

    def resize(self, w, h, w_box, h_box, pil_image):
        """Scale *pil_image* to fit inside a w_box x h_box rectangle while
        preserving the original aspect ratio.

        :param w: original image width.
        :param h: original image height.
        :param w_box: bounding-box width.
        :param h_box: bounding-box height.
        :param pil_image: the PIL image to scale.
        :return: a new, resized PIL image.
        """
        f1 = 1.0 * w_box / w  # 1.0 forces float division in Python 2
        f2 = 1.0 * h_box / h
        factor = min([f1, f2])
        width = int(w * factor)
        height = int(h * factor)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        return pil_image.resize((width, height), Image.LANCZOS)
import UDP.Sever
import UDP.Client
import os
# Root directory that holds every simulated router's on-disk state.
from core import RouterList
ROOT_ROUTER_PATH = os.getcwd()+'/ROUTER'
class Router(object):
    """A simulated RIP router identified by a name and an "ip:port" address.

    Each router owns a directory under ROOT_ROUTER_PATH named
    "<ip> <port>" and persists its routing table there as "<name>.rl".
    """

    def __init__(self, **params):
        """Create the router, its directory, and its routing table.

        Expected keyword arguments:
            name       - router name (e.g. "R0").
            ip_address - "ip:port" string.
        """
        self.ip_address = params['ip_address']
        self.router_name = params['name']
        # Directory name replaces ":" with a space (":" is not a legal
        # file-name character on every platform).
        router_dir = self.ip_address.split(':')[0] + ' ' + self.ip_address.split(':')[1]
        if not os.path.exists(ROOT_ROUTER_PATH):
            os.mkdir(ROOT_ROUTER_PATH)
        # This router's own directory.
        self.path = os.path.join(ROOT_ROUTER_PATH, router_dir)
        if not os.path.exists(self.path):
            os.mkdir(self.path)
        # Routing table backed by "<name>.rl" inside the router directory.
        self.router_list = RouterList.RouterList(
            os.path.join(self.path, self.router_name + '.rl'))

    def get_router_name(self):
        """Return the router name."""
        return self.router_name

    def get_router_path(self):
        """Return this router's directory path."""
        return self.path

    def get_router_ip_address(self):
        """Return the address as an ``(ip, port)`` tuple with an int port."""
        return (self.ip_address.split(':')[0], int(self.ip_address.split(':')[1]))

    def get_router_list(self):
        """Return this router's RouterList (its routing table)."""
        return self.router_list

    def get_near_router(self, all_near_router):
        """Return this router's neighbours from a precomputed name->list map."""
        return all_near_router[self.router_name]

    def send_router_list(self, **params):
        """Send this router's table to a neighbour over UDP.

        Expected keyword arguments:
            sever - UDP server whose request loop is pumped once at the end.
            host  - destination ip.
            port  - destination port.
            path  - file path the receiver stores the uploaded table under.
        """
        send_client = UDP.Client.Client(host=params['host'],
                                        port=params['port'], buf_size=1024)
        # Serialise every route as "target distance next", one per line.
        data = [router_info['target'] + ' ' + router_info['distance'] + ' ' + router_info['next']
                for router_info in self.router_list.set_send_router_info()]
        try:
            if send_client.upload_data('\n'.join(data), params['path']):
                send_client.close()
        except Exception as e:
            # str() guards against a non-string port (e.g. an int) that
            # would otherwise raise a TypeError while reporting the error.
            print("端口:" + str(params['port']))
            print(e)
        params['sever'].handle_request()

    def getDistance(self, router_list, target):
        """Return the stored distance for *target*, or None if unknown.

        :param router_list: iterable of route dicts with a 'target' key.
        :param target: the "ip:port" destination to look up.
        """
        for router in router_list:
            if router['target'] == target:
                return router['distance']
        return None

    def update_router_list(self):
        """Merge every neighbour table found in this router's directory
        into its own table (distance-vector update), then delete the
        received tables and persist the result.
        """
        for other_router_list in [RouterList.RouterList(os.path.join(self.path, router_list))
                                  for router_list in os.listdir(self.path)
                                  if not os.path.basename(router_list).split('.')[0] == self.router_name]:
            for other_router_info in other_router_list.get_router_info():
                # Becomes 1 once the advertised destination has been matched
                # against an existing entry.  (Initialised here, before the
                # inner loop, so an empty table cannot leave it undefined.)
                sign = 0
                for self_router_info in self.router_list.get_router_info():
                    if other_router_info['target'] == self_router_info['target']:
                        if other_router_info['next'] == self_router_info['next']:
                            # Same next hop: always take the advertised distance.
                            self_router_info['distance'] = other_router_info['distance']
                        elif int(other_router_info['distance']) < int(self_router_info['distance']):
                            # Shorter path via a different next hop: adopt it.
                            # (Stored as str, consistent with every other entry.)
                            self_router_info['distance'] = str(int(other_router_info['distance']))
                            self_router_info['next'] = other_router_info['next']
                        # Otherwise the existing route is at least as good: keep it.
                        sign = 1
                        break
                if sign == 0:
                    # Previously unknown destination: append it.
                    self.router_list.add_router_info(other_router_info)
            # The received table has been merged; remove its file.
            os.remove(other_router_list.get_router_list_path())
        # Persist the merged table.
        self.router_list.save_router_list()
def remove_route_file(path):
    """Recursively delete every file below *path*.

    Directories themselves are kept; only the files inside them (at any
    depth) are removed.  Does nothing when *path* does not exist.
    """
    if not os.path.exists(path):
        return
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isfile(entry_path):
            os.remove(entry_path)
        else:
            # Descend into sub-directories and clear them too.
            remove_route_file(entry_path)
import os
from tkinter import messagebox
class RouterList(object):
    """On-disk routing table: a list of {'target', 'distance', 'next'} dicts.

    The backing file stores one route per line as
    "<target> <distance> <next>", separated by single spaces.  The router
    name is derived from the file name (``<name>.rl``).
    """

    def __init__(self, path):
        """Load the table from *path*, creating an empty file if absent.

        :param path: path of the ``<router name>.rl`` table file.
        """
        self.router_name = os.path.basename(path).split('.')[0]
        self.path = path
        if os.path.exists(path):
            # Parse "target distance next" lines into route dicts.
            with open(path) as f:
                io_data_s = f.readlines()
            self.router_list = [{'target': io_data.split(' ')[0],
                                 'distance': io_data.split(' ')[1],
                                 'next': io_data.split(' ')[2].replace('\n', '')}
                                for io_data in io_data_s]
        else:
            # Create an empty table file so later loads/saves succeed.
            with open(path, 'w+'):
                pass
            self.router_list = []

    def get_router_name(self):
        """Return the owning router's name."""
        return self.router_name

    def get_router_list_path(self):
        """Return the path of the backing table file."""
        return self.path

    def get_router_list_count(self):
        """Return the number of routes in the table."""
        return len(self.router_list)

    def get_router_info(self):
        """Return the list of route dicts (the live list, not a copy)."""
        return self.router_list

    def add_router_info(self, data):
        """Append one route dict, rejecting duplicate destinations.

        :param data: dict with 'target', 'distance' and 'next' keys.
                     Non-dict values are silently ignored.
        """
        if not isinstance(data, dict):
            return
        if any(data['target'] == existing['target'] for existing in self.router_list):
            # Imported lazily so this module stays usable without a display.
            from tkinter import messagebox
            messagebox.showinfo('警告', '此目的网络已经存在!')
            return
        self.router_list.append(data)

    def save_router_list(self):
        """Write the table back to disk, one "target distance next" per line."""
        lines = [info['target'] + ' ' + str(info['distance']) + ' ' + info['next']
                 for info in self.router_list]
        with open(self.path, 'w+') as f:
            f.write('\n'.join(lines))

    def is_near_router(self, other):
        """Return True if *other* is a direct (distance 1) neighbour."""
        if isinstance(other, RouterList):
            for data in [info for info in self.router_list
                         if int(info['distance']) == 1]:
                if data['next'] == other.get_router_name():
                    return True
        return False

    def set_send_router_info(self):
        """Prepare the table for advertising to a neighbour (RIP rules).

        Each reachable route's distance is incremented by one and its next
        hop is rewritten to this router's name.  Routes whose distance
        already exceeds 15 (RIP "infinity") are left untouched.
        NOTE: mutates self.router_list in place and returns it.
        """
        for router_info in self.router_list:
            if int(router_info['distance']) > 15:
                continue
            router_info['distance'] = str(int(router_info['distance']) + 1)
            router_info['next'] = self.router_name
        return self.router_list
import os
import time
import copy
import tkinter
from core import Router
def get_all_router(root_path):
    """Build a Router object for every table file found under *root_path*.

    Each sub-directory of *root_path* is named "<ip> <port>" and contains
    one or more ``<name>.rl`` files; one Router is created per file.
    """
    routers = []
    for router_dir in os.listdir(root_path):
        for router_list in os.listdir(os.path.join(root_path, router_dir)):
            routers.append(Router.Router(
                name=os.path.basename(router_list).split('.')[0],
                ip_address=router_dir.replace(' ', ':')))
    return routers
def get_all_router_list(root_path):
    """Return the RouterList (routing table) of every router under *root_path*."""
    tables = []
    for router in get_all_router(root_path):
        tables.append(router.get_router_list())
    return tables
def get_all_near_router(root_path):
    """Map each router name to (deep copies of) its direct neighbours.

    :param root_path: root directory holding all router directories.
    :return: dict of router name -> list of neighbouring Router copies.
    """
    all_near_router = {}
    all_router = get_all_router(root_path)
    for index, router in enumerate(all_router):
        # Deep-copy so the neighbour entries are independent of the originals.
        others = copy.deepcopy(all_router)
        others.pop(index)  # a router is never its own neighbour
        all_near_router[router.get_router_name()] = [
            candidate for candidate in others
            if router.get_router_list().is_near_router(candidate.get_router_list())
        ]
    return all_near_router
def get_now_time(sign):
    """Return the current local time as a formatted string.

    sign == 0 -> "[YYYY-MM-DD HH:MM:SS]" (log-line prefix)
    sign == 1 -> "YYYY-MM-DD HH MM SS"   (file-name safe, no colons)
    Any other value returns None.
    """
    local = time.localtime(time.time())
    if sign == 0:
        return '[' + time.strftime('%Y-%m-%d %H:%M:%S', local) + ']'
    if sign == 1:
        return time.strftime('%Y-%m-%d %H %M %S', local)
def show_log(log_data, data):
    """Append *data* to the on-screen operation log and auto-scroll."""
    widget = log_data
    widget.config(state=tkinter.NORMAL)  # make the read-only widget writable
    widget.insert(tkinter.END, data)
    widget.see(tkinter.END)              # keep the newest line visible
def show_router_info(route_data, data):
    """Append *data* to the routing-table display pane and auto-scroll."""
    pane = route_data
    pane.config(state=tkinter.NORMAL)  # temporarily writable for the insert
    pane.insert(tkinter.END, data)
    pane.see(tkinter.END)              # scroll the newest entry into view
def create_log(log_root_path):
    """Create a new, empty log file named after the current time.

    When a file with that name already exists (two runs within the same
    second), a "COPY" suffix is appended to avoid overwriting it.
    :return: path of the created log file.
    """
    candidate = os.path.join(log_root_path, get_now_time(1) + '.log')
    if os.path.exists(candidate):
        candidate = os.path.join(log_root_path, get_now_time(1) + 'COPY' + '.log')
    # Touch the file so later writes can assume it exists.
    with open(candidate, 'w+'):
        pass
    return candidate
def save_log(**params):
    """Write the on-screen operation log to the log file.

    Expected keyword arguments:
        log_data - the Text widget holding the log contents.
        log      - path of the log file to write.
    """
    # Record the save action itself in the on-screen log first.
    show_log(params['log_data'], get_now_time(0) + '保存日志\n')
    with open(params['log'], 'w+') as handle:
        handle.write(params['log_data'].get(1.0, tkinter.END))
# Zirconium Logging (ZrLog)
This package adds logging support using the Zirconium configuration tool and TOML, with an extension for supporting
logging audit events.
## Defining Logging Parameters
Configuration for the logging module can be added in TOML under the `logging` key. The entries correspond to those
supported by `logging.config.dictConfig()` with a few additions to support audit logging. For example:
# .logging.toml (or your own app configuration file that you've registered)
[logging]
version = 1
[logging.root]
level = "INFO"
handlers = ["console"]
[logging.handlers.console]
class = "logging.StreamHandler"
formatter = "brief"
level = "WARNING"
stream = "ext://sys.stdout"
[logging.formatters.brief]
format = "%(message)s [%(levelname)s]"
Of note, if you want to change a specific logger (which often have dots in the name), you must quote the key:
[logging.loggers."module.foo.bar"]
level = "WARNING"
## Setting up logging
Logging can be initialized at the appropriate place in your code. This should be AFTER you have registered all your
configuration with Zirconium but before you want to do any logging.
import zrlog
zrlog.init_logging()
## Additional Logging Levels
This package adds three additional levels of logging:
- audit(), which is intended to be used only with the Python auditing system as described below. The logging level is
set to 1.
- trace(), which is intended to be even more detailed than debug(). The logging level is set to 5.
- out(), which is ranked between INFO and WARNING and is intended to be used to log user output for command-line
applications. The logging level is set to 25.
These are configured as methods on the `getLogger()` class as well as on `logging` itself for the root logger.
## Logging Audit Events
This package provides a system for turning `sys.audit()` events into log records using a thread-based queue. This is
necessary because audit events don't play nicely with the logging subsystem, leading to inconsistent errors if the
logger `log()` method is called directly from the audit hook. Audit logging must be enabled specifically by setting
the with_audit flag:
# .logging.toml
[logging]
with_audit = true
While the default level is "AUDIT", you can change this to any of the logging level prefixes by specifying the
audit_level:
# .logging.toml
[logging]
with_audit = true
audit_level = "INFO"
One specific event can cause further problems: sys._getframe() is called repeatedly from the logging subsystem in Python
(in 3.8 at least). These audit events are NOT logged by default, but logging of them can be enabled by turning off the
`omit_logging_frames` flag.
# .logging.toml
[logging]
with_audit = true
omit_logging_frames = false
Audit events are logged at the AUDIT level which is below TRACE; your logger and handler must be set to that level to
see these events:
[logging.root]
level = "AUDIT"
handlers = ["console"]
[logging.handlers.console]
class = "logging.StreamHandler"
formatter = "brief"
level = "AUDIT"
stream = "ext://sys.stdout"
| zrlog | /zrlog-0.1.1.tar.gz/zrlog-0.1.1/README.md | README.md |
# zrouter
基于Flask的路由工具库,集成登录验证、权限控制、日志记录、RESTful API快速构建等功能。
## 安装
```shell
pip install zrouter
```
## 基本使用
```python
# 定义路由器
router = Router('body', __name__, url_prefix='/body')
# 添加单一路由
@router.add('/article', methods=['GET'])
def get_article(article_id: int):
return ArticleMapper.get_json(article_id)
@ router.add('/article', methods=['POST'])
def post_article(article_id: int, data: dict):
return ArticleMapper.update(article_id, data)
# 添加REST资源
router.add_resource('/metric', MetricResource)
# 批量添加REST资源
router.add_resources({
'/metric': MetricResource,
'/sport': SportResource,
'/entry': EntryResource,
'/entry/stat': EntryStatResource,
'/punch': PunchResource,
'/punch/stat': PunchStatResource
})
```
## 自定义
通过继承实现用户验证方法、错误处理方法。
```python
from zrouter import Router as Router_
class Router(Router_):
def verify_user(self):
# 通过继承在此添加代码,实现用户验证、日志记录
def handle_error(self, e):
# 通过继承在此添加代码,实现错误处理
``` | zrouter | /zrouter-0.5.20.tar.gz/zrouter-0.5.20/README.md | README.md |
[](https://opensource.org/licenses/MIT)
[](https://pypi.python.org/pypi/zroya/)
[](https://pypi.python.org/pypi/zroya/)
[](https://gitHub.com/malja/zroya/graphs/commit-activity)
# zroya2
Zroya is a Python package for creating native Windows notifications.
In contrast to first version of zroya, zroya2 is a Python extension built around C++
[WinToast](https://github.com/mohabouje/WinToast) library.
**Note**: Zroya2 is in beta testing. I would be grateful for any bug reports.
## Prerequisites
There are no requirements at the moment.
## Installation
Zroya2 is now available from pypi:
```
python -m pip install zroya
```
## Example
```python
import zroya
# Initialize zroya module. Make sure to call this function.
# All parameters are required
zroya.init("YourAppName", "CompanyName", "ProductName", "SubProduct", "Version")
# Create notification template. TYPE_TEXT1 means one bold line without an image.
template = zroya.Template( zroya.TemplateType.Text1 )
# Set first line
template.setFirstLine("My First line")
# Save notification id for later use
notificationID = zroya.show(template)
# .. do something, maybe sleep?
# Hide notification
zroya.hide(notificationID)
```
## Documentation
You may find some limited documentation on [Zroya Page](https://malja.github.io/zroya)
| zroya | /zroya-0.2.4-cp36-cp36m-win32.whl/zroya-0.2.4.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
Zest Race Predictor
===================
[](https://zrp-docs.readthedocs.io/en/latest/?badge=latest)
[](https://badge.fury.io/py/zrp)
[](https://mybinder.org/v2/gh/zestai/zrp/HEAD)
[](https://pypi.org/project/zrp/)
Zest Race Predictor (ZRP) is an open-source machine learning algorithm
that estimates the race/ethnicity of an individual using only their full
name and home address as inputs. ZRP improves upon the most widely used
racial and ethnic data estimation method, Bayesian Improved Surname
Geocoding (BISG), developed by RAND Corporation in 2009.
ZRP was built using ML techniques such as gradient boosting and trained
on voter data from the southeastern U.S. It was then validated on a
national sample using adjusted tract-level American Community Survey
(ACS) data. (Model training procedures are provided.)
***Compared to BISG, ZRP correctly identified:***
* 25% more African-Americans as African-American
* 35% fewer African-Americans as non-African American
* 60% fewer Whites as non-White
ZRP can be used to analyze racial equity and outcomes in critical
spheres such as health care, financial services, criminal justice, or
anywhere there's a need to impute the race or ethnicity of a population
dataset. (Usage examples are included.) The financial services industry,
for example, has struggled for years to achieve more equitable outcomes
amid charges of discrimination in lending practices.
Zest AI began developing ZRP in 2020 to improve the accuracy of our
clients' fair lending analyses by using more data and better math. We
believe ZRP can greatly improve our understanding of the disparate
impact and disparate treatment of protected-status borrowers. Armed with
a better understanding of the disparities that exist in our financial
system, we can highlight inequities and create a roadmap to improve
equity in access to finance.
Notes
=====
This is the preliminary version and implementation of the ZRP tool.
We\'re dedicated to continue improving both the algorithm and
documentation and hope that government agencies, lenders, citizen data
scientists and other interested parties will help us improve the model.
Details of the model development process can be found in the [model
development documentation](./model_report.rst)
Install
=======
Install requires an internet connection. The package has been tested on python 3.7.4, but should likely work with 3.7.X.
Note: Due to the size and number of lookup tables necesary for the zrp
package, total installation requires 3 GB of available space.
### Setting up your virtual environment
We recommend installing zrp
inside a [python virtual
environment](https://docs.python.org/3/library/venv.html#creating-virtual-environments).
Run the following to build your virtual environment:
python3 -m venv /path/to/new/virtual/environment
Activate your virtual environment:
source /path/to/new/virtual/environment/bin/activate
Ex.:
python -m venv /Users/joejones/Documents/ZestAI/zrpvenv
source /Users/joejones/Documents/ZestAI/zrpvenv/bin/activate
### Unix-like systems
pip install zrp
After installing via pip, you need to download the lookup tables and
pipelines using the following command: :
python -m zrp download
### Windows
pip install pipwin
pipwin install gdal
pipwin install fiona
pip install zrp
After installing via pip, you need to download the lookup tables and
pipelines using the following command: :
python -m zrp download
If you're experiencing issues with installation, please consult our [Common Issues](https://github.com/zestai/zrp/blob/main/common_issues.rst#manually-installing-lookup-tables-and-pipeline-files) page.
Data
====
### Training Data
The models available in this package were trained on voter registration
data from the states of Florida , Georgia, and North Carolina. Summary
statistics on these datasets and additional datasets used as validation
can be found
[here](https://github.com/zestai/zrp/blob/main/dataset_statistics.txt) .
***Consult the following to download state voter registration data:***
* [North Carolina](https://www.ncsbe.gov/results-data/voter-registration-data)
* [Florida](https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/UBIG3F)
* [Alabama](https://www.alabamainteractive.org/sos/voter/voterWelcome.action)
* [South Carolina](https://www.scvotes.gov/sale-voter-registration-lists)
* [Georgia](https://sos.ga.gov/index.php/elections/order_voter_registration_lists_and_files)
* [Louisiana](https://www.sos.la.gov/ElectionsAndVoting/BecomeACandidate/PurchaseVoterLists/Pages/default.aspx)
### American Community Survey (ACS) Data:
The US Census Bureau details that, \"the American Community Survey (ACS)
is an ongoing survey that provides data every year \-- giving
communities the current information they need to plan investments and
services. The ACS covers a broad range of topics about social, economic,
demographic, and housing characteristics of the U.S. population. The
5-year estimates from the ACS are \"period\" estimates that represent
data collected over a period of time. The primary advantage of using
multiyear estimates is the increased statistical reliability of the data
for less populated areas and small population subgroups. The 5-year
estimates are available for all geographies down to the block group
level.\" ( Bureau, US Census. "American Community Survey 5-Year Data
(2009-2019)." Census.gov, 8 Dec. 2021,
<https://www.census.gov/data/developers/data-sets/acs-5year.html>. )
ACS data is available in 1 or 5 year spans. The 5yr ACS data is the most
comprehensive & is available at more granular levels than 1yr data. It
is thus used in this work.
## Model Development and Feature Documentation
Details of the model development process can be found in the [model
development documentation](./model_report.rst) . Details of the human
readable feature definitions as well as feature importances can be found
[here](https://github.com/zestai/zrp/tree/main/zrp/modeling#feature-definitions).
## Usage and Examples
To get started using the ZRP, first ensure the download is complete (as
described above) and that xgboost == 1.0.2 is installed.
Check out the guides in the
[examples](https://github.com/zestai/zrp/tree/main/examples) folder.
Clone the repo in order to obtain the example notebooks and data; this
is not provided in the pip installable package. If you\'re experiencing
issues, first consult our [common issues
guide](https://github.com/zestai/zrp/blob/main/common_issues.rst).
[Here](https://mybinder.org/v2/gh/zestai/zrp/HEAD), we additionally
provide an interactive virtual environment, via Binder, with ZRP
installed. Once you open this link and are taken to the JupyterLab
environment, open up a terminal and run the following: :
python -m zrp download
Next, we present the primary ways you\'ll use ZRP.
### ZRP Predictions
**Summary of commands:** :
>>> from zrp import ZRP
>>> zest_race_predictor = ZRP()
>>> zest_race_predictor.fit()
>>> zrp_output = zest_race_predictor.transform(input_dataframe)
**Breaking down key commands** :
>>> zest_race_predictor = ZRP()
- **ZRP(pipe\_path=None, support\_files\_path=\"data/processed\",
key=\"ZEST\_KEY\", first\_name=\"first\_name\",
middle\_name=\"middle\_name\", last\_name=\"last\_name\",
house\_number=\"house\_number\",
street\_address=\"street\_address\", city=\"city\", state=\"state\",
zip\_code=\"zip\_code\", race=\'race\', proxy=\"probs\",
census\_tract=None, street\_address\_2=None, name\_prefix=None,
name\_suffix=None, na\_values=None, file\_path=None, geocode=True,
bisg=True, readout=True, n\_jobs=49, year=\"2019\", span=\"5\",
runname=\"test\")**
- What it does:
- Prepares data to generate race & ethnicity proxies
You can find parameter descriptions in the [ZRP
class](https://github.com/zestai/zrp/blob/main/zrp/zrp.py) and it\'s
[parent
class](https://github.com/zestai/zrp/blob/main/zrp/prepare/base.py).
```
>>> zrp_output = zest_race_predictor.transform(input_dataframe)
```
- **zest\_race\_predictor.transform(df)**
- What it does:
- Processes input data and generates ZRP proxy predictions.
- Attempts to predict on block group, then census tract, then
zip code based on which level ACS data is found for. If Geo
level data is unattainable, the BISG proxy is computed. No
prediction returned if BISG cannot be computed either.
> -----------------------------------------------------------------------------
> Parameters
> ------------ ----------------------------------------------------------------
> df : {DataFrame} Pandas dataframe containing input data
> (see below for necessary columns)
>
> -----------------------------------------------------------------------------
Input data, **df**, into the prediction/modeling pipeline **MUST**
contain the following columns: first name, middle name, last name, house
number, street address (street name), city, state, zip code, and zest
key. Consult our [common issues
guide](https://github.com/zestai/zrp/blob/main/common_issues.rst) to
ensure your input data is the correct format.
- Output: A dataframe with the following columns: AAPI AIAN BLACK
HISPANIC WHITE source\_block\_group source\_zip\_code source\_bisg :
>>> zrp_output
=========== =========== =========== =========== =========== =========== ===================== ====================== ==================
AAPI AIAN BLACK HISPANIC WHITE source_block_group source_census_tract source_zip_code
=========== =========== =========== =========== =========== =========== ===================== ====================== ==================
ZEST_KEY
10 0.021916 0.021960 0.004889 0.012153 0.939082 1.0 0.0 0.0
100 0.009462 0.013033 0.003875 0.008469 0.965162 1.0 0.0 0.0
103 0.107332 0.000674 0.000584 0.021980 0.869429 1.0 0.0 0.0
106 0.177411 0.015208 0.003767 0.041668 0.761946 1.0 0.0 0.0
109 0.000541 0.000416 0.000376 0.000932 0.997736 1.0 0.0 0.0
... ... ... ... ... ... ... ... ...
556 NaN NaN NaN NaN NaN 0.0 0.0 0.0
557 NaN NaN NaN NaN NaN 0.0 0.0 0.0
=========== =========== =========== =========== =========== =========== ===================== ====================== ==================
One of the parameters to the [parent
class](https://github.com/zestai/zrp/blob/main/zrp/prepare/base.py) that
ZRP() inherits from is `file_path`. This parameter allows you to specify
where the `artifacts/` folder is outputted during the run of the ZRP.
Once the run is complete, the `artifacts/` folder will contain the
outputted race/ethnicity proxies and additional logs documenting the
validity of input data. `file_path` **need not** be specified. If it is
not defined, the `artifacts/` folder will be placed in the same
directory of the script running zrp. Subsequent runs will, however,
overwrite the files in `artifacts/`; providing a unique directory path
for `file_path` will avoid this.
ZRP Build
---------
**Summary of commands** :
>>> from zrp.modeling import ZRP_Build
>>> zest_race_predictor_builder = ZRP_Build('/path/to/desired/output/directory')
>>> zest_race_predictor_builder.fit()
>>> zrp_build_output = zest_race_predictor_builder.transform(input_training_data)
**Breaking down key commands** :
>>> zest_race_predictor_builder = ZRP_Build('/path/to/desired/output/directory')
- **ZRP\_Build(file\_path, zrp\_model\_name = \'zrp\_0\',
zrp\_model\_source =\'ct\')**
- What it does:
- Prepares the class that builds the new custom ZRP model.
> -----------------------------------------------------------------------------
> Parameters
> ------------ ----------------------------------------------------------------
> file_path : {str} The path where pipeline, model, and
> supporting data are saved.
>
> zrp_model_name : {str} Name of zrp_model.
>
> zrp_model_source : {str} Indicates the source of
> zrp_modeling data to use.
> -----------------------------------------------------------------------------
>
> You can find more detailed parameter descriptions in the [ZRP\_Build
> class](https://github.com/zestai/zrp/blob/main/zrp/modeling/pipeline_builder.py).
> ZRP\_Build() also inherits initlizing parameters from its [parent
> class](https://github.com/zestai/zrp/blob/main/zrp/prepare/base.py).
>>> zrp_build_output = zest_race_predictor_builder.transform(input_training_data)
- **zest\_race\_predictor\_builder.transform(df)**
- What it does:
- Builds a new custom ZRP model trained off of user input data
when supplied with standard ZRP requirements including name,
address, and race
- Produces a custom model-pipeline. The pipeline, model, and
supporting data are saved automatically to
\"\~/data/experiments/model\_source/data/\" in the support
files path defined.
- The class assumes data is not broken into train and test
sets, performs this split itself, and outputs predictions on
the test set.
> -----------------------------------------------------------------------------
> Parameters
> ------------ ----------------------------------------------------------------
> df : {DataFrame} Pandas dataframe containing input data
> (see below for necessary columns)
>
> -----------------------------------------------------------------------------
Input data, **df**, into this pipeline **MUST** contain the following
columns: first name, middle name, last name, house number, street
address (street name), city, state, zip code, zest key, and race.
Consult our [common issues
guide](https://github.com/zestai/zrp/blob/main/common_issues.rst) to
ensure your input data is the correct format.
- Output: A dictionary of race & ethnicity probabilities and labels.
As mentioned in the ZRP Predict section above, once the run is complete,
the `artifacts/` folder will contain the outputted race/ethnicity
proxies and additional logs documenting the validity of input data.
Similarly, defining `file_path` **need not** be specified, but providing
a unique directory path for `file_path` will avoid overwriting the
[artifacts/]{.title-ref} folder. When running ZRP Build, however,
`artifacts/` also contains the processed test and train data, trained
model, and pipeline.
### Additional Runs of Your Custom Model
After having run ZRP\_Build() you can re-use your custom model just like
you run the packaged model. All you must do is specify the path to the
generated model and pipelines (this path is the same path as
\'/path/to/desired/output/directory\' that you defined previously when
running ZRP\_Build() in the example above; we call this \'pipe\_path\').
Thus, you would run: :
>>> from zrp import ZRP
>>> zest_race_predictor = ZRP('pipe_path')
>>> zest_race_predictor.fit()
>>> zrp_output = zest_race_predictor.transform(input_dataframe)
Validation
==========
The models included in this package were trained on publicly-available
voter registration data and validated multiple times: on hold out sets
of voter registration data and on a national sample of PPP loan
forgiveness data. The results were consistent across tests: 20-30% more
African Americans correctly identified as African American, and 60%
fewer whites identified as people of color as compared with the status
quo BISG method.
To see our validation analysis with Alabama voter registration data,
please check out [this
notebook](https://github.com/zestai/zrp/blob/main/examples/analysis/Alabama_Case_Study.md).
Performance on the national PPP loan forgiveness dataset was as follows
(comparing ZRP softmax with the BISG method):
*African American*
| Statistic | BISG | ZRP | Pct. Diff |
|---------------------|-------|-------|-----------|
| True Positive Rate | 0.571 | 0.700 | +23% (F) |
| True Negative Rate | 0.954 | 0.961 | +01% (F) |
| False Positive Rate | 0.046 | 0.039 | -15% (F) |
| False Negative Rate | 0.429 | 0.300 | -30% (F) |
*Asian American and Pacific Islander*
| Statistic | BISG | ZRP | Pct. Diff |
|---------------------|-------|-------|-----------|
| True Positive Rate | 0.683 | 0.777 | +14% (F) |
| True Negative Rate | 0.982 | 0.977 | -01% (U) |
| False Positive Rate | 0.018 | 0.023 | -28% (F) |
| False Negative Rate | 0.317 | 0.223 | -30% (F) |
*Non-White Hispanic*
| Statistic | BISG | ZRP | Pct. Diff |
|---------------------|-------|-------|-----------|
| True Positive Rate | 0.599 | 0.711 | +19% (F) |
| True Negative Rate | 0.979 | 0.973 | -01% (U) |
| False Positive Rate | 0.021 | 0.027 | -29% (F) |
| False Negative Rate | 0.401 | 0.289 | -28% (F) |
*White, Non-Hispanic*
| Statistic | BISG | ZRP | Pct. Diff |
|---------------------|-------|-------|-----------|
| True Positive Rate | 0.758 | 0.906 | +19% (F) |
| True Negative Rate | 0.758 | 0.741 | -02% (U) |
| False Positive Rate | 0.242 | 0.259 | +07% (U) |
| False Negative Rate | 0.241 | 0.094 | -61% (F) |
Authors
=======
> - [Kasey
> Matthews](https://www.linkedin.com/in/kasey-matthews-datadriven/)
> (Zest AI Lead)
> - [Piotr Zak](https://www.linkedin.com/in/piotr-zak-datadriven/) (Algomine)
> - [Austin Li](https://www.linkedin.com/in/austinwli/) (Harvard T4SG)
> - [Christien
> Williams](https://www.linkedin.com/in/christienwilliams/) (Schmidt
> Futures)
> - [Sean Kamkar](https://www.linkedin.com/in/sean-kamkar/) (Zest AI)
> - [Jay Budzik](https://www.linkedin.com/in/jaybudzik/) (Zest AI)
Contributing
============
Contributions are encouraged! For small bug fixes and minor
improvements, feel free to just open a PR. For larger changes, please
open an issue first so that other contributors can discuss your plan,
avoid duplicated work, and ensure it aligns with the goals of the
project. Be sure to also follow the [Code of
Conduct](https://github.com/zestai/zrp/blob/main/CODE_OF_CONDUCT.md).
Thanks!
Maintainers
-----------
Maintainers should additionally consult our documentation on
[releasing](https://github.com/zestai/zrp/blob/main/releasing.rst).
Follow the steps there to push new releases to Pypi and Github releases.
With respect to Github releases, we provide new releases to ensure
relevant pipelines and look up tables requisite for package download and
use are consistently up to date.
Wishlist
========
Support for the following capabilities is planned:
- add multiracial classification output support
- national validation datasets and validation partners
- pointers to additional training data
- add support for gender and other protected bases
License
=======
The package is released under the [Apache-2.0
License](https://opensource.org/licenses/Apache-2.0).
Results and Feedback
====================
Generate interesting results with the tool and want to share it or other
interesting feedback? Get in touch via <[email protected]>.
| zrp | /zrp-0.3.2.tar.gz/zrp-0.3.2/README.md | README.md |
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
# Prefer subprocess for launching child interpreters; fall back to os.spawnl
# on ancient interpreters (historically Python 2.3) without it.
try:
    import subprocess
    def _python_cmd(*args):
        """Run the current interpreter with *args*; True iff exit code is 0."""
        args = (sys.executable,) + args
        return subprocess.call(args) == 0
except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        """Fallback runner using os.spawnl when subprocess is unavailable."""
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Distribute version fetched when none is installed, and where to fetch it.
DEFAULT_VERSION = "0.6.24"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version advertised by the fake setuptools metadata written to mask a real
# setuptools install (see _fake_setuptools / _create_fake_setuptools_pkg_info).
SETUPTOOLS_FAKED_VERSION = "0.6c11"
# Minimal PKG-INFO content for the fake setuptools egg-info.
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Extract the Distribute *tarball* into a temp dir and run its
    ``setup.py install``; the original working directory is restored on exit.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            # NOTE(review): failure is only logged; no error status is returned.
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
    """Extract *tarball* into a temp dir and run ``setup.py bdist_egg`` to
    produce *egg* inside *to_dir*; raises IOError if the egg never appears.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure a Distribute egg for this Python exists in *to_dir* (downloading
    and building it if missing), prepend it to sys.path and point setuptools'
    bootstrap hook at it.
    """
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure Distribute >= *version* is importable, downloading and building
    an egg into *to_dir* when necessary.

    When *no_fake* is false, a real setuptools install is masked with fake
    metadata before/after the download (see _fake_setuptools).
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # pkg_resources without the _distribute marker means plain
            # setuptools is installed, not Distribute.
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # Too late to swap versions in-process; tell the user to
                # upgrade manually and bail out.
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Python 3 / Python 2 urlopen compatibility.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
    """Decorator: temporarily neuter setuptools' DirectorySandbox violation
    check so *function* may modify files outside the sandboxed directory.
    The original check is restored (and the marker removed) afterwards.
    """
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    # Silently ignore sandbox violations while patched.
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                # Another (re-entrant) call already patched it; don't unpatch.
                patched = False
        except ImportError:
            patched = False
        try:
            return function(*args, **kw)
        finally:
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old
    return __no_sandbox
def _patch_file(path, content):
    """Will backup the file then patch it.

    Returns False when the file already holds *content* (nothing to do),
    True after renaming the original aside and writing *content*.
    """
    # Read/write through context managers so the handles are closed
    # deterministically (the original leaked the read handle until GC).
    with open(path) as f:
        existing_content = f.read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    _rename_path(path)
    with open(path, 'w') as f:
        f.write(content)
    return True
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
    """Move *path* aside by appending a timestamped '.OLD.<time>' suffix;
    returns the new name."""
    new_name = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s into %s', path, new_name)
    os.rename(path, new_name)
    return new_name
def _remove_flat_installation(placeholder):
    """Neutralise a flat (non-egg) setuptools install under *placeholder* by
    patching its egg-info with fake metadata and renaming its modules aside.
    Returns True on success, False/None when nothing was (or could be) done.
    """
    if not os.path.isdir(placeholder):
        log.warn('Unkown installation at %s', placeholder)
        return False
    found = False
    # Locate the setuptools egg-info entry (file or directory).
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return
    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, file)
    if os.path.isdir(pkg_info):
        # Directory-style egg-info.
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    """Post-install hook: write fake setuptools metadata into the purelib
    directory where *dist* was just installed."""
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
    """Write a fake 'setuptools-<ver>-py<pyver>.egg-info' file plus a
    'setuptools.pth' into *placeholder* so tooling believes setuptools is
    installed. No-op if *placeholder* is missing or already populated."""
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
        (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    # .pth entry pointing at the fake egg-info file.
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a minimal fake one whose
    EGG-INFO/PKG-INFO is SETUPTOOLS_PKG_INFO. Returns False when it was
    already patched, True otherwise."""
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    """Pre-install hook: mask any real setuptools before Distribute installs."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
top_dir = arg.split('root=')[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
    """If a real setuptools distribution is installed inside the install
    prefix, mask it with fake metadata so Distribute can take its place,
    then relaunch the current command."""
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
def _relaunch():
    """Re-execute the current command in a child interpreter and exit with
    its status (called after setuptools has been masked)."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    (Backport of TarFile.extractall for interpreters lacking it; *self* is
    the TarFile instance.)
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories so children are fixed up before parents.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    # NOTE(review): *argv* and *version* are accepted but ignored --
    # download_setuptools() is invoked with its defaults.
    tarball = download_setuptools()
    _install(tarball)
# Script entry point: bootstrap Distribute when executed directly.
if __name__ == '__main__':
    main(sys.argv[1:])
import numpy as np
class HiddenMarkov:
    """Hidden Markov model demo implementing the forward, backward and
    Viterbi algorithms (formula numbers refer to Li Hang, "Statistical
    Learning Methods", chapter 10 — presumably; confirmed only by the
    (10.xx) markers in the original comments).

    Common arguments:
        Q  : list of possible hidden states (1-based ids in the printouts)
        V  : list of possible observation symbols
        A  : state transition probability matrix, N x N
        B  : observation probability matrix, N x len(V)
        O  : observed sequence (symbols drawn from V)
        PI : initial state distribution, shaped [[p1, ..., pN]]
    """

    def forward(self, Q, V, A, B, O, PI):  # forward algorithm
        """Run the forward algorithm, printing every alpha value.

        Fix: the original computed P(O|lambda) and silently discarded it;
        it is now returned as a float.
        """
        N = len(Q)  # number of possible states
        M = len(O)  # length of the observation sequence
        alphas = np.zeros((N, M))  # alpha values
        T = M  # one time step per observation
        for t in range(T):  # fill alphas column by column
            indexOfO = V.index(O[t])  # column of B for this observation
            for i in range(N):
                if t == 0:  # initialisation, formula (10.15)
                    alphas[i][t] = PI[t][i] * B[i][indexOfO]
                    print(
                        'alpha1(%d)=p%db%db(o1)=%f' % (i, i, i, alphas[i][t]))
                else:  # recursion, formula (10.16)
                    alphas[i][t] = np.dot(
                        [alpha[t - 1] for alpha in alphas],
                        [a[i] for a in A]) * B[i][indexOfO]
                    print('alpha%d(%d)=[sigma alpha%d(i)ai%d]b%d(o%d)=%f' %
                          (t, i, t - 1, i, i, t, alphas[i][t]))
        # termination, formula (10.17)
        P = np.sum([alpha[M - 1] for alpha in alphas])
        return float(P)

    def backward(self, Q, V, A, B, O, PI):  # backward algorithm
        """Run the backward algorithm, printing every beta value.

        Fix: returns P(O|lambda) as a float (the original only printed it,
        and %-formatted a size-1 ndarray, which is deprecated in NumPy).
        """
        N = len(Q)  # number of possible states
        M = len(O)  # length of the observation sequence
        betas = np.ones((N, M))  # beta_T(i) = 1 for all i
        for i in range(N):
            print('beta%d(%d)=1' % (M, i))
        for t in range(M - 2, -1, -1):  # backward recursion
            indexOfO = V.index(O[t + 1])  # next observation's column of B
            for i in range(N):
                betas[i][t] = np.dot(
                    np.multiply(A[i], [b[indexOfO] for b in B]),
                    [beta[t + 1] for beta in betas])
                realT = t + 1
                realI = i + 1
                print(
                    'beta%d(%d)=[sigma a%djbj(o%d)]beta%d(j)=(' %
                    (realT, realI, realI, realT + 1, realT + 1),
                    end='')
                for j in range(N):
                    print(
                        "%.2f*%.2f*%.2f+" % (A[i][j], B[j][indexOfO],
                                             betas[j][t + 1]),
                        end='')
                print("0)=%.3f" % betas[i][t])
        indexOfO = V.index(O[0])
        # P(O|lambda) = sum_i pi_i * b_i(o_1) * beta_1(i); the dot product
        # yields a shape-(1,) array, so extract the scalar explicitly.
        P = float(np.dot(
            np.multiply(PI, [b[indexOfO] for b in B]),
            [beta[0] for beta in betas])[0])
        print("P(O|lambda)=", end="")
        for i in range(N):
            print(
                "%.1f*%.1f*%.5f+" % (PI[0][i], B[i][indexOfO], betas[i][0]),
                end="")
        print("0=%f" % P)
        return P

    def viterbi(self, Q, V, A, B, O, PI):
        """Run the Viterbi algorithm, printing the delta/psi tables.

        Fix: returns the most likely state sequence as a (1, M) array of
        1-based state ids (the original only printed it).
        """
        N = len(Q)  # number of possible states
        M = len(O)  # length of the observation sequence
        deltas = np.zeros((N, M))
        psis = np.zeros((N, M))
        I = np.zeros((1, M))  # backtracked best path (1-based ids)
        for t in range(M):
            realT = t + 1
            indexOfO = V.index(O[t])  # column of B for this observation
            for i in range(N):
                realI = i + 1
                if t == 0:  # initialisation
                    deltas[i][t] = PI[0][i] * B[i][indexOfO]
                    psis[i][t] = 0
                    print('delta1(%d)=pi%d * b%d(o1)=%.2f * %.2f=%.2f' %
                          (realI, realI, realI, PI[0][i], B[i][indexOfO],
                           deltas[i][t]))
                    print('psis1(%d)=0' % (realI))
                else:  # recursion: best predecessor times emission
                    deltas[i][t] = np.max(
                        np.multiply([delta[t - 1] for delta in deltas],
                                    [a[i] for a in A])) * B[i][indexOfO]
                    print(
                        'delta%d(%d)=max[delta%d(j)aj%d]b%d(o%d)=%.2f*%.2f=%.5f'
                        % (realT, realI, realT - 1, realI, realI, realT,
                           np.max(
                               np.multiply([delta[t - 1] for delta in deltas],
                                           [a[i] for a in A])), B[i][indexOfO],
                           deltas[i][t]))
                    # +1 converts argmax's 0-based index to a 1-based state id
                    psis[i][t] = np.argmax(
                        np.multiply(
                            [delta[t - 1] for delta in deltas],
                            [a[i]
                             for a in A])) + 1
                    print('psis%d(%d)=argmax[delta%d(j)aj%d]=%d' %
                          (realT, realI, realT - 1, realI, psis[i][t]))
        print(deltas)
        print(psis)
        # Backtrack: best final state, then follow the psi pointers.
        I[0][M - 1] = np.argmax([delta[M - 1] for delta in deltas]) + 1
        print('i%d=argmax[deltaT(i)]=%d' % (M, I[0][M - 1]))
        for t in range(M - 2, -1, -1):
            I[0][t] = psis[int(I[0][t + 1]) - 1][t + 1]
            print('i%d=psis%d(i%d)=%d' % (t + 1, t + 2, t + 2, I[0][t]))
        print("状态序列I:", I)
        return I
from math import exp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# data
def create_data():
    """Load the first 100 iris samples (classes 0 and 1 only) and return
    (features, labels): the first two feature columns and a 0.0/1.0 label
    array, both as numpy arrays."""
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    data = np.array(df.iloc[:100, [0,1,-1]])
    # print(data)
    return data[:,:2], data[:,-1]
# Module-level demo split: 70% train / 30% test (runs on import).
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# print(X)
class LogisticReressionClassifier:
    """Binary logistic regression trained by per-sample gradient ascent.

    (The class name keeps the original misspelling to stay call-compatible.)
    """

    def __init__(self, max_iter=200, learning_rate=0.01):
        # max_iter: number of full passes over the training set.
        # learning_rate: step size for each per-sample weight update.
        self.max_iter = max_iter
        self.learning_rate = learning_rate

    def sigmoid(self, x):
        """Numerically stable logistic function 1 / (1 + e^-x).

        Fix: the original `1 / (1 + exp(-x))` raised OverflowError for
        large-magnitude negative x; branching on the sign keeps exp()'s
        argument non-positive so it can never overflow.
        """
        x = float(x)  # accept plain floats and numpy size-1 results alike
        if x >= 0:
            return 1 / (1 + exp(-x))
        z = exp(x)
        return z / (1 + z)

    def data_matrix(self, X):
        """Prepend the intercept term: [x1, x2, ...] -> [1.0, x1, x2, ...]."""
        data_mat = []
        for d in X:
            data_mat.append([1.0, *d])
        return data_mat

    def fit(self, X, y):
        """Fit self.weights by stochastic gradient ascent on the
        log-likelihood of labels *y* (0/1) given samples *X*."""
        data_mat = self.data_matrix(X)  # m*n, intercept included
        self.weights = np.zeros((len(data_mat[0]), 1), dtype=np.float32)
        for iter_ in range(self.max_iter):
            for i in range(len(X)):
                result = self.sigmoid(np.dot(data_mat[i], self.weights))
                error = y[i] - result
                # Per-sample update: w += lr * (y - sigmoid(w.x)) * x
                self.weights += self.learning_rate * error * np.transpose(
                    [data_mat[i]])
        print('LogisticRegression Model(learning_rate={},max_iter={})'.format(
            self.learning_rate, self.max_iter))

    def score(self, X_test, y_test):
        """Return classification accuracy on (X_test, y_test); the decision
        boundary is w.x = 0."""
        right = 0
        X_test = self.data_matrix(X_test)
        for x, y in zip(X_test, y_test):
            result = np.dot(x, self.weights)
            if (result > 0 and y == 1) or (result < 0 and y == 0):
                right += 1
        return right / len(X_test)
# amazon_buddy
 [](https://pypi.python.org/pypi/amazon_buddy)
## Description
Amazon scraper.
## Install
~~~~bash
pip install amazon_buddy
# or
pip3 install amazon_buddy
~~~~
## Usage
~~~~python
import json
from amazon_buddy import AmazonBuddy, Category, SortType
products = AmazonBuddy.search_products('face wash', sort_type=SortType.PRICE_HIGH_TO_LOW, min_price=0, category=Category.BEAUTY_AND_PERSONAL_CARE, max_results=1000, debug=True)
print(products)
reviews = AmazonBuddy.get_reviews(asin='B0758GYJK2')
print(reviews)
~~~~
| zs-amazon-buddy | /zs_amazon_buddy-0.0.2.tar.gz/zs_amazon_buddy-0.0.2/README.md | README.md |
# System
import urllib.parse
from typing import Optional
# Local
from .enums.product_condition import ProductCondition
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------- class: Rh --------------------------------------------------------------- #
class RH:
    """Builds the URL-encoded Amazon 'rh' refinement parameter."""

    # ------------------------------------------------------------ public --- #

    @classmethod
    def create_rh(
        cls,
        # min_rating: Optional[float] = None,
        min_price: Optional[float] = None,
        max_price: Optional[float] = None,
        product_condition: Optional[ProductCondition] = None,
        include_unavailable: Optional[bool] = None
    ) -> Optional[str]:
        """Assemble the comma-joined, percent-encoded filter string, or
        return None when no refinement applies."""
        components = []

        price_part = cls.__create_price_rh(min_price, max_price)
        if price_part:
            components.append('p_36:' + price_part)

        condition_part = cls.__create_condition_rh(product_condition)
        if condition_part:
            components.append('p_n_condition-type:' + condition_part)

        availability_part = cls.__create_include_unavailable_rh(include_unavailable)
        if availability_part:
            components.append('p_n_availability:' + availability_part)

        if not components:
            return None

        return urllib.parse.quote(','.join(components))

    # ----------------------------------------------------------- private --- #

    @staticmethod
    def __create_price_rh(
        min_price: Optional[float] = None,
        max_price: Optional[float] = None
    ) -> Optional[str]:
        """Render '<min>-<max>' in cents; either bound may be omitted."""
        result = ''

        if min_price and min_price > 0:
            result = '{}-'.format(int(min_price * 100))

        # max_price only counts when positive and not below min_price.
        if max_price and max_price > 0 and (not min_price or min_price < max_price):
            if not result:
                result = '-'
            result += str(int(max_price * 100))

        return result or None

    @staticmethod
    def __create_condition_rh(product_condition: Optional[ProductCondition] = None) -> Optional[str]:
        """Map the condition enum to its numeric refinement id string."""
        return None if not product_condition else str(product_condition.value)

    @staticmethod
    def __create_include_unavailable_rh(include_unavailable: Optional[bool] = None) -> Optional[str]:
        """Refinement id that includes out-of-stock listings."""
        return '1248816011' if include_unavailable else None
# ---------------------------------------------------------------------------------------------------------------------------------------- # | zs-amazon-buddy | /zs_amazon_buddy-0.0.2.tar.gz/zs_amazon_buddy-0.0.2/zs_amazon_buddy/rh.py | rh.py |
# System
from typing import Optional, List, Dict, Callable, Union
import urllib.parse
import os, time
# Local
from .enums.category import Category
from .enums.sort_type import SortType
from .models.search_result_product import SearchResultProduct
from .models.product import Product
from .models.review import Review
from .models.review_image import ReviewImage
from .filter import ProductFilter, ReviewFilter
from .parser import Parser
from .request import Request
from .rh import RH
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------- class: AmazonBuddy ---------------------------------------------------------- #
class AmazonBuddy:
# -------------------------------------------------------- Public methods -------------------------------------------------------- #
    @classmethod
    def get_product_details(
        cls,
        #url
        asin: str,
        # request
        proxy: Optional[Union[str, List[str]]] = None,
        user_agent: Optional[str] = None,
        # other
        debug: bool = False
    ) -> Optional[Product]:
        """Fetch the product page for *asin* and parse it into a Product.
        Returns None on any request/parse failure (the exception is printed
        only when *debug* is True)."""
        try:
            return Parser.parse_product(
                Request(
                    user_agent,
                    keep_cookies=True,
                    proxy=proxy,
                    debug=debug
                ).get('https://www.amazon.com/dp/{}'.format(asin)),
                debug=debug
            )
        except Exception as e:
            # Best-effort: swallow all errors and signal failure with None.
            if debug:
                print(e)
            return None
    @classmethod
    def get_product_reviews_with_images(
        cls,
        # url
        asin: str,
        # request
        proxy: Optional[Union[str, List[str]]] = None,
        user_agent: Optional[str] = None,
        # other
        debug: bool = False
    ) -> Optional[List[ReviewImage]]:
        """Fetch Amazon's review image-gallery endpoint for *asin* and parse
        it into ReviewImage objects. Returns None on any request/parse
        failure (the exception is printed only when *debug* is True)."""
        try:
            return Parser.parse_reviews_with_images(
                Request(
                    user_agent,
                    keep_cookies=True,
                    proxy=proxy,
                    debug=debug
                ).get('https://www.amazon.com/gp/customer-reviews/aj/private/reviewsGallery/get-data-for-reviews-image-gallery-for-asin?asin={}'.format(asin)),
                debug=debug
            )
        except Exception as e:
            # Best-effort: swallow all errors and signal failure with None.
            if debug:
                print(e)
            return None
    @classmethod
    def get_related_searches(
        cls,
        # url
        search_term: str,
        category: Optional[Union[Category, str]] = Category.ALL_DEPARTMENTS,
        # request
        proxy: Optional[Union[str, List[str]]] = None,
        user_agent: Optional[str] = None,
        # other
        debug: bool = False
    ) -> Optional[List[str]]:
        """Return Amazon's related-search suggestions for *search_term*.
        *category* may be a Category member or its raw string id; None
        falls back to ALL_DEPARTMENTS."""
        category = category or Category.ALL_DEPARTMENTS
        # Accept either the enum or its underlying string value.
        if type(category) == type(Category.ALL_DEPARTMENTS):
            category = category.value
        return cls.__get_related_searches(
            'https://www.amazon.com/s?k={}&i={}'.format(urllib.parse.quote(search_term), category),
            Request(user_agent, keep_cookies=True, proxy=proxy, debug=debug)
        )
@classmethod
def get_trends(
cls,
search_combinations: List[str],
# url
category: Optional[Union[Category, str]] = Category.ALL_DEPARTMENTS,
locale: str = 'en_US',
# request
proxy: Optional[Union[str, List[str]]] = None,
user_agent: Optional[str] = None,
# other
return_dict: bool = False,
max_results_per_letter: int = 10,
debug: bool = False
) -> Union[List[str], Dict[str, List[str]]]:
suggestions = {}
request = Request(proxy, user_agent, keep_cookies=True, debug=debug)
for search_word in search_combinations:
suggestions[search_word] = cls.__get_suggestions(
category,
search_word,
locale,
max_results_per_letter,
request,
debug
)
if return_dict:
return suggestions
else:
suggestions_ = []
for v in suggestions.values():
suggestions_.extend(v)
return suggestions_
@classmethod
def search_products(
cls,
# url
search_term: str,
category: Optional[Union[Category, str]] = Category.ALL_DEPARTMENTS,
sort_type: Optional[SortType] = None,
# # rh
# product_condition: Optional[ProductCondition] = None,
# include_unavailable: Optional[bool] = None,
# filter
min_price: Optional[float] = None,
max_price: Optional[float] = None,
min_rating: Optional[float] = None,
min_reviews: Optional[int] = 3,
ignored_asins: List[str] = [],
ignored_title_strs: List[str] = [],
# request
proxy: Optional[Union[str, List[str]]] = None,
user_agent: Optional[str] = None,
# other
max_results: int = 100,
use_us_address: bool = True,
debug: bool = False
) -> Optional[List[SearchResultProduct]]:
category = category or Category.ALL_DEPARTMENTS
if type(category) == type(Category.ALL_DEPARTMENTS):
category = category.value
base_url = 'https://www.amazon.com/s?k={}&i={}'.format(urllib.parse.quote(search_term), category)
rh = RH.create_rh(min_price=min_price, max_price=max_price)
request = Request(user_agent, keep_cookies=True, proxy=proxy, debug=debug)
# cat_id, ratings = cls.__get_search_cat_and_ratings(search_term, request)
suggested_rh = cls.__get_suggested_rh(base_url, min_rating, request) if min_rating else None
if suggested_rh:
rh = suggested_rh + ('%2C' + rh) if rh else ''
# if category == Category.ALL_DEPARTMENTS:
# # if cat_id:
# # rh = 'n%3A' + cat_id + ('%2C' + rh if rh else '')
# pass
# else:
# base_url += 'i={}'.format(urllib.parse.quote(category.value))
if sort_type:
base_url += '&s={}'.format(sort_type.value)
if rh:
base_url += '&rh={}'.format(rh)
request = Request(user_agent, keep_cookies=True, debug=debug)
if use_us_address:
request.set_us_address()
return cls.__solve(base_url, 'page', request, ProductFilter(min_price, max_price, min_rating, min_reviews, ignored_asins, ignored_title_strs), Parser.parse_products, max_results, debug=debug)
@classmethod
def get_reviews(
cls,
# url
asin: str,
# filter
min_rating: float = 3.0,
# request
proxy: Optional[Union[str, List[str]]] = None,
user_agent: Optional[str] = None,
# other
max_results: int = 100,
debug: bool = False
) -> Optional[List[Review]]:
request = Request(user_agent, keep_cookies=True, proxy=proxy, debug=debug)
request.get('https://www.amazon.com/dp/{}'.format(asin))
base_url = 'https://www.amazon.com/product-reviews/{}?ie=UTF8&reviewerType=all_reviews&sortBy=helpful'.format(asin)
return cls.__solve(base_url, 'pageNumber', request, ReviewFilter(min_rating), Parser.parse_reviews, max_results, debug=debug)
# ------------------------------------------------------- Private methods -------------------------------------------------------- #
@staticmethod
def __solve(base_url: str, page_param_name: str, request: Request, filter, parse: Callable, max_results: int, debug: bool = False) -> List:
p = 0
l = []
max_try = 3
current_try = 1
max_p = 50
while len(l) < max_results and p <= max_p:
p += 1
url = base_url + '&{}={}'.format(page_param_name, p)
request.debug = False
new_elements = parse(request.get(url), debug=True)
filtered = filter.filter(new_elements)
l.extend(filtered)
if debug:
print('URL: {} - Found {}|{}|{} - Page {}'.format(url, len(new_elements), len(filtered), len(l), p), end='\n')
if len(new_elements) < 20 or (p > 25 and len(filtered) == 0):
if current_try >= max_try:
return l
current_try += 1
time.sleep(1)
continue
current_try = 1
print('Found {} - pages checked {}'.format(len(l), p))
return l if len(l) > 0 else None
@staticmethod
def __get_suggested_rh(
url: str,
min_rating: int,
request: Request,
max_try: int = 3
) -> Optional[str]:
current_try = 1
while current_try <= max_try:
rh = Parser.parse_suggested_rh(request.get(url), int(min_rating), debug=request.debug)
if rh:
return rh
time.sleep(1)
current_try += 1
return None
@staticmethod
def __get_related_searches(url: str, request: Request, max_try: int = 3) -> Optional[List[str]]:
current_try = 1
while current_try <= max_try:
rs = Parser.parse_related_searches(request.get(url), debug=request.debug)
if rs:
return rs
time.sleep(1)
current_try += 1
return None
@staticmethod
def __get_product_reviews_with_images(url: str, request: Request, max_try: int = 3) -> Optional[List[ReviewImage]]:
current_try = 1
while current_try <= max_try:
reviews = Parser.parse_reviews_with_images(request.get(url), debug=request.debug)
if reviews:
return reviews
time.sleep(1)
current_try += 1
return None
@staticmethod
def __get_suggestions(category: Category, search_word: str, locale: str, max_results: int, request: Request, debug: bool) -> List[str]:
import time
from kcu import request
url = 'https://completion.amazon.com/api/2017/suggestions?lop={}&site-variant=desktop&client-info=amazon-search-ui&mid=ATVPDKIKX0DER&alias={}&ks=65&prefix={}&event=onKeyPress&limit=11&fb=1&suggestion-type=KEYWORD&_={}'.format(locale, category.value, search_word, int(time.time()))
suggestions = []
try:
j = request.get(url, debug=debug).json()
for suggestion in j['suggestions']:
suggestion = suggestion['value']
if suggestion not in suggestions:
suggestions.append(suggestion)
if len(suggestions) >= max_results:
return suggestions
return suggestions
except Exception as e:
if debug:
print(e)
return suggestions
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# System
from typing import Optional, List
from requests import Response
# Pip
from bs4 import BeautifulSoup as bs
from kcu import request, kjson
# Local
from .models.search_result_product import SearchResultProduct
from .models.review import Review
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------------------- class: ProductFilter --------------------------------------------------------- #
class ProductFilter:
    """Stateful filter for search-result products.

    Keeps a running blocklist of seen ASINs so the same product is never
    returned twice across successive result pages of one search.
    """

    # ------------------------------------------------------------- Init ------------------------------------------------------------- #

    def __init__(
        self,
        min_price: Optional[float] = None,
        max_price: Optional[float] = None,
        min_rating: Optional[float] = None,
        min_reviews: Optional[int] = None,
        ignored_asins: Optional[List[str]] = None,
        ignored_title_strs: Optional[List[str]] = None
    ):
        """Thresholds default to 'accept everything'; the ASIN/title blocklists
        are stored as lower-cased copies, so the caller's lists are never mutated."""
        self.min_price = min_price or 0
        self.max_price = max_price or 9999999999999
        self.min_rating = min_rating or 0
        self.min_reviews = min_reviews or 0
        self.ignored_asins = [ia.lower() for ia in ignored_asins] if ignored_asins else []
        self.ignored_title_strs = [ts.lower() for ts in ignored_title_strs] if ignored_title_strs else []

    def filter(self, products: List[SearchResultProduct]) -> List[SearchResultProduct]:
        """Return the products that pass every threshold and were not seen before.

        Side effect: ASINs of accepted products -- and of products whose
        attribute access/comparison raised (e.g. ``price`` is None) -- are
        appended to ``self.ignored_asins``. Products rejected by a threshold
        hit ``continue`` below and are NOT blocklisted, so they may be
        re-evaluated on a later page.
        """
        filtered = []
        for p in products:
            try:
                if p.price < self.min_price:
                    # print(p.price, '<', self.min_price)
                    continue
                elif p.price > self.max_price:
                    # print(p.price, '>', self.min_price)
                    continue
                elif p.rating < self.min_rating:
                    # print(p.rating, '<', self.min_rating)
                    continue
                elif p.review_count < self.min_reviews:
                    # print(p.review_count, '<', self.min_reviews)
                    continue
                elif p.asin.lower() in self.ignored_asins:
                    # print('ignored asin', p.asin)
                    continue
                elif self.__contains_in(p.title, self.ignored_title_strs):
                    # print('ignored title str', p.title)
                    continue
                filtered.append(p)
            except:
                # NOTE(review): bare except deliberately swallows comparison errors
                # (e.g. unpriced products where p.price is None) -- such products
                # are silently dropped
                pass
            # reached for accepted products and for products that raised;
            # the `continue` statements above skip it
            self.ignored_asins.append(p.asin.lower())
        return filtered

    @staticmethod
    def __contains_in(s: str, strs: List[str]) -> bool:
        """True if any element of ``strs`` occurs in ``s`` (case-insensitive)."""
        s = s.lower()
        for ss in strs:
            if ss in s:
                return True
        return False
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------------------- class: ReviewFilter ---------------------------------------------------------- #
class ReviewFilter:
    """Stateful review filter: drops low-rated reviews and any review id
    that was already returned by a previous call."""

    # ------------------------------------------------------------- Init ------------------------------------------------------------- #

    def __init__(
        self,
        min_rating: Optional[int] = None
    ):
        self.min_rating = min_rating or 0
        # lower-cased ids of reviews already returned; dedupes across pages
        self.ignored_ids = []

    def filter(self, reviews: List[Review]) -> List[Review]:
        """Return the reviews that meet the rating threshold and were not seen before."""
        kept = []

        for review in reviews:
            review_id = review.id.lower()

            if review_id not in self.ignored_ids and review.rating >= self.min_rating:
                self.ignored_ids.append(review_id)
                kept.append(review)

        return kept
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# System
import html, json
from typing import Optional, List, Dict
from requests import Response
# Pip
from bs4 import BeautifulSoup as bs
from kcu import request, kjson, strings
from unidecode import unidecode
# Local
from .models.search_result_product import SearchResultProduct
from .models.product import Product
from .models.review import Review
from .models.review_image import ReviewImage
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------ class: Parser ------------------------------------------------------------- #
class Parser:
    """Static HTML/JSON parsers for the amazon.com pages fetched by AmazonBuddy."""

    # -------------------------------------------------------- Public methods -------------------------------------------------------- #

    @classmethod
    def parse_product(cls, response: Optional[Response], debug: bool = False) -> Optional[Product]:
        """Parse a product detail page ('/dp/<asin>') into a ``Product``.

        Returns None when the response is missing/non-2xx or the embedded
        product JSON cannot be located.
        """
        if not response or response.status_code not in [200, 201]:
            return None

        categories = []
        features = []
        # FIX: initialized up front so a failed table lookup below cannot leave
        # `details` unbound when it is passed to Product()
        details = {}

        soup = bs(response.content, 'lxml')
        # the image/video payload is embedded as an escaped JSON string passed to jQuery.parseJSON
        parsed_json = cls.__json_loads(strings.between(response.text, 'var obj = jQuery.parseJSON(\'', '\')'))

        if parsed_json is None:
            return None

        images = parsed_json
        title = parsed_json['title'].strip()
        asin = parsed_json['mediaAsin']
        raw_videos = parsed_json['videos']

        try:
            for feature in soup.find('div', {'class':'a-section a-spacing-medium a-spacing-top-small'}).find_all('span', {'class':'a-list-item'}):
                try:
                    features.append(feature.get_text().strip())
                except:
                    pass
        except Exception as e:
            if debug:
                print(e)

        try:
            for cat_a in soup.find('div', {'id':'wayfinding-breadcrumbs_container'}).find_all('a', class_='a-link-normal a-color-tertiary'):
                try:
                    categories.append(bs(cat_a.text, "lxml").text.replace('\\', '/').replace('<', ' ').replace('>', ' ').strip().lower())
                except:
                    pass
        except Exception as e:
            if debug:
                print(e)

        try:
            price_text = soup.find('span', {'id':'priceblock_ourprice'}).text.replace('$', '').strip()
            price = float(price_text)
        except:
            # price block missing (e.g. currently unavailable) -- leave unpriced
            price = None

        try:
            table_for_product_info = soup.find('table', {'id':'productDetails_detailBullets_sections1', 'class':'a-keyvalue prodDetTable'})

            if table_for_product_info is not None:
                for tr in table_for_product_info.find_all('tr'):
                    key = tr.find('th').get_text().strip()

                    # reviews/rank rows are volatile and parsed elsewhere
                    if key is not None and key not in ['Customer Reviews', 'Best Sellers Rank']:
                        value = tr.find('td').get_text().strip()
                        details[key] = value
        except:
            pass

        image_details = {}

        if 'colorToAsin' in images and images['colorToAsin'] is not None:
            colors = images['colorToAsin']

            for color_name, color_dict in colors.items():
                _asin = color_dict['asin']
                image_details[_asin] = {
                    'name' : color_name,
                    'image_urls' : []
                }

                for elem in images['colorImages'][color_name]:
                    if 'hiRes' in elem:
                        image_details[_asin]['image_urls'].append(elem['hiRes'])

        # FIX: normalized videos are accumulated in a fresh list -- the original
        # appended them back into the list it was iterating, producing a mixed
        # raw+normalized result (and a stray unconditional print)
        videos = []
        added_video_urls = []

        for elem in raw_videos:
            try:
                vid_url = elem['url']

                if vid_url in added_video_urls:
                    continue

                video = {'url':vid_url}
                video['title'] = elem['title'].strip()
                video['height'] = int(elem['videoHeight'] if 'videoHeight' in elem else elem['height'])
                video['width'] = int(elem['videoWidth'] if 'videoWidth' in elem else elem['width'])
                videos.append(video)
                added_video_urls.append(vid_url)
            except Exception as e:
                if debug:
                    print(e)

        if not image_details:
            # no per-color images -- fall back to the inline 'colorImages' JS literal
            try:
                images_json = cls.__json_loads(strings.between(response.text, '\'colorImages\': { \'initial\': ', '}]},') + '}]')

                if images_json is not None:
                    image_details[asin] = {
                        'name' : asin,
                        'image_urls' : []
                    }

                    for image_json in images_json:
                        try:
                            image_details[asin]['image_urls'].append(image_json['large'])
                        except Exception as e:
                            if debug:
                                print(e)
            except:
                pass

        # NOTE(review): collected but currently unused -- Product() does not accept them
        associated_asins = []

        try:
            associated_asins_json = cls.__json_loads(strings.between(response.text, 'dimensionToAsinMap :', '},').strip() + '}')

            if associated_asins_json is not None:
                for val in associated_asins_json.values():
                    associated_asins.append(val)
        except:
            pass

        return Product(title, asin, price, categories, features, details, image_details, videos)

    @classmethod
    def parse_reviews_with_images(cls, response: Optional[Response], debug: bool = False) -> Optional[List[ReviewImage]]:
        """Parse the reviews-image-gallery JSON endpoint into ``ReviewImage``
        objects, sorted most-helpful first. Returns None on failure."""
        # endpoint: 'https://www.amazon.com/gp/customer-reviews/aj/private/reviewsGallery/get-data-for-reviews-image-gallery-for-asin?asin='
        if not response or response.status_code not in [200, 201]:
            return None

        reviews_json = cls.__json_loads(response.text)

        if reviews_json is None:
            # FIX: __json_loads swallows parse errors and returns None, which the
            # original then dereferenced (`reviews_json['images']`) and crashed on
            return None

        reviews = {}

        for elem in reviews_json['images']:
            try:
                associated = elem['associatedReview']
                author = associated['author']['name']
                # strip markup; '\xa0' is the non-breaking space Amazon embeds
                clean_text = bs(associated['text'], "lxml").text.replace('\xa0', ' ')
                review_key = associated['reviewId']

                if review_key in reviews:
                    review = reviews[review_key]
                else:
                    review = {
                        'author': author,
                        'text': clean_text,
                        'rating': associated['overallRating'],
                        'image_urls': []
                    }

                    if 'scores' in associated and 'helpfulVotes' in associated['scores']:
                        review['upvotes'] = int(associated['scores']['helpfulVotes'])
                    else:
                        review['upvotes'] = 0

                review['image_urls'].append(elem['mediumImage'])
                reviews[review_key] = review
            except Exception as e:
                if debug:
                    print(e)

        return [ReviewImage(r['author'], r['text'], r['rating'], r['image_urls'], r['upvotes'])
                for r in sorted(reviews.values(), key=lambda k: k['upvotes'], reverse=True)]

    @classmethod
    def parse_products(cls, response: Optional[Response], debug: bool = False) -> List[SearchResultProduct]:
        """Parse a search results page into ``SearchResultProduct``s ([] on failure)."""
        if not response or response.status_code not in [200, 201]:
            return []

        products = []

        try:
            soup = bs(response.text, 'lxml')

            for div in [div for div in soup.find_all('div') if div.has_attr('data-asin') and len(div['data-asin']) > 0]:
                try:
                    asin = div['data-asin']
                    title = unidecode(html.unescape((div.find('span', class_='a-size-base-plus a-color-base a-text-normal') or div.find('span', class_='a-size-medium a-color-base a-text-normal')).text))

                    try:
                        price = float(div.find('span', class_='a-price').find('span', class_='a-price-whole').text.replace(',', '')) + float(div.find('span', class_='a-price').find('span', class_='a-price-fraction').text.replace(',', '')) / 100
                    except:
                        # no visible price (sponsored/unavailable) -- keep the product, unpriced
                        price = None

                    try:
                        spans = [span['aria-label'] for span in div.find_all('span') if span.has_attr('aria-label')]
                        rating = float(spans[0].split(' ')[0])
                        review_count = int(spans[1].replace(',', ''))
                    except:
                        rating = 0
                        review_count = 0

                    products.append(SearchResultProduct(asin, title, price, rating, review_count))
                except:
                    # tile without a title span etc. -- skip it
                    pass
        except Exception as e:
            if debug:
                print(e)

        return products

    @classmethod
    def parse_reviews(cls, response: Optional[Response], debug: bool = False) -> List[Review]:
        """Parse a product-reviews page into ``Review`` objects (None on bad response)."""
        if not response or response.status_code not in [200, 201]:
            return None

        reviews = []

        try:
            soup = bs(response.text, 'lxml')

            for div in soup.find_all('div', {'data-hook':'review'}):
                try:
                    id = div['id']
                    name = unidecode(html.unescape(div.find('span', class_='a-profile-name').text.strip()))
                    rating = int(div.find('i', class_='a-icon-star').find('span').text.split('.')[0].strip())
                    title = unidecode(html.unescape(div.find(None, {'data-hook':'review-title'}).find('span').text.strip()))
                    text = unidecode(html.unescape(div.find('span', {'data-hook':'review-body'}).find('span').text.strip()))

                    try:
                        # FIX: the closing paren was misplaced (`{...}.text`), which
                        # always raised and forced helpful_score to 0
                        helpful_score = int(div.find('span', {'data-hook':'review-vote-statement'}).text.split(' ')[0])
                    except:
                        # 'One person found this helpful' or no votes at all
                        helpful_score = 0

                    reviews.append(Review(id, name, rating, helpful_score, title, text))
                except Exception as e:
                    if debug:
                        print(e)
        except Exception as e:
            if debug:
                print(e)

        return reviews

    @classmethod
    def parse_suggested_rh(cls, response: Optional[Response], min_stars: int, debug: bool = False) -> Optional[str]:
        """Return the 'rh' refinement value for the ``min_stars`` rating facet
        found in the results-page sidebar, or None."""
        if not response or response.status_code not in [200, 201]:
            return None

        try:
            soup = bs(response.content, 'lxml')

            for a in soup.find_all('a', class_='a-link-normal s-navigation-item'):
                try:
                    if not a.find('i', class_='a-icon a-icon-star-medium a-star-medium-{}'.format(min_stars)):
                        continue

                    href = a['href']

                    if 'rh=n%3A' in href:
                        return 'n' + href.split('rh=n')[1].split('&')[0]
                except Exception as e:
                    if debug:
                        print(e)
        except Exception as e:
            if debug:
                print(e)

        return None

    @classmethod
    def parse_related_searches(cls, response: Optional[Response], debug: bool = False) -> Optional[List[str]]:
        """Return the 'Related searches' labels from a results page (None on bad response)."""
        if not response or response.status_code not in [200, 201]:
            return None

        searches = []

        try:
            soup = bs(response.content, 'lxml')

            for a in soup.find_all('a', class_='a-link-normal s-no-outline'):
                try:
                    img = a.find('img')

                    # related searches are the image links pointing back at '/s'
                    if not img or not a['href'].startswith('/s'):
                        continue

                    searches.append(img['alt'].replace(', End of \'Related searches\' list', ''))
                except Exception as e:
                    if debug:
                        print(e)
        except Exception as e:
            if debug:
                print(e)

        return searches

    # -------------------------------------------------------- Private methods -------------------------------------------------------- #

    @staticmethod
    def __json_loads(s: str) -> Optional[Dict]:
        """json.loads with a second attempt that unescapes \\' sequences
        (Amazon's inline JS strings); returns None on failure."""
        try:
            return json.loads(s)
        except:
            try:
                return json.loads(s.replace('\\\'', '\''))
            except Exception as e:
                print(e)

                return None
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# System
from typing import Optional, List, Union
from requests import Response
import random
# Pip
from kcu.request import request, RequestMethod
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------ class: Request ------------------------------------------------------------ #
class Request:
    """Thin wrapper around ``kcu.request`` that pins amazon.com browser headers,
    optionally keeps cookies between calls and routes traffic through a proxy."""

    # ------------------------------------------------------------- Init ------------------------------------------------------------- #

    def __init__(
        self,
        user_agent: Optional[str] = None,
        proxy: Optional[Union[str, List[str]]] = None,
        keep_cookies: bool = True,
        debug: bool = False
    ):
        """When ``proxy`` is a list, one entry is picked at random and used
        for the whole lifetime of this Request."""
        self.user_agent = user_agent
        self.cookies = None
        self.keep_cookies = keep_cookies
        self.debug = debug

        # idiom fix: isinstance instead of `type(proxy) == list`
        if isinstance(proxy, list):
            proxy = random.choice(proxy) if proxy else None

        self.proxy = proxy

    def set_us_address(self):
        """Set a US delivery address (ZIP 90001) via Amazon's 'glow' endpoint so
        subsequent pages show US prices/availability. Forces cookie keeping,
        since the address lives in the session cookies."""
        self.keep_cookies = True
        # initial GET collects the session cookies the POST below needs
        self.get('https://www.amazon.com')
        self.__request('https://www.amazon.com/gp/delivery/ajax/address-change.html', RequestMethod.POST, body={
            'locationType':'LOCATION_INPUT',
            'zipCode':'90001',
            'storeContext':'generic',
            'deviceType':'web',
            'pageType':'Gateway',
            'actionSource':'glow',
            'almBrandId':'undefined'
        })

    def get(self, url: str):
        """GET ``url`` with the shared headers/cookies; returns the Response or None."""
        return self.__request(url, RequestMethod.GET)

    # ------------------------------------------------------- Private methods -------------------------------------------------------- #

    def __request(self, url: str, method: RequestMethod, body: Optional[dict] = None) -> Optional[Response]:
        """Perform the request with browser-like headers, then (optionally)
        remember the returned cookies for the next call."""
        headers = {
            'Host': 'www.amazon.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'DNT': '1',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'Upgrade-Insecure-Requests': '1',
            'TE': 'Trailers'
        }

        if self.cookies:
            headers['Cookie'] = self.cookies

        res = request(url, method, headers=headers, user_agent=self.user_agent, data=body, debug=self.debug, max_request_try_count=2, sleep_time=0.5, proxy_ftp=self.proxy, proxy_http=self.proxy, proxy_https=self.proxy)

        if self.keep_cookies and res and res.cookies:
            cookie_strs = []

            for k, v in res.cookies.get_dict().items():
                cookie_strs.append(k+'='+v)

            self.cookies = '; '.join(cookie_strs)

        return res
# ------------------------------------------------------ Public properties ------------------------------------------------------- #
# -------------------------------------------------------- Public methods -------------------------------------------------------- #
# ------------------------------------------------------ Private properties ------------------------------------------------------ #
# ------------------------------------------------------- Private methods -------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# System
from typing import Optional, List, Dict, Union
# Pip
from jsoncodable import JSONCodable
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------ class: Product ------------------------------------------------------------ #
class Product(JSONCodable):
    """A parsed amazon.com product page: identity, pricing, categorization,
    per-variation image sets and attached videos."""

    # ------------------------------------------------------------- Init ------------------------------------------------------------- #

    def __init__(
        self,
        title: str,
        asin: str,
        price: float,
        categories: List[str],
        features: List[str],
        details: Dict[str, str],
        images: Dict[str, Dict[str, Union[str, List[str]]]],
        videos_details: List[Dict[str, Union[str, int]]]
    ):
        self.title = title
        self.asin = asin
        self.price = price
        self.categories = categories or []
        self.features = features or []
        self.details = details

        # derived collections, populated below
        self.videos = []
        self.video_urls = []
        self.images = {}
        self.associated_asins = []
        self.asins = [asin]
        self.image_urls = []

        for variation_asin, variation in (images or {}).items():
            if variation_asin != asin:
                self.associated_asins.append(variation_asin)

            if variation_asin not in self.asins:
                self.asins.append(variation_asin)

            if variation is not None and 'image_urls' in variation:
                urls = variation['image_urls']
                self.images[variation_asin] = ProductImageSet(variation_asin, variation['name'], urls)

                # flat, de-duplicated list across all variations
                for url in urls:
                    if url not in self.image_urls:
                        self.image_urls.append(url)

        for video_dict in (videos_details or []):
            # only fully-described entries become ProductVideo objects
            if 'title' in video_dict and 'height' in video_dict and 'width' in video_dict and 'url' in video_dict:
                self.videos.append(ProductVideo(video_dict['url'], video_dict['title'], video_dict['height'], video_dict['width']))
                self.video_urls.append(video_dict['url'])
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------- class: ProductImageSet -------------------------------------------------------- #
class ProductImageSet(JSONCodable):
    """The image gallery of one product variation (color/style), keyed by its ASIN."""

    # ------------------------------------------------------------- Init ------------------------------------------------------------- #

    def __init__(self, asin: str, name: str, urls: List[str]):
        """Store the variation's ASIN, its display name and its image URLs."""
        self.asin = asin
        self.name = name
        self.urls = urls
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------- class: ProductVideo --------------------------------------------------------- #
class ProductVideo(JSONCodable):
    """A single video attached to a product page, with its pixel dimensions."""

    # ------------------------------------------------------------- Init ------------------------------------------------------------- #

    def __init__(self, url: str, title: str, height: int, width: int):
        """Store the video's location, title and height/width in pixels."""
        self.url = url
        self.title = title
        self.height = height
        self.width = width
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# System
from enum import Enum
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------- class: Category ------------------------------------------------------------ #
class Category(Enum):
    """Amazon storefront departments, keyed to the search-alias string used in
    the 'i=' query parameter of amazon.com search URLs (and the 'alias='
    parameter of the autocomplete API)."""

    ALL_DEPARTMENTS = 'aps'
    AUDIBLE_BOOKS_AND_ORIGINALS = 'audible'
    ALEXA_SKILLS = 'alexa-skills'
    AMAZON_DEVICES = 'amazon-devices'
    AMAZON_FRESH = 'amazonfresh'
    AMAZON_WAREHOUSE = 'warehouse-deals'
    APPLIANCES = 'appliances'
    APPS_AND_GAMES = 'mobile-apps'
    ARTS_CRAFTS_AND_SEWING = 'arts-crafts'
    AUTOMOTIVE_PARTS_AND_ACCESSORIES = 'automotive'
    BABY = 'baby-products'
    BEAUTY_AND_PERSONAL_CARE = 'beauty'
    BOOKS = 'stripbooks'
    CDS_AND_VINYL = 'popular'
    CELL_PHONES_AND_ACCESSORIES = 'mobile'
    # clothing has one umbrella alias plus per-audience sub-aliases
    CLOTHING_SHOES_AND_JEWELRY = 'fashion'
    CLOTHING_WOMEN = 'fashion-womens'
    CLOTHING_MEN = 'fashion-mens'
    CLOTHING_GIRLS = 'fashion-girls'
    CLOTHING_BOYS = 'fashion-boys'
    CLOTHING_BABY = 'fashion-baby'
    UNDER_10_DOLLARS = 'under-ten-dollars'
    AMAZON_PANTRY = 'pantry'
    COLLECTIBLES_AND_FINE_ART = 'collectibles'
    COMPUTERS = 'computers'
    COURSES = 'courses'
    CREDIT_AND_PAYMENT_CARDS = 'financial'
    DIGITAL_EDUCATIONAL_RESOURCES = 'edu-alt-content'
    DIGITAL_MUSIC = 'digital-music'
    ELECTRONICS = 'electronics'
    GARDEN_AND_OUTDOOR = 'lawngarden'
    GIFT_CARDS = 'gift-cards'
    GROCERY_AND_GOURMET_FOOD = 'grocery'
    HANDMADE = 'handmade'
    HEALTH_HOUSEHOLD_AND_BABY_CARE = 'hpc'
    HOME_AND_BUSINESS_SERVICES = 'local-services'
    HOME_AND_KITCHEN = 'garden'
    INDUSTRIAL_AND_SCIENTIFIC = 'industrial'
    JUST_FOR_PRIME = 'prime-exclusive'
    KINDLE_STORE = 'digital-text'
    LUGGAGE_AND_TRAVEL_GEAR = 'fashion-luggage'
    MAGAZINE_SUBSCRIPTIONS = 'magazines'
    MOVIES_AND_TV = 'movies-tv'
    MUSICAL_INSTRUMENTS = 'mi'
    OFFICE_PRODUCTS = 'office-products'
    PET_SUPPLIES = 'pets'
    PREMIUM_BEAUTY = 'luxury-beauty'
    PRIME_VIDEO = 'instant-video'
    SMART_HOME = 'smart-home'
    SOFTWARE = 'software'
    SPORTS_AND_OUTDOORS = 'sporting'
    SUBSCRIPTION_BOXES = 'subscribe-with-amazon'
    TOOLS_AND_HOME_IMPROVEMENT = 'tools'
    TOYS_AND_GAMES = 'toys-and-games'
    VEHICLES = 'vehicles'
    VIDEO_GAMES = 'videogames'
    WHOLE_FOODS_MARKET = 'wholefoods'
# ---------------------------------------------------------------------------------------------------------------------------------------- #
from typing import List, Dict, Optional, Tuple

from kcu.request import request

from .helpers.parser import Parser
from .utils.url_creator import AmazonURLCreator as URLCreator
class Amazon:
    """High-level scraper facade: fetches amazon.com pages via ``kcu.request``
    and delegates all HTML/JSON parsing to ``Parser``. Every method is
    best-effort: it prints the error and returns None on failure."""

    def __init__(self):
        # HTML parsing helpers shared by all scraping calls
        self.parser = Parser()

    def get_product_ids_and_next_page(
        self,
        url: str,
        user_agent: Optional[str] = None,
        random_ua: bool = True
    ) -> Tuple[Optional[List[str]], Optional[str]]:
        """Scrape one list-style search page.

        Returns ``(asins, next_page_url)``; ``(None, None)`` on any error.
        (FIX: the annotation was a plain tuple literal, not ``Tuple[...]``.)
        """
        try:
            response = request(url, user_agent=user_agent, fake_useragent=random_ua)

            return (self.parser.parse_products_page(response), self.parser.next_products_page(response))
        except Exception as e:
            print('get_product_ids_and_next_page', e)

            return None, None

    def get_asins_from_grid_based_page(
        self,
        url: str,
        user_agent: Optional[str] = None,
        random_ua: bool = True
    ) -> Optional[List[str]]:
        """Scrape the ASINs from a grid-style listing page (None on error)."""
        try:
            return self.parser.parse_products_page_grid_style(request(url, user_agent=user_agent, fake_useragent=random_ua))
        except Exception as e:
            print('get_asins_from_grid_based_page', e)

            return None

    def get_product_details(
        self,
        asin: str,
        user_agent: Optional[str] = None,
        random_ua: bool = True
    ) -> Optional[Dict]:
        """Fetch and parse the product page for ``asin`` (None on error)."""
        try:
            return self.parser.parse_product(
                request(URLCreator.product_url(asin), user_agent=user_agent, fake_useragent=random_ua)
            )
        except Exception as e:
            print('get_product_details', e)

            return None

    def get_product_reviews_with_images(
        self,
        asin: str,
        user_agent: Optional[str] = None,
        random_ua: bool = True
    ) -> Optional[Dict]:
        """Fetch and parse the review-image gallery for ``asin`` (None on error)."""
        try:
            return self.parser.parse_reviews_with_images(
                request(URLCreator.product_reviews_with_images_url(asin), user_agent=user_agent, fake_useragent=random_ua)
            )
        except Exception as e:
            print('get_product_reviews_with_images', e)

            return None

    def get_product_reviews(
        self,
        asin: str,
        star_rating: str = 'five_star',
        user_agent: Optional[str] = None,
        random_ua: bool = True
    ) -> Optional[List[Dict]]:
        """Fetch and parse the first page of ``star_rating`` reviews (None on error)."""
        try:
            return self.parser.parse_reviews(
                request(URLCreator.product_reviews_url(asin, star_rating=star_rating, page_num=1), user_agent=user_agent, fake_useragent=random_ua)
            )
        except Exception as e:
            print('get_product_reviews', e)

            return None
from typing import Optional, Dict, List
import json
from bs4 import BeautifulSoup
from kcu import strings
class Parser():
    """Parsers that extract structured data from Amazon HTML pages and embedded JSON.

    Every ``response`` argument is expected to be a ``requests``-style response
    exposing ``.content`` (bytes) and ``.text`` (str).
    """

    def parse_product(self, response) -> Optional[Dict]:
        """Parse a product detail page.

        Returns a dict with title, price, categories, features, product
        information, images, video URLs and associated ASINs, or ``None`` when
        the embedded product JSON cannot be located/decoded.
        """
        categories = []
        features = []
        video_urls = []
        soup = BeautifulSoup(response.content, 'lxml')
        # Product metadata is embedded in the page inside a jQuery.parseJSON('...')
        # call; extract and decode that JSON blob.
        parsed_json = self.__json_loads(strings.between(response.text, 'var obj = jQuery.parseJSON(\'', '\')'))
        if parsed_json is None:
            return None
        title = parsed_json['title']
        asin = parsed_json['mediaAsin']
        # NOTE(review): the whole JSON blob doubles as the image container —
        # 'colorToAsin'/'colorImages' are read from it below; presumably those
        # keys live at the top level of the embedded JSON. TODO confirm.
        images = parsed_json
        videos = parsed_json['videos']
        features = []
        try:
            for feature in soup.find('div', {'class':'a-section a-spacing-medium a-spacing-top-small'}).find_all('span', {'class':'a-list-item'}):
                try:
                    features.append(feature.get_text().strip())
                except:
                    pass
        except:
            pass
        try:
            # Breadcrumb categories, lowercased with '\', '<', '>' sanitized.
            categories_container = soup.find('div', {'id':'wayfinding-breadcrumbs_container'})
            for category_a in categories_container.find_all('a', {'class':'a-link-normal a-color-tertiary'}):
                try:
                    categories.append(BeautifulSoup(category_a.text, "lxml").text.replace('\\', '/').replace('<', ' ').replace('>', ' ').strip().lower())
                except:
                    pass
        except:
            pass
        # print('categories', categories)
        try:
            # NOTE(review): assumes a '$'-denominated price; other currency
            # symbols make float() fail and leave price as None. TODO confirm.
            price_text = soup.find('span', {'id':'priceblock_ourprice'}).text.replace('$', '').strip()
            price = float(price_text)
        except:
            price = None
        table_for_product_info = soup.find('table', {'id':'productDetails_detailBullets_sections1', 'class':'a-keyvalue prodDetTable'})
        product_information_dict = {}
        if table_for_product_info is not None:
            # Key/value rows of the product-details table, minus the noisy entries.
            for tr in table_for_product_info.find_all('tr'):
                key = tr.find('th').get_text().strip()
                if key is not None and key not in ['Customer Reviews', 'Best Sellers Rank']:
                    value = tr.find('td').get_text().strip()
                    product_information_dict[key] = value
        image_details = {}
        # Per-variation (color) images, keyed by the variation's ASIN.
        if 'colorToAsin' in images and images['colorToAsin'] is not None:
            colors = images['colorToAsin']
            for color_name, color_dict in colors.items():
                _asin = color_dict['asin']
                image_details[_asin] = {
                    'name' : color_name,
                    'image_urls' : []
                }
                images_by_color = images['colorImages'][color_name]
                for elem in images_by_color:
                    if 'hiRes' in elem:
                        image_details[_asin]['image_urls'].append(elem['hiRes'])
        for url in videos:
            if 'url' in url:
                video_urls.append(url['url'])
        # Fallback: no per-color images — try the raw 'colorImages' initial array.
        if image_details is None or image_details == {}:
            try:
                images_json = self.__json_loads(strings.between(response.text, '\'colorImages\': { \'initial\': ', '}]},') + '}]')
                if images_json is not None:
                    image_details[asin] = {
                        'name' : asin,
                        'image_urls' : []
                    }
                    for image_json in images_json:
                        try:
                            image_details[asin]['image_urls'].append(image_json['large'])
                        except Exception as e:
                            print(e)
                            pass
            except:
                pass
        associated_asins = []
        try:
            # Variation ASINs embedded as 'dimensionToAsinMap : {...}'.
            associated_asins_json = self.__json_loads(strings.between(response.text, 'dimensionToAsinMap :', '},').strip() + '}')
            if associated_asins_json is not None:
                for val in associated_asins_json.values():
                    associated_asins.append(val)
        except:
            pass
        return {
            'title': title,
            'price': price,
            'categories': categories,
            'features': features,
            'product information': product_information_dict,
            'images': image_details,
            'videos_url': video_urls,
            'associated_asins': associated_asins
        }

    def parse_reviews_with_images(self, response) -> Optional[List[Dict]]:
        """Parse the JSON payload of the reviews-image-gallery endpoint.

        Returns one dict per review (author, text, rating, upvotes, image_urls),
        sorted by upvotes descending, or ``None`` if the payload is not JSON.
        """
        # 'https://www.amazon.com/gp/customer-reviews/aj/private/reviewsGallery/get-data-for-reviews-image-gallery-for-asin?asin='
        try:
            reviews_json = self.__json_loads(response.text)
        except Exception as e:
            print(e)
            return None
        reviews = {}
        details = reviews_json['images']
        for elem in details:
            try:
                author = elem['associatedReview']['author']['name']
                text = elem['associatedReview']['text']
                clean_text = BeautifulSoup(text, "lxml").text.replace(' ', ' ')
                # Reviews are deduplicated by author; image URLs accumulate per review.
                review_key = author
                if review_key in reviews:
                    review = reviews[review_key]
                else:
                    review = {
                        'author':author,
                        'text': clean_text,
                        'rating':elem['associatedReview']['overallRating'],
                        'image_urls':[]
                    }
                    if 'scores' in elem['associatedReview'] and 'helpfulVotes' in elem['associatedReview']['scores']:
                        review['upvotes'] = int(elem['associatedReview']['scores']['helpfulVotes'])
                    else:
                        review['upvotes'] = 0
                img_url = elem['mediumImage']
                review['image_urls'].append(img_url)
                reviews[review_key] = review
            except:
                pass
        return sorted(list(reviews.values()), key=lambda k: k['upvotes'], reverse=True)

    def parse_reviews(self, response) -> Optional[List[Dict]]:
        """Parse review bodies from a reviews page into [{'text': ..., 'rating': 5}, ...].

        NOTE(review): the rating is hard-coded to 5 regardless of the star
        filter the page was requested with — TODO confirm this is intended.
        """
        soup = BeautifulSoup(response.content, 'lxml')
        reviews = []
        for elem in soup.find_all('span', {'data-hook':'review-body'}):
            try:
                reviews.append({
                    'text':elem.find('span').get_text().strip(),
                    'rating':5
                })
            except:
                pass
        return reviews

    def parse_products_page_grid_style(self, response) -> List[str]:
        """Extract ASINs from a grid-style (best-sellers) listing page."""
        soup = BeautifulSoup(response.content, 'lxml')
        asin_ids = []
        products_container = soup.find('ol', {'id':'zg-ordered-list'})
        for li in products_container.find_all('li'):
            try:
                # The ASIN is the path segment right after 'dp/' in the product link.
                product_url = li.find('a', href=True)
                asin = product_url['href'].split('dp/')[1].split('/')[0]
                asin_ids.append(asin)
            except Exception as e:
                print('parse_products_page_grid_style', e)
        return asin_ids

    def parse_products_page(self, response) -> List[str]:
        """Extract ASINs from a listing page via 'data-a-popover' span attributes."""
        asin_ids = []
        soup = BeautifulSoup(response.content, 'lxml')
        results = soup.find_all('span', {'class':'a-declarative'})
        for elem in results:
            try:
                asin_id = strings.between(elem['data-a-popover'], 'asin=', '&')
                if asin_id is not None:
                    asin_ids.append(asin_id)
            except:
                pass
        return asin_ids

    def next_products_page(self, response) -> str:
        """Return the (relative) href of the 'next page' link.

        NOTE(review): raises AttributeError on the last page (no 'a-last'
        element) — callers are presumed to catch this. TODO confirm.
        """
        soup = BeautifulSoup(response.content, 'lxml')
        next_pag = soup.find('li', {'class':'a-last'})
        next_page_url = next_pag.find('a', href=True)
        return next_page_url['href']

    def __json_loads(self, s: str) -> Optional[Dict]:
        """json.loads with a second attempt that unescapes \\' sequences; None on failure."""
        try:
            return json.loads(s)
        except:
            try:
                return json.loads(s.replace('\\\'', '\''))
            except Exception as e:
                print(e)
                return None
import logging
import requests
import simplejson
from abc import ABC
from unittest.mock import patch
from rest_framework import status
from rest_framework.exceptions import ValidationError
from zs_utils.exceptions import CustomException
from django.utils.translation import gettext as _
from zs_base_api.base_api import BaseAPI
from zs_base_api.utils import response_keeper, SessionWithResponseKeeper
from zs_base_api.constants import API_ERROR_REASONS
from zs_base_api.services import ApiRequestLogService
__all__ = [
"APIAction",
"APIActionError",
]
class APIActionError(CustomException):
    """
    Default exception raised by APIAction subclasses when an action fails.
    """

    pass
class APIAction(ABC):
    """
    Base class for actions against an external API.

    An action is a small pipeline: input-parameter handling, optional
    "ancestor" actions whose results feed this one, the API request itself,
    response/error processing, request logging and success/failure callbacks.
    Subclasses configure behaviour via the class attributes below and by
    overriding the hook methods.
    """

    # Attributes
    # ------------------------------------------------------------------------------
    # Human-readable description used to build log and user-facing messages.
    description = "Действие с внешним API"
    # Exception class raised/propagated by this action.
    exception_class = APIActionError
    # Ancestor actions: {result_param_name: action_class}; each is executed
    # first and its results are injected into this action's params.
    ancestor_actions = {}
    # Target entity (optional model instance the action operates on).
    ENTITY_REQUIRED = False
    entity_model = None
    entity_name = None
    # Call-parameter validation (see clean_api_request_params).
    VALIDATE_PARAMS = False
    required_params = []
    allowed_params = []
    # When True, a request log is saved even for successful requests.
    SAVE_REQUEST_LOG = False
    # Timeout (seconds) for requests to the external API.
    RESPONSE_TIMEOUT = 30
    # Request logging (capturing the raw requests.Response via a patched session).
    PATCH_REQUEST_WITH_RESPONSE_KEEPER = False
    REQUEST_PATCH_TARGET = (
        "requests.sessions.Session"  # the requests.Session used by requests.request()
    )

    # Initialization
    # ------------------------------------------------------------------------------
    def __init__(self, instance=None, propagate_exception: bool = False, **kwargs):
        """
        :param instance: optional parent APIAction whose state (entity, request
            counter) is copied into this one.
        :param propagate_exception: when True, __call__ re-raises errors instead
            of only reporting them inside ``objects``.
        """
        self.logger = logging.getLogger()
        self.instance = instance
        if instance:
            self.copy_instance(instance=instance)
        else:
            self.set_action_variables(**kwargs)
        self.propagate_exception = propagate_exception
        self.validate_action_variables()

    def copy_instance(self, instance, **kwargs) -> None:
        """
        Copy state from another action instance (its entity and request counter).
        """
        self.set_entity(**kwargs)
        if (not self.entity) and (self.entity_model == instance.entity_model):
            self.entity = instance.entity
            if self.entity_name:
                setattr(self, self.entity_name, self.entity)
        # Share the external-API request counter with the parent action.
        self.request_counter = instance.request_counter

    def set_action_variables(self, **kwargs) -> None:
        """
        Set the action's attributes from the constructor kwargs.
        """
        self.set_entity(**kwargs)
        # Initialize the external-API request counter.
        self.request_counter = 0

    def set_entity(self, entity=None, entity_id: str = None, **kwargs) -> None:
        """
        Resolve and store the target entity (from an instance or by id lookup).
        """
        self.entity = None
        if self.entity_name:
            # The entity may also be passed under its domain-specific name,
            # e.g. order=... / order_id=... when entity_name == "order".
            if self.entity_name in kwargs:
                entity = kwargs[self.entity_name]
            if f"{self.entity_name}_id" in kwargs:
                entity_id = kwargs[f"{self.entity_name}_id"]
        if entity or entity_id:
            if not self.entity_name:
                raise ValueError("Необходимо определить атрибут 'entity_name'.")
            if not self.entity_model:
                raise ValueError("Необходимо определить атрибут 'entity_model'.")
            if entity:
                self.entity = entity
            else:
                self.entity = self.entity_model.objects.get(id=entity_id)
        if self.entity_name:
            setattr(self, self.entity_name, self.entity)

    def validate_action_variables(self) -> None:
        """
        Validate the instance attributes.
        """
        self.validate_entity()

    def validate_entity(self) -> None:
        """
        Validate the entity: type check plus the ENTITY_REQUIRED constraint.
        """
        if self.entity:
            if not isinstance(self.entity, self.entity_model):
                raise ValueError(
                    f"Параметры 'entity' и '{self.entity_name}' должны быть экземплярами модели '{self.entity_model}'."
                )
        elif self.ENTITY_REQUIRED:
            raise ValueError(
                f"В конструкторе класса {self.__class__.__name__} необходимо задать один из следующих параметров: "
                f"'entity', 'entity_id', '{self.entity_name}', '{self.entity_name}_id'."
            )

    # Message building
    # ------------------------------------------------------------------------------
    def _get_message_template(self) -> str:
        """
        Message template: the description without a trailing period.
        """
        message = self.description
        if message.endswith("."):
            message = message[:-1]
        return message

    def get_success_internal_message(self) -> str:
        return _("{message_template}: успешно выполнено.").format(
            message_template=self._get_message_template()
        )

    def get_failure_internal_message(self) -> str:
        return _("{message_template}: возникла ошибка.").format(
            message_template=self._get_message_template()
        )

    def get_success_message(self, objects) -> str:
        """
        Message reported after the action completed successfully.
        """
        return self.get_success_internal_message()

    def get_failure_message(self, error: Exception) -> str:
        """
        Message reported when the action failed with *error*.
        """
        if isinstance(error, CustomException):
            if error.message_dict and error.message_dict.get("external_message"):
                external_message = error.message_dict["external_message"]
            else:
                external_message = error.message
            # Default message for a known error reason.
            if (not external_message) and (error.reason in API_ERROR_REASONS):
                external_message = API_ERROR_REASONS[error.reason]
            if not external_message:
                external_message = ""
        else:
            external_message = str(error)
        internal_message = self.get_failure_internal_message()
        if internal_message:
            message = internal_message + " " + external_message
        else:
            message = external_message
        return message

    # Pipeline
    # ------------------------------------------------------------------------------
    def set_used_action(self, action_class: type["APIAction"]) -> "APIAction":
        """
        Instantiate a sub-action (sharing this action's state) and register it.
        """
        action = action_class(instance=self, propagate_exception=True)
        if not hasattr(self, "used_actions"):
            self.used_actions: list["APIAction"] = []
        self.used_actions.append(action)
        return action

    def set_used_actions(self) -> None:
        """
        Hook: instantiate the sub-actions this action uses.
        """
        pass

    def execute_ancestor_actions(self) -> None:
        """
        Run the ancestor actions and inject their results into this action's params.
        """
        for results_name, action_class in self.ancestor_actions.items():
            if not self.params.get(results_name):
                is_success, message, objects = action_class(
                    instance=self, propagate_exception=True
                )(**self.params)
                self.params[results_name] = objects.get("results", objects)

    def __call__(self, **kwargs) -> tuple:
        """
        Run the action (controller).

        :return: (is_success, message, objects) where objects holds 'results',
            'response' and, on failure, 'errors'/'error_reason'.
        """
        is_success = False
        message = None
        objects = {}
        try:
            # Instantiate the sub-actions
            self.set_used_actions()
            # Process the input parameters
            self.params = self.get_params(**kwargs)
            # Pre-request hook
            self.before_request()
            # Run ancestor actions and feed their results into this action's params
            self.execute_ancestor_actions()
            # Run this action
            objects: dict = self.run_action(**self.params)
            if not objects:
                objects = {"results": {}, "response": None}
            # Post-success hook
            self.success_callback(objects=objects, **self.params)
        except CustomException as error:
            response: requests.Response = error.response
            if not error.reason:
                error.reason = API_ERROR_REASONS.unknown
            is_success = False
            message: str = self.get_failure_message(error=error)
            objects = {
                "results": {},
                "errors": error,
                "error_reason": error.reason,
                "response": response,
            }
            # Post-failure hook
            self.failure_callback(objects)
            if self.propagate_exception:
                raise
        except ValidationError as error:  # TODO: stop handling ValidationError here?
            is_success = False
            message: str = self.get_failure_message(error=error)
            objects = {
                "results": {},
                "errors": error,
                "error_reason": API_ERROR_REASONS.data_validation,
                "response": None,
            }
            # Post-failure hook
            self.failure_callback(objects)
            if self.propagate_exception:
                raise
        else:
            is_success = True
            message = self.get_success_message(objects)
        if is_success:
            self.logger.info(message)
        else:
            self.logger.warning(message)
        return is_success, message, objects

    @classmethod
    def simple_run(cls, **kwargs) -> dict:
        """
        Construct with propagate_exception=True, invoke __call__ and return objects["results"].
        """
        kwargs["propagate_exception"] = True
        action = cls(**kwargs)
        return action.__call__(**kwargs)[2].get("results", {})

    # Overridable hooks
    # ------------------------------------------------------------------------------
    def before_request(self, **kwargs) -> None:
        """
        Hook: actions to perform before the request is made.
        """
        pass

    def success_callback(self, objects: dict, **kwargs) -> None:
        """
        Hook: actions to perform after the action succeeded.
        """
        pass

    def failure_callback(self, objects: dict, **kwargs) -> None:
        """
        Hook: actions to perform after the action failed.
        """
        pass

    def get_params(self, **kwargs) -> dict:
        """
        Hook: compute the action's call parameters.
        """
        return kwargs

    @property
    def possible_params(self):
        """
        All accepted parameters (required_params + allowed_params).
        """
        return (
            self.required_params + self.allowed_params + ["size", "page"]
        )  # TODO: where are size and page actually needed?

    def clean_api_request_params(self, raw_params: dict) -> dict:
        """
        Validate and filter the API request parameters.
        """
        # Check that all required parameters are present
        for param in self.required_params:
            if raw_params.get(param) is None:
                raise self.exception_class(f"Обязательный параметр '{param}' не задан.")
        # Keep only the accepted parameters
        params = {
            param: raw_params[param]
            for param in self.possible_params
            if param in raw_params
        }
        return params

    def get_api_class_init_params(self, **kwargs) -> dict:
        """
        Hook: parameters for instantiating the API class.
        """
        raise NotImplementedError(
            "Необходимо определить метод 'get_api_class_init_params'."
        )

    def get_api(self, **kwargs) -> BaseAPI:
        """
        Instantiate the API class (subclasses must define 'api_class').
        """
        if not getattr(self, "api_class", None):
            raise NotImplementedError("Необходимо определить атрибут 'api_class'.")
        init_params: dict = self.get_api_class_init_params(**kwargs)
        init_params["response_timeout"] = self.RESPONSE_TIMEOUT
        return self.api_class(**init_params)

    def make_request(self, **kwargs) -> dict:
        """
        Execute the request to the external service via the API class.
        """
        self.api: BaseAPI = self.get_api(**kwargs)
        try:
            response = self.api.make_request(**kwargs)
        except (requests.exceptions.Timeout, requests.exceptions.ConnectTimeout):
            raise self.exception_class(reason=API_ERROR_REASONS.timeout)
        results = self.get_response_results(response=response)
        return {"results": results, "response": response}

    def get_response_results(self, response: requests.Response) -> dict:
        """
        Extract the JSON body from *response*; {} when absent or not JSON.
        """
        if response is not None:
            try:
                return response.json()
            except simplejson.JSONDecodeError:
                pass
        return {}

    def run_action(self, **kwargs) -> dict:
        """
        Make the API request and post-process the response (error typing, logging).
        """
        if self.VALIDATE_PARAMS:
            params = self.clean_api_request_params(raw_params=kwargs)
        else:
            params = kwargs
        # Increment the external-API request counter
        self.incr_request_counter()
        exception = None
        try:
            if self.PATCH_REQUEST_WITH_RESPONSE_KEEPER:
                with patch(self.REQUEST_PATCH_TARGET, SessionWithResponseKeeper):
                    response_data: dict = self.make_request(**params)
                # Pull the raw response captured by the patched session
                response_attr = "raw_response"
                response: requests.Response = getattr(
                    response_keeper, response_attr, None
                )
                if response is not None:
                    response_data["response"] = response
                setattr(response_keeper, response_attr, None)
            else:
                response_data: dict = self.make_request(**params)
        except self.exception_class as error:
            response: requests.Response = error.response
            if response is None:
                results = {}
            else:
                results = self.get_response_results(response=response)
            if (not results) and error.message_dict:
                results = error.message_dict
            exception = error
        else:
            results: dict = response_data["results"]
            response: requests.Response = response_data.get("response")
        response_is_success: bool = self.response_is_success(
            results=results, response=response, exception=exception
        )
        if response_is_success:
            results: dict = self.format_success_response_results(
                results=results, response=response
            )
            exception = None
        else:
            error_message = self.get_error_message(results=results, response=response)
            if (not error_message) and exception:
                error_message = exception.message
            if exception is None:
                exception = self.exception_class(
                    message=error_message, response=response
                )
            # Determine the error reason
            if not exception.reason:
                exception.reason = self.get_error_reason(
                    results=results,
                    response=response,
                    error_message=error_message,
                )
        # Save the request log
        if (not response_is_success) or self.SAVE_REQUEST_LOG:
            try:
                self.save_request_log(results=results, response=response)
            except Exception as error:
                self.logger.critical(
                    f"Не удалось сохранить лог запроса к внешнему API: {str(error)}"
                )
        if exception is not None:
            raise exception
        return {"results": results, "response": response}

    def incr_request_counter(self) -> None:
        """
        Increment the request counter (and the parent action's, if any).
        """
        self.request_counter += 1
        if self.instance:
            self.instance.incr_request_counter()

    def response_is_success(
        self,
        results: dict,
        response: requests.Response,
        exception: CustomException = None,
    ) -> bool:
        """
        Decide whether the request succeeded.
        """
        if exception is not None:
            return False
        if response is not None:
            return status.is_success(response.status_code)
        return True

    def format_success_response_results(
        self, results: dict, response: requests.Response
    ) -> dict:
        """
        Hook: format the results of a successful request.
        """
        return results

    def get_error_message(self, results: dict, response: requests.Response) -> str:
        """
        Hook: extract an error message from the response or results.
        """
        return ""

    def get_error_reason(
        self, results: dict, response: requests.Response, error_message: str
    ) -> API_ERROR_REASONS:
        """
        Map the response status code to an error reason.

        Falls through to None (treated as unknown upstream) when the status
        code gives no hint.
        """
        if (response is not None) and getattr(response, "status_code", None):
            code: int = response.status_code
            if str(code).startswith("5"):
                return API_ERROR_REASONS.system
            elif code in [401, 403]:
                return API_ERROR_REASONS.invalid_token
            elif code == 404:
                return API_ERROR_REASONS.object_not_found
            elif response.status_code == 429:
                return API_ERROR_REASONS.request_limit

    def get_apirequest_log_extra_data(self) -> dict:
        """
        Hook: extra fields for the request log record.
        """
        return {}

    def save_request_log(self, results: dict, response: requests.Response):
        # Only real requests.Response objects can be logged.
        if (response is not None) and isinstance(response, requests.Response):
            ApiRequestLogService.save_api_request_log_by_response(
                response=response,
                **self.get_apirequest_log_extra_data(),
            )
import requests
import simplejson
from urllib.parse import parse_qs, urlparse
from rest_framework import status
from django.apps import apps
from django.conf import settings
from zs_base_api import models
__all__ = [
"ApiRequestLogService",
]
class ApiRequestLogService:
    """
    Service for persisting API request logs (AbstractAPIRequestLog instances).
    """

    @classmethod
    def get_request_log_model(cls) -> type[models.AbstractAPIRequestLog] | None:
        """
        Resolve the concrete log model from the API_REQUEST_LOG_MODEL setting.

        Returns None when the setting does not provide both 'app_label'
        and 'model_name'.
        """
        if settings.API_REQUEST_LOG_MODEL.get(
            "app_label"
        ) and settings.API_REQUEST_LOG_MODEL.get("model_name"):
            return apps.get_model(
                app_label=settings.API_REQUEST_LOG_MODEL["app_label"],
                model_name=settings.API_REQUEST_LOG_MODEL["model_name"],
            )

    @classmethod
    def save_api_request_log(
        cls,
        status_code: int,
        url: str,
        method: str,
        request_headers: dict = None,
        response_headers: dict = None,
        request_body: dict = None,
        response_body: dict = None,
        response_time: float = None,
        save_if_is_success: bool = True,
        **extra_fields,
    ) -> models.AbstractAPIRequestLog:
        """
        Create and save a log record from raw request/response data.

        :param response_time: request duration in milliseconds.
        :param save_if_is_success: when False, successful (2xx) responses are not logged.
        :raises ValueError: if the log model is not configured.
        """
        if (not save_if_is_success) and status.is_success(status_code):
            return None
        request_log_model = cls.get_request_log_model()
        if not request_log_model:
            raise ValueError(
                "Необходимо задать настройку API_REQUEST_LOG_MODEL (путь к модели)."
            )
        return request_log_model.objects.create(
            # Request data
            url=url,
            method=method,
            params=parse_qs(urlparse(url).query),
            request_headers=request_headers,
            request_body=request_body,
            # Response data
            response_time=response_time,
            status_code=status_code,
            response_headers=response_headers,
            response_body=response_body,
            **extra_fields,
        )

    @classmethod
    def save_api_request_log_by_response(
        cls,
        response: requests.Response,
        save_if_is_success: bool = True,
        **extra_fields,
    ) -> models.AbstractAPIRequestLog:
        """
        Create and save a log record from a requests.Response.
        """
        request = response.request
        request_body = request.body
        if isinstance(request_body, bytes):
            try:
                request_body = request_body.decode()
            except UnicodeDecodeError:
                # Non-text payload (e.g. a file upload): keep a printable form.
                request_body = str(request_body)
        if isinstance(request_body, str):
            try:
                request_body = simplejson.loads(request_body)
            except simplejson.JSONDecodeError:
                pass
        try:
            response_body = response.json()
        except simplejson.JSONDecodeError:
            response_body = None
        return cls.save_api_request_log(
            status_code=response.status_code,
            url=request.url,
            method=request.method,
            request_headers=dict(request.headers),
            response_headers=dict(response.headers),
            request_body=request_body,
            response_body=response_body,
            # BUGFIX: 'elapsed.microseconds' is only the sub-second component
            # (0..999999) of the timedelta and ignores whole seconds, so any
            # request taking >= 1s was logged with a wrong duration. Use
            # total_seconds() to get the full duration in milliseconds.
            response_time=response.elapsed.total_seconds() * 1000,
            save_if_is_success=save_if_is_success,
            **extra_fields,
        )
import logging
import requests
import re
from abc import ABC
__all__ = [
"BaseAPI",
]
class BaseAPI(ABC):
    """
    Base class describing a single external-API endpoint.

    Subclasses declare the HTTP verb, the resource path (with ``{placeholders}``
    for path parameters) and the accepted query/body parameters; this class
    assembles and executes the request.
    """

    required_params = []
    allowed_params = []
    array_payload = False  # the request body is a JSON array instead of an object
    name = None
    http_method = None
    resource_method = None
    production_api_url = None
    sandbox_api_url = None

    def __init__(
        self,
        logger=None,
        is_sandbox: bool = False,
        response_timeout: float = 30,
        **kwargs,
    ):
        self.logger = logger if logger else logging.getLogger(self.__class__.__name__)
        self.is_sandbox = is_sandbox
        self.response_timeout = response_timeout
        self.validate_attrs()

    def validate_attrs(self) -> None:
        """Check that the subclass declared every mandatory class attribute."""
        undefined = [
            attr
            for attr in ("http_method", "resource_method", "production_api_url")
            if not getattr(self, attr, None)
        ]
        if undefined:
            raise ValueError(f"Необходимо определить атрибут '{undefined[0]}'.")
        if self.is_sandbox and (not self.sandbox_api_url):
            raise ValueError(
                "Для использования тестового режима необходимо указать 'sandbox_api_url'"
            )
        if self.array_payload:
            # With an array body, 'array_payload' must be the only declared parameter.
            bad_required = self.required_params and (self.required_params != ["array_payload"])
            if bad_required or self.allowed_params:
                raise ValueError(
                    "Если стоит флаг 'array_payload', то единственным возможным и обязательным параметром является параметр 'array_payload'."
                )
            self.required_params = ["array_payload"]

    @property
    def headers(self) -> dict:
        """HTTP headers attached to every request; subclasses override as needed."""
        return {}

    def build_url(self, params: dict) -> str:
        """Compose the full endpoint URL, substituting ``{placeholders}`` from *params*."""
        base = self.sandbox_api_url if self.is_sandbox else self.production_api_url
        url = base + self.resource_method
        substitutions = {}
        for placeholder in re.findall(r"\{([^\}]+)\}", self.resource_method):
            if not params.get(placeholder):
                raise ValueError(f"Отсутствует обязательный параметр '{placeholder}'.")
            substitutions[placeholder] = params[placeholder]
        if substitutions:
            url = url.format(**substitutions)
        # Drop a single trailing slash, if any.
        return url[:-1] if url.endswith("/") else url

    def get_clean_params(self, params: dict, drop_none_values: bool = False) -> dict:
        """
        Validate and filter request parameters.

        Only keys listed in ``required_params``/``allowed_params`` survive;
        ``bytes`` values are routed into the 'files' entry for multipart upload.
        """
        clean_params = {}
        files = {}
        for name in self.required_params:
            if name not in params:
                raise ValueError(
                    f"Обязательный параметр запроса '{name}' не задан."
                )
            bucket = files if isinstance(params[name], bytes) else clean_params
            bucket[name] = params[name]
        for name in self.allowed_params:
            if name not in params:
                continue
            bucket = files if isinstance(params[name], bytes) else clean_params
            bucket[name] = params[name]
        if drop_none_values:
            clean_params = {key: value for key, value in clean_params.items() if value is not None}
        clean_params["files"] = files or None
        return clean_params

    def get_payload(self, params: dict) -> dict | list:
        """Body for POST-like requests; the raw array when ``array_payload`` is set."""
        return params["array_payload"] if self.array_payload else params

    def get_request_params(self, **kwargs) -> dict:
        """Gather everything ``requests.request`` needs: url, headers, params/json, files."""
        request_params = {
            "url": self.build_url(kwargs),
            "headers": self.headers,
        }
        clean_params = self.get_clean_params(kwargs)
        files = clean_params.pop("files", None)
        if files:
            request_params["files"] = files
        if self.http_method in ("POST", "PUT", "PATCH"):
            request_params["json"] = self.get_payload(params=clean_params)
        else:
            request_params["params"] = clean_params
        return request_params

    def make_request(self, **kwargs) -> requests.Response:
        """Execute the HTTP request against the external API."""
        return requests.request(
            method=self.http_method,
            timeout=self.response_timeout,
            **self.get_request_params(**kwargs),
        )
from abc import abstractmethod
from ebaysdk.exception import ConnectionError as BaseEbayTradingAPIError
from ebaysdk.trading import Connection as BaseEbayTradingAPI
from ebaysdk.response import Response as EbaySDKResponse
from zs_base_api.base_api import BaseAPI
from ebay_sdk.data.marketplace.marketplace_to_site import EbayDomainCodeToSiteID
class EbayTradingAPI(BaseAPI):
    """
    Base class for eBay Trading API calls, executed through ``ebaysdk``.

    Subclasses supply the Trading API verb via ``method_name`` and build the
    request payload in ``get_params``.
    """

    def __init__(
        self,
        access_token: str,
        client_id: str,
        client_secret: str,
        dev_id: str,
        site_id: str = None,
        domain_code: str = None,
        debug: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.access_token = access_token
        self.client_id = client_id
        self.client_secret = client_secret
        self.dev_id = dev_id
        self.site_id = site_id
        self.domain_code = domain_code
        self.debug = debug

    def validate_attrs(self) -> None:
        # The BaseAPI attribute checks (http_method/resource_method/urls)
        # do not apply to Trading API calls.
        pass

    @property
    @abstractmethod
    def method_name(self) -> str:
        """Trading API verb to execute."""
        pass

    @abstractmethod
    def get_params(self, **kwargs) -> dict:
        """Build the raw request payload for the Trading API call."""
        pass

    def _clean_data(self, data: dict) -> dict:
        """Recursively drop falsy values, keeping booleans and integers (incl. False/0)."""
        cleaned = {}
        for key, value in data.items():
            if not value and type(value) not in [bool, int]:
                continue
            cleaned[key] = self._clean_data(value) if isinstance(value, dict) else value
        return cleaned

    def get_site_id(self, site_id: str = None, domain_code: str = None) -> str:
        """Resolve the eBay site id from an explicit id or a marketplace domain code."""
        if site_id:
            if site_id not in EbayDomainCodeToSiteID.values():
                raise BaseEbayTradingAPIError(msg=f"Неизвестный 'site_id'={site_id}")
            return site_id
        if domain_code:
            if domain_code not in EbayDomainCodeToSiteID.keys():
                raise BaseEbayTradingAPIError(
                    msg=f"Маркетплейс '{domain_code}' не поддерживается Trading API."
                )
            return EbayDomainCodeToSiteID[domain_code]
        return EbayDomainCodeToSiteID["default"]

    def make_request(self, **kwargs) -> EbaySDKResponse:
        """Execute the Trading API call and return the ebaysdk response."""
        payload = self._clean_data(self.get_params(**kwargs))
        connection = BaseEbayTradingAPI(
            iaf_token=self.access_token,
            appid=self.client_id,
            devid=self.dev_id,
            certid=self.client_secret,
            siteid=self.get_site_id(site_id=self.site_id, domain_code=self.domain_code),
            config_file=None,
            debug=self.debug,
        )
        return connection.execute(
            verb=self.method_name,
            data=payload,
        )
from zs_base_api.base_api import BaseAPI
from ebay_sdk.utils import custom_quote
class EbayAPI(BaseAPI):
    """
    Base class for eBay RESTful API endpoints.

    Adds OAuth bearer authorization, marketplace-aware locale headers,
    pagination via a 'next_url' parameter, and URL-quoting of identifier
    parameters (SKUs, inventory-group and location keys).
    """

    production_api_url = "https://api.ebay.com/"
    sandbox_api_url = "https://api.sandbox.ebay.com/"
    # Identifier parameters whose values must be URL-quoted before use.
    params_to_quote = [
        "sku",
        "inventoryItemGroupKey",
        "merchantLocationKey",
    ]
    # TODO: validate requests against these limits?
    MAX_LIMIT = 100
    MAX_OFFSET = 99

    def __init__(self, access_token: str, marketplace_id: str = None, **kwargs):
        super().__init__(**kwargs)
        self.access_token = access_token
        self.marketplace_id = marketplace_id

    @property
    def headers(self):
        """Standard eBay REST headers with a locale derived from the marketplace id."""
        # DOCS: https://developer.ebay.com/api-docs/static/rest-request-components.html#marketpl
        DOMAIN_TO_LOCALE = {
            "EBAY_AT": "de-AT",
            "EBAY_AU": "en-AU",
            "EBAY_BE": "fr-BE",
            "EBAY_CA": "en-CA",
            "EBAY_CH": "de-CH",
            "EBAY_DE": "de-DE",
            "EBAY_ES": "es-ES",
            "EBAY_FR": "fr-FR",
            "EBAY_GB": "en-GB",
            "EBAY_HK": "zh-HK",
            "EBAY_IE": "en-IE",
            "EBAY_IN": "en-GB",
            "EBAY_IT": "it-IT",
            "EBAY_MY": "en-MY",
            "EBAY_NL": "nl-NL",
            "EBAY_PH": "en-PH",
            "EBAY_PL": "pl-PL",
            "EBAY_TH": "th-TH",
            "EBAY_TW": "zh-TW",
        }
        locale = DOMAIN_TO_LOCALE.get(self.marketplace_id)
        if not locale:
            locale = "en-US"
        return {
            "Authorization": f"Bearer {self.access_token}",
            "Content-Language": locale,
            "Content-Type": "application/json",
            "Accept-Language": locale,
            "Accept": "application/json",
            "Accept-Charset": "utf-8",
            "Accept-Encoding": "application/gzip",
            "X-EBAY-C-MARKETPLACE-ID": self.marketplace_id,
        }

    def build_url(self, params: dict):
        """Use 'next_url' (absolute, or relative to the API host) when paginating;
        otherwise build the URL from resource_method as usual."""
        next_url = params.pop("next_url", None)
        if next_url:
            url = self.sandbox_api_url if self.is_sandbox else self.production_api_url
            if next_url.startswith("/"):
                # NOTE(review): the base URL ends with '/' and next_url starts
                # with '/', producing '//' in the path — presumably harmless. TODO confirm.
                url += next_url
            else:
                url = next_url
            return url
        else:
            return super().build_url(params=params)

    def get_path_params(self, params: dict):
        """Quote the path-parameter values listed in params_to_quote.

        NOTE(review): relies on a get_path_params() implementation in BaseAPI;
        the BaseAPI bundled in this snapshot does not define it — presumably a
        newer zs_base_api version does. TODO confirm the dependency version.
        """
        path_params = super().get_path_params(params)
        return {
            param: custom_quote(value)
            for param, value in path_params.items()
            if value and (param in self.params_to_quote)
        }

    def get_clean_params(self, params: dict) -> dict:
        # 'next_url' and 'payload' always pass through; declared
        # required/allowed parameters are validated only when not paginating.
        clean_params = {
            "next_url": params.get("next_url"),
            "payload": params.get("payload"),
        }
        if not params.get("next_url"):
            clean_params.update(super().get_clean_params(params))
        for param in self.params_to_quote:
            if clean_params.get(param):
                clean_params[param] = custom_quote(clean_params[param])
        return clean_params

    def get_payload(self, params: dict):
        """The request body is passed explicitly via the 'payload' parameter."""
        return params.pop("payload", None)
import base64
import simplejson
import logging
import urllib
import requests
from zs_utils.exceptions import CustomException
from django.utils.translation import gettext as _
from .model import environment, oAuth_token
class EbayOAuthClientError(CustomException):
    """Raised when an eBay OAuth token request fails."""

    pass
class EbayOAuthClient:
    """
    Client for the eBay OAuth2 endpoints: builds consent URLs and exchanges
    client credentials, authorization codes and refresh tokens for tokens.
    """

    def __init__(
        self,
        client_id: str,
        client_secret: str,
        redirect_uri: str,
        is_sandbox: bool,
        user_scopes: list = None,
        app_scopes: list = None,
        logger=None,
    ):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        self.env_type = environment.SANDBOX if is_sandbox else environment.PRODUCTION
        self.user_scopes = user_scopes
        self.app_scopes = app_scopes
        self.headers = self._generate_request_headers()
        self.logger = logger if logger else logging.getLogger(__name__)

    def _generate_request_headers(self) -> dict:
        """Basic-auth headers built from the client credentials."""
        credentials = f"{self.client_id}:{self.client_secret}".encode()
        encoded_credentials = base64.b64encode(credentials).decode("utf-8")
        return {
            "Content-Type": "application/x-www-form-urlencoded",
            "Authorization": f"Basic {encoded_credentials}",
        }

    def generate_user_authorization_url(self, state: str = None) -> str:
        """URL the user visits to grant this application the configured user scopes."""
        query_params = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "response_type": "code",
            "prompt": "login",
            "scope": " ".join(self.user_scopes),
        }
        if state is not None:
            query_params.update({"state": state})
        return f"{self.env_type.web_endpoint}?{urllib.parse.urlencode(query_params)}"

    def _get_token(self, data: dict) -> oAuth_token:
        """POST *data* to the token endpoint; raise EbayOAuthClientError on failure."""
        response = requests.post(
            self.env_type.api_endpoint, data=data, headers=self.headers
        )
        content = simplejson.loads(response.content)
        if response.status_code != requests.codes.ok:
            raise EbayOAuthClientError(
                message=_("Не удалось получить токен: {error}").format(
                    error=content["error_description"]
                )
            )
        return oAuth_token(**content)

    def get_application_access_token(self) -> oAuth_token:
        """Client-credentials grant: a token scoped to the application itself."""
        return self._get_token(
            data={
                "grant_type": "client_credentials",
                "redirect_uri": self.redirect_uri,
                "scope": " ".join(self.app_scopes),
            }
        )

    def exchange_code_for_access_token(self, code: str) -> oAuth_token:
        """Authorization-code grant: swap the user's consent code for tokens."""
        return self._get_token(
            data={
                "grant_type": "authorization_code",
                "redirect_uri": self.redirect_uri,
                "code": code,
            }
        )

    def get_user_access_token(self, refresh_token: str) -> oAuth_token:
        """Refresh-token grant: obtain a fresh user access token."""
        return self._get_token(
            data={
                "grant_type": "refresh_token",
                "refresh_token": refresh_token,
                "scope": " ".join(self.user_scopes),
            }
        )
# from pysftp import CnOpts, Connection
# cnopts = CnOpts()
# cnopts.hostkeys = None
# credentials = {
# "host": "mip.ebay.com",
# "port": 22,
# "username": "zone-smart",
# "password": "v^1.1#i^1#r^1#I^3#f^0#p^3#t^Ul4xMF82OjQ5REY2QzJDQjM2RTVERDE5MDIyRTAwNDU3NUQ3MzVBXzNfMSNFXjI2MA==",
# }
# mip_dirs = [
# "availability",
# "deleteInventory",
# "distribution",
# "inventory",
# "location",
# "order",
# "orderfulfillment",
# "product",
# "reports",
# ]
# parser = argparse.ArgumentParser(description="Получение результатов загрузки eBay feed")
# class StoreNameValuePair(argparse.Action):
# def __call__(self, parser, namespace, values, option_string=None):
# n, v = values.split("=")
# setattr(namespace, n, v)
# parser.add_argument("file_name", action=StoreNameValuePair, help="Имя файла")
# parser.add_argument(
# "localpath",
# action=StoreNameValuePair,
# help="Путь на локальной машине для загруженного файла",
# )
# parser.add_argument(
# "mip_dir",
# action=StoreNameValuePair,
# help="Папка на сервере mip.ebay.com, в которую был загружен feed-файл",
# )
# args = parser.parse_args()
# file_name = args.file_name
# print("Имя ранее загруженного файла:", file_name)
# localpath = os.path.join(os.path.dirname(__file__), args.localpath, file_name)
# localpath_dir = os.path.dirname(localpath)
# print("Результаты сохранятся в файл:", localpath)
# if not os.path.isdir(localpath_dir):
# raise AttributeError(f"Указанная папка {localpath_dir} не существует на локальной машине")
# mip_dir = args.mip_dir
# print("Папка для поиска результатов в системе eBay:", mip_dir)
# if mip_dir not in mip_dirs:
# raise AttributeError(
# f"Недопустимая папка для поиска результатов загрузки: {mip_dir}\n" f"Допустимые папки: {mip_dirs}"
# )
# def callback(ready, total):
# print(f"File was {'un' if ready!=total else ''}successfully downloaded:")
# print(f"{ready} of {total} bytes downloaded")
# def get_similar(filename, dirname):
# print("Поиск файлов в", dirname)
# print("Содержимое папки:", sftp.listdir(dirname))
# files = [
# os.path.join(dirname, file)
# for file in sftp.listdir(dirname)
# if (os.path.splitext(filename)[0] in file) and file.endswith(".csv")
# ]
# return files
# def get_last_modified(files):
# dt_objs = []
# for file in files:
# dt_str = "-".join(os.path.basename(file).split("-")[1:7])
# dt_obj = time.strptime(dt_str, "%b-%d-%Y-%H-%M-%S")
# dt_objs.append((dt_obj, file))
# if dt_objs:
# return max(dt_objs, key=lambda x: x[0])[1]
# with Connection(**credentials, cnopts=cnopts) as sftp:
# root_dir = os.path.join("store", mip_dir)
# sftp.chdir(root_dir)
# processed_files = []
# for date_dir in sftp.listdir("output"):
# processed_files += get_similar(file_name, os.path.join("output", date_dir))
# unprocessed_files = []
# if "inprogress" in sftp.listdir(""):
# # возможно папка тоже содержит вложенные папки по датам
# unprocessed_files = get_similar(file_name, "inprogress")
# found_files = processed_files + unprocessed_files
# print("Все найденные файлы:", found_files)
# last_modified = get_last_modified(found_files)
# print("Результаты последней загрузки:", last_modified)
# if "output" in last_modified:
# remotepath = last_modified
# else:
# print(f"Файл {last_modified} находится в обработке системой eBay, результаты пока недоступны.")
# remotepath = get_last_modified(filter(lambda file_path: "output" in file_path, found_files))
# if remotepath:
# print(f"Загружен последний обработанный системой eBay файл с тем же именем {remotepath}.")
# else:
# print(f"Обработанных системой eBay файлов с тем же именем {file_name} не найдено.")
# if remotepath:
# sftp.get(remotepath=remotepath, localpath=localpath, callback=callback)
# print(f"Файл {remotepath} загружен в {localpath}.")
# else:
# print("Файл не был загружен.") | zs-ebay-sdk | /zs_ebay_sdk-0.0.1-py3-none-any.whl/ebay_sdk/mip/sftp_get.py | sftp_get.py |
from ebay_sdk.base_api import EbayAPI
class CreateInventoryLocation(EbayAPI):
    """
    Create an inventory location identified by merchantLocationKey.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/location/methods/createInventoryLocation
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/location/{merchantLocationKey}"
    required_params = ["merchantLocationKey"]
class DeleteInventoryLocation(EbayAPI):
    """
    Delete the inventory location identified by merchantLocationKey.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/location/methods/deleteInventoryLocation
    """
    http_method = "DELETE"
    resource_method = "sell/inventory/v1/location/{merchantLocationKey}"
    required_params = ["merchantLocationKey"]
class DisableInventoryLocation(EbayAPI):
    """
    Disable an inventory location.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/location/methods/disableInventoryLocation
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/location/{merchantLocationKey}/disable"
    required_params = ["merchantLocationKey"]
class EnableInventoryLocation(EbayAPI):
    """
    Enable an inventory location.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/location/methods/enableInventoryLocation
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/location/{merchantLocationKey}/enable"
    required_params = ["merchantLocationKey"]
class GetInventoryLocation(EbayAPI):
    """
    Fetch a single inventory location by merchantLocationKey.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/location/methods/getInventoryLocation
    """
    http_method = "GET"
    resource_method = "sell/inventory/v1/location/{merchantLocationKey}"
    required_params = ["merchantLocationKey"]
class GetInventoryLocations(EbayAPI):
    """
    List the seller's inventory locations (paginated via offset/limit).
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/location/methods/getInventoryLocations
    """
    http_method = "GET"
    resource_method = "sell/inventory/v1/location"
    allowed_params = ["offset", "limit"]
class UpdateInventoryLocation(EbayAPI):
    """
    Update details of an existing inventory location.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/location/methods/updateInventoryLocation
    """
    http_method = "POST"
    resource_method = (
        "sell/inventory/v1/location/{merchantLocationKey}/update_location_details"
    )
    required_params = ["merchantLocationKey"]
from ebay_sdk.base_api import EbayAPI
class CreateOffer(EbayAPI):
    """
    Create an offer for an inventory item.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/createOffer
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/offer"
class UpdateOffer(EbayAPI):
    """
    Update an existing offer by offerId.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/updateOffer
    """
    http_method = "PUT"
    resource_method = "sell/inventory/v1/offer/{offerId}"
    required_params = ["offerId"]
class GetOffers(EbayAPI):
    """
    List offers for a given SKU.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/getOffers
    """
    http_method = "GET"
    resource_method = "sell/inventory/v1/offer"
    required_params = ["sku"]
    allowed_params = ["marketplace_id", "offset", "limit"]
class GetOffer(EbayAPI):
    """
    Fetch a single offer by offerId.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/getOffer
    """
    http_method = "GET"
    resource_method = "sell/inventory/v1/offer/{offerId}"
    required_params = ["offerId"]
class DeleteOffer(EbayAPI):
    """
    Delete an offer by offerId.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/deleteOffer
    """
    http_method = "DELETE"
    resource_method = "sell/inventory/v1/offer/{offerId}"
    required_params = ["offerId"]
class PublishOffer(EbayAPI):
    """
    Publish an offer (turn it into a live listing).
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/publishOffer
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/offer/{offerId}/publish"
    required_params = ["offerId"]
class WithdrawOffer(EbayAPI):
    """
    Withdraw a published offer.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/withdrawOffer
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/offer/{offerId}/withdraw"
    required_params = ["offerId"]
class GetListingFees(EbayAPI):
    """
    Calculate listing fees for unpublished offers.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/getListingFees
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/offer/get_listing_fees"
class BulkCreateOffer(EbayAPI):
    """
    Create multiple offers in one call.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/bulkCreateOffer
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/bulk_create_offer"
class BulkPublishOffer(EbayAPI):
    """
    Publish multiple offers in one call.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/offer/methods/bulkPublishOffer
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/bulk_publish_offer"
from ebay_sdk.base_api import EbayAPI
# SINGLE ITEM API
class CreateOrReplaceInventoryItem(EbayAPI):
    """
    Create or fully replace the inventory item record for a SKU.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/methods/createOrReplaceInventoryItem
    """
    http_method = "PUT"
    resource_method = "sell/inventory/v1/inventory_item/{sku}"
    required_params = ["sku"]
class GetInventoryItem(EbayAPI):
    """
    Fetch the inventory item record for a SKU.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/methods/getInventoryItem
    """
    http_method = "GET"
    resource_method = "sell/inventory/v1/inventory_item/{sku}"
    required_params = ["sku"]
class GetInventoryItems(EbayAPI):
    """
    List inventory item records (paginated via offset/limit).
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/methods/getInventoryItems
    """
    http_method = "GET"
    resource_method = "sell/inventory/v1/inventory_item"
    allowed_params = ["offset", "limit"]
class DeleteInventoryItem(EbayAPI):
    """
    Delete the inventory item record for a SKU.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/methods/deleteInventoryItem
    """
    http_method = "DELETE"
    resource_method = "sell/inventory/v1/inventory_item/{sku}"
    required_params = ["sku"]
# PRODUCT COMPATIBILITY API
class ProductCompatibilityAPI(EbayAPI):
    # Shared endpoint/requirements for the product-compatibility calls below.
    resource_method = "sell/inventory/v1/inventory_item/{sku}/product_compatibility"
    required_params = ["sku"]
class CreateOrReplaceProductCompatibility(ProductCompatibilityAPI):
    """
    Create or replace the compatibility list of an inventory item.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/product_compatibility/methods/createOrReplaceProductCompatibility
    """  # noqa
    http_method = "PUT"
class GetProductCompatibility(ProductCompatibilityAPI):
    """
    Fetch the compatibility list of an inventory item.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/product_compatibility/methods/getProductCompatibility
    """  # noqa
    http_method = "GET"
class DeleteProductCompatibility(ProductCompatibilityAPI):
    """
    Delete the compatibility list of an inventory item.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/product_compatibility/methods/deleteProductCompatibility
    """  # noqa
    http_method = "DELETE"
# BULK API
class BulkUpdatePriceQuantity(EbayAPI):
    """
    Update price/quantity for multiple SKUs in one call.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/methods/bulkUpdatePriceQuantity
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/bulk_update_price_quantity"
class BulkCreateOrReplaceInventoryItem(EbayAPI):
    """
    Create/replace multiple inventory item records in one call.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/methods/bulkCreateOrReplaceInventoryItem
    """  # noqa
    http_method = "POST"
    resource_method = "sell/inventory/v1/bulk_create_or_replace_inventory_item"
class BulkGetInventoryItem(EbayAPI):
    """
    Fetch multiple inventory item records in one call.
    Docs:
    https://developer.ebay.com/api-docs/sell/inventory/resources/inventory_item/methods/bulkGetInventoryItem
    """
    http_method = "POST"
    resource_method = "sell/inventory/v1/bulk_get_inventory_item"
import datetime
from dateutil.parser import parse
from ebay_sdk.base_api import EbayAPI
class PaymentDisputeAPI(EbayAPI):
    # Payment-dispute endpoints are served from the "apiz" host, not "api".
    production_api_url = "https://apiz.ebay.com/"
    sandbox_api_url = "https://apiz.sandbox.ebay.com/"
class GetPaymentDispute(PaymentDisputeAPI):
    """
    Fetch a single payment dispute by its ID.
    Docs:
    https://developer.ebay.com/api-docs/sell/fulfillment/resources/payment_dispute/methods/getPaymentDispute
    """
    http_method = "GET"
    resource_method = "sell/fulfillment/v1/payment_dispute/{payment_dispute_id}"
    required_params = ["payment_dispute_id"]
class FetchEvidenceContent(PaymentDisputeAPI):
    """
    Download an evidence file attached to a payment dispute.
    Docs:
    https://developer.ebay.com/api-docs/sell/fulfillment/resources/payment_dispute/methods/fetchEvidenceContent
    """
    http_method = "GET"
    resource_method = "sell/fulfillment/v1/payment_dispute/{payment_dispute_id}/fetch_evidence_content"
    required_params = ["payment_dispute_id", "evidence_id", "file_id"]
class GetActivities(PaymentDisputeAPI):
    """
    List the activity log of a payment dispute.
    Docs:
    https://developer.ebay.com/api-docs/sell/fulfillment/resources/payment_dispute/methods/getActivities
    """
    http_method = "GET"
    resource_method = (
        "sell/fulfillment/v1/payment_dispute/{payment_dispute_id}/activity"
    )
    required_params = ["payment_dispute_id"]
class GetPaymentDisputeSummaries(PaymentDisputeAPI):
    """
    List payment dispute summaries, optionally filtered by order, buyer,
    status and an open-date window (validated below).
    Docs:
    https://developer.ebay.com/api-docs/sell/fulfillment/resources/payment_dispute/methods/getPaymentDisputeSummaries
    """
    http_method = "GET"
    resource_method = "sell/fulfillment/v1/payment_dispute/payment_dispute_summary"
    allowed_params = [
        "order_id",
        "buyer_username",
        "open_date_from",
        "open_date_to",
        "payment_dispute_status",
        "limit",
        "offset",
    ]
    MAX_LIMIT = 200
    @staticmethod
    def _now_like(value):
        """Current time with the same tz-awareness as *value*.

        BUG FIX: eBay timestamps typically carry a UTC offset ("...Z"), so
        ``dateutil.parser.parse`` returns an *aware* datetime; comparing that
        with a naive ``datetime.now()`` raises TypeError. Matching tzinfo
        keeps both aware (or both naive) so comparisons are always valid.
        """
        return datetime.datetime.now(tz=value.tzinfo)
    def get_clean_params(self, params: dict) -> dict:
        """Validate the open_date_* filters on top of the parent's cleaning."""
        clean_params = super().get_clean_params(params)
        if clean_params.get("open_date_to"):
            clean_params["open_date_to"] = self.clean_open_date_to(
                value=clean_params["open_date_to"]
            )
        if clean_params.get("open_date_from"):
            clean_params["open_date_from"] = self.clean_open_date_from(
                value=clean_params["open_date_from"]
            )
        return clean_params
    def clean_open_date_to(self, value):
        """Parse "open_date_to": cap future dates at now; reject dates older than ~18 months."""
        value = parse(value)
        now = self._now_like(value)
        if now < value:
            value = now
        elif (now - value).days >= 18 * 30:  # ~18 months, per eBay's documented limit
            raise self.exception_class(
                'Разница между датой "open_date_to" и настоящим моментом не должна превышать 18 месяцев.'
            )
        return value
    def clean_open_date_from(self, value):
        """Parse "open_date_from": must be strictly in the past and within ~18 months."""
        value = parse(value)
        now = self._now_like(value)
        if now <= value:
            raise self.exception_class(
                'Дата "open_date_from" должна быть более ранней, чем сегодняшняя дата.'
            )
        elif (now - value).days >= 18 * 30:
            raise self.exception_class(
                'Разница между датой "open_date_from" и настоящим моментом не должна превышать 18 месяцев.'
            )
        return value
from ebay_sdk.base_api import EbayAPI
class GetOrder(EbayAPI):
    """
    Fetch a single order by its ID.
    Docs:
    https://developer.ebay.com/api-docs/sell/fulfillment/resources/order/methods/getOrder
    """
    http_method = "GET"
    resource_method = "sell/fulfillment/v1/order/{orderId}"
    required_params = ["orderId"]
class GetOrders(EbayAPI):
    """
    List the seller's orders, filtered by IDs, dates or fulfillment status.
    Docs:
    https://developer.ebay.com/api-docs/sell/fulfillment/resources/order/methods/getOrders
    """
    http_method = "GET"
    resource_method = "sell/fulfillment/v1/order"
    allowed_params = ["orderIds", "filter", "offset", "limit"]
    MAX_OFFSET = 999
    MAX_LIMIT = 1000
    def get_clean_params(self, params: dict) -> dict:
        """Validate "orderIds" and "filter" on top of the parent's cleaning."""
        clean_params = super().get_clean_params(params)
        if clean_params.get("orderIds"):
            clean_params["orderIds"] = self.clean_orderIds(
                orderIds_string=clean_params["orderIds"]
            )
        if clean_params.get("filter"):
            clean_params["filter"] = self.clean_filter(
                filter_string=clean_params["filter"]
            )
        return clean_params
    def clean_orderIds(self, orderIds_string):
        """Validate a comma-separated list of 1-50 unique numeric order IDs.

        Raises:
            self.exception_class: on count/duplicate/format violations.
        """
        message = ""
        order_ids = [order_id.strip() for order_id in orderIds_string.split(",")]
        if not (1 <= len(order_ids) <= 50):
            message = f"Количество ID заказов должно лежать в диапазоне [1:50]. Передано ID заказов: {len(order_ids)}"
        elif len(order_ids) != len(set(order_ids)):
            message = (
                f"Среди ID заказов есть повторяющиеся.\nСписок ID: {orderIds_string}"
            )
        else:
            # An order ID consists of digit groups separated by dashes.
            for order_id in order_ids:
                for part in order_id.split("-"):
                    if not part.isdigit():
                        message += f"Недопустимый ID заказа: {order_id}.\n"
        if message:
            raise self.exception_class(message)
        return ",".join(order_ids)
    def clean_filter(self, filter_string):
        """Validate the "filter" expression and percent-encode its brackets."""
        # Docs: https://developer.ebay.com/api-docs/sell/fulfillment/resources/order/methods/getOrders#h2-input
        def _percent_encode(string):
            string = string.replace("[", "%5B")
            string = string.replace("]", "%5D")
            string = string.replace("{", "%7B")
            string = string.replace("}", "%7D")
            string = string.replace("|", "%7C")
            return string
        allowed_filters = ["creationdate", "lastmodifieddate", "orderfulfillmentstatus"]
        allowed_orderfulfillmentstatuses = [
            "{NOT_STARTED|IN_PROGRESS}",
            "{FULFILLED|IN_PROGRESS}",
        ]
        for pair in filter_string.split(","):
            key, value = pair.split(":")[0], ":".join(pair.split(":")[1:])
            if key == "orderfulfillmentstatus":
                if value.strip() not in allowed_orderfulfillmentstatuses:
                    # BUG FIX: the exception was instantiated but never raised,
                    # so invalid statuses slipped through silently.
                    raise self.exception_class(
                        f"Недопустимое значение фильтра {key}: {value}. Допустимые значения: {allowed_orderfulfillmentstatuses}.\n"
                    )
            elif key in ["creationdate", "lastmodifieddate"]:
                pass
                # TODO: проверить на соответствие шаблону YYYY-MM-DDThh:mm:ss.000Z
                # if not is_datetime(...):
                #     message += f"Недопустимое значение фильтра {key}: {value}.\n"
                #     message += f"Значение должно соответствовать шаблону: [<datetime>..<datetime or empty string>]"
            else:
                # BUG FIX: same as above — previously not raised.
                raise self.exception_class(
                    f"Недопустимый фильтр: {key}. Допустимые фильтры: {allowed_filters}.\n"
                )
        return ",".join(
            [
                _percent_encode(filter_pair.strip())
                for filter_pair in filter_string.split(",")
            ]
        )
class IssueRefund(EbayAPI):
    """
    Issue a full or partial refund for an order.
    Docs:
    https://developer.ebay.com/api-docs/sell/fulfillment/resources/order/methods/issueRefund
    """
    http_method = "POST"
    resource_method = "sell/fulfillment/v1/order/{orderId}/issue_refund"
    required_params = ["orderId"]
from ebay_sdk.base_api import EbayAPI
class TaxonomyAPI(EbayAPI):
    """Base class for Commerce Taxonomy API calls (bearer-token JSON requests)."""
    @property
    def headers(self):
        """Request headers carrying the OAuth bearer token."""
        bearer = f"Bearer {self.access_token}"
        return {
            "Authorization": bearer,
            "Accept": "application/json",
            "Content-Type": "application/json",
            "Accept-Encoding": "application/gzip",
        }
class GetDefaultCategoryTreeId(TaxonomyAPI):
    """
    Resolve the default category tree ID for a marketplace.
    Docs:
    https://developer.ebay.com/api-docs/commerce/taxonomy/resources/category_tree/methods/getDefaultCategoryTreeId
    """
    http_method = "GET"
    resource_method = "commerce/taxonomy/v1/get_default_category_tree_id"
    required_params = ["marketplace_id"]
class GetCategoryTree(TaxonomyAPI):
    """
    Fetch a complete category tree by its ID.
    Docs:
    https://developer.ebay.com/api-docs/commerce/taxonomy/resources/category_tree/methods/getCategoryTree
    """
    http_method = "GET"
    resource_method = "commerce/taxonomy/v1/category_tree/{category_tree_id}"
    required_params = ["category_tree_id"]
class GetCategorySubtree(TaxonomyAPI):
    """
    Fetch the subtree rooted at a given category.
    Docs:
    https://developer.ebay.com/api-docs/commerce/taxonomy/resources/category_tree/methods/getCategorySubtree
    """
    http_method = "GET"
    resource_method = (
        "commerce/taxonomy/v1/category_tree/{category_tree_id}/get_category_subtree"
    )
    required_params = ["category_tree_id", "category_id"]
class GetCategorySuggestions(TaxonomyAPI):
    """
    Suggest categories matching a free-text query ("q").
    Docs:
    https://developer.ebay.com/api-docs/commerce/taxonomy/resources/category_tree/methods/getCategorySuggestions
    """
    http_method = "GET"
    resource_method = (
        "commerce/taxonomy/v1/category_tree/{category_tree_id}/get_category_suggestions"
    )
    required_params = ["category_tree_id", "q"]
class GetItemAspectsForCategory(TaxonomyAPI):
    """
    Fetch item aspects (attributes) applicable to a category.
    Docs:
    https://developer.ebay.com/api-docs/commerce/taxonomy/resources/category_tree/methods/getItemAspectsForCategory
    """
    http_method = "GET"
    resource_method = "commerce/taxonomy/v1/category_tree/{category_tree_id}/get_item_aspects_for_category"
    required_params = ["category_tree_id", "category_id"]
class GetCompatibilityProperties(TaxonomyAPI):
    """
    Fetch compatibility properties defined for a category.
    Docs:
    https://developer.ebay.com/api-docs/commerce/taxonomy/resources/category_tree/methods/getCompatibilityProperties
    """
    http_method = "GET"
    resource_method = "commerce/taxonomy/v1/category_tree/{category_tree_id}/get_compatibility_properties"
    required_params = ["category_tree_id", "category_id"]
class GetCompatibilityPropertyValues(TaxonomyAPI):
    """
    Fetch allowed values of one compatibility property in a category.
    Docs:
    https://developer.ebay.com/api-docs/commerce/taxonomy/resources/category_tree/methods/getCompatibilityPropertyValues
    """
    http_method = "GET"
    resource_method = "commerce/taxonomy/v1/category_tree/{category_tree_id}/get_compatibility_property_values"
    required_params = ["category_tree_id", "compatibility_property", "category_id"]
    allowed_params = ["filter"]
from datetime import datetime
from ebay_sdk.base_trading_api import EbayTradingAPI
# Public API of this module.
__all__ = [
    "GetEbayAccountBillingInfo",
    "GetEbaySellingInfo",
    "GetEbaySellingSummary",
]
class GetEbayAccountBillingInfo(EbayTradingAPI):
    """
    Fetch the seller's eBay account billing entries (last invoice or a range).
    Docs:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/GetAccount.html
    """
    method_name = "GetAccount"
    def get_params(
        self,
        account_history_selection: str,
        begin_date: datetime = None,  # 4 months ago -- ebay limit
        end_date: datetime = None,
        **kwargs,
    ):
        """Build GetAccount call parameters.

        Raises:
            ValueError: on an unknown selection mode or a missing date range.
        """
        # BUG FIX: validation previously used `assert`, which is stripped when
        # Python runs with -O; raise explicitly so the checks always apply.
        if account_history_selection not in ["BetweenSpecifiedDates", "LastInvoice"]:
            raise ValueError("Недопустимое значение 'account_history_selection'.")
        if account_history_selection == "BetweenSpecifiedDates" and not (
            begin_date and end_date
        ):
            raise ValueError(
                "Необходимо задать временной диапазон ('begin_date' и 'end_date')."
            )
        return {
            "AccountHistorySelection": account_history_selection,
            # "AccountEntrySortType": "AccountEntryCreatedTimeDescending",
            "BeginDate": begin_date,
            "EndDate": end_date,
            "ExcludeBalance": False,
            "ExcludeSummary": False,
        }
class GetEbaySellingInfo(EbayTradingAPI):
    """
    Fetch the seller's GetMyeBaySelling containers chosen via kwargs.
    Docs:
    https://developer.ebay.com/devzone/xml/docs/reference/ebay/GetMyeBaySelling.html
    """
    method_name = "GetMyeBaySelling"
    # Every container the call can return; each is off unless requested.
    containers = [
        "ActiveList",
        "DeletedFromSoldList",
        "DeletedFromUnsoldList",
        "ScheduledList",
        "SoldList",
        "UnsoldList",
        "SellingSummary",
    ]
    def get_params(self, **kwargs):
        """Include each container only when its kwarg is truthy."""
        params = {}
        for container in self.containers:
            params[container] = {"Include": kwargs.get(container, False)}
        return params
class GetEbaySellingSummary(GetEbaySellingInfo):
    """GetMyeBaySelling restricted to the SellingSummary container."""
    def get_params(self, **kwargs):
        # Force every container off, then re-enable only the summary.
        for container in self.containers:
            kwargs[container] = False
        kwargs["SellingSummary"] = True
        return super().get_params(**kwargs)
from ebay_sdk.base_trading_api import EbayTradingAPI
from .category import GetEbayCategoryFeatures
from .messages import AbstractSendMessage
# Public API of this module.
__all__ = [
    "GetCategoryBestOfferFeatures",
    "GetListingBestOffers",
    "RespondToListingBestOffer",
    "SendBestOfferMessage",
]
class GetCategoryBestOfferFeatures(GetEbayCategoryFeatures):
    """Category features query limited to the Best Offer flags."""
    def get_params(self, category_id: int, **kwargs):
        best_offer_feature_ids = [
            "BestOfferEnabled",
            "BestOfferAutoDeclineEnabled",
            "BestOfferAutoAcceptEnabled",
        ]
        kwargs["feature_ids"] = best_offer_feature_ids
        return super().get_params(category_id=category_id, **kwargs)
class GetListingBestOffers(EbayTradingAPI):
    """
    Fetch best offers for a listing, optionally restricted to active ones.
    Docs:
    https://developer.ebay.com/DevZone/XML/docs/Reference/eBay/GetBestOffers.html
    """
    method_name = "GetBestOffers"
    def get_params(
        self,
        best_offer_id: int = None,
        item_id: int = None,
        active_only: bool = True,
        **kwargs,
    ):
        """Build GetBestOffers parameters with full detail level."""
        return {
            "BestOfferID": best_offer_id,
            "BestOfferStatus": "Active" if active_only else "All",
            "ItemID": item_id,
            "DetailLevel": "ReturnAll",
        }
class RespondToListingBestOffer(EbayTradingAPI):
    """
    Accept, counter or decline a best offer on a listing.
    Docs:
    https://developer.ebay.com/devzone/xml/docs/reference/ebay/RespondToBestOffer.html
    """
    method_name = "RespondToBestOffer"
    def get_params(
        self,
        action: str,
        best_offer_id: int,
        item_id: int,
        counter_offer_price: float = None,
        counter_offer_quantity: int = None,
        seller_response: str = None,
        currency: str = "USD",
        **kwargs,
    ):
        """Build RespondToBestOffer parameters.

        Args:
            action: one of "Accept", "Counter", "Decline".
            currency: ISO currency code for the counter-offer price.
                GENERALIZATION: previously hard-coded to "USD"; the default
                keeps existing callers' behavior unchanged.

        Raises:
            AttributeError: on an unsupported action.
            ValueError: when best_offer_id is given without item_id.
        """
        if action not in ["Accept", "Counter", "Decline"]:
            raise AttributeError('Недопустимое значение параметра "action".')
        if best_offer_id and (not item_id):
            raise ValueError(
                "Если задан 'best_offer_id', то должен быть задан 'item_id'."
            )
        return {
            "Action": action,
            "BestOfferID": best_offer_id,
            "ItemID": item_id,
            "CounterOfferPrice": counter_offer_price,
            "currencyID": currency,
            "CounterOfferQuantity": counter_offer_quantity,
            "SellerResponse": seller_response,
        }
class SendBestOfferMessage(AbstractSendMessage):
    """
    AddMemberMessagesAAQToBidder:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/AddMemberMessagesAAQToBidder.html
    HINT: item needs to be tested
    """
    method_name = "AddMemberMessagesAAQToBidder"
    def get_params(self, **kwargs):
        """Common send-message parameters plus the correlation ID this call requires."""
        params = super().get_params(**kwargs)
        params["CorrelationID"] = "1"
        return params
from datetime import datetime, timedelta
from ebay_sdk.base_trading_api import EbayTradingAPI
from ebay_sdk.data.enums import PlatformNotificationEventTypeEnum
# Public API of this module.
__all__ = [
    "GetNotificationSettings",
    "GetAppNotificationSettings",
    "GetUserNotificationSettings",
    "GetNotificationsUsage",
    "SetNotificationSettings",
    "ReviseNotifications",
    "SubscribeNotification",
    "UnSubscribeNotification",
]
class GetNotificationSettings(EbayTradingAPI):
    """
    Retrieve platform-notification preferences at the requested level.
    Docs:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/GetNotificationPreferences.html
    """
    method_name = "GetNotificationPreferences"
    def get_params(self, preference_level: str, **kwargs):
        """Build GetNotificationPreferences parameters for *preference_level*."""
        # NOTE: `assert` is stripped under `python -O`, so this validation
        # silently disappears there — consider raising explicitly.
        assert preference_level in [
            "Application",
            "Event",
            "User",
            "UserData",
        ], f'Недопустимое значение "preference_level": {preference_level}'
        return {
            "PreferenceLevel": preference_level,
            # OutputSelector could narrow the response; None returns everything.
            "OutputSelector": None,
        }
    # DEPRECATED
    # def make_request(self, **kwargs):
    #     is_success, message, objects = super().make_request(**kwargs)
    #     if objects.get("errors", []):
    #         if objects["errors"][0].get("ErrorCode", None) == "12209":
    #             is_success = True
    #             objects["results"] = []
    #     return is_success, message, objects
class GetAppNotificationSettings(GetNotificationSettings):
    """Application-level notification preferences."""
    def get_params(self, **kwargs):
        return super().get_params(**{**kwargs, "preference_level": "Application"})
class GetUserNotificationSettings(GetNotificationSettings):
    """User-level notification preferences."""
    def get_params(self, **kwargs):
        return super().get_params(**{**kwargs, "preference_level": "User"})
class GetNotificationsUsage(EbayTradingAPI):
    """
    Query notification delivery statistics for an item / time window.
    Docs:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/GetNotificationsUsage.html
    FIX: timeout problem
    """
    method_name = "GetNotificationsUsage"
    def get_params(
        self,
        remote_id: str = None,
        start_time: str = None,
        end_time: str = None,
        hours_ago: int = None,
        **kwargs,
    ):
        """Build parameters; `hours_ago` overrides explicit start/end times."""
        now = datetime.now()
        if hours_ago:
            start_time = (now - timedelta(hours=hours_ago)).isoformat()
            end_time = now.isoformat()
        elif not end_time:
            # Default the window's end to the present moment.
            end_time = now.isoformat()
        return {
            "ItemID": remote_id,
            "StartTime": start_time,
            "EndTime": end_time,
        }
class SetNotificationSettings(EbayTradingAPI):
    """
    Configure platform-notification delivery (application and user level).
    Docs:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/SetNotificationPreferences.html
    """
    method_name = "SetNotificationPreferences"
    def get_params(
        self,
        alert_enable: bool = None,
        app_enable: bool = None,
        alert_email: str = None,
        app_url: str = None,
        delivery_urls: dict = None,
        subscriptions: list = None,
        user_identifier: str = None,
        **kwargs,
    ):
        """Build SetNotificationPreferences parameters.

        Args:
            alert_enable/app_enable: tri-state — None leaves the current
                setting untouched, True/False maps to "Enable"/"Disable".
            delivery_urls: mapping of delivery URL -> enabled flag.
            subscriptions: pre-built UserDeliveryPreferenceArray payload.
            user_identifier: external user data attached to the preferences.
        """
        params = {
            "ApplicationDeliveryPreferences": {
                "AlertEmail": alert_email,
                "ApplicationURL": app_url,
                "DeviceType": "Platform",
            },
            "UserDeliveryPreferenceArray": subscriptions,
        }
        if app_enable is not None:
            params["ApplicationDeliveryPreferences"]["ApplicationEnable"] = (
                "Enable" if app_enable else "Disable"
            )
        if alert_enable is not None:
            params["ApplicationDeliveryPreferences"]["AlertEnable"] = (
                "Enable" if alert_enable else "Disable"
            )
        if user_identifier:
            params["UserData"] = {
                "ExternalUserData": user_identifier,
            }
        if delivery_urls:
            # Each URL doubles as its own DeliveryURLName.
            params["ApplicationDeliveryPreferences"].update(
                {
                    "DeliveryURLDetails": [
                        {
                            "DeliveryURL": url,
                            "DeliveryURLName": url,
                            "Status": "Enable" if enable else "Disable",
                        }
                        for url, enable in delivery_urls.items()
                    ]
                }
            )
            params["DeliveryURLName"] = ",".join(list(delivery_urls.keys()))
        return params
class ReviseNotifications(SetNotificationSettings):
    """Enable or disable a batch of platform notifications."""
    def get_params(self, notifications: list, enable: bool, **kwargs):
        state = "Enable" if enable else "Disable"
        event_prefs = []
        for notification in notifications:
            assert (
                notification in PlatformNotificationEventTypeEnum
            ), f'Недопустимое значение "notification": {notification}'
            event_prefs.append({"EventType": notification, "EventEnable": state})
        kwargs["subscriptions"] = [{"NotificationEnable": event_prefs}]
        return super().get_params(**kwargs)
class SubscribeNotification(ReviseNotifications):
    """Turn the given notifications on."""
    def get_params(self, **kwargs):
        return super().get_params(**{**kwargs, "enable": True})
class UnSubscribeNotification(ReviseNotifications):
    """Turn the given notifications off."""
    def get_params(self, **kwargs):
        return super().get_params(**{**kwargs, "enable": False})
import datetime
from ebay_sdk.base_trading_api import EbayTradingAPI
# Public API of this module.
__all__ = [
    "GetMessagesInfo",
    "GetMessageSummary",
    "GetMessageHeaderList",
    "GetMessageList",
    "SendMessage",
    "GetMemberMessageList",
    "AnswerOrderMessage",
    "MarkMessageRead",
    "MarkMessageUnread",
    "DeleteMessageList",
]
class GetMessagesInfo(EbayTradingAPI):
    """
    Base GetMyMessages call; subclasses fix the detail level.
    GetMyMessages:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/GetMyMessages.html
    """
    method_name = "GetMyMessages"
    def get_params(
        self,
        detail_level: str,
        folder_id: int = None,
        message_ids: list = None,
        days_ago: int = None,
        **kwargs,
    ):
        """Build GetMyMessages parameters.

        Args:
            detail_level: "ReturnHeaders", "ReturnMessages" or "ReturnSummary".
            folder_id: folder selector (0/1/2, as int or string).
            message_ids: up to 10 message IDs; required for "ReturnMessages".
            days_ago: restrict results to the last N days.
        """
        # BUG FIX: `message_ids` previously defaulted to a mutable `[]`, which
        # is shared across calls; use None and normalize instead.
        if message_ids is None:
            message_ids = []
        assert detail_level in ["ReturnHeaders", "ReturnMessages", "ReturnSummary"]
        assert folder_id in [None, "0", "1", "2", 0, 1, 2]
        if message_ids:
            assert (
                len(message_ids) <= 10
            ), 'Размер "message_ids" не должен превышать 10.'
            # Explicit IDs override folder and date filtering.
            folder_id = None
            days_ago = None
        elif detail_level == "ReturnMessages":
            raise AttributeError('Необходимо задать "message_ids".')
        if days_ago:
            # Small positive offset so "now" sits safely inside the range.
            end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
            start_time = end_time - datetime.timedelta(days=int(days_ago))
        else:
            start_time = None
            end_time = None
        return {
            "DetailLevel": detail_level,
            "FolderID": folder_id,
            "MessageIDs": {"MessageID": message_ids},
            "EndTime": end_time,
            "StartTime": start_time,
        }
class GetMessageSummary(GetMessagesInfo):
    """Summary-only variant of GetMyMessages."""
    def get_params(self, **kwargs):
        overrides = {
            "detail_level": "ReturnSummary",
            "folder_id": None,
            "message_ids": None,
        }
        return super().get_params(**{**kwargs, **overrides})
class GetMessageHeaderList(GetMessagesInfo):
    # Headers-only variant of GetMyMessages.
    def get_params(self, full_messages=False, **kwargs):
        # NOTE(review): `full_messages` is accepted but never used — confirm
        # whether it was meant to switch detail_level to "ReturnMessages".
        kwargs["detail_level"] = "ReturnHeaders"
        return super().get_params(**kwargs)
class GetMessageList(GetMessagesInfo):
    """Full-message variant of GetMyMessages (requires message_ids)."""
    def get_params(self, **kwargs):
        return super().get_params(**{**kwargs, "detail_level": "ReturnMessages"})
class AbstractSendMessage(EbayTradingAPI):
    """
    Abstract class.
    """

    def get_params(
        self,
        message_body: str,
        parent_message_id: str,
        message_media_url: str = None,
        item_id: str = None,
        recipient_id: str = None,
        email_copy_to_sender: bool = False,
        **kwargs,
    ):
        """Assemble the common MemberMessage payload shared by the send calls."""
        member_message = {
            "Body": message_body,
            "EmailCopyToSender": email_copy_to_sender,
            "ParentMessageID": parent_message_id,
            "RecipientID": recipient_id,
        }
        # Optional media attachment block.
        if message_media_url:
            member_message["MessageMedia"] = {
                "MediaName": "Attached media",
                "MediaURL": message_media_url,
            }
        return {"MemberMessage": member_message, "ItemID": item_id}
class SendMessage(AbstractSendMessage):
    """
    AddMemberMessageRTQ:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/AddMemberMessageRTQ.html
    """

    method_name = "AddMemberMessageRTQ"

    def get_params(self, display_to_public=False, **kwargs):
        """Build the payload; without an item a recipient is mandatory and the
        message can never be public."""
        if not kwargs.get("item_id"):
            if not kwargs.get("recipient_id"):
                raise AttributeError('Необходимо задать "recipient_id" или "item_id"')
            display_to_public = False
        params = super().get_params(**kwargs)
        params["MemberMessage"]["DisplayToPublic"] = display_to_public
        return params
class GetMemberMessageList(EbayTradingAPI):
    """
    GetMemberMessages:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/GetMemberMessages.html
    """

    method_name = "GetMemberMessages"

    def get_params(
        self,
        message_type="All",
        message_status=None,
        item_id=None,
        sender_id=None,
        days_ago: int = None,
        **kwargs,
    ):
        """Build the GetMemberMessages payload.

        Messages are selected by ``item_id``, else by ``sender_id``, else by a
        creation-time window covering the last ``days_ago`` days.

        Raises:
            AttributeError: on an invalid ``message_type``/``message_status``,
                or when no selection criterion is supplied at all.
        """
        if message_type not in ["All", "AskSellerQuestion"]:
            raise AttributeError('Недопустимое значение параметра "message_type"')
        params = {"MailMessageType": message_type}
        if message_status:
            if message_status not in ["Answered", "Unanswered"]:
                raise AttributeError('Недопустимое значение параметра "message_status"')
            params.update({"MessageStatus": message_status})
        if item_id:
            params.update({"ItemID": item_id})
        elif sender_id:
            params.update({"SenderID": sender_id})
        else:
            # BUGFIX: previously a missing days_ago crashed with
            # TypeError from timedelta(days=None); fail fast with a clear
            # message instead.
            if days_ago is None:
                raise AttributeError(
                    'Необходимо задать "item_id", "sender_id" или "days_ago".'
                )
            end_creation_time = datetime.datetime.now()
            start_creation_time = end_creation_time - datetime.timedelta(days=days_ago)
            params.update(
                {
                    "EndCreationTime": end_creation_time,
                    "StartCreationTime": start_creation_time,
                }
            )
        return params
class AnswerOrderMessage(AbstractSendMessage):
    """
    AddMemberMessageAAQToPartner:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/AddMemberMessageAAQToPartner.html
    HINT: item needs to be a part of an offer
    """

    method_name = "AddMemberMessageAAQToPartner"
    # Allowed values for the AAQ "QuestionType" field.
    question_type_enum = [
        "CustomizedSubject",
        "General",
        "MultipleItemShipping",
        "None",
        "Payment",
        "Shipping",
    ]

    def get_params(
        self, item_id, recipient_id, subject, question_type="None", **kwargs
    ):
        """Build the payload, forwarding the item/recipient to the base builder.

        BUGFIX: ``item_id`` and ``recipient_id`` were captured by this
        signature but never passed on to ``AbstractSendMessage.get_params``,
        so ItemID and RecipientID were always None in the request.
        """
        if question_type not in self.question_type_enum:
            raise AttributeError('Недопустимое значение параметра "question_type"')
        params = super().get_params(
            item_id=item_id, recipient_id=recipient_id, **kwargs
        )
        params["MemberMessage"].update(
            {
                "QuestionType": question_type,
                "Subject": subject,
            }
        )
        return params
class ReviseMessages(EbayTradingAPI):
    """
    ReviseMyMessages:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/ReviseMyMessages.html
    """

    method_name = "ReviseMyMessages"

    def get_params(
        self, message_ids: list, read: bool = None, folder_id: int = None, **kwargs
    ):
        """Build the payload; a read-state change takes precedence over a move."""
        # Flagging read/unread and moving to a folder are mutually exclusive:
        # when `read` is truthy the folder id is discarded.
        destination = None if read else folder_id
        return {
            "MessageIDs": {"MessageID": message_ids},
            "Read": read,
            "FolderID": destination,
        }
class MarkMessageRead(ReviseMessages):
    """Flag the given messages as read via ReviseMyMessages."""

    def get_params(self, **kwargs):
        return super().get_params(**{**kwargs, "read": True})
class MarkMessageUnread(ReviseMessages):
    """Flag the given messages as unread via ReviseMyMessages."""

    def get_params(self, **kwargs):
        return super().get_params(**{**kwargs, "read": False})
class DeleteMessageList(EbayTradingAPI):
    """
    Docs:
    https://developer.ebay.com/Devzone/XML/docs/Reference/eBay/DeleteMyMessages.html
    """

    method_name = "DeleteMyMessages"

    def get_params(self, message_ids: list, **kwargs):
        """Wrap the ids in the shape DeleteMyMessages expects."""
        id_container = {"MessageID": message_ids}
        return {"MessageIDs": id_container}
# Marketplace ID -> default BCP-47 language tag for that eBay site.
# Sites marked "not defined" have no documented language and fall back
# to en-US.
MarketplaceToLang = {
    "default": "en-US",
    "EBAY_US": "en-US",
    "EBAY_MOTORS_US": "en-US",
    "EBAY_CA": "en-CA",
    "EBAY_GB": "en-GB",
    "EBAY_AU": "en-AU",
    "EBAY_AT": "de-AT",
    "EBAY_BE_FR": "fr-BE",
    "EBAY_FR": "fr-FR",
    "EBAY_DE": "de-DE",
    "EBAY_IT": "it-IT",
    "EBAY_BE_NL": "nl-BE",
    "EBAY_NL": "nl-NL",
    "EBAY_ES": "es-ES",
    "EBAY_CH": "de-CH",
    "EBAY_TW": "zh-TW",
    "EBAY_CZ": "en-US",  # not defined
    "EBAY_DK": "en-US",  # not defined
    "EBAY_FI": "en-US",  # not defined
    "EBAY_GR": "en-US",  # not defined
    "EBAY_HK": "zh-HK",
    "EBAY_HU": "en-US",  # not defined
    "EBAY_IN": "en-GB",
    "EBAY_ID": "en-US",  # not defined
    "EBAY_IE": "en-IE",
    "EBAY_IL": "en-US",  # not defined
    "EBAY_MY": "en-US",
    "EBAY_NZ": "en-US",  # not defined
    "EBAY_NO": "en-US",  # not defined
    "EBAY_PH": "en-PH",
    "EBAY_PL": "pl-PL",
    "EBAY_PT": "en-US",  # not defined
    "EBAY_PR": "en-US",  # not defined
    "EBAY_RU": "ru-RU",
    "EBAY_SG": "en-US",
    "EBAY_ZA": "en-US",  # not defined
    "EBAY_SE": "en-US",  # not defined
    "EBAY_TH": "th-TH",
    "EBAY_VN": "en-US",
    "EBAY_CN": "en-US",  # not defined
    "EBAY_PE": "en-US",  # not defined
    "EBAY_CA_FR": "fr-CA",
    "EBAY_JP": "en-US",  # not defined
}
# Marketplace ID -> POSIX/Java-style locale. Every entry is exactly the
# underscore form of the BCP-47 tag above, so derive it instead of
# maintaining a second hand-written table.
MarketplaceToLocale = {
    marketplace: lang.replace("-", "_")
    for marketplace, lang in MarketplaceToLang.items()
}
from model_utils import Choices

# Order-related value sets mirrored from the eBay Fulfillment API.
# Each Choices pair maps the raw API value to a human-readable
# (Russian) display label.

# Overall cancellation state of an order.
CancelStateEnum = Choices(
    ("CANCELED", "Заказ отменён"),
    ("IN_PROGRESS", "Сделан хотя бы один запрос на отмену заказа"),
    ("NONE_REQUESTED", "Запросов на отмену заказа нет"),
)
# State of an individual cancellation request.
CancelRequestStateEnum = Choices(
    ("COMPLETED", "Продавец подтвердил отмену заказа"),
    ("REJECTED", "Продавец отказал в отмене заказа"),
    ("REQUESTED", "Запрос на отмену заказа ожидает ответа от продавца"),
)
# How the order is to be fulfilled.
FulfillmentInstructionsType = Choices(
    ("DIGITAL", "Цифровой вид"),
    ("PREPARE_FOR_PICKUP", "Готовится к In-Store Pickup"),
    ("SELLER_DEFINED", "Определяется продавцом"),
    ("SHIP_TO", "Отправляется продавцом"),
)
# Fulfillment progress of a single line item.
LineItemFulfillmentStatusEnum = Choices(
    ("FULFILLED", "Фулфилмент завершен"),
    ("IN_PROGRESS", "Фулфилмент в процессе"),
    ("NOT_STARTED", "Фулфилмент не начат"),
)
# Listing format the item was sold under.
SoldFormatEnum = Choices(
    ("AUCTION", "Аукцион"),
    ("FIXED_PRICE", "Фиксированная цена"),
    ("OTHER", "Другое"),
    ("SECOND_CHANCE_OFFER", "Second chance offer"),
)
# Fulfillment progress of the order as a whole.
OrderFulfillmentStatus = Choices(
    ("FULFILLED", "Фулфилмент завершен"),
    ("IN_PROGRESS", "Фулфилмент в процессе"),
    ("NOT_STARTED", "Фулфилмент не начат"),
)
# Payment state of the order as a whole.
OrderPaymentStatusEnum = Choices(
    ("FAILED", "Неудача"),
    ("FULLY_REFUNDED", "Деньги в полном объеме возвращены покупателю"),
    ("PAID", "Оплачено"),
    ("PARTIALLY_REFUNDED", "Деньги частично возвращены покупателю"),
    ("PENDING", "Ожидание"),
)
# Payment instrument used by the buyer.
PaymentMethodTypeEnum = Choices(
    ("CREDIT_CARD", "Банковская карта"), ("PAYPAL", "Paypal")
)
# State of a single payment.
PaymentStatusEnum = Choices(
    ("FAILED", "Неудача"),
    ("PAID", "Оплачено"),
    ("PENDING", "Ожидание"),
)
# Payment-hold lifecycle states.
PaymentHoldStateEnum = Choices(
    ("HELD", "Заморожено"),
    ("HELD_PENDING", "Ожидается заморозка"),
    ("NOT_HELD", "Не заморожено"),
    ("RELEASE_CONFIRMED", "Подтверждено размораживание"),
    ("RELEASE_FAILED", "Неудачное размораживание"),
    ("RELEASE_PENDING", "Ожидается размораживание"),
    ("RELEASED", "Разморожено"),
)
# State of a refund.
RefundStatusEnum = Choices(
    ("FAILED", "Неудача"),
    ("PENDING", "Ожидание"),
    ("REFUNDED", "Деньги возвращены"),
)
# Tax categories applied to an order.
TaxTypeEnum = Choices(
    ("GST", "Goods and Services import tax"),
    ("PROVINCE_SALES_TAX", "Provincial sales tax"),
    ("REGION", "Regional sales tax"),
    ("STATE_SALES_TAX", "State sales tax"),
    ("VAT", "Value-Added tax (VAT)"),
)
# State of a payment dispute.
DisputeStateEnum = Choices(
    ("OPEN", "Открыто"),
    ("ACTION_NEEDED", "Требуется действие"),
    ("CLOSED", "Закрыто"),
)
from model_utils import Choices

# Listing/inventory value sets mirrored from the eBay Sell Inventory API.

# Stock availability of an offer.
AvailabilityTypeEnum = Choices(
    ("IN_STOCK", "В наличии"),
    ("OUT_OF_STOCK", "Нет в наличии"),
    ("SHIP_TO_STORE", "Ожидается пополнение"),
)
# Physical package classification used for shipping calculation.
PackageTypeEnum = Choices(
    ("LETTER", "Бумага"),
    ("BULKY_GOODS", "Bulky good"),
    ("CARAVAN", "Caravan"),
    ("CARS", "Автомобиль"),
    ("EUROPALLET", "Euro pallet"),
    ("EXPANDABLE_TOUGH_BAGS", "Expandable tough bag"),
    ("EXTRA_LARGE_PACK", "Extra large pack"),
    ("FURNITURE", "Мебель"),
    ("INDUSTRY_VEHICLES", "Industry vehicle"),
    ("LARGE_CANADA_POSTBOX", "A Canada Post large box"),
    ("LARGE_CANADA_POST_BUBBLE_MAILER", "Canada Post large bubble mailer"),
    ("LARGE_ENVELOPE", "Большой конверт"),
    ("MAILING_BOX", "Mailing box"),
    ("MEDIUM_CANADA_POST_BOX", "Medium Canada Post box"),
    ("MEDIUM_CANADA_POST_BUBBLE_MAILER", "Medium Canada Post bubble mailer"),
    ("MOTORBIKES", "Мотоцикл"),
    ("ONE_WAY_PALLET", "One-way pallet"),
    ("PACKAGE_THICK_ENVELOPE", "Толстый конверт"),
    ("PADDED_BAGS", "Padded bag"),
    ("PARCEL_OR_PADDED_ENVELOPE", "Посылка или мягкий конверт"),
    ("ROLL", "Roll"),
    ("SMALL_CANADA_POST_BOX", "Small Canada Post box"),
    ("SMALL_CANADA_POST_BUBBLE_MAILER", "Small Canada Post bubble mailer"),
    ("TOUGH_BAGS", "Tough bag"),
    ("UPS_LETTER", "Письмо UPS"),
    ("USPS_FLAT_RATE_ENVELOPE", "USPS flat-rate envelope"),
    ("USPS_LARGE_PACK", "USPS large pack"),
    ("VERY_LARGE_PACK", "USPS very large pack"),
    ("WINE_PAK", "Wine pak"),
)
# Whether a shipping service is domestic or international.
ShippingServiceTypeEnum = Choices(
    ("DOMESTIC", "Внутренняя доставка"),
    ("INTERNATIONAL", "Международная доставка"),
)
# Where the "sold at price" claim applies.
SoldOnEnum = Choices(
    ("ON_EBAY", "Товар продавался по указанной цене на сайте eBay"),
    ("OFF_EBAY", "Товар продавался по указанной цене на сторонних сайтах"),
    (
        "ON_AND_OFF_EBAY",
        "Товар продавался по указанной цене как на сайте eBay, так и на сторонних сайтах",
    ),
)
# How a minimum-advertised-price is revealed to the buyer.
MinimumAdvertisedPriceHandlingEnum = Choices(
    ("NONE", "Не использовать"),
    ("PRE_CHECKOUT", "До оформления заказа"),
    ("DURING_CHECKOUT", "После оформления заказа"),
)
# https://developer.ebay.com/api-docs/sell/inventory/types/slr:ListingStatusEnum
ListingStatusEnum = Choices(
    ("ACTIVE", "ACTIVE"),
    ("OUT_OF_STOCK", "OUT_OF_STOCK"),
    ("INACTIVE", "INACTIVE"),
    ("ENDED", "ENDED"),
    ("EBAY_ENDED", "EBAY_ENDED"),
    ("NOT_LISTED", "NOT_LISTED"),
)
from model_utils import Choices

# Shipping-carrier code tables from the eBay Trading API.

# Short list of the most commonly used carriers.
ShippingCarriersEnum = Choices(
    ("Other", "Другое"),
    ("DHL", "DHL Express"),
    ("DHLEKB", "DHL EKB"),
    ("DHLEXPRESS", "DHL Express"),
    ("DHLGlobalMail", "DHL Global Mail"),
    ("UPS", "United Parcel Service"),
    ("USPS", "U.S. Postal Service"),
    ("GENERIC", "Generic"),
)
# Full carrier code table (code -> display name).
AllShippingCarriersEnum = Choices(
    ("Other", "Use this code for any carrier not listed here."),
    ("A1CourierServices", "A-1 Courier"),
    ("ABF", "ABF Freight"),
    ("AeroPost", "AeroPost"),
    ("ALLIEDEXPRESS", "Allied Express"),
    ("AMWST", "AMWST"),
    ("AnPost", "An Post"),
    ("APC", "APC Postal Logistics"),
    ("ARAMEX", "Aramex"),
    ("ARVATO", "Arvato"),
    ("ASM", "ASM"),
    ("AustralianAirExpress", "Australian Air Express"),
    ("AustraliaPost", "Australia Post"),
    ("AVRT", "Averitt Express"),
    ("Bartolini", "BRT Bartolini"),
    ("BELGIANPOST", "Belgian Post Group"),
    ("BKNS", "BKNS"),
    ("BluePackage", "Blue Package Delivery"),
    ("BPost", "bpost"),
    ("BusinessPost", "BusinessPost"),
    ("CanPar", "Canpar Courier"),
    ("CENF", "Central Freight Lines"),
    ("CEVA", "CEVA Logistics"),
    ("ChinaPost", "China Post"),
    ("Chronoexpres", "Chronoexpres"),
    ("Chronopost", "Chronopost"),
    ("CHUKOU1", "Chukou1"),
    ("ChunghwaPost", "Chunghwa Post"),
    ("CitiPost", "CitiPost"),
    ("CityLink", "Citylink"),
    ("ClickandQuick", "Click & Quick"),
    ("CNWY", "XPO Logistics (formerly Con-way Freight)"),
    ("ColiposteDomestic", "Coliposte Domestic"),
    ("ColiposteInternational", "Coliposte International"),
    ("Colissimo", "Colissimo"),
    ("CollectPlus", "CollectPlus"),
    ("Correos", "Correos"),
    ("CPC", "CPC Logistics"),
    ("DAIPost", "DAI Post"),
    ("DayandRoss", "Day & Ross"),
    ("DBSchenker", "DB Schenker"),
    ("DeutschePost", "Deutsche Post"),
    ("DHL", "DHL Express"),
    ("DHLEKB", "DHL EKB"),
    ("DHLEXPRESS", "DHL Express"),
    ("DHLGlobalMail", "DHL Global Mail"),
    ("DieSchweizerischePost", "Die Schweizerische Post"),
    ("DPD", "DPD (Dynamic Parcel Distribution)"),
    ("DPXThailand", "DPX Thailand"),
    ("EGO", "E-go"),
    ("Exapaq", "DPD France (formerly Exapaq)"),
    ("Fastway", "Fastway"),
    ("FASTWAYCOURIERS", "Fastway Couriers"),
    ("FedEx", "FedEx"),
    ("FedExSmartPost", "FedEx SmartPost"),
    ("FLYT", "Flyt"),
    ("FLYTExpress", "Flyt Express"),
    ("FlytExpressUSDirectline", "Flyt Express US Direct line"),
    ("FourPX", "4PX"),
    ("FourPXCHINA", "4PX China"),
    ("FourPXExpress", "4PX Express"),
    ("FourPXLTD", "4PX Express Co. Ltd"),
    ("FulfilExpressAccStation", "FulfilExpress-AccStation"),
    ("FulfilExpresseForCity", "FulfilExpress-eForCity"),
    ("FulfilExpressEverydaySource", "FulfilExpress-EverydaySource"),
    ("FulfilExpressiTrimming", "FulfilExpress-iTrimming"),
    ("GLS", "GLS (General Logistics Systems)"),
    ("HDUSA", "MXD Group (formerly Home Direct USA)"),
    ("Hermes", "Hermes Group"),
    ("HongKongPost", "Hong Kong Post"),
    ("HUNTEREXPRESS", "Hunter Express"),
    ("iLoxx", "iloxx eService"),
    ("IndiaPost", "India Post"),
    ("IndonesiaPost", "Indonesia Post"),
    ("Interlink", "Interlink Express"),
    ("InterPost", "InterPost"),
    ("IoInvio", "IoInvio"),
    ("Iparcel", "UPS i-parcel"),
    ("IsraelPost", "Israel Post"),
    ("JapanPost", "Japan Post"),
    ("KIALA", "Kiala (UPS Access Point)"),
    ("KoreaPost", "Korea Post"),
    ("Landmark", "Landmark Global"),
    ("LAPOSTE", "La Poste"),
    ("MALAYSIAPOST", "Malaysia Post"),
    ("MannaFreight", "Manna Distribution Services"),
    ("Metapack", "Metapack"),
    ("MNGTurkey", "MNG Kargo"),
    ("MondialRelay", "Mondial Relay"),
    ("MRW", "MRW"),
    ("MSI", "MSI Transportation"),
    ("Nacex", "Nacex"),
    ("NEMF", "New England Motor Freight"),
    ("ODFL", "Old Dominion Freight Line"),
    ("ONTRACK", "OnTrac Shipping"),
    ("OsterreichischePostAG", "Osterreichische Post"),
    ("Parcelforce", "Parcelforce"),
    ("ParcelPool", "International Bridge Domestic delivery"),
    ("Philpost", "PHLPost (Philippine Postal Corporation)"),
    ("Pilot", "Pilot Freight Services"),
    ("PITD", "PITT OHIO"),
    ("PocztaPolska", "Poczta Polska"),
    ("Pocztex", "Pocztex"),
    ("PosteItaliane", "Poste Italiane"),
    ("POSTITALIANO", "Post Italiano"),
    ("PostNL", "PostNL"),
    ("PostNordNorway", "PostNord"),
    ("Quantium", "Quantium Solutions"),
    ("RETL", "Reddaway"),
    ("RoyalMail", "Royal Mail"),
    ("SAIA", "Saia LTL Freight"),
    ("SDA", "SDA Express Courier"),
    ("SINGAPOREPOST", "Singapore Post"),
    ("Siodemka", "Siodemka (DPD Poland)"),
    ("SioliandFontana", "Sioli & Fontana"),
    ("SkynetMalaysia", "Skynet (Malaysia)"),
    ("SMARTSEND", "Smart Send Courier Service"),
    ("Sogetras", "SGT Corriere Espresso"),
    ("Spediamo", "Spediamo"),
    ("SpeeDee", "Spee-Dee Delivery Service"),
    ("StarTrack", "StarTrack"),
    ("SuntekExpressLTD", "Suntek Express LTD"),
    ("SwissPost", "Swiss Post"),
    ("TELE", "TELE"),
    ("TEMANDO", "Temando (shipping broker)"),
    ("THAILANDPOST", "Thailand Post"),
    ("Toll", "Toll (Japan Post)"),
    ("TPG", "TPG Logistics"),
    ("UBI", "UBI Smart Parcel"),
    ("UKMail", "UK Mail"),
    ("UPS", "United Parcel Service"),
    ("USPS", "U.S. Postal Service"),
    ("USPSCeP", "USPS Commercial ePacket"),
    ("USPSPMI", "USPS Priority Mail International"),
    ("VietnamPost", "Vietnam Post"),
    ("VITR", "Vitran Express"),
    ("Winit", "WIN.IT America"),
    ("WNdirect", "wnDirect"),
    ("WPX", "WPX Delivery Solutions"),
    ("YANWEN", "YANWEN Express"),
    ("Yodel", "Yodel"),
    ("YRC", "YRC Freight"),
)
import os
import time
import cv2
import numpy as np
from openvino.inference_engine import IECore # openvino/inference_engine/ie_api.so
class Model_SCRFD_Openvino:
    """SCRFD-500M face detector running on the OpenVINO CPU backend.

    Original benchmark notes (openvino==2022.2.0, 910 sample photos):
      FP32 IR (~2.6 MB): mean time ~0.01054 s/image
      FP16 IR (~1.4 MB): mean time ~0.01105 s/image
    """

    def __init__(self, fp16=True):
        """Load the SCRFD IR model (FP16 by default) and warm it up."""
        pth_dir = os.path.dirname(os.path.abspath(__file__))
        if fp16:
            path = os.path.join(pth_dir, 'checkpoints', 'openvino_fp16', 'scrfd_500m_kps.xml')
        else:
            path = os.path.join(pth_dir, 'checkpoints', 'openvino', 'scrfd_500m_kps.xml')
        ie = IECore()
        net = ie.read_network(model=path)
        net.batch_size = 1
        self.model = ie.load_network(network=net, device_name="CPU")
        print(f'加载人脸检测openvino模型:{path}')
        time.sleep(2)
        # Network I/O (for reference): input 'images' is 1x3x640x640,
        # outputs are 'out0'..'out8' (scores / boxes / keypoints per stride).
        self.inpWidth = 640
        self.inpHeight = 640
        self.confThreshold = 0.5  # score cutoff before NMS
        self.nmsThreshold = 0.5   # NMS IoU threshold
        self.keep_ratio = True    # letterbox instead of plain resize
        self.fmc = 3              # outputs per FPN level (score, bbox, kps)
        self._feat_stride_fpn = [8, 16, 32]
        self._num_anchors = 2
        self.dry_run()

    def dry_run(self):
        """Warm up the network with dummy input so first real call is fast."""
        dummy_input = np.ones((1, 3, 640, 640), np.float32)
        input_name = 'images'
        for i in range(10):
            print('dry_run', i)
            self.model.infer(inputs={input_name: dummy_input})

    def resize_image(self, srcimg):
        """Letterbox-resize to 640x640, returning the padded image plus the
        resized size (newh, neww) and padding offsets (padh, padw)."""
        padh, padw, newh, neww = 0, 0, self.inpHeight, self.inpWidth
        if self.keep_ratio and srcimg.shape[0] != srcimg.shape[1]:
            hw_scale = srcimg.shape[0] / srcimg.shape[1]
            if hw_scale > 1:
                # Taller than wide: fit height, pad left/right.
                newh, neww = self.inpHeight, int(self.inpWidth / hw_scale)
                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
                padw = int((self.inpWidth - neww) * 0.5)
                img = cv2.copyMakeBorder(img, 0, 0, padw, self.inpWidth - neww - padw, cv2.BORDER_CONSTANT,
                                         value=0)  # add border
            else:
                # Wider than tall: fit width, pad top/bottom.
                newh, neww = int(self.inpHeight * hw_scale) + 1, self.inpWidth
                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
                padh = int((self.inpHeight - newh) * 0.5)
                img = cv2.copyMakeBorder(img, padh, self.inpHeight - newh - padh, 0, 0, cv2.BORDER_CONSTANT, value=0)
        else:
            img = cv2.resize(srcimg, (self.inpWidth, self.inpHeight), interpolation=cv2.INTER_AREA)
        return img, newh, neww, padh, padw

    def distance2bbox(self, points, distance, max_shape=None):
        """Decode (left, top, right, bottom) distances from anchor centers
        into (x1, y1, x2, y2) boxes."""
        x1 = points[:, 0] - distance[:, 0]
        y1 = points[:, 1] - distance[:, 1]
        x2 = points[:, 0] + distance[:, 2]
        y2 = points[:, 1] + distance[:, 3]
        if max_shape is not None:
            x1 = x1.clamp(min=0, max=max_shape[1])
            y1 = y1.clamp(min=0, max=max_shape[0])
            x2 = x2.clamp(min=0, max=max_shape[1])
            y2 = y2.clamp(min=0, max=max_shape[0])
        return np.stack([x1, y1, x2, y2], axis=-1)

    def distance2kps(self, points, distance, max_shape=None):
        """Decode keypoint offsets (pairs of dx, dy) relative to anchor centers."""
        preds = []
        for i in range(0, distance.shape[1], 2):
            px = points[:, i % 2] + distance[:, i]
            py = points[:, i % 2 + 1] + distance[:, i + 1]
            if max_shape is not None:
                px = px.clamp(min=0, max=max_shape[1])
                py = py.clamp(min=0, max=max_shape[0])
            preds.append(px)
            preds.append(py)
        return np.stack(preds, axis=-1)

    def run(self, srcimg):
        """Detect faces in a BGR image.

        Returns an (n, 15) float array per face:
        [x1, y1, x2, y2, 5x(kp_x, kp_y), score] in original image
        coordinates, or an empty array when no face is found.
        """
        ori_height, ori_width = srcimg.shape[0:2]
        img, newh, neww, padh, padw = self.resize_image(srcimg)
        data = cv2.dnn.blobFromImage(img, 1.0 / 128, (self.inpWidth, self.inpHeight), (127.5, 127.5, 127.5),
                                     swapRB=True)
        # data: float32 (1, 3, 640, 640), RGB, normalised to ~[-1, 1].
        input_name = 'images'
        # Reorder the 9 raw outputs into (score, bbox, kps) per FPN stride:
        # stride 8 -> out0/out3/out6, stride 16 -> out1/out4/out7,
        # stride 32 -> out2/out5/out8.
        results = self.model.infer(inputs={input_name: data})
        outs = [results['out0'], results['out3'], results['out6'], results['out1'], results['out4'],
                results['out7'], results['out2'], results['out5'], results['out8']]
        scores_list, bboxes_list, kpss_list = [], [], []
        for idx, stride in enumerate(self._feat_stride_fpn):  # strides [8, 16, 32]; 2 anchors per cell
            scores = outs[idx * self.fmc][0]
            bbox_preds = outs[idx * self.fmc + 1][0] * stride
            kps_preds = outs[idx * self.fmc + 2][0] * stride
            height = self.inpHeight // stride
            # BUGFIX: was `self.inpHeight // stride` — harmless only because
            # the input is square (640x640); use the width for the width.
            width = self.inpWidth // stride
            # Anchor-center grid in input-pixel coordinates.
            anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
            anchor_centers = (anchor_centers * stride).reshape((-1, 2))
            if self._num_anchors > 1:
                # Duplicate each center once per anchor.
                anchor_centers = np.stack([anchor_centers] * self._num_anchors, axis=1).reshape((-1, 2))
            pos_inds = np.where(scores >= self.confThreshold)[0]
            bboxes = self.distance2bbox(anchor_centers, bbox_preds)
            pos_scores = scores[pos_inds]
            pos_bboxes = bboxes[pos_inds]
            scores_list.append(pos_scores)
            bboxes_list.append(pos_bboxes)
            kpss = self.distance2kps(anchor_centers, kps_preds)
            kpss = kpss.reshape((kpss.shape[0], -1, 2))
            pos_kpss = kpss[pos_inds]
            kpss_list.append(pos_kpss)
        scores = np.vstack(scores_list).ravel()  # flatten
        bboxes = np.vstack(bboxes_list)
        kpss = np.vstack(kpss_list)
        bboxes[:, 2:4] = bboxes[:, 2:4] - bboxes[:, 0:2]  # (ltx,lty,rbx,rby) -> (ltx,lty,w,h)
        # Undo the letterbox: remove padding, rescale to original size.
        ratioh, ratiow = srcimg.shape[0] / newh, srcimg.shape[1] / neww
        bboxes[:, 0] = (bboxes[:, 0] - padw) * ratiow
        bboxes[:, 1] = (bboxes[:, 1] - padh) * ratioh
        bboxes[:, 2] = bboxes[:, 2] * ratiow
        bboxes[:, 3] = bboxes[:, 3] * ratioh
        kpss[:, :, 0] = (kpss[:, :, 0] - padw) * ratiow
        kpss[:, :, 1] = (kpss[:, :, 1] - padh) * ratioh
        # NMSBoxes returns kept indices (shape differs between OpenCV
        # versions: (n,1) or an empty tuple); len() works for both.
        indices = cv2.dnn.NMSBoxes(bboxes.tolist(), scores.tolist(), self.confThreshold, self.nmsThreshold)
        if len(indices) == 0:
            return np.array([])
        else:
            # One or more faces survived NMS; assemble the output rows.
            face_positions = []
            for i in range(len(indices)):
                face_position = []
                index = indices[i]
                ltx = int(bboxes[index, 0])
                lty = int(bboxes[index, 1])
                rbx = int(bboxes[index, 0] + bboxes[index, 2])
                rby = int(bboxes[index, 1] + bboxes[index, 3])
                ltx = self.clamp(0, ori_width, ltx)
                lty = self.clamp(0, ori_height, lty)
                rbx = self.clamp(0, ori_width, rbx)
                rby = self.clamp(0, ori_height, rby)
                face_position.extend([ltx, lty, rbx, rby])
                for j in range(5):
                    x = int(kpss[index, j, 0])
                    y = int(kpss[index, j, 1])
                    x = self.clamp(0, ori_width, x)
                    y = self.clamp(0, ori_height, y)
                    face_position.extend([x, y])
                face_position.append(float(scores[index]))
                face_positions.append(face_position)
            face_positions = np.array(face_positions)
            return face_positions

    def clamp(self, min_value, max_value, data):
        """Clip ``data`` into the inclusive range [min_value, max_value]."""
        if data < min_value:
            data = min_value
        elif data > max_value:
            data = max_value
        return data
ZeroStack Post Install Test
===========================
A set of small tests to ensure your ZeroStack cluster is running properly.
**Dependencies**
In order to run the post install tests download the cloud admin RC and certificate files out of the admin.local BU.
1. Zerostack rc file
2. Zerostack certificate file
**RC and Cert Files**
Pull the RC file from the newly installed ZeroStack cluster.
LogIn -> Click on BU List -> Click on admin.local BU -> Click on Project -> zs_default -> More tab -> API
When you get the RC file, make sure to specify your password and the fully qualified path to the cert file.
**Run Tests**
$ source ~/zsrc.txt
$ zs-post-install
If you have already run the test, and would like to run it again, delete the PostInstall BU that was created.
**OS Requirements**
CentOS 7
$ yum install -y epel-release,python-pip,hdparm
Ubuntu 14.04 / 16.04 / 18.04
$ apt install -y python-pip
**Pre-flight Prerequisites**
PIP will install all of the packages needed to run the ZS-Post-Install test, if they are not present.
$ pip install urllib3==1.24.1
**Installing**
To install the preflight check on the system, follow these steps. Make sure all of the pre-requisite packages have been installed.
$ pip install zs-postinstall-test
**Running the tests**
Run the post install test with the following command.
$ zs-post-install
Build and Submit
----------------
**GIT - development / nightly**
1. git clone https://github.com/Zerostack-open/post-install-test.git
2. cd zs-post-install-test
3. python setup.py bdist_wheel
**PIP - Development**
1. sudo python -m pip install --upgrade pip setuptools wheel
2. sudo python -m pip install tqdm
3. sudo python -m pip install --user --upgrade twine
Authors
-------
**Jonathan Arrance** - *Initial work* - [Zerostack-open](https://github.com/Zerostack-open)
See also the list of [contributors](https://github.com/JonathanArrance) who participated in this project.
License
-------
This project is licensed under the MIT License - see the [LICENSE.md](https://github.com/Zerostack-open/zs-post-install-test/blob/master/LICENSE) file for details
| zs-postinstall-test | /zs_postinstall_test-0.0.1b0-py3-none-any.whl/zs_postinstall_test-0.0.1b0.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
import subprocess
import socket
import os
class host_check():
    """Hardware preflight checks for the local host.

    Every check returns a dict with the keys:
      out      -- measured value (or None on error)
      result   -- 'Pass' / 'Fail' (or the caught exception on error)
      optional -- whether a failure should be treated as non-blocking
      text     -- human-readable name of the check
    """

    def __init__(self):
        pass

    @staticmethod
    def _run_command(command):
        """Run a shell command and return its decoded, stripped stdout.

        BUGFIX: the previous code applied str() to the bytes returned by
        communicate() on Python 3, yielding a "b'...'" repr string that can
        never be split on real newlines.
        """
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
        (output, err) = proc.communicate()
        if isinstance(output, bytes):
            output = output.decode('utf-8', 'replace')
        return output.strip()

    def host_ipmi(self):
        # Check whether the host exposes an IPMI device (dmidecode type 38).
        try:
            output = self._run_command("sudo dmidecode --type 38").split('\n')
            out = {'out': False, 'result': 'Fail', 'optional': True, 'text': 'Host IPMI'}
            if ('IPMI Device Information' in output):
                out = {'out': True, 'result': 'Pass', 'optional': True, 'text': 'Host IPMI'}
        except Exception as e:
            out = {'out': None, 'result': e, 'optional': True, 'text': 'Host IPMI'}
        return out

    def host_usb(self):
        # Count USB2 controllers visible on the PCI bus.
        try:
            lines = self._run_command("sudo lspci | grep 'USB2'").split('\n')
            # BUGFIX: an empty grep result used to count as one port because
            # ''.split('\n') yields [''] -- skip blank lines.
            ports = len([line for line in lines if line.strip()])
            out = {'out': 0, 'result': 'Fail', 'optional': False, 'text': 'Host USB2 Ports'}
            if ports > 0:
                out = {'out': ports, 'result': 'Pass', 'optional': False, 'text': 'Host USB2 Ports'}
        except Exception as e:
            out = {'out': None, 'optional': False, 'result': e, 'text': 'Host USB2 Ports'}
        return out

    def host_disks(self):
        # List block devices via lsblk, dropping the NAME header row.
        try:
            disks = self._run_command("sudo lsblk -d -o name").split('\n')
            disks.pop(0)
            out = {'out': 0, 'result': 'Fail', 'optional': False, 'text': 'Host Disks'}
            # BUGFIX: was `output > 0`, a list-to-int comparison that raises
            # TypeError on Python 3.
            if len(disks) > 0:
                out = {'out': disks, 'result': 'Pass', 'optional': False, 'text': 'Host Disks'}
        except Exception as e:
            out = {'out': None, 'optional': False, 'result': e, 'text': 'Host Disks'}
        return out

    def host_memory(self):
        # Total RAM in GiB; 64 GB is the minimum, 96 GB is recommended.
        out = {'out': 0, 'result': 'Fail', 'optional': False, 'text': 'Host Memory'}
        try:
            mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
            mem_gib = int(mem_bytes / (1024. ** 3))
            if(mem_gib >= 96):
                out = {'out': str(mem_gib)+'GB'+' - recommended amount of memory', 'result': 'Pass', 'optional': False, 'text': 'Host Memory'}
            elif(mem_gib >= 64):
                # BUGFIX: was `== 64`, which failed hosts with 65-95 GB of RAM.
                out = {'out': str(mem_gib)+'GB'+' - minimum, 96GB recommended.', 'result': 'Pass', 'optional': False, 'text': 'Host Memory'}
            else:
                out = {'out': str(mem_gib)+'GB'+' - minimum memory, 64GB, not available', 'result': 'Fail', 'optional': False, 'text': 'Host Memory'}
        except Exception as e:
            out = {'out': None, 'optional': False, 'result': e, 'text': 'Host Memory'}
        return out

    def host_name(self):
        # Report the host's configured name (informational, always passes).
        hostname = socket.gethostname()
        return {'out': hostname, 'result': 'Pass', 'optional': True, 'text': 'Host Name'}
import subprocess
class compute_check():
    """CPU preflight checks.

    `lscpu` is parsed once in __init__; every public method reads from the
    resulting dict and returns {'result', 'optional', 'out', 'text', ...}.
    """

    def __init__(self):
        self.cpuinfo = self._get_cpu_info()

    def cpu_architecture(self):
        # The platform must be x86-64.
        if(self.cpuinfo['Architecture'] == 'x86_64'):
            return {'result':'Pass','optional':False,'out':self.cpuinfo['Architecture'],'text':'CPU Architecture'}
        else:
            return {'result':'Fail','optional':False,'out':self.cpuinfo['Architecture'],'text':'CPU Architecture'}

    def cpu_type(self):
        # The CPU must be from Intel or AMD.
        if(self.cpuinfo['Vendor ID'] == 'GenuineIntel' or self.cpuinfo['Vendor ID'] == 'AuthenticAMD'):
            return {'result':'Pass','optional':False,'out':self.cpuinfo['Vendor ID'],'text':'CPU Vendor'}
        else:
            return {'result':'Fail','optional':False,'out':self.cpuinfo['Vendor ID'],'text':'CPU Vendor'}

    def cpu_core_count(self):
        # Count physical cores (and note hyperthreading); at least 4 physical
        # cores per socket are required to pass.
        out = {}
        if(int(self.cpuinfo['Thread(s) per core']) >= 2):
            self.cpuinfo['hyperthreading'] = True
            self.htcores = int(self.cpuinfo['Thread(s) per core']) * int(self.cpuinfo['Core(s) per socket'])
            out = {
                'cpu_sockets':self.cpuinfo['CPU(s)'],
                'cpu_cores':self.cpuinfo['Core(s) per socket'],
                'hyperthreading':self.cpuinfo['hyperthreading'],
                'out':self.htcores,
                'result':'Fail',
                'optional':False,
                'text':'Physical CPU Cores + HT'
            }
        elif(int(self.cpuinfo['Thread(s) per core']) == 1):
            self.cpuinfo['hyperthreading'] = False
            self.htcores = int(self.cpuinfo['Core(s) per socket'])
            out = {
                'cpu_sockets':self.cpuinfo['CPU(s)'],
                'cpu_cores':self.cpuinfo['Core(s) per socket'],
                'hyperthreading':self.cpuinfo['hyperthreading'],
                'out':self.htcores,
                'result':'Fail',
                'optional':False,
                'text':'Physical CPU Cores'
            }
        else:
            out = {
                'cpu_cores':1,
                'cpu_sockets':1,
                'hyperthreading':False,
                'out':1,
                'result':'Fail',
                'optional':False,
                'text':'Physical CPU Cores'
            }
        if(int(self.cpuinfo['Core(s) per socket']) >= 4):
            out['result'] = 'Pass'
            out['text'] = 'Physical cores'
        return out

    def cpu_virt_extensions(self):
        # Hardware virtualization extensions (VT-x / AMD-V) must be present.
        if('Virtualization' not in self.cpuinfo):
            return {'result':'Fail',
                    'optional':False,
                    'cpuvendor':self.cpuinfo['Vendor ID'],
                    'out':'Unknown',
                    'text':'Virtual extensions'
                    }
        if(self.cpuinfo['Vendor ID'] == 'GenuineIntel'):
            if(self.cpuinfo['Virtualization'] == 'VT-x'):
                return {'result':'Pass',
                        'optional':False,
                        'cpuvendor':self.cpuinfo['Vendor ID'],
                        'out':self.cpuinfo['Virtualization'],
                        'text':'Virtual extensions'
                        }
        elif(self.cpuinfo['Vendor ID'] == 'AuthenticAMD'):
            if(self.cpuinfo['Virtualization'] == 'AMD-V'):
                return {'result':'Pass',
                        'optional':False,
                        'cpuvendor':self.cpuinfo['Vendor ID'],
                        'out':self.cpuinfo['Virtualization'],
                        'text':'Virtual extensions'
                        }
        # BUGFIX: the old code fell off the end (returning None) when the
        # vendor matched but the extension did not; always return a dict.
        return {'result':'Fail',
                'optional':False,
                'cpuvendor':self.cpuinfo['Vendor ID'],
                'out':self.cpuinfo.get('Virtualization'),
                'text':'Virtual extensions'
                }

    def cpu_virtualized(self):
        # Fail if this host is itself a VM (a hypervisor vendor is reported).
        if('Hypervisor vendor' in self.cpuinfo):
            self.virtcpu = True
            self.hypervisor = self.cpuinfo['Hypervisor vendor']
            self.virttype = self.cpuinfo['Virtualization type']
            self.result = 'Fail'
        else:
            self.virtcpu = False
            self.hypervisor = None
            self.virttype = None
            self.result = 'Pass'
        return {'text':'CPU Not Virtualized','result':self.result,'optional':False,'virtualcpu':self.virtcpu,'out':self.hypervisor,'virtualization':self.virttype}

    def _get_cpu_info(self):
        """Run `lscpu` and return its output as a {field: value} dict.

        On failure the caught exception object is returned (pre-existing
        behaviour kept for compatibility with callers).
        """
        try:
            proc = subprocess.Popen("lscpu", stdout=subprocess.PIPE, shell=True)
            (output, err) = proc.communicate()
            # BUGFIX: communicate() returns bytes on Python 3; str() produced
            # a "b'...'" repr that never split on real newlines.
            if isinstance(output, bytes):
                output = output.decode('utf-8', 'replace')
            cpu_dict = {}
            for line in output.strip().split('\n'):
                # partition (not split) keeps values containing ':' intact and
                # safely skips lines without a separator.
                key, sep, value = line.partition(':')
                if sep:
                    cpu_dict[key.strip()] = value.strip()
            return cpu_dict
        except Exception as e:
            return e
    """
    #coming soon
    def _get_cpu_family(self,cpu_type,cpu_model):
        if(cpu_type == 'AuthenticAMD'):
            self.cpus = [{'cpu_model':}]
            pass
        else:
            #intel
            self.cpus = [{}]
            pass
        for cpu in cpus:
            if cpu_model == cpu['cpu_model']:
                return cpu
    """
import subprocess
import fnmatch
class storage_check():
    """Preflight checks for host disks and storage controllers.

    All discovery and benchmarking shells out to lsblk/hdparm/dd/lspci, so
    most methods degrade to a 'Fail'/empty result instead of raising.
    """
    def __init__(self):
        #discover disks and controllers once up front
        self.drives = self._get_drives()
        self.controllers = self._get_storage_controllers()
    def internal_drive_count(self):
        """Pass when the host has at least two disks (SSD + HDD combined)."""
        ssd = self.drives['ssd_host_disks']
        hdd = self.drives['hdd_host_disks']
        total = ssd + hdd
        out = {'out':total,'result':'Fail','optional':False,'text':'Drive Count'}
        if(total >= 2):
            out = {'out':total,'result':'Pass','optional':False,'text':'Drive Count'}
        return out
    def check_disk_size(self):
        """Flag each discovered disk 'Pass'/'Fail' against a 512 MB minimum.

        lsblk sizes such as '500G'/'1T' are normalized to MB before the
        comparison. Mutates and returns self.drives with the annotated list
        under 'out'.
        """
        drive = []
        for disk in self.drives['disks']:
            size = float(disk['size'][:-1])
            #convert size to MB (anything without a G/T suffix is taken as MB)
            if(disk['size'][-1:] == 'G'):
                size = size * 1000
            elif(disk['size'][-1:] == 'T'):
                size = size * 1000000
            if(size >= 512):
                self.valid_size = 'Pass'
            else:
                self.valid_size = 'Fail'
            disk['size_valid'] = self.valid_size
            drive.append(disk)
        self.drives['out'] = drive
        self.drives['text'] = 'Disk Specs'
        return self.drives
    #TODO: check_disk_controller -- verify that disks sit behind a raid controller
    def get_disk_controllers(self):
        """Optional check listing the discovered IDE/SATA/RAID/SCSI controllers."""
        out = {'out':[],'optional':True,'result':'Pass','text':'Storage Controllers'}
        if(len(self.controllers['storage_controllers'])):
            out = {'out':self.controllers['storage_controllers'],'optional':True,'result':'Pass','text':'Storage Controllers'}
        return out
    def get_disk_IO(self):
        """Run the generic read/write benchmarks for the discovered disks."""
        drive_out = self._get_drive_stats()
        return drive_out
    ###########Internal
    def _get_drive_stats(self):
        """Benchmark every discovered disk.

        Runs 'hdparm -Tt' for read timings plus a small synchronous 'dd'
        write, returning a list of result dicts (one per test per disk).
        Fixes vs the original: 'outputs' is reset per drive (it used to be
        undefined -- or stale from the previous drive -- when hdparm failed),
        hdparm segments without '=' are skipped instead of raising
        IndexError, and the python2-only 'print e' statements are now calls.
        """
        drives = self._get_drives()
        drive_out = []
        for drive in drives['disks']:
            #get drive reads
            outputs = []
            try:
                proc = subprocess.Popen("sudo hdparm -Tt /dev/%s"%(drive['name']), stdout=subprocess.PIPE, shell=True)
                (output,err) = proc.communicate()
                outputs = str(output).strip().split('\n')
                outputs.pop(0)
            except Exception as e:
                print(e)
            #get the drive writes
            try:
                spec2 = {}
                proc2 = subprocess.check_output(['sudo', 'dd', 'if=/dev/zero', 'of=/dev/%s'%(drive['name']),'bs=1024','count=1000','oflag=dsync'], stderr=subprocess.STDOUT)
                outputs2 = str(proc2).strip().split('\n')
                spec2['name'] = drive['name']
                spec2['test'] = 'Write 1024KB, Count 1000'
                outputs2 = str(outputs2[2]).split(',')
                spec2['speed'] = str(outputs2[3]).strip()
                spec2['throughput'] = "1024kb in %s"%str(outputs2[2]).strip()
                drive_out.append(spec2)
            except Exception as e:
                print(e)
            #Build the drive read formatted out
            for output in outputs:
                spec = {}
                split = output.split(':')
                spec['name'] = drive['name']
                spec['test'] = str(split[0]).strip()
                for s in split:
                    #only the 'value = unit' half of an hdparm line carries
                    #numbers; the label before the ':' has no '=' and used to
                    #crash the parse with an IndexError
                    if '=' not in s:
                        continue
                    speed = s.split('=')
                    spec['speed'] = str(speed[1]).strip()
                    spec['throughput'] = str(speed[0]).strip()
                drive_out.append(spec)
        return drive_out
    def _get_drives(self):
        """Enumerate block devices via lsblk, counting SSDs vs spinning disks.

        Optical drives (sr*) are skipped; lsblk's ROTA column distinguishes
        rotational (hdd) from non-rotational (ssd) devices.
        """
        try:
            proc = subprocess.Popen("lsblk -d -o name,rota,size", stdout=subprocess.PIPE, shell=True)
            (output,err) = proc.communicate()
            output = str(output).strip()
            output = output.split('\n')
            #drop the lsblk header row
            output.pop(0)
            disks = []
            ssd_count = 0
            hdd_count = 0
            out = {'ssd_host_disks':ssd_count,'hdd_host_disks':hdd_count,'disks':disks,'result':False,'optional':False}
            for o in output:
                disk = {}
                split = o.split()
                disk['name'] = split[0]
                if(fnmatch.fnmatch(disk['name'], 'sr*')):
                    continue
                if(split[1] == '1'):
                    hdd_count += 1
                    disk['type'] = 'hdd'
                else:
                    ssd_count += 1
                    disk['type'] = 'ssd'
                disk['size'] = split[2]
                disks.append(disk)
            out = {'ssd_host_disks':ssd_count,'hdd_host_disks':hdd_count,'disks':disks,'result':True,'optional':False}
        except Exception as e:
            out = {'ssd_host_disks':None,'hdd_host_disks':None,'disks':None,'result':e,'optional':False}
        return out
    def _get_storage_controllers(self):
        """List IDE/SATA/RAID/SCSI controllers reported by lspci."""
        try:
            proc = subprocess.Popen("lspci | grep 'IDE\|SATA\|RAID\|SCSI'", stdout=subprocess.PIPE, shell=True)
            (output,err) = proc.communicate()
            output = str(output).strip()
            output = output.split('\n')
            sc_count = len(output)
            controllers = []
            #key fixed: this placeholder used to say 'storage_controller_type',
            #which would have broken get_disk_controllers on the zero path
            out = {'storage_controller_count':0,'storage_controllers':controllers,'result':False}
            if(sc_count > 0):
                for o in output:
                    split = o.split(': ')
                    controllers.append({'controller':split[1],'pci':split[0][0:7],'type':split[0][8:]})
                out = {'storage_controller_count':len(controllers),'storage_controllers':controllers,'result':True}
        except Exception as e:
            out = {'storage_controller_count':None,'storage_controllers':[],'result':e}
        return out
import subprocess
import os
class network_check():
    """Preflight checks for host NICs: presence, speed, link state and basic
    local-network reachability. All probing shells out to ls/ip/lspci/ping,
    so failures are folded into 'Fail'/'Unknown' results instead of raising.
    """
    def __init__(self):
        pass
    def nic_count(self):
        """Count physical NICs (bridges, loopback and virtual NICs excluded)."""
        out = {'out':0,'result':'Fail','optional':False,'text':'System NIC Count'}
        try:
            proc = subprocess.Popen("sudo ls -I br* -I lo -I vir* /sys/class/net/", stdout=subprocess.PIPE, shell=True)
            (output,err) = proc.communicate()
            output = str(output).strip()
            output = len(output.split('\n'))
            out = {'out':output,'result':'Pass','optional':False,'text':'System NIC Count'}
        except Exception as e:
            out = {'out':'Unknown','result':e,'optional':False,'text':'System NIC Count'}
        return out
    def nic_type(self):
        """Classify each NIC by link speed (1G = minimum, >=10G = recommended).

        Bug fixed here: the sysfs speed file was read twice, so the second
        int(read()) always saw '' and every NIC came back 'Unknown'; the file
        is now read once (and closed) before classification.
        """
        nic = []
        out = {'result':'Fail','optional':False,'out':nic,'text':'System Nic Info'}
        #supress output of the device-id lookup below
        null = open(os.devnull, 'w')
        try:
            output = self._list_nics()
            for o in output:
                #PCI device id: last 7 chars of the device symlink listing
                did = subprocess.Popen("sudo ls -al /sys/class/net/%s/device"%o, stdout=null, shell=True)
                (didout,err) = did.communicate()
                didout = str(didout).strip()[-7:]
                #use lspci and grep to get nicbrand
                brand = subprocess.Popen("sudo lspci | grep %s"%didout, stdout=subprocess.PIPE, shell=True)
                (brandout,err) = brand.communicate()
                nic_brand = str(brandout).strip()[29:]
                if(nic_brand == ''):
                    nic_brand = 'Unknown'
                try:
                    speed_file = open("/sys/class/net/%s/speed"%o,'r')
                    try:
                        nic_speed = int(speed_file.read())
                    finally:
                        #original leaked this handle
                        speed_file.close()
                    if(nic_speed < 0):
                        #sysfs reports -1 when the link speed is unknown/down
                        nic.append({'nic_name':o,'nic_speed':0,'nic_brand':nic_brand,'text':'NIC not recommended'})
                    elif(nic_speed == 1000):
                        nic.append({'nic_name':o,'nic_speed':nic_speed,'nic_brand':nic_brand,'text':'NIC minimum config'})
                    elif(nic_speed >= 10000):
                        nic.append({'nic_name':o,'nic_speed':nic_speed,'nic_brand':nic_brand,'text':'NIC recommended config'})
                    #NOTE(review): speeds between 0 and 1G, and between 1G and
                    #10G, produce no entry -- mirrors the original structure;
                    #confirm whether they should be reported too
                except Exception as e:
                    nic.append({'nic_name':o,'nic_speed':'Unknown','nic_brand':nic_brand,'text':'NIC Unknown'})
            out = {'result':'Pass','optional':False,'out':nic,'text':'System Nic Info'}
        except Exception as e:
            out = {'result':e,'optional':False,'out':nic,'text':'System Nic Info'}
        finally:
            null.close()
        return out
    def nic_configured(self):
        """Check that at least one NIC reports state UP in 'ip link show'.

        Bug fixed here: the exception result used to be overwritten by the
        unconditional pass/fail classification after the try block; errors
        are now returned directly.
        """
        con_out = []
        link = 0
        try:
            output = self._list_nics()
            for o in output:
                proc = subprocess.Popen("ip link show | grep %s"%o, stdout=subprocess.PIPE, shell=True)
                (output,err) = proc.communicate()
                con = str(output).strip().split()
                #'ip link' tokens: [1]=name, [4]=mtu, [8]=state, [10]=mode
                if(con[8] == 'UP'):
                    link +=1
                con_out.append({'nic':con[1],'state':con[8],'mtu':con[4],'mode':con[10]})
        except Exception as e:
            return {'out':'Unknown','result':e,'optional':False,'text':'NIC UP','nic_out':con_out}
        if(len(con_out) >= 1 and link >= 1):
            out = {'out':'Host Connected','result':'Pass','optional':False,'text':'NIC UP','nic_out':con_out}
        else:
            out = {'out':'Host Not Connected','result':'Fail','optional':False,'text':'NIC UP','nic_out':con_out}
        return out
    def connected_to_network(self):
        """Ping each NIC's gateway and own IP to verify local L3 connectivity."""
        con_out = []
        out = {'out':'Local Net Down','result':'Fail','optional':False,'text':'Local Net Comm','nic_out':con_out}
        nics = self._list_nics()
        try:
            link = False
            for n in nics:
                ip_info = self._get_nic_ip_info(n)
                #ping the local ip and the default gateway once each
                ping = {'nic':n,'gateway':'Down','local':'Down'}
                gateway = os.system("ping -c 1 " + ip_info['gateway'] + "> /dev/null 2>&1")
                local = os.system("ping -c 1 " + ip_info['ip'] + "> /dev/null 2>&1")
                if gateway == 0:
                    ping['gateway'] = 'Up'
                if local == 0:
                    ping['local'] = 'Up'
                if(ping['local'] == 'Up' and ping['gateway'] == 'Up'):
                    link = True
                con_out.append(ping)
        except Exception as e:
            con_out.append(e)
            out = {'out':'Local Net Unknown','result':'Fail','optional':False,'text':'Local Net Comm','nic_out':con_out}
        if(len(con_out) >= 1 and link == True):
            out = {'out':'Local Net Up','result':'Pass','optional':False,'text':'Local Net Comm','nic_out':con_out}
        return out
    def connected_to_internet(self):
        """Placeholder internet-reachability check.

        NOTE(review): the original gathered per-NIC IP info but never probed
        an external host and fell off the end, implicitly returning None; it
        now returns its 'Fail' result dict so callers always receive the
        same shape as the other checks.
        """
        con_out = []
        out = {'out':'Local Net Down','result':'Fail','optional':False,'text':'Local Net Comm','nic_out':con_out}
        nics = self._list_nics()
        try:
            link = False
            for n in nics:
                ip_info = self._get_nic_ip_info(n)
        except Exception as e:
            con_out.append(e)
            out = {'out':'Internet Down','result':'Fail','optional':False,'text':'Local Net Comm','nic_out':con_out}
        return out
    ####Internal functions
    def _list_nics(self):
        """Return the list of physical NIC names (empty list on any failure)."""
        try:
            proc = subprocess.Popen("sudo ls -I br* -I lo -I vir* /sys/class/net/", stdout=subprocess.PIPE, shell=True)
            (output,err) = proc.communicate()
            output = str(output).strip().split()
        except Exception as e:
            output = []
        return output
    def _get_nic_ip_info(self,nic):
        """Return {'ip', 'gateway'} for a NIC; values hold the exception on error."""
        try:
            proc = subprocess.Popen("ip addr | grep '%s' -A2 | grep 'inet' | head -1 | awk '{print $2}' | cut -f1 -d'/'"%nic, stdout=subprocess.PIPE, shell=True)
            (output,err) = proc.communicate()
            ip = str(output).strip()
        except Exception as e:
            ip = e
        try:
            proc2 = subprocess.Popen("/sbin/ip route | awk '/default/ { print $3 }'", stdout=subprocess.PIPE, shell=True)
            (output2,err2) = proc2.communicate()
            gateway = str(output2).strip()
        except Exception as e:
            gateway = e
        return {'ip':ip,'gateway':gateway}
ZeroStack preflight system check
================================
The ZeroStack pre-flight system check can be used by an administrator or ZeroStack SE to determine if the hardware in question is compatible with the ZeroStack cloud operating system.
The preflight check will ensure that your physical server will work with the ZeroStack ZCOS. Once this script is run, and
all of the checks are verified, you will be able to install the ZCOS.
The preflight check will make sure the hardware adheres to the ZeroStack minimum viable hardware spec.
1. Overall system configuration
2. CPU architecture
3. Storage requirements
4. Networking
Please check the Ubuntu HCL to verify your results.
[Ubuntu Server HCL](https://certification.ubuntu.com/server/)
Once all of the results have been verified, please send them to your SE.
Getting Started
---------------
In order to get the preflight check working, you will need to make sure Python 2.7 or 3.x is installed on the system the preflight check will run on.
pip will also be required in order to install zs-preflight and the supporting packages.
**OS Requirements**
CentOS 7
    $ yum install -y epel-release python-pip hdparm
Ubuntu 14.04 / 16.04 / 18.04
$ apt install -y python-pip
**Pre-flight Prerequisites**
In order to get the zs-preflight system working, you will need to install the following packages on the system you are running the preflight check from.
PIP will install all of the packages needed to run the ZS-Preflight system, if they are not present.
$ pip install paramiko
$ pip install gspread
$ pip install oauth2client
**Installing**
To install the preflight check on the system, follow these steps. Make sure all of the pre-requisite packages have been installed.
$ pip install zs-preflight
**Running the tests**
Run the pre-flight check with the following command.
$ preflight
Build and submit
----------------
**GIT - development / nightly**
1. git clone https://github.com/Zerostack-open/zs-preflight.git
2. cd zs-preflight
3. python setup.py bdist_wheel
**PIP - Development**
1. sudo python -m pip install --upgrade pip setuptools wheel
2. sudo python -m pip install tqdm
3. sudo python -m pip install --user --upgrade twine
**TODO**
1. Upload data to Gsheet
2. Fire off preflight from zspreflight on a remote system
Authors
-------
**Jonathan Arrance** - *Initial work* - [Zerostack-open](https://github.com/Zerostack-open)
See also the list of [contributors](https://github.com/JonathanArrance) who participated in this project.
License
-------
This project is licensed under the MIT License - see the [LICENSE.md](https://github.com/Zerostack-open/zs-preflight/blob/master/LICENSE) file for details
| zs-preflight | /zs_preflight-0.0.8b0-py3-none-any.whl/zs_preflight-0.0.8b0.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
# selenium_youtube
 [](https://pypi.python.org/pypi/selenium_youtube/) [](https://pypi.python.org/pypi/selenium_youtube/)
## Install
````shell
pip install --upgrade selenium-youtube
# or
pip3 install --upgrade selenium-youtube
````
## Usage
````python
from selenium_youtube.youtube import Youtube
youtube = Youtube(
'path_to_cookies_folder',
'path_to_extensions_folder'
)
result = youtube.upload('path_to_video', 'title', 'description', ['tag1', 'tag2'])
````
## Dependencies
[beautifulsoup4](https://pypi.org/project/beautifulsoup4), [kcu](https://pypi.org/project/kcu), [kstopit](https://pypi.org/project/kstopit), [kyoutubescraper](https://pypi.org/project/kyoutubescraper), [noraise](https://pypi.org/project/noraise), [selenium](https://pypi.org/project/selenium), [selenium-firefox](https://pypi.org/project/selenium-firefox), [selenium-uploader-account](https://pypi.org/project/selenium-uploader-account)
## Credits
[Péntek Zsolt](https://github.com/Zselter07) | zs-selenium-youtube | /zs_selenium_youtube-2.0.20.tar.gz/zs_selenium_youtube-2.0.20/README.md | README.md |
# System
from typing import List, Dict, Optional, Tuple, Callable, Union
import time, json
from sys import platform
# Pip
from selenium_uploader_account import SeleniumUploaderAccount, Proxy, BaseAddonInstallSettings
from noraise import noraise
from kcu import strings
from kstopit import signal_timeoutable
from kyoutubescraper import YoutubeScraper, ChannelAboutData
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup as bs
# Local
from .enums.visibility import Visibility
from .enums.upload_status import UploadStatus
from .enums.analytics_period import AnalyticsPeriod
from .enums.analytics_tab import AnalyticsTab
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------------------------- Defines ---------------------------------------------------------------- #
# base endpoints
YT_URL = 'https://www.youtube.com'
YT_STUDIO_URL = 'https://studio.youtube.com'
YT_UPLOAD_URL = 'https://www.youtube.com/upload'
YT_LOGIN_URL = 'https://accounts.google.com/signin/v2/identifier?service=youtube'
# '{}' templates, filled with a video id, channel id or url-encoded query
YT_STUDIO_VIDEO_URL = 'https://studio.youtube.com/video/{}/edit/basic'
YT_WATCH_VIDEO_URL = 'https://www.youtube.com/watch?v={}'
YT_PROFILE_URL = 'https://www.youtube.com/channel/{}'
YT_PROFILE_CONTENT_URL = 'https://studio.youtube.com/channel/{}/videos'
YT_SEARCH_URL = 'https://www.youtube.com/results?search_query={}'
# limits enforced by YouTube's upload form (inputs are truncated to fit)
MAX_TITLE_CHAR_LEN = 100
MAX_DESCRIPTION_CHAR_LEN = 5000
MAX_TAGS_CHAR_LEN = 400
MAX_TAG_CHAR_LEN = 30
# cookie whose presence marks an already-logged-in session
LOGIN_INFO_COOKIE_NAME = 'LOGIN_INFO'
# ----------------------------------------------------------- class: Youtube ------------------------------------------------------------- #
class Youtube(SeleniumUploaderAccount):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
    def __init__(
        self,
        # cookies
        cookies_folder_path: Optional[str] = None,
        cookies_id: Optional[str] = None,
        pickle_cookies: bool = False,
        # proxy
        proxy: Optional[Union[Proxy, str]] = None,
        # proxy - legacy (kept for convenience)
        host: Optional[str] = None,
        port: Optional[int] = None,
        # addons
        addons_folder_path: Optional[str] = None,
        addon_settings: Optional[List[BaseAddonInstallSettings]] = None,
        # addons - legacy (kept for convenience)
        extensions_folder_path: Optional[str] = None,
        # other paths
        geckodriver_path: Optional[str] = None,
        firefox_binary_path: Optional[str] = None,
        profile_path: Optional[str] = None,
        # profile settings
        private: bool = False,
        full_screen: bool = True,
        language: str = 'en-us',
        user_agent: Optional[str] = None,
        disable_images: bool = False,
        # option settings
        screen_size: Optional[Tuple[int, int]] = None, # (width, height)
        headless: bool = False,
        mute_audio: bool = False,
        home_page_url: Optional[str] = None,
        # find function
        default_find_func_timeout: float = 2.5,  # annotation corrected: default is a float
        # login
        prompt_user_input_login: bool = True,
        login_prompt_callback: Optional[Callable[[str], None]] = None,
        login_prompt_timeout_seconds: int = 60*5
    ):
        """Create a YouTube automation session.

        Every argument is forwarded verbatim to SeleniumUploaderAccount,
        which owns the browser/profile setup, proxying, cookie persistence
        and the login-prompt flow. When cookie-based login did not already
        happen during base init, blocking YouTube alerts are dismissed.
        """
        super().__init__(
            # cookies
            cookies_folder_path=cookies_folder_path,
            cookies_id=cookies_id,
            pickle_cookies=pickle_cookies,
            # proxy
            proxy=proxy,
            # proxy - legacy (kept for convenience)
            host=host,
            port=port,
            # addons
            addons_folder_path=addons_folder_path,
            addon_settings=addon_settings,
            # addons - legacy (kept for convenience)
            extensions_folder_path=extensions_folder_path,
            # other paths
            geckodriver_path=geckodriver_path,
            firefox_binary_path=firefox_binary_path,
            profile_path=profile_path,
            # profile settings
            private=private,
            full_screen=full_screen,
            language=language,
            user_agent=user_agent,
            disable_images=disable_images,
            # option settings
            screen_size=screen_size,
            headless=headless,
            mute_audio=mute_audio,
            home_page_url=home_page_url,
            # find function
            default_find_func_timeout=default_find_func_timeout,
            # login
            prompt_user_input_login=prompt_user_input_login,
            login_prompt_callback=login_prompt_callback,
            login_prompt_timeout_seconds=login_prompt_timeout_seconds,
        )
        if not self.did_log_in_at_init:
            # no cookie login happened -- clear any blocking popups first
            self.__dismiss_alerts()
    # ---------------------------------------------------------- Overrides ----------------------------------------------------------- #
    def _upload_function(self) -> Callable:
        # base-class hook: which bound method performs an upload
        return self.upload
    def _home_url(self) -> str:
        # base-class hook: landing page for session/home navigation
        return YT_URL
    def _get_current_user_id(self) -> Optional[str]:
        # base-class hook: logged-in account identifier (the channel id)
        return self.get_current_channel_id()
    def _profile_url_format(self) -> Optional[str]:
        # base-class hook: '{}' template resolving a user id to a profile URL
        return YT_PROFILE_URL
    def _login_via_cookies_needed_cookie_names(self) -> Union[str, List[str]]:
        # base-class hook: cookie(s) whose presence proves a valid login
        return LOGIN_INFO_COOKIE_NAME
    # -------------------------------------------------------- Public methods -------------------------------------------------------- #
    def get_sub_and_video_count(self, channel_id: str) -> Optional[Tuple[int, int]]:
        """Scrape (subscriber_count, video_count) for a channel; no login needed."""
        return YoutubeScraper(user_agent=self.user_agent, proxy=self.proxy.string).get_sub_and_video_count(channel_id=channel_id)
    def get_channel_about_data(
        self,
        user_name: Optional[str] = None,
        channel_id: Optional[str] = None,
        channel_url_name: Optional[str] = None
    ) -> Optional[ChannelAboutData]:
        """Scrape a channel's About page; identify the channel by user name,
        channel id or url name (forwarded as-is to the scraper)."""
        return YoutubeScraper(user_agent=self.user_agent, proxy=self.proxy.string).get_channel_about_data(
            user_name=user_name,
            channel_id=channel_id,
            channel_url_name=channel_url_name
        )
    def watch_video(
        self,
        video_id: str,
        percent_to_watch: float = -1, # 0-100 # -1 means all
        like: bool = False
    ) -> Tuple[bool, bool]: # watched, liked
        """Play a video and optionally like it.

        Returns (watched, liked): 'watched' becomes True once any pre-roll
        ads have finished; 'liked' is the result of like() when requested
        and logged in, otherwise False.
        """
        watched = False
        liked = False
        try:
            self.get(YT_WATCH_VIDEO_URL.format(video_id))
            # video length in seconds, scraped from the player config embedded
            # in the page source (assumes the 'detailpage...len=' marker exists)
            length_s = float(strings.between(self.browser.driver.page_source, 'detailpage\\\\u0026len=', '\\\\'))
            play_button = self.browser.find_by('button', class_='ytp-large-play-button ytp-button', timeout=0.5)
            if play_button and play_button.is_displayed():
                play_button.click()
                time.sleep(1)
            # wait for the ad overlay to disappear
            # NOTE(review): this loop has no upper bound -- confirm whether a
            # timeout is wanted for a never-ending ad module
            while True:
                ad = self.browser.find_by('div', class_='video-ads ytp-ad-module', timeout=0.5)
                if not ad or not ad.is_displayed():
                    break
                time.sleep(0.1)
            watched = True
            # any negative percent means: watch the full video
            seconds_to_watch = percent_to_watch / 100 * length_s if percent_to_watch >= 0 else length_s
            if seconds_to_watch > 0:
                self.print('Goinng to watch', seconds_to_watch)
                time.sleep(seconds_to_watch)
            return watched, self.like(video_id) if like and self.is_logged_in else False
        except Exception as e:
            self.print(e)
        return watched, liked
    def like(self, video_id: str) -> bool:
        """Press the like button on a video's watch page.

        Returns True only when the button state actually flipped; False when
        not logged in, when the video is already liked ('aria-pressed' is not
        'false'), or when any element lookup fails.
        """
        if not self.is_logged_in:
            print('Error - \'upload\': Isn\'t logged in')
            return False
        self.get(YT_WATCH_VIDEO_URL.format(video_id))
        try:
            # like/dislike live in the top-level button row under the player
            buttons_container = self.browser.find_by('div', id_='top-level-buttons', class_='style-scope ytd-menu-renderer', timeout=1.5)
            if buttons_container:
                button_container = self.browser.find_by('ytd-toggle-button-renderer', class_='style-scope ytd-menu-renderer force-icon-button style-text', timeout=0.5, in_element=buttons_container)
                if button_container:
                    button = self.browser.find_by('button', id_='button', timeout=0.5, in_element=button_container)
                    if button:
                        attr = button.get_attribute('aria-pressed')
                        # only click when not already liked
                        if attr and attr == 'false':
                            button.click()
                            return True
            return False
        except Exception as e:
            self.print(e)
            return False
    def upload(
        self,
        video_path: str,
        title: str,
        description: str,
        tags: Optional[List[str]] = None,
        made_for_kids: bool = False,
        visibility: Visibility = Visibility.PUBLIC,
        thumbnail_image_path: Optional[str] = None,
        timeout: Optional[int] = 60*3, # 3 min
        extra_sleep_after_upload: Optional[int] = None,
        extra_sleep_before_publish: Optional[int] = None
    ) -> (bool, Optional[str]):
        """Upload a video through the YouTube web uploader.

        Returns (success, video_id or None). Requires a logged-in session;
        'timeout' bounds the whole upload flow via the timeout-guarded
        private implementation, whose title/description inputs are truncated
        to YouTube's character limits.
        """
        if not self.is_logged_in:
            print('Error - \'upload\': Isn\'t logged in')
            return False, None
        res = self.__upload(
            video_path=video_path,
            title=title,
            description=description,
            tags=tags,
            made_for_kids=made_for_kids,
            visibility=visibility,
            thumbnail_image_path=thumbnail_image_path,
            extra_sleep_after_upload=extra_sleep_after_upload,
            extra_sleep_before_publish=extra_sleep_before_publish,
            timeout=timeout
        )
        # the timeoutable wrapper surfaces failures as exception objects
        if isinstance(res, Exception):
            self.print(res)
            return False, None
        return res
    def get_current_channel_id(self, _click_avatar: bool = False, _get_home_url: bool = False) -> Optional[str]:
        """Resolve the channel id of the logged-in account.

        Scans menu links for a 'channel/<id>' href. The leading-underscore
        flags are internal retry state: on failure the method re-invokes
        itself, first opening the avatar menu, then reloading the home page.
        Returns None when not logged in or when no id could be found.
        """
        if not self.is_logged_in:
            print('Error - \'upload\': Isn\'t logged in')
            return None
        if _get_home_url:
            self.get(YT_URL)
        try:
            if _click_avatar:
                avatar_button = self.browser.find_by('button', id_='avatar-btn', timeout=0.5)
                if avatar_button:
                    avatar_button.click()
            href_containers = self.browser.find_all_by('a', class_='yt-simple-endpoint style-scope ytd-compact-link-renderer', timeout=0.5)
            if href_containers:
                for href_container in href_containers:
                    href = href_container.get_attribute('href')
                    if href and 'channel/' in href:
                        return strings.between(href, 'channel/', '?')
        except Exception as e:
            self.print(e)
        # escalate the retry strategy before giving up
        if not _click_avatar:
            return self.get_current_channel_id(_click_avatar=True, _get_home_url=_get_home_url)
        elif not _get_home_url:
            return self.get_current_channel_id(_click_avatar=False, _get_home_url=True)
        return None
    def load_video(self, video_id: str):
        """Open the watch page for 'video_id' in the driven browser."""
        self.get(self.__video_url(video_id))
    def comment_on_video(
        self,
        video_id: str,
        comment: str,
        pinned: bool = False,
        timeout: Optional[int] = 15
    ) -> (bool, bool):
        """Post a comment on a video, optionally pinning it.

        Returns (commented, pinned). Requires a logged-in session; delegates
        to the timeout-guarded private implementation, mapping any surfaced
        exception to (False, False).
        """
        if not self.is_logged_in:
            print('Error - \'upload\': Isn\'t logged in')
            return False, False
        res = self.__comment_on_video(
            video_id=video_id,
            comment=comment,
            pinned=pinned,
            timeout=timeout
        )
        if isinstance(res, Exception):
            self.print(res)
            return False, False
        return res
    def get_channel_video_ids(
        self,
        channel_id: Optional[str] = None,
        ignored_titles: Optional[List[str]] = None
    ) -> List[str]:
        """Collect every video id listed on a channel's videos page.

        Scrolls until the page stops growing, then parses all video links
        out of the DOM. Titles matching 'ignored_titles' (case-insensitive,
        whitespace-trimmed) are skipped. Defaults to the current account's
        own channel. Returns the ids gathered so far even on error.
        """
        video_ids = []
        ignored_titles = ignored_titles or []
        channel_id = channel_id or self.current_user_id
        try:
            self.get(self.__channel_videos_url(channel_id))
            last_page_source = self.browser.driver.page_source
            # keep scrolling while new content keeps loading
            while True:
                self.browser.scroll(1500)
                i=0
                max_i = 100
                sleep_time = 0.1
                should_break = True
                # poll up to max_i * sleep_time seconds for the page to grow
                while i < max_i:
                    i += 1
                    time.sleep(sleep_time)
                    if len(last_page_source) != len(self.browser.driver.page_source):
                        last_page_source = self.browser.driver.page_source
                        should_break = False
                        break
                if should_break:
                    break
            soup = bs(self.browser.driver.page_source, 'lxml')
            elems = soup.find_all('a', {'id':'video-title', 'class':'yt-simple-endpoint style-scope ytd-grid-video-renderer'})
            for elem in elems:
                if 'title' in elem.attrs:
                    should_continue = False
                    title = elem['title'].strip().lower()
                    for ignored_title in ignored_titles:
                        if ignored_title.strip().lower() == title:
                            should_continue = True
                            break
                    if should_continue:
                        continue
                    # extract the id out of the '/watch?v=<id>&...' href
                    if 'href' in elem.attrs and '/watch?v=' in elem['href']:
                        vid_id = strings.between(elem['href'], '?v=', '&')
                        if vid_id is not None and vid_id not in video_ids:
                            video_ids.append(vid_id)
        except Exception as e:
            self.print(e)
        return video_ids
    @noraise(default_return_value=False)
    def check_analytics(
        self,
        tab: AnalyticsTab = AnalyticsTab.OVERVIEW,
        period: AnalyticsPeriod = AnalyticsPeriod.LAST_28_DAYS
    ) -> bool:
        """Open the studio analytics page for the current channel.

        Returns True when navigation was attempted, False when no channel id
        is known (or when an exception was swallowed by @noraise).
        """
        if not self.current_user_id:
            self.print('No channel ID found')
            return False
        self.get('{}/channel/{}/analytics/tab-{}/period-{}'.format(YT_STUDIO_URL.rstrip('/'), self.current_user_id, tab.value, period.value))
        return True
@noraise(default_return_value=(False, 0))
def get_violations(self) -> Tuple[bool, int]: # has_warning, strikes
self.get(YT_STUDIO_URL)
violations_container = self.browser.find_by('div', class_='style-scope ytcd-strikes-item')
if not violations_container:
return False, 0
violations_label = self.browser.find_by('div', class_='label style-scope ytcp-badge', in_element=violations_container)
if not violations_label:
return False, 0
violation_text = violations_label.text.strip().lower()
violation_text_number = 0
try:
violation_text_number = int(violation_text)
except:
pass
return True, violation_text_number
    @noraise(default_return_value=False)
    def add_endscreen(self, video_id: str, max_wait_seconds_for_processing: float = 0) -> bool:
        """Attach the first available endscreen template to a video.

        Waits up to 'max_wait_seconds_for_processing' for the endscreen
        editor link to become enabled (it stays disabled while the video is
        processing). Returns True when the editor panel is gone after
        saving, i.e. the save presumably succeeded.
        """
        self.get(YT_STUDIO_VIDEO_URL.format(video_id))
        start_time = time.time()
        # poll until the editor link loses its 'disabled' attribute
        while True:
            attrs = self.browser.get_attributes(self.browser.find_by('ytcp-text-dropdown-trigger', id_='endscreen-editor-link'))
            if not attrs or 'disabled' in attrs:
                if time.time() - start_time < max_wait_seconds_for_processing:
                    time.sleep(1)
                    continue
                return False
            else:
                break
        self.browser.find_by('ytcp-text-dropdown-trigger', id_='endscreen-editor-link').click()
        time.sleep(0.5)
        # pick the first offered endscreen template
        self.browser.find_all_by('div', class_='card style-scope ytve-endscreen-template-picker')[0].click()
        time.sleep(0.5)
        self.browser.find_by('ytcp-button', id_='save-button').click()
        time.sleep(2)
        # success == the editor options panel closed after saving
        return self.browser.find_by('ytve-endscreen-editor-options-panel', class_='style-scope ytve-editor', timeout=0.5) is None
    @noraise(default_return_value=False)
    def remove_welcome_popup(
        self,
        offset: Tuple[int, int] = (20, 20)
    ) -> bool:
        """Dismiss the one-time studio/upload welcome popups.

        Visits both the studio home and the upload page, delegating the
        click-at-'offset' dismissal to the private helper each time; returns
        the result of the second (upload page) dismissal.
        """
        timeout = 5
        self.get(YT_STUDIO_URL, force=True)
        self.__dismiss_welcome_popup(offset=offset, timeout=timeout)
        self.get(YT_UPLOAD_URL, force=True)
        return self.__dismiss_welcome_popup(offset=offset, timeout=timeout)
    @noraise(default_return_value=None)
    def bulk_set_videos_to_private(
        self
    ) -> None:
        """Set every public video of the current channel to private.

        Filters the studio content list to PUBLIC videos, then repeatedly
        selects all rows on a page, applies the visibility edit and advances
        to the next page. Always quits the browser before returning.
        """
        channel_id = self._get_current_user_id()
        self.get(YT_PROFILE_CONTENT_URL.format(channel_id))
        time.sleep(2)
        # open the filter chip bar and restrict the listing to PUBLIC videos
        self.browser.find_by('input', class_='text-input style-scope ytcp-chip-bar').click()
        time.sleep(0.5)
        self.browser.find_by('paper-item', id='text-item-6').click()
        time.sleep(0.5)
        self.browser.find_by('ytcp-checkbox-lit', {'test-id':'PUBLIC'}).click()
        time.sleep(0.5)
        self.browser.find_by('ytcp-button', id='apply-button').click()
        time.sleep(0.5)
        next_page_button = self.browser.find_by('ytcp-icon-button', id='navigate-after')
        next_page_status = next_page_button.get_attribute('aria-disabled')
        # aria-disabled == 'false' means a further page of videos exists
        while next_page_status=='false':
            self.__change_to_private_on_current_page()
            start_time = time.time()
            # wait (max 5 minutes) for the bulk-edit progress label to clear
            update_label = self.browser.find_by('div', class_='label loading-text style-scope ytcp-bulk-actions')
            while update_label is not None:
                update_label = self.browser.find_by('div', class_='label loading-text style-scope ytcp-bulk-actions')
                time.sleep(0.5)
                if time.time()-start_time >= 300:
                    self.quit()
                    return
            next_page_button = self.browser.find_by('ytcp-icon-button', id='navigate-after', timeout=5)
            next_page_status = next_page_button.get_attribute('aria-disabled')
            print('aria-disabled is', next_page_status, type(next_page_status))
            next_page_button.click()
            time.sleep(2.5)
            public_vids = self.browser.find_by('iron-icon', {'icon':'icons:visibility'})
            # NOTE(review): "next_page_status is 'false'" is an identity
            # comparison against a string literal and is effectively always
            # False for a value coming from selenium, so this clause only
            # fires on None / no public videos -- confirm whether '==' (and
            # which polarity) was actually intended before changing it
            if next_page_status is None or next_page_status is 'false' or not public_vids:
                self.quit()
                return
        self.quit()
        return
    @noraise(default_return_value=None)
    def __change_to_private_on_current_page(
        self
    ) -> None:
        """Select all videos on the current studio page and set them private.

        Drives the bulk-edit flow step by step: select-all checkbox -> Edit
        dropdown -> Visibility -> Private -> submit -> confirm. Any error is
        printed and swallowed so the caller's pagination loop can continue.
        """
        try:
            self.browser.find_by('ytcp-checkbox-lit', id='selection-checkbox').click()
            time.sleep(0.5)
            edit_container = self.browser.find_by('ytcp-select', class_='top-dropdown bulk-actions-edit style-scope ytcp-bulk-actions')
            self.browser.find_by('ytcp-dropdown-trigger', class_='style-scope ytcp-text-dropdown-trigger', in_element=edit_container).click()
            time.sleep(0.5)
            self.browser.find_by('paper-item', {'test-id':'VISIBILITY'}).click()
            time.sleep(0.5)
            self.browser.find_by('ytcp-form-select', class_='style-scope ytcp-bulk-actions-editor-visibility').click()
            time.sleep(0.5)
            self.browser.find_by('paper-item', {'test-id':'PRIVATE'}).click()
            time.sleep(0.5)
            self.browser.find_by('ytcp-button', id='submit-button').click()
            time.sleep(0.5)
            self.browser.find_by('ytcp-checkbox-lit', id='confirm-checkbox').click()
            time.sleep(0.5)
            self.browser.find_by('ytcp-button', id='confirm-button', class_='style-scope ytcp-confirmation-dialog').click()
            time.sleep(2.5)
        except Exception as e:
            print(e)
            return
    def bulk_reset_videos(
        self,
        affiliate_tag: str
    ):
        """Collect studio edit-page URLs of the channel's private videos.

        NOTE(review): this method looks unfinished -- it only gathers and
        prints the URLs; 'affiliate_tag' is never used and nothing is reset.
        """
        channel_id = self.get_current_channel_id()
        # studio content listing pre-filtered to PRIVATE, newest first
        url = YT_PROFILE_CONTENT_URL.format(channel_id) + "/upload?filter=%5B%7B%22name%22%3A%22VISIBILITY%22%2C%22value%22%3A%5B%22PRIVATE%22%5D%7D%5D&sort=%7B%22columnType%22%3A%22date%22%2C%22sortOrder%22%3A%22DESCENDING%22%7D"
        self.browser.get(url)
        time.sleep(1.5)
        search_videos = self.browser.find_all_by('a', id='thumbnail-anchor')
        video_urls = []
        for vid in search_videos:
            link_suffix = vid.get_attribute('href')
            print(link_suffix)
            # NOTE(review): get_attribute('href') normally yields an absolute
            # URL, so prefixing YT_STUDIO_URL may build a malformed link; the
            # dedup test also compares the raw href against already-prefixed
            # entries and so never matches -- confirm intent
            if link_suffix and link_suffix not in video_urls:
                video_urls.append(YT_STUDIO_URL + link_suffix)
        print(video_urls)
# ------------------------------------------------------- Private methods -------------------------------------------------------- #
@signal_timeoutable(name='Upload')
def __upload(
    self,
    video_path: str,
    title: str,
    description: str,
    tags: Optional[List[str]] = None,
    made_for_kids: bool = False,
    visibility: Visibility = Visibility.PUBLIC,
    thumbnail_image_path: Optional[str] = None,
    extra_sleep_after_upload: Optional[int] = None,
    extra_sleep_before_publish: Optional[int] = None,
    timeout: Optional[int] = None
) -> (bool, Optional[str]):
    """Drive the YT Studio upload dialog end to end for one video.

    Fills in title, description, optional thumbnail and tags, the
    made-for-kids flag and visibility, then polls processing status and
    presses 'Done'.

    Returns:
        (success, video_id) — ``video_id`` may be None even on success if it
        could not be scraped from the dialog's share link.
    """
    self.get(YT_URL)
    time.sleep(1.5)
    try:
        self.get(YT_UPLOAD_URL)
        time.sleep(1.5)
        self.save_cookies()
        # The hidden file <input> accepts the path directly; no click needed.
        self.browser.find_by('input', type='file').send_keys(video_path)
        self.print('Upload: uploaded video')
        if extra_sleep_after_upload is not None and extra_sleep_after_upload > 0:
            time.sleep(extra_sleep_after_upload)
        self.__dismiss_welcome_popup()
        title_field = self.browser.find_by('div', id_='textbox', timeout=5) or self.browser.find_by(id_='textbox', timeout=5)
        time.sleep(0.5)
        # Clear the auto-filled title (defaults to the file name).
        title_field.send_keys(Keys.BACK_SPACE)
        try:
            time.sleep(0.5)
            # Select-all + backspace; COMMAND on macOS, CONTROL elsewhere.
            title_field.send_keys(Keys.COMMAND if platform == 'darwin' else Keys.CONTROL, 'a')
            time.sleep(0.5)
            title_field.send_keys(Keys.BACK_SPACE)
        except Exception as e:
            self.print(e)
            # Fallback: type one char and delete it to ensure the box is live.
            time.sleep(0.5)
            title_field.send_keys('a')
            time.sleep(0.5)
            title_field.send_keys(Keys.BACK_SPACE)
        time.sleep(0.5)
        title_field.send_keys(title[:MAX_TITLE_CHAR_LEN])
        self.print('Upload: added title')
        description_container = self.browser.find(By.XPATH, "/html/body/ytcp-uploads-dialog/paper-dialog/div/ytcp-animatable[1]/ytcp-uploads-details/div/ytcp-uploads-basics/ytcp-mention-textbox[2]")
        description_field = self.browser.find(By.ID, "textbox", element=description_container)
        description_field.click()
        time.sleep(0.5)
        description_field.clear()
        time.sleep(0.5)
        description_field.send_keys(description[:MAX_DESCRIPTION_CHAR_LEN])
        self.print('Upload: added description')
        if thumbnail_image_path is not None:
            try:
                self.browser.find(By.XPATH, "//input[@id='file-loader']").send_keys(thumbnail_image_path)
                time.sleep(0.5)
                self.print('Upload: added thumbnail')
            except Exception as e:
                # Thumbnail failures are non-fatal; continue the upload.
                self.print('Upload: Thumbnail error: ', e)
        # Expand "more options" so the tags field becomes visible.
        self.browser.find(By.XPATH, "/html/body/ytcp-uploads-dialog/paper-dialog/div/ytcp-animatable[1]/ytcp-uploads-details/div/div/ytcp-button/div").click()
        self.print("Upload: clicked more options")
        if tags:
            tags_container = self.browser.find(By.XPATH, "/html/body/ytcp-uploads-dialog/paper-dialog/div/ytcp-animatable[1]/ytcp-uploads-details/div/ytcp-uploads-advanced/ytcp-form-input-container/div[1]/div[2]/ytcp-free-text-chip-bar/ytcp-chip-bar/div")
            tags_field = self.browser.find(By.ID, 'text-input', tags_container)
            # Drop over-long tags, clamp the joined length, and append a
            # trailing comma so the last tag is committed as a chip.
            tags_field.send_keys(','.join([t for t in tags if len(t) <= MAX_TAG_CHAR_LEN])[:MAX_TAGS_CHAR_LEN-1] + ',')
            self.print("Upload: added tags")
        kids_selection_name = 'MADE_FOR_KIDS' if made_for_kids else 'NOT_MADE_FOR_KIDS'
        kids_section = self.browser.find(By.NAME, kids_selection_name)
        self.browser.find(By.ID, 'radioLabel', kids_section).click()
        self.print('Upload: did set', kids_selection_name)
        self.browser.find(By.ID, 'next-button').click()
        self.print('Upload: clicked first next')
        self.browser.find(By.ID, 'next-button').click()
        self.print('Upload: clicked second next')
        visibility_main_button = self.browser.find(By.NAME, visibility.name)
        self.browser.find(By.ID, 'radioLabel', visibility_main_button).click()
        self.print('Upload: set to', visibility.name)
        try:
            # Scrape the video id from the share link shown in the dialog.
            video_url_container = self.browser.find(By.XPATH, "//span[@class='video-url-fadeable style-scope ytcp-video-info']", timeout=2.5)
            video_url_element = self.browser.find(By.XPATH, "//a[@class='style-scope ytcp-video-info']", element=video_url_container, timeout=2.5)
            video_id = video_url_element.get_attribute('href').split('/')[-1]
        except Exception as e:
            self.print(e)
            video_id = None
        i=0
        if extra_sleep_before_publish is not None and extra_sleep_before_publish > 0:
            time.sleep(extra_sleep_before_publish)
        # Poll once per second until processing reaches a state in which the
        # 'Done' button becomes clickable, then publish.
        while True:
            try:
                upload_progress_element = self.browser.find_by(
                    'ytcp-video-upload-progress',
                    class_='style-scope ytcp-uploads-dialog',
                    timeout=0.2
                )
                upload_status = UploadStatus.get_status(self.browser, upload_progress_element)
                if upload_status in [UploadStatus.PROCESSING_SD, UploadStatus.PROCESSED_SD_PROCESSING_HD, UploadStatus.PROCESSED_ALL]:
                    done_button = self.browser.find(By.ID, 'done-button')
                    if done_button.get_attribute('aria-disabled') == 'false':
                        done_button.click()
                        self.print('Upload: published')
                        time.sleep(3)
                        self.get(YT_URL)
                        return True, video_id
            except Exception as e:
                self.print(e)
                i += 1
                # After 20 failed polls, try 'Done' one last time, else re-raise.
                if i >= 20:
                    done_button = self.browser.find(By.ID, 'done-button')
                    if done_button.get_attribute('aria-disabled') == 'false':
                        done_button.click()
                        self.print('Upload: published')
                        time.sleep(3)
                        self.get(YT_URL)
                        return True, video_id
                    raise
            time.sleep(1)
    except Exception as e:
        self.print(e)
        self.get(YT_URL)
        return False, None
# returns (commented_successfully, pinned_comment_successfully)
@signal_timeoutable(name='Comment')
def __comment_on_video(
    self,
    video_id: str,
    comment: str,
    pinned: bool = False,
    timeout: Optional[int] = None
) -> (bool, bool):
    """Post ``comment`` on the given video; optionally pin it afterwards.

    Returns:
        (commented_successfully, pinned_comment_successfully)
    """
    self.load_video(video_id)
    time.sleep(1)
    # Scroll down in small steps so the comment section lazy-loads.
    self.browser.scroll(150)
    time.sleep(1)
    self.browser.scroll(100)
    time.sleep(1)
    self.browser.scroll(100)
    try:
        # time.sleep(10000)
        header = self.browser.find_by('div', id_='masthead-container', class_='style-scope ytd-app')
        self.print('comment: looking for \'comment_placeholder_area\'')
        comment_placeholder_area = self.browser.find_by('div', id_='placeholder-area', timeout=5)
        self.print('comment: scrollinng to \'comment_placeholder_area\'')
        self.browser.scroll_to_element(comment_placeholder_area, header_element=header)
        time.sleep(0.5)
        self.print('comment: getting focus')
        try:
            # Several click targets are tried; any one of them focuses the box.
            self.browser.find_by('div', id_='simple-box', class_='style-scope ytd-comments-header-renderer',timeout=0.5).click()
            self.browser.find_by('ytd-comment-simplebox-renderer', class_='style-scope ytd-comments-header-renderer',timeout=0.5).click()
            # comment_placeholder_area.click()
            self.browser.find_by('div', id_='placeholder-area', timeout=0.5).click()
        except Exception as e:
            self.print(e)
        self.print('comment: sending keys')
        # self.browser.find_by('div', id_='contenteditable-root', timeout=0.5).click()
        self.browser.find_by('div', id_='contenteditable-root', timeout=0.5).send_keys(comment)
        self.print('comment: clicking post_comment')
        self.browser.find_by('ytd-button-renderer', id_='submit-button', class_='style-scope ytd-commentbox style-primary size-default',timeout=0.5).click()
        # self.browser.find(By.XPATH, "//ytd-button-renderer[@id='submit-button' and @class='style-scope ytd-commentbox style-primary size-default']", timeout=0.5).click()
        if not pinned:
            return True, False
        try:
            try:
                # Reorder comments so the fresh comment surfaces near the top.
                dropdown_menu = self.browser.find_by('yt-sort-filter-sub-menu-renderer', class_='style-scope ytd-comments-header-renderer')
                self.browser.scroll_to_element(dropdown_menu, header_element=header)
                time.sleep(0.5)
                self.print('comment: clicking dropdown_trigger (open)')
                self.browser.find_by('paper-button', id_='label', class_='dropdown-trigger style-scope yt-dropdown-menu', in_element=dropdown_menu, timeout=2.5).click()
                try:
                    dropdown_menu = self.browser.find_by('paper-button', id_='label', class_='dropdown-trigger style-scope yt-dropdown-menu', in_element=dropdown_menu, timeout=2.5)
                    dropdown_elements = [elem for elem in self.browser.find_all_by('a', in_element=dropdown_menu, timeout=2.5) if 'yt-dropdown-menu' in elem.get_attribute('class')]
                    # Last menu item is the non-default sort order.
                    last_dropdown_element = dropdown_elements[-1]
                    if last_dropdown_element.get_attribute('aria-selected') == 'false':
                        time.sleep(0.25)
                        self.print('comment: clicking last_dropdown_element')
                        last_dropdown_element.click()
                    else:
                        # Already selected: click the trigger again to close it.
                        self.print('comment: clicking dropdown_trigger (close) (did not click last_dropdown_element (did not find it))')
                        self.browser.find_by('paper-button', id_='label', class_='dropdown-trigger style-scope yt-dropdown-menu', in_element=dropdown_menu, timeout=2.5).click()
                except Exception as e:
                    self.print(e)
                    self.browser.find_by('paper-button', id_='label', class_='dropdown-trigger style-scope yt-dropdown-menu', in_element=dropdown_menu, timeout=2.5).click()
            except Exception as e:
                self.print(e)
            # self.browser.scroll(100)
            time.sleep(2.5)
            # Walk the threads, skip already-pinned ones, and pin the first
            # unpinned comment via its 3-dots menu + confirmation dialog.
            for comment_thread in self.browser.find_all_by('ytd-comment-thread-renderer', class_='style-scope ytd-item-section-renderer'):
                pinned_element = self.browser.find_by('yt-icon', class_='style-scope ytd-pinned-comment-badge-renderer', in_element=comment_thread, timeout=0.5)
                pinned = pinned_element is not None and pinned_element.is_displayed()
                if pinned:
                    continue
                try:
                    # button_3_dots
                    button_3_dots = self.browser.find_by('yt-icon-button', id_='button', class_='dropdown-trigger style-scope ytd-menu-renderer', in_element=comment_thread, timeout=2.5)
                    self.browser.scroll_to_element(button_3_dots, header_element=header)
                    time.sleep(0.5)
                    self.print('comment: clicking button_3_dots')
                    button_3_dots.click()
                    popup_renderer_3_dots = self.browser.find_by('ytd-menu-popup-renderer', class_='ytd-menu-popup-renderer', timeout=2)
                    time.sleep(1.5)
                    try:
                        # Three selector fallbacks for the "Pin" menu item.
                        self.browser.driver.execute_script("arguments[0].scrollIntoView();", self.browser.find_by('a',class_='yt-simple-endpoint style-scope ytd-menu-navigation-item-renderer', in_element=popup_renderer_3_dots, timeout=2.5))
                        self.browser.find_by('a',class_='yt-simple-endpoint style-scope ytd-menu-navigation-item-renderer', in_element=popup_renderer_3_dots, timeout=2.5).click()
                    except:
                        try:
                            self.browser.find_by('ytd-menu-navigation-item-renderer',class_='style-scope ytd-menu-popup-renderer', in_element=popup_renderer_3_dots, timeout=2.5).click()
                        except Exception as e:
                            try:
                                self.browser.find_by('paper-item',class_='style-scope ytd-menu-navigation-item-renderer', in_element=popup_renderer_3_dots, timeout=2.5).click()
                            except Exception as e:
                                pass
                    confirm_button_container = self.browser.find_by('yt-button-renderer', id_='confirm-button', class_='style-scope yt-confirm-dialog-renderer style-primary size-default', timeout=5)
                    # confirm button
                    self.print('comment: clicking confirm_button')
                    self.browser.find_by('a', class_='yt-simple-endpoint style-scope yt-button-renderer', in_element=confirm_button_container, timeout=2.5).click()
                    time.sleep(2)
                    return True, True
                except Exception as e:
                    self.print(e)
                    return True, False
        except Exception as e:
            self.print(e)
            return True, False
        # could not find new comment
        self.print('no_new_comments')
        return True, False
    except Exception as e:
        self.print('comment error:', e)
        return False, False
def __dismiss_alerts(self):
    """Close any notification banner and the GDPR consent overlay, if shown."""
    find = self.browser.find_by
    # Notification banner: click its inner dismiss button when present.
    banner = find('div', id_='dismiss-button', timeout=1.5)
    if banner:
        inner_button = find('paper-button', id_='button', timeout=0.5, in_element=banner)
        if inner_button:
            inner_button.click()
    # The consent bump lives in an iframe; enter it before looking for "agree".
    consent_frame = find('iframe', class_='style-scope ytd-consent-bump-lightbox', timeout=2.5)
    if consent_frame:
        self.browser.driver.switch_to.frame(consent_frame)
    agree = find('div', id_='introAgreeButton', timeout=2.5)
    if agree:
        agree.click()
    if consent_frame:
        self.browser.driver.switch_to.default_content()
@noraise(default_return_value=False)
def __dismiss_welcome_popup(
    self,
    offset: Tuple[int, int] = (20, 20),
    timeout: Optional[int] = 2
) -> bool:
    """Click through the 'welcome' overlay backdrop, if one is currently open."""
    backdrop = self.browser.find_by('iron-overlay-backdrop', class_='opened', timeout=timeout)
    return self.browser.move_to_element(element=backdrop, offset=offset, click=True)
def __video_url(self, video_id: str) -> str:
    """Return the public watch URL for ``video_id``."""
    return '{}/watch?v={}'.format(YT_URL, video_id)
def __channel_videos_url(self, channel_id: str) -> str:
    """Return the channel's videos-grid URL (grid view, sorted oldest first)."""
    return '{}/channel/{}/videos?view=0&sort=da&flow=grid'.format(YT_URL, channel_id)
# ---------------------------------------------------------------------------------------------------------------------------------------- # | zs-selenium-youtube | /zs_selenium_youtube-2.0.20.tar.gz/zs_selenium_youtube-2.0.20/zs_selenium_youtube/youtube.py | youtube.py |
#########
zs.bibtex
#########
.. image:: https://travis-ci.org/zerok/zs.bibtex.svg?branch=master
:target: https://travis-ci.org/zerok/zs.bibtex
This package for now only includes a quite basic parser for BibTeX which
converts a bibliography and its entries into simple dict-like data structures
and also checks crossreferences if used.
.. warning::
The parser does not (and probably never will) support some of the more
advanced BibTeX-features like preambles.
It also doesn't convert things like accented characters into unicode but
leaves them as they were in the original input.
Usage
=====
A simple example on how to use it::
from zs.bibtex.parser import parse_string
data = '''@article{mm09,
author = {Max Mustermann},
title = {The story of my life},
year = {2009},
journal = {Life Journale}
}'''
bibliography = parse_string(data)
article = bibliography['mm09']
A bibliography as well as each entry in it offers a ``validate()`` method
which checks aspects like cross-references on the bibliography and fields on
the entries. It also supports an optional ``raise_unsupported``
keyword-argument which raises an exception once a possibly unsupported field
is used in an entry.
The information about what fields are required and optional for what kind of
entry is based on the `BibTeX article`_ on Wikipedia.
If you're working with a file you can also use a small helper function called
``parse_file(file_or_path, encoding='utf-8', validate=False)`` which works on a
given filepath or file-like object and returns a bibliography object for the
content of that file.
Custom entry types
==================
Out of the box zs.bibtex supports following entry types for validation:
- article
- book
- booklet
- incollection
- inproceedings
- conference
- inbook
- manual
- mastersthesis
- misc
- phdthesis
- proceedings
- techreport
- unpublished
For details on which of these requires what fields please take a look at the
``zs.bibtex.structures`` module.
But if you are in a situation where you need a different entry type, you can
also easily register your own.
First you have to create a subclass of the ``zs.bibtex.structures.Entry``
class::
from zs.bibtex import structures
class MyEntryType(structures.Entry):
required_fields = ('required_field_1', ('either_this', 'or_that', ), )
optional_fields = ('optional_field_1', )
and then simply register it::
    structures.TypeRegistry.register('mytype', MyEntryType)
.. _BibTeX article: http://en.wikipedia.org/wiki/Bibtex
| zs.bibtex | /zs.bibtex-1.0.0.tar.gz/zs.bibtex-1.0.0/README.rst | README.rst |
from ..bibtex import exceptions
class TypeRegistry(object):
    """Global registry mapping BibTeX entry-type names to Entry subclasses."""

    _registry = {}

    @classmethod
    def register(cls, name, type_):
        """Register ``type_`` under ``name``.

        ``type_`` must be a subclass of structures.Entry; otherwise an
        InvalidEntryType exception is raised.
        """
        if not issubclass(type_, Entry):
            message = "%s is not a subclass of Entry" % str(type_)
            raise exceptions.InvalidEntryType(message)
        cls._registry[name.lower()] = type_

    @classmethod
    def get_type(cls, name):
        """Look up a registered type by its (case-insensitive) bibtex name."""
        return cls._registry.get(name.lower())
class Bibliography(dict):
    """Container for all entries of a BibTeX file.

    Behaves like a dict keyed by entry name and knows how to validate its
    entries, including resolution of ``crossref`` fields.
    """

    def __init__(self):
        super(Bibliography, self).__init__()
        self.crossrefs = []

    def add(self, entry):
        """Store ``entry`` under its ``name`` attribute."""
        self[entry.name] = entry

    def validate(self, **kwargs):
        """Check cross-references, then validate every entry.

        Keyword arguments are forwarded to each entry's ``validate``.
        """
        self.check_crossrefs()
        for entry in self.values():
            entry.validate(**kwargs)

    def check_crossrefs(self):
        """Raise BrokenCrossReferences if any ``crossref`` target is missing."""
        unresolved = [entry for entry in self.values()
                      if entry.get('crossref') is not None
                      and entry.get('crossref') not in self]
        if unresolved:
            raise exceptions.BrokenCrossReferences(
                'One or more cross reference could not be resolved', unresolved)
class Entry(dict):
    """Dict-based representation of a single bibliography entry.

    Subclasses declare ``required_fields`` and ``optional_fields``;
    ``validate`` checks an instance against those declarations. This class
    also serves as the base for the concrete BibTeX entry types.
    """

    required_fields = ('title',)
    optional_fields = ('key', )

    def __init__(self, name=None, **kwargs):
        super(Entry, self).__init__(**kwargs)
        self.name = name

    def validate(self, raise_unsupported=False):
        """Verify required fields; optionally reject unsupported ones.

        Each element of ``required_fields`` is either a field name or a
        tuple of alternatives of which at least one must be present. When
        ``raise_unsupported`` is true, fields that are neither required nor
        optional also count as errors. Raises InvalidStructure on problems.
        """
        present = set(self.keys())
        known_required = set()
        missing = []
        for requirement in self.required_fields:
            if isinstance(requirement, (list, tuple)):
                satisfied = False
                for alternative in requirement:
                    if alternative in present:
                        known_required.add(alternative)
                        satisfied = True
            else:
                known_required.add(requirement)
                satisfied = requirement in present
            if not satisfied:
                missing.append(requirement)
        unsupported = present - known_required - set(self.optional_fields)
        if missing or (raise_unsupported and unsupported):
            raise exceptions.InvalidStructure(
                "Missing or unsupported fields found",
                required_fields=missing,
                unsupported_fields=unsupported)
# The following required_fields/optional_fields attributes are based on
# http://en.wikipedia.org/wiki/Bibtex
class Article(Entry):
    """Article in a journal, magazine etc."""
    required_fields = ('author', 'title', 'journal', 'year')
    optional_fields = ('volume', 'number', 'pages', 'month', 'note', 'key')


TypeRegistry.register('article', Article)
class Book(Entry):
    """A book that has already been published or at least has a publisher."""
    # The inner tuple is an alternatives-group: author OR editor is required.
    required_fields = (('author', 'editor'), 'title', 'publisher', 'year')
    optional_fields = ('address', 'pages', 'volume', 'series', 'edition',
                       'month', 'note', 'key')


TypeRegistry.register('book', Book)
class Booklet(Entry):
    """
    Similar to a book in the sense that it is bound but without a "real"
    publisher.
    """
    # Inherits Entry's requirement: only 'title' is mandatory.
    required_fields = Entry.required_fields
    optional_fields = ('author', 'howpublished', 'address', 'month', 'year',
                       'note', 'key')


TypeRegistry.register('booklet', Booklet)
class Incollection(Entry):
    """Part of a book but with its own title."""
    # Bug fix: the original had a trailing comma after the tuple, turning
    # required_fields into a 1-tuple containing one alternatives-group, so
    # ANY single one of these fields satisfied validation. All four are
    # individually required.
    required_fields = ('author', 'title', 'year', 'booktitle')
    optional_fields = ('editor', 'pages', 'organization', 'publisher',
                       'address', 'month', 'note', 'key')


TypeRegistry.register('incollection', Incollection)
class Inproceedings(Incollection):
    """Article that is part of a conference proceedings."""
    # Field requirements are inherited unchanged from Incollection.
    pass


TypeRegistry.register('inproceedings', Inproceedings)
class Conference(Inproceedings):
    """Similar to ``Inproceedings``."""
    # Redeclares its own (flat) field lists rather than inheriting them.
    required_fields = ('author', 'title', 'booktitle', 'year')
    optional_fields = ('editor', 'pages', 'organization', 'publisher',
                       'address', 'month', 'note', 'key')


TypeRegistry.register('conference', Conference)
class Inbook(Entry):
    """Part of a book."""
    # Two alternatives-groups: (author OR editor) and (chapter OR pages).
    required_fields = (('author', 'editor'), 'title', 'publisher', 'year',
                       ('chapter', 'pages'))
    optional_fields = ('volume', 'series', 'address', 'edition', 'month',
                       'note', 'key')


TypeRegistry.register('inbook', Inbook)
class Manual(Entry):
    """A technical manual."""
    required_fields = ('title',)
    optional_fields = ('author', 'organization', 'address', 'edition', 'year',
                       'month', 'note', 'key')


TypeRegistry.register('manual', Manual)
class Mastersthesis(Entry):
    """A Master's thesis"""
    required_fields = ('author', 'title', 'school', 'year')
    optional_fields = ('address', 'month', 'note', 'key')


TypeRegistry.register('mastersthesis', Mastersthesis)
class Misc(Entry):
    """Type of document that doesn't fit into any of the other categories."""
    # Nothing at all is required for a misc entry.
    required_fields = []
    optional_fields = ('author', 'title', 'howpublished', 'month', 'year',
                       'note', 'key')


TypeRegistry.register('misc', Misc)
class Phdthesis(Mastersthesis):
    """A Ph.D. thesis."""
    # Same field requirements as a Master's thesis.
    pass


TypeRegistry.register('phdthesis', Phdthesis)
class Proceedings(Entry):
    """Conference proceedings."""
    required_fields = ('title', 'year')
    optional_fields = ('editor', 'publisher', 'organization', 'address',
                       'month', 'note', 'key')


TypeRegistry.register('proceedings', Proceedings)
class Techreport(Entry):
    """A technical report published by an institution."""
    required_fields = ('author', 'title', 'institution', 'year')
    optional_fields = ('type', 'number', 'address', 'month', 'note', 'key')


TypeRegistry.register('techreport', Techreport)
class Unpublished(Entry):
    """A not yet published document that already has an author and a title."""
    required_fields = ('author', 'title', 'note',)
    optional_fields = ('month', 'year', 'key',)


TypeRegistry.register('unpublished', Unpublished)
from __future__ import with_statement
import string
import re
import codecs
import pyparsing as pp
from . import structures, exceptions
def normalize_value(text):
    """Flatten newlines to spaces and collapse runs of multiple spaces.

    Only literal space characters are collapsed; tabs etc. are untouched.
    """
    flattened = text.replace('\n', ' ')
    return re.sub('[ ]{2,}', ' ', flattened)
###############################################################################
# Actions
def parse_field(source, loc, tokens):
    """pyparsing action: convert a field's tokens into a ``(name, value)`` pair.

    The field name is lowercased and the value normalized. An ``author``
    value containing ' and ' is split into a list of author names.
    """
    field_name = tokens[0].lower()
    field_value = normalize_value(tokens[2])
    if field_name == 'author' and ' and ' in field_value:
        field_value = [author.strip() for author in field_value.split(' and ')]
    return (field_name, field_value)
def parse_entry(source, loc, tokens):
    """pyparsing action: build an Entry instance from an entry's tokens.

    Looks the entry type up in the TypeRegistry; raises UnsupportedEntryType
    when no applicable registered type exists.
    """
    type_name = tokens[1].lower()
    entry_cls = structures.TypeRegistry.get_type(type_name)
    if entry_cls is None or not issubclass(entry_cls, structures.Entry):
        raise exceptions.UnsupportedEntryType(
            "%s is not a supported entry type" % type_name
        )
    entry = entry_cls()
    entry.name = tokens[3]
    # tokens[4:-1] holds the fields, interleaved with ',' separators.
    for key, value in (token for token in tokens[4:-1] if token != ','):
        entry[key] = value
    return entry
def parse_bibliography(source, loc, tokens):
    """pyparsing action: collect the parsed entries into a Bibliography."""
    bibliography = structures.Bibliography()
    for parsed_entry in tokens:
        bibliography.add(parsed_entry)
    return bibliography
def parse_bstring(source, loc, tokens):
    """pyparsing action: merge a braced string's sub-tokens into one string."""
    return ''.join(tokens)
###############################################################################
# Grammar

# A '%' starts a line comment that runs to the end of the line.
comment = pp.Literal('%') + pp.SkipTo(pp.LineEnd(), include=True)

# Braced values may contain arbitrarily nested {...} groups.
bstring_nested = pp.Forward()
bstring_nested << '{' + pp.ZeroOrMore(bstring_nested | pp.Regex('[^{}]+')) + '}'
# Outer braces are suppressed; the content is joined by parse_bstring.
bstring = pp.Suppress('{') + pp.ZeroOrMore(pp.Or([bstring_nested, pp.Regex('[^{}]+')])).leaveWhitespace() + pp.Suppress('}')
bstring.setParseAction(parse_bstring)

# Entry/field labels: letters, digits and a few punctuation characters.
label = pp.Regex(r'[a-zA-Z0-9-_:/]+')
# A field value is a braced string, a bare number, or a quoted string.
field_value = pp.Or([
    bstring,
    pp.Regex(r'[0-9]+'),
    pp.QuotedString(quoteChar='"', multiline=True, escChar='\\'),
    pp.QuotedString(quoteChar="'", multiline=True, escChar='\\')
])
field = (label + '=' + field_value).setName("field")
field.setParseAction(parse_field)

# Fields are comma-separated; a trailing comma is tolerated.
entry_content = field + pp.ZeroOrMore(',' + field) + pp.Optional(',')
entry = ('@' + label + "{" + label + "," + entry_content + "}").setName("entry")
entry.setParseAction(parse_entry)

bibliography = (pp.OneOrMore(entry)).setName("bibliography")
bibliography.setParseAction(parse_bibliography)

# Top-level pattern: one bibliography covering the whole input; '%' comments
# are ignored everywhere.
pattern = bibliography + pp.StringEnd()
pattern.ignore(comment)
###############################################################################
# Helper functions
def parse_string(str_, validate=False):
    """Parse BibTeX source from a string into a Bibliography.

    When ``validate`` is true the resulting bibliography is checked with the
    standard validation rules before being returned.
    """
    bibliography = pattern.parseString(str_)[0]
    if validate:
        bibliography.validate()
    return bibliography
def parse_file(file_or_path, encoding='utf-8', validate=False):
"""
Tries to parse a given filepath or fileobj into a Bibliography instance. If
``validate`` is passed as keyword argument and set to ``True``, the
Bibliography will be validated using the standard rules.
"""
try:
is_string = isinstance(file_or_path, basestring)
except NameError:
is_string = isinstance(file_or_path, str)
if is_string:
with codecs.open(file_or_path, 'r', encoding) as file_:
result = pattern.parseFile(file_)[0]
else:
result = pattern.parseFile(file_or_path)[0]
if validate:
result.validate()
return result | zs.bibtex | /zs.bibtex-1.0.0.tar.gz/zs.bibtex-1.0.0/src/zs/bibtex/parser.py | parser.py |
ZS is a simple, read-only, binary file format designed for
distributing, querying, and archiving arbitrarily large
record-oriented datasets (up to tens of terabytes and beyond). It
allows the data to be stored in compressed form, while still
supporting very fast queries for either specific entries, or for all
entries in a specified range of values (e.g., prefix searches), and
allows highly-CPU-parallel decompression. It also places an emphasis
on data integrity -- all data is protected by 64-bit CRC checksums --
and on discoverability -- every ZS file includes arbitrarily detailed
structured metadata stored directly inside it.
Basically you can think of ZS as a turbo-charged replacement for
storing data in line-based text file formats. It was originally
developed to provide a better way to work with the massive `Google N-grams
<http://storage.googleapis.com/books/ngrams/books/datasetsv2.html>`_,
but is potentially useful for data sets of any size.
.. image:: https://travis-ci.org/njsmith/zs.png?branch=master
:target: https://travis-ci.org/njsmith/zs
.. image:: https://coveralls.io/repos/njsmith/zs/badge.png?branch=master
:target: https://coveralls.io/r/njsmith/zs?branch=master
Documentation:
http://zs.readthedocs.org/
Installation:
You need either Python **2.7**, or else Python **3.3 or greater**.
Because ``zs`` includes a C extension, you'll also need a C compiler
and Python headers. On Ubuntu or Debian, for example, you get these
with::
sudo apt-get install build-essential python-dev
Once you have the ability to build C extensions, then on Python
3 you should be able to just run::
pip install zs
On Python 2.7, things are slightly more complicated: here, ``zs``
requires the ``backports.lzma`` package, which in turn requires the
liblzma library. On Ubuntu or Debian, for example, something like
this should work::
sudo apt-get install liblzma-dev
pip install backports.lzma
pip install zs
``zs`` also requires the following packages: ``six``, ``docopt``,
``requests``. However, these are all pure-Python packages which pip
will install for you automatically when you run ``pip install zs``.
Downloads:
http://pypi.python.org/pypi/zs/
Code and bug tracker:
https://github.com/njsmith/zs
Contact:
Nathaniel J. Smith <[email protected]>
Developer dependencies (only needed for hacking on source):
* Cython: needed to build from checkout
* nose: needed to run tests
* nose-cov: because we use multiprocessing, we need this package to
get useful test coverage information
* nginx: needed to run HTTP tests
License:
2-clause BSD, see LICENSE.txt for details.
| zs | /zs-0.10.0.zip/zs-0.10.0/README.rst | README.rst |
The command-line ``zs`` tool
=============================
.. currentmodule:: zs
The ``zs`` tool can be used from the command-line to create, view,
and check ZS files.
The main ``zs`` command on its own isn't very useful. It can tell
you what version you have -- these docs were built with:
.. command-output:: zs --version
And it can tell you what subcommands are available:
.. command-output:: zs --help
These subcommands are documented further below.
.. note:: In case you have the Python :mod:`zs` package installed,
but somehow do not have the ``zs`` executable available on your
path, then it can also be invoked as ``python -m zs``. E.g., these
two commands do the same thing::
$ zs dump myfile.zs
$ python -m zs dump myfile.zs
.. _zs make:
``zs make``
------------
``zs make`` allows you to create ZS files. In its simplest form, it
just reads in a text file, and writes out a ZS file, treating each
line as a separate record.
For example, if we have this data file (a tiny excerpt from the `Web
1T <http://catalog.ldc.upenn.edu/LDC2006T13>`_ dataset released by
Google; note that the last whitespace in each line is a tab
character):
.. command-output:: cat tiny-4grams.txt
:cwd: example/scratch
Then we can compress it into a ZS file by running:
.. Note that if you change this command, then you should also update
the copy of tiny-4grams.zs that is stored in the example/
directory, so that the rest of the examples in the documentation
will match:
.. command-output:: zs make '{"corpus": "doc-example"}' tiny-4grams.txt tiny-4grams.zs --codec deflate
:cwd: example/scratch
:shell:
The first argument specifies some arbitrary metadata that will be
saved into the ZS file, in the form of a `JSON <http://json.org>`_
string; the second argument names the file we want to convert; and the
third argument names the file we want to create.
The ``--codec`` argument lets us choose which compression method we
use; usually you should stick with the default (which is lzma), but
until readthedocs.org responds to our bug report we can't use lzma
here in the docs. Sorry.
.. note:: You must ensure that your file is sorted before running
``zs make``. (If you don't, then it will error out and scold you.)
GNU sort is very useful for this task -- but don't forget to set
``LC_ALL=C`` in your environment before calling sort, to make sure
that it uses ASCIIbetical ordering instead of something
locale-specific.
When your file is too large to fit into RAM, GNU sort will spill
the data onto disk in temporary files. When your file is too large
to fit onto disk, then a useful incantation is::
gunzip -c myfile.gz | env LC_ALL=C sort --compress-program=lzop \
| zs make "{...}" - myfile.zs
The ``--compress-program`` option tells sort to automatically
compress and decompress the temporary files using the ``lzop``
utility, so that you never end up with uncompressed data on
disk. (``gzip`` also works, but will be slower.)
Many other options are also available:
.. command-output:: zs make --help
.. _zs info:
``zs info``
------------
``zs info`` displays some general information about a ZS file. For example:
.. command-output:: zs info tiny-4grams.zs
:cwd: example/
The most interesting part of this output might be the ``"metadata"``
field, which contains arbitrary metadata describing the file. Here we
see that our custom key was indeed added, and that ``zs make`` also
added some default metadata. (If we wanted to suppress this we could
have used the ``--no-default-metadata`` option.) The ``"data_sha256"``
field is, as you might expect, a `SHA-256
<https://en.wikipedia.org/wiki/SHA-256>`_ hash of the data contained
in this file -- two ZS files will have the same value here if and
only if they contain exactly the same logical records, regardless of
compression and other details of physical file layout. The ``"codec"``
field tells us which kind of compression was used. The other fields
have to do with more obscure technical
aspects of the ZS file format; see the documentation for the
:class:`ZS` class and the :ref:`file format specification <format>`
for details.
``zs info`` is fast, even on arbitrarily large files, because it
looks at only the header and the root index; it doesn't have to
uncompress the actual data. If you find a large ZS file on the web
and want to see its metadata before downloading it, you can pass an
HTTP URL to ``zs info`` directly on the command line, and it will
download only as much of the file as it needs to.
``zs info`` doesn't take many options:
.. command-output:: zs info --help
.. _zs dump:
``zs dump``
------------
So ``zs info`` tells us *about* the contents of a ZS file, but how
do we get our data back out? That's the job of ``zs dump``. In the
simplest case, it simply dumps the whole file to standard output, with
one record per line -- the inverse of ``zs make``. For example, this
lets us "uncompress" our ZS file to recover the original file:
.. command-output:: zs dump tiny-4grams.zs
:cwd: example/
But we can also extract just a subset of the data. For example, we can
pull out a single line (notice the use of ``\t`` to specify a tab
character -- Python-style backslash character sequences are fully
supported):
.. command-output:: zs dump tiny-4grams.zs --prefix="not done extensive testing\t"
:cwd: example/
Or a set of related ngrams:
.. command-output:: zs dump tiny-4grams.zs --prefix="not done extensive "
:cwd: example/
Or any arbitrary range:
.. command-output:: zs dump tiny-4grams.zs --start="not done ext" --stop="not done fast"
:cwd: example/
Just like ``zs info``, ``zs dump`` is fast -- it reads only the data
it needs to satisfy your query. (Of course, if you request the
whole file, then it will read the whole file -- but it does this in an
optimized way; see the ``-j`` option if you want to tune how many CPUs
it uses for decompression.) And just like ``zs info``, ``zs dump``
can directly take an HTTP URL on the command line, and will download
only as much data as it has to.
We also have several options to let us control the output format. ZS
files allow records to contain arbitrary data, which means that it's
possible to have a record that contains a newline embedded in
it. So we might prefer to use some other character to mark the ends of
records, like `NUL <https://en.wikipedia.org/wiki/Null_character>`_::
$ zs dump tiny-4grams.zs --terminator="\x00"
...but putting the output from that into these docs would be hard to
read. Instead we'll demonstrate with something sillier:
.. command-output:: zs dump tiny-4grams.zs --terminator="XYZZY" --prefix="not done extensive "
:cwd: example/
Of course, this will still have a problem if any of our records
contained the string "XYZZY" -- in fact, our records could in theory
contain *anything* we might choose to use as a terminator, so if we
have an arbitrary ZS file whose contents we know nothing about, then
none of the options we've seen so far is guaranteed to work. The
safest approach is to instead use a format in which each record is
explicitly prefixed by its length. ``zs dump`` can produce
length-prefixed output with lengths encoded in either u64le or uleb128
format (see :ref:`integer-representations` for details about what
these are).
.. command-output:: zs dump tiny-4grams.zs --prefix="not done extensive " --length-prefixed=u64le | hd
:cwd: example/
:shell:
Obviously this is mostly intended for when you want to read the data
into another program. For example, if you had a ZS file that was
compressed using the lzma codec and you wanted to convert it to the
deflate codec, the easiest and safest way to do that is with a command
like::
$ zs dump --length-prefixed=uleb128 myfile-lzma.zs | \
zs make --length-prefixed=uleb128 --codec=deflate \
"$(zs info -m myfile-lzma.zs)" - myfile-deflate.zs
If you're using Python, of course, the most convenient way to read a
ZS file into your program is not to use ``zs dump`` at all, but to use
the :mod:`zs` library API directly.
Full options:
.. command-output:: zs dump --help
.. warning:: Due to limitations in the multiprocessing module in
Python 2, ``zs dump`` can be poorly behaved if you hit control-C
(e.g., refusing to exit).
On a Unix-like platform, if you have a ``zs dump`` that is ignoring
control-C, then try hitting control-Z and then running ``kill
%zs``.
The easy workaround to this problem is to use Python 3 to run
``zs``. The not so easy workaround is to implement a custom process
pool manager for Python 2 -- patches accepted!
.. _zs validate:
``zs validate``
----------------
This command can be used to fully validate a ZS file for
self-consistency and compliance with the specification (see
:ref:`format`); this makes it rather useful to anyone trying to write
new software to generate ZS files.
It is also useful because it verifies the SHA-256 checksum and all of
the per-block checksums, providing extremely strong protection against
errors caused by disk failures, cosmic rays, and other such
annoyances. However, this is not usually necessary, since the ``zs``
commands and the :mod:`zs` library interface never return any data
unless it passes a 64-bit checksum. With ZS you can be sure that your
results have not been corrupted by hardware errors, even if you never
run ``zs validate`` at all.
Full options:
.. command-output:: zs validate --help
| zs | /zs-0.10.0.zip/zs-0.10.0/doc/cmdline.rst | cmdline.rst |
.. _format:
On-disk layout of ZS files
===========================
This page provides a complete specification of version **0.10** of
the ZS file format, along with rationale for specific design
choices. It should be read by anyone who plans to implement a new
reader or writer for the format, or is just interested in how things
work under the covers.
Overview
--------
ZS is a read-only database format designed to store a `multiset
<https://en.wikipedia.org/wiki/Multiset>`_ of records, where each
record is an uninterpreted string of binary data. The main design
goals are:
* Locating an arbitrary record, or sorted span of records, should be
fast.
* Doing a streaming read of a large span of records should be fast.
* Hardware is unreliable, especially on the scale of terabytes and
years, and ZS is designed for long-term archival of multi-terabyte
data. Therefore it must be possible to quickly and reliably validate
the integrity of the data returned by every operation.
* It should be reasonably efficient to access files over slow, "dumb"
transports like HTTP.
* Files should be as small as possible while achieving the above
goals.
The main complication influencing ZS's design is that compression is
necessary to achieve reasonable storage sizes, but decompression is
slow, block-oriented, and inherently serial, which puts the last goal
in direct conflict with the first two. Compressing a chunk of data is
like wrapping it up into an opaque bundle. The only way to find
something inside is to first unwrap (decompress) the whole thing. This
is why it won't work to simply write our data into a large text file
and then use a standard compression program like ``gzip`` on the whole
thing. If we did this, then the only way to find any piece of data
would be to decompress the whole file, which takes ages. Instead, we
need some way to split our data up into multiple smaller bundles. Once
we've done this, reading individual records can be fast, because we
only have to unwrap a single small bundle, not a huge one. And, it
turns out, splitting up our data into multiple bundles also makes bulk
reads faster. For a large read, we have to unpack the same amount of
total data regardless of whether it's divided into small bundles or
not, so the total work is constant. But, in the multiple-bundle case,
we can easily divvy up this work across multiple CPUs, and thus finish
the job more quickly. So, small bundles are great -- but, they also
have a downside: if we make our bundles too small, then the
compression algorithm won't be able to find many redundancies to
compress out, and so our compression ratio will not be very good. In
particular, trying to compress individual records would be hopeless.
Our solution is to bundle records together into moderately-sized
blocks, and then compress each block. Then we add some framing to let
us figure out where each block starts and ends, and add an index
structure to let us quickly find which blocks contain records that
match some query, and ta-da, we have a ZS file. The resulting
structure looks like this:
.. image:: /figures/format-overview.*
:width: 100%
Fast lookup for arbitrary records is supported by a tree-based
indexing scheme: the header contains a pointer to the "root" index
block, which in turn refers to other index blocks, which refer to
other index blocks, until eventually the lowest-level index blocks
refer to data blocks. By following these links, we can locate any
arbitrary record in :math:`O(\log n)` time.
In addition, we require data blocks to be arranged in sorted order
within the file. This allows us to do streaming reads starting from
any point, which makes for nicely efficient disk access patterns. And
range queries are supported by combining these two access strategies:
first we traverse the index to figure out which blocks contain records
that fall into our range, and then we do a streaming read across these
blocks.
General notes
-------------
Language
''''''''
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
document are to be interpreted as described in `RFC 2119
<https://www.ietf.org/rfc/rfc2119.txt>`_.
Checksumming
''''''''''''
To achieve our data integrity goals, every byte in a ZS file that
could possibly contain undetected corruption is protected by a 64-bit
CRC. Specifically, we use the same CRC-64 calculation that the `.xz
file format <http://tukaani.org/xz/xz-file-format.txt>`_ does. The
`Rocksoft model <http://www.ross.net/crc/crcpaper.html>`_ parameters
for this CRC are: polynomial = 0x42f0e1eba9ea3693, reflect in = True,
init = 0xffffffffffffffff, reflect out = True, xor out =
0xffffffffffffffff, check = 0x995dc9bbdf1939fa.
.. _integer-representations:
Integer representations
'''''''''''''''''''''''
Within the ZS header, we make life easier for simple tools like `file
<https://en.wikipedia.org/wiki/File_%28command%29>`_ by encoding all
integers using fixed-length 64-bit little-endian format (``u64le`` for
short).
Outside of the header, integers are encoded in the *uleb128* format,
familiar from the `DWARF debugging format
<https://en.wikipedia.org/wiki/DWARF>`_. Okay, maybe not so
familiar. This is a simple variable-length encoding for unsigned
integers of arbitrary size using **u**\nsigned **l**\ittle-**e**\ndian
**b**\ase-**128**. To read a uleb128 value, you proceed from the
beginning of the string, one byte at a time. The lower 7 bits of each
byte give you the next 7 bits of your integer. This is little-endian,
so the first byte gives you the least-significant 7 bits of your
integer, then the next byte gives you bits 8 through 15, the one after
that gives you bits 16 through 23, etc. The 8th, most-significant bit of
each byte serves as a continuation bit. If this is 1, then you keep
going and read the next byte. If it is 0, then you are
done. Examples::
uleb128 string <-> integer value
-------------- -------------
00 0x00
7f 0x7f
80 01 0x80
ff 20 0x107f
80 80 80 80 20 2 ** 33
(This format is also used by `protocol buffers
<https://en.wikipedia.org/wiki/Protocol_Buffers>`_, by the `XZ file
format <http://tukaani.org/xz/xz-file-format-1.0.4.txt>`_, and
others.) This format allows for redundant representations by adding
leading zeros, e.g. the value 0 could also be written ``80
00``. However, doing so is forbidden; all values MUST be encoded in
their shortest form.
Layout details
--------------
Here's the big picture -- refer to it while reading the full details
below.
.. image:: /figures/format-details.*
:width: 100%
ZS files consist of a *magic number*, followed by a *header*, followed by
a sequence of *blocks*. Blocks come in two types: *data blocks*, and
*index blocks*.
.. _magic-numbers:
Magic number
''''''''''''
To make it easy to distinguish ZS files from non-ZS files, every
valid ZS file begins with 8 `magic bytes
<https://en.wikipedia.org/wiki/File_format#Magic_number>`_. Specifically,
these ones (written in hex, with ASCII below)::
ab 5a 53 66 69 4c 65 01 # Good magic
Z S f i L e
If there's ever an incompatible ZS version 2, we'll use the last byte
as a version number.
Writing out a large ZS file is an involved operation that might take a
long time. It's possible for a hardware or software problem to occur
and cause this process to be aborted before the file is completely
written, leaving behind a partial, corrupt ZS file. Because ZS is
designed as a reliable archival format we would like to avoid the
possibility of confusing a corrupt file with a correct one, and
because writing ZS files can be slow, after a crash we would like to
be able to reliably determine whether the writing operation completed,
and whether we can trust the file left behind. Therefore we also
define a second magic number to be used specifically for partial ZS
files::
ab 5a 53 74 6f 42 65 01 # Bad magic
Z S t o B e
It is RECOMMENDED that ZS file writers perform the following sequence:
* Write out the ``ZStoBe`` magic number.
* Write out the rest of the ZS file.
* Update the header to its final form (including, e.g., the offset of
the root block).
* (IMPORTANT) Sync the file to disk using ``fsync()`` or equivalent.
* Replace the ``ZStoBe`` magic number with the correct
``ZSfiLe`` magic number.
Following this procedure guarantees that, modulo disk corruption, any
file which begins with the correct ZS magic will in fact be a
complete, valid ZS file.
Any file which does not begin with the correct ZS magic is not a valid
ZS file, and MUST be rejected by ZS file readers. Files with the
``ZStoBe`` magic are not valid ZS files. However, polite ZS readers
SHOULD generally check for the ``ZStoBe`` magic, and if encountered,
provide an informative error message while rejecting the file.
.. _format-header:
Header
''''''
The header contains the following fields, in order:
* Length (``u64le``): The length of the data in the header. This does
not include either the length field itself, or the trailing CRC --
see diagram.
* Root index offset (``u64le``): The position in the file where the
root index block begins.
* Root index length (``u64le``): The number of bytes in the root index
block. This *includes* the root index block's length and CRC fields;
the idea is that doing a single read of this length, at the given
offset, will give us the root index itself. This is an important
optimization when IO has high-latency, as when accessing a ZS file
over HTTP.
* Total file length (``u64le``): The total number of bytes contained
in this ZS file; the same thing you'd get from ``ls -l`` or
similar.
.. warning:: To guarantee data integrity, readers MUST validate the
file length field; our CRC checks alone cannot detect file
truncation if it happens to coincide with a block boundary.
* SHA-256 of data (32 bytes): The SHA-256 hash of the stream one would
get by extracting all data block payloads and concatenating
them. The idea is that this value uniquely identifies the logical
contents of a ZS file, regardless of storage details like
compression mode, block size, index fanout, etc.
* Codec (16 bytes): A null-padded ASCII string specifying the codec
(compression method) used. Currently defined codecs include:
* ``none``: Block payloads are stored in raw, uncompressed form.
* ``deflate``: Block payloads are stored using the deflate format as
defined in `RFC 1951 <https://tools.ietf.org/html/rfc1951>`_. Note
that this is different from both the gzip format (RFC 1952) and
the zlib format (RFC 1950), which use different framing and
checksums. ZS provides its own framing and checksum, so we just
use raw deflate streams.
* ``lzma2;dsize=2^20``: Block payloads are represented as raw LZMA2
bitstreams that can be decompressed using a dictionary size of
  :math:`2^{20}` bytes (i.e., 1 MiB); this means that each decoder
needs an upper bound of ~2 MiB of memory. Note that while it might
look parametrized, this is a simple literal string -- for example,
using the encoder string ``lzma2;dsize=2^21`` is illegal. This
means you can use the standard XZ presets 0 and 1, including the
"extreme" 0e and 1e modes, but not higher. This is pretty
reasonable, since there is never any advantage to using a
dictionary size that is larger than a single block payload, and we
expect >1 MiB blocks to be rare; but, if there is demand, we may
add further modes with larger dictionary sizes.
As compared to using XZ format, raw LZMA2 streams are ~0.5%
smaller, so that's nice. And, more importantly, the use of raw
streams dramatically reduces the complexity requirements on
readers, which is important for an archival format. Doing things
this way means that readers don't need to be prepared to handle
the multi-gigabyte dictionary sizes, complicated filter chains,
multiple checksums, etc., which the XZ format allows.
* Metadata length (``u64le``): The length of the next field:
* Metadata (UTF-8 encoded JSON): This field allows arbitrary metadata
to be attached to a ZS file. The only restriction is that the
encoded value MUST be what JSON calls an "object" (also known as a
dict, hash table, etc. -- basically, the outermost characters have
to be ``{}``). But this object can contain arbitrarily complex
values (though we recommend restricting yourself to strings for the
keys). See :ref:`metadata-conventions`.
* <extensions> (??): Compliant readers MUST ignore any data occurring
between the end of the metadata field and the end of the header (as
defined by the header length field). This space may be used in the
future to add backwards-compatible extensions to the ZS
format. (Backwards-incompatible extensions, of course, will include
a change to the magic number.)
* CRC-64-xz (``u64le``): A checksum of all the header data. This does
not include the length field, but does include everything between it
and the CRC. See diagram.
Blocks
''''''
Blocks themselves all have the same format:
* Length (``uleb128``): The length of the data in the block. This does
not include either the length field itself, or the trailing CRC --
see diagram.
* Level (``u8``): A single byte encoding the "level" of this
block. Data blocks are level 0. Index blocks can have any level
between 1 and 63 (inclusive). Other levels are reserved for future
backwards-compatible extensions; compliant readers MUST silently
ignore any block with its level field set to 64 or higher.
* Compressed payload (arbitrary data): The rest of the block after the
level is a compressed representation of the payload. This should be
decompressed according to the value of the codec field in the
header, and then interpreted according to the rules below.
* CRC-64-xz (``u64le``): CRC of the data in the block. This does not
include the length field -- see diagram. Note that this is
calculated directly on the raw disk representation of the block,
compression and all.
Technically we don't need to store the length at the beginning of each
block, because every block also has its length stored either in an
index block or (for the root block) in the header. But, storing the
length directly at the beginning of each block makes it much simpler
to write naive streaming decoders, reduces seeks during streaming
reads, and adds negligible space overhead.
Data block payload
''''''''''''''''''
Data block payloads encode a list of records. Each record has the
form:
* Record length (``uleb128``): The number of bytes in this record.
* Record contents (arbitrary data): That many bytes of data, making up
the contents of this record.
Then this is repeated as many times as you want.
Every data block payload MUST contain at least one record.
Index block payload
'''''''''''''''''''
Index block payloads encode a list of references to other index or
data blocks.
Each index payload entry has the form:
* Key length (``uleb128``): The number of bytes in the "key".
* Key value (arbitrary data): That many bytes of data, making up the
"key" for the pointed-to block. (See below for the invariants this
key must satisfy.)
* Block offset (``uleb128``): The file offset at which the pointed-to
block is located.
* Block length (``uleb128``): The length of the pointed-to block. This
  *includes* the pointed-to block's length and CRC fields; the idea is
  that doing a single read of this length, at the given offset, will
  give us the complete pointed-to block. This is an important
  optimization when IO has high-latency, as when accessing a ZS file
  over HTTP.
Then this is repeated as many times as you want.
Every index block payload MUST contain at least one entry.
Key invariants
--------------
All comparisons here use ASCIIbetical order, i.e., lexicographic
comparisons on raw byte values, as returned by ``memcmp()``.
We require:
* The records in each data block payload MUST be listed in sorted order.
* If data block A occurs earlier in the file (at a lower offset) than
data block B, then all records in A are REQUIRED to be
less-than-or-equal-to all records in B.
* Every block, except for the root block, MUST be referenced by
exactly one index block.
* An index block of level :math:`n` MUST only reference blocks of
level :math:`n - 1`. (Data blocks are considered to have level 0.)
* The keys in each index block payload MUST occur in sorted order.
* To every block, we assign a span of records as follows: data blocks
span the records they contain. Index blocks span all the records
that are spanned by the blocks that they point to
(recursively). Given this definition, we can state the key invariant
for index blocks: every index key MUST be less-than-or-equal-to the
*first* record which is spanned by the pointed-to block, and MUST be
greater-than-or-equal-to all records which come before this record.
.. note:: According to this definition, it is always legal to simply
take the first record spanned by a block, and use that for its
key. But we do not guarantee this; advanced implementations might
take advantage of this flexibility to choose shorter keys that are
just long enough to satisfy the invariant above. (In particular,
there's nothing in ZS stopping you from having large individual
records, up into the megabyte range and beyond, and in this case
you might well prefer not to copy the whole record into the index
block.)
Notice that all invariants use non-strict inequalities; this is
because the same record might occur multiple times in different
blocks, making strict inequalities impossible to guarantee.
Notice also that there is no requirement about where index blocks
occur in the file, though in general each index will occur after the
blocks it points to, because unless you are very clever you can't
write an index block until after you have written the pointed-to
blocks and recorded their disk offsets.
Specification history
---------------------
.. Also update the first line of this file whenever we add stuff to
the format.
* Version 0.10: Remove support for the ``bz2`` compression format.
* Version 0.9: First public release.
| zs | /zs-0.10.0.zip/zs-0.10.0/doc/format.rst | format.rst |
.. ZS documentation master file, created by
sphinx-quickstart on Sun Nov 24 18:21:57 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
ZS: a file format for compressed sets
=====================================
ZS is a simple, read-only, binary file format designed for
distributing, querying, and archiving arbitrarily large data sets (up
to tens of terabytes and beyond) -- so long as those data sets can be
represented as a set of arbitrary binary records. Of course it works
on small data sets too. You can think of it as an alternative to
storing data in tab- or comma-separated files -- each line in such a file
becomes a record in a ZS file. But ZS has a number of advantages over
these traditional formats:
.. all measurements on this page use tera/giga/mega in the SI sense,
i.e., 10^whatever, not 2^whatever.
actual sizes:
eng-us-all 3-gram uncompressed: 10165995927614 bytes
as distributed: 1278862975747 bytes
zs file with, lzma, -z 0e, 384k block size: 753997475853 bytes
* ZS files are **small**: ZS files (optionally) store data in
compressed form. The 3-gram counts from the 2012 US English release
of the `Google N-grams
<http://storage.googleapis.com/books/ngrams/books/datasetsv2.html>`_
are distributed as a set of gzipped text files in tab-separated
format, and take 1.3 terabytes of space. Uncompressed, this data set
comes to more than 10 terabytes (and would be even more if loaded
into a database). The same data in a ZS file with the default
settings (LZMA compression) takes just 0.75 terabytes -- this is
more than 41% smaller than the current distribution format, and
13.5x smaller than the raw data.
.. Benchmarks in next paragraph:
On hericium.ucsd.edu:
# raw gunzip
njsmith@hericium:/local/scratch/njs/google_books_ngrams_v2$ gunzip -c googlebooks-eng-us-all-3gram-20120701-th.gz | pv -rabt > /dev/null
506GB 0:47:34 [ 181MB/s] [ 181MB/s]
converted from mebibytes/s to megabytes/s:
-> 190 megabytes/s
# --prefix="sc" is a convenient way to get a large-enough sample
# to be meaningful, small-enough to not take too long, and
# to be representative of the kinds of actual-text-ngrams that
# we care about most (as compared to the ngrams that are just
# punctuation line noise, which may have different compression
# properties)
# ZS dump lzma, 8 CPUs (note -j7 b/c this does not count the main thread)
njsmith@hericium:/local/corpora/google-books-v2/eng-us-all$ time zs dump google-books-eng-us-all-20120701-3gram.zs --prefix="sc" -j 7 | pv -Wrabt | wc -c
20.1GB 0:00:55 [ 374MB/s] [ 374MB/s]
21616070118
real 0m55.567s
user 5m40.845s
sys 1m4.348s
--> 389 megabytes/s (not mebibytes/s)
# ZS dump lzma, 16 CPUs (which is all of the cores, but there is
# 2x hyperthreading, so -j16 is not a no-op)
njsmith@hericium:/local/corpora/google-books-v2/eng-us-all$ time zs dump google-books-eng-us-all-20120701-3gram.zs --prefix="sc" -j 16 | pv -Wrabt | wc -c
20.1GB 0:00:42 [ 486MB/s] [ 486MB/s]
21616070118
real 0m42.971s
user 6m54.638s
sys 1m15.189s
--> 503 megabytes/s (not mebibytes)
# ZS dump lzma, 1 CPU
njsmith@hericium:/local/corpora/google-books-v2/eng-us-all$ time zs dump google-books-eng-us-all-2
0120701-3gram.zs --prefix="sc" -j0 | pv -Wrabt | wc -c
20.1GB 0:07:14 [47.4MB/s] [47.4MB/s]
21616070118
real 7m15.289s
user 6m35.841s
sys 0m50.835s
--> 49.7 megabytes/s (not mebibytes)
With v0.9.0 of the zs tools, we get ~linear scaling until -j
reaches something in the 8-10 range -- it looks like the main
thread becomes the bottleneck here.
raw disk throughput:
njsmith@hericium:/local/corpora/google-books-v2/eng-us-all$ pv google-books-eng-us-all-20120701-5gram.zs -rabt >/dev/null
58.7GB 0:08:18 [63.8MB/s] [ 121MB/s]
^C
-> 126.9 megabytes/s
and 3x this is = 380 MB/s.
* Nonetheless, ZS files are **fast**: Decompression is an inherently
slow and serial operation, which means that reading compressed files
can easily become the bottleneck in an analysis. Google distributes
the 3-gram counts in many separate ``.gz`` files; one of these, for
example, contains just the n-grams that begin with the letters
"th". Using a single core on a handy compute server, we find that
we can get decompressed data out of this ``.gz`` file at ~190
MB/s. At this rate, reading this one file takes more than 47
minutes -- and that's before we even begin analyzing the data inside
it.
The LZMA compression used in our ZS file is, on its own, slower than
gzip. If we restrict ourselves to a single core, then we can only
read our ZS file at ~50 MB/s. However, ZS files allow for
multithreaded decompression. Using 8 cores, gunzip runs at... still
~190 MB/s, because gzip decompression cannot be parallelized. On
those same 8 cores, our ZS file decompresses at ~390 MB/s -- a
nearly linear speedup. This is also ~3x faster than our test server
can read an *un*\compressed file from disk.
.. Benchmarks in next paragraph:
On hericium.ucsd.edu, using lzma/384k:
%timeit list(zs.ZS("google-books-eng-us-all-20120701-3gram.zs", parallelism=0).search(prefix=b"this is fun\t1955\t"))
10 loops, best of 3: 26.6 ms per loop
"5 disk seeks" = 'zs info' says root level is 3
Speedup:
* In fact, ZS files are **really, REALLY fast**: Suppose we want to
know how many different Google-scanned books published in the USA in
1955 used the phrase "this is fun". ZS files have a limited indexing
ability that lets you quickly locate any arbitrary span of records
that fall within a given sorted range, or share a certain textual
prefix. This isn't as nice as a full-fledged database system that
can query on any column, but it can be extremely useful for data
sets where the first column (or first several columns) are usually
used for lookup. Using our example file, finding the "this is fun"
entry takes 5 disk seeks and ~25 milliseconds of CPU time --
something like 85 ms all told. (And hot cache performance -- e.g.,
when performing repeated queries in the same file -- is even
better.) The answer, by the way, is 27 books::
$ zs dump --prefix='this is fun\t1955\t' google-books-eng-us-all-20120701-3gram.zs
this is fun 1955 27 27
  When this data is stored as gzipped text, the only way to locate an
  individual record, or span of similar records, is to start
decompressing the file from the beginning and wait until the records
we want happen to scroll by, which in this case -- as noted above --
could take more than 45 minutes. Using ZS makes this query ~33,000x
faster.
* ZS files contain **rich metadata**: In addition to the raw data
records, every ZS file contains a set of structured metadata in the
form of an arbitrary `JSON <http://json.org>`_ document. You can use
this to store information about this file's record format (e.g.,
column names), notes on data collection or preprocessing steps,
recommended citation information, or whatever you like, and be
confident that it will follow your data where-ever it goes.
* ZS files are **network friendly**: Suppose you know you just want to
look up a few individual records that are buried inside that 0.75
terabyte file, or want a large span of records that are still much
smaller than the full file (e.g., all 3-grams that begin "this
is"). With ZS, you don't have to actually download the full 0.75
terabytes of data. Given a URL to the file, the ZS tools can find
and fetch just the parts of the file you need, using nothing but
standard HTTP. Of course going back and forth to the server does add
overhead; if you need to make a large number of queries then it
might be faster (and kinder to whoever's hosting the file!) to just
download it. But there's no point in throwing around gigabytes of
data to answer a kilobyte question.
.. Below is disabled until RTD installs the packages we need for
lzma to work:
If you have the ZS tools installed, you can try it right now. Here's
a live run from the readthedocs.org servers, which are nowhere near
UCSD:
.. sneaky hack: we set the TIME envvar in conf.py to get nicer
output from the 'time' command called here
.. command-output:: time zs dump --prefix='this is fun\t' http://bolete.ucsd.edu/njsmith/google-books-eng-us-all-20120701-3gram.zs
:shell:
:ellipsis: 2,-4
If you have the ZS tools installed, you can try it right now. Here's
a real trace of a computer in Dallas searching the 3-gram database
stored at UC San Diego. Note that the computer in San Diego has no
special software installed at all -- this is just a static file
that's available for download over HTTP::
$ time zs dump --prefix='this is fun\t' http://bolete.ucsd.edu/njsmith/google-books-eng-us-all-20120701-3gram.zs
this is fun 1729 1 1
this is fun 1848 1 1
...
this is fun 2008 435 420
this is fun 2009 365 352
Real time elapsed: 1.425 seconds
* ZS files are **ever-vigilant**: Computer hardware is simply not
reliable, especially on scales of years and terabytes. I've dealt
with RAID cards that would occasionally flip a single bit in the
data that was being read from disk. How confident are you that this
won't be a key bit that totally changes your results? Standard text files
provide no mechanism for detecting data corruption. Gzip and other
traditional compression formats provide some protection, but it's
only guaranteed to work if you read the entire file from start to
finish and then remember to check the error code at the end, every
time. But ZS is different: it protects every bit of data with 64-bit CRC
checksums, and the software we distribute will never show you any
data that hasn't first been double-checked for
correctness. (Fortunately, the cost of this checking is negligible;
all the times quoted above include these checks). If it matters to
you whether your analysis gets the right answer, then ZS is a good
choice.
* Relying on the ZS format creates **minimal risk**: The ZS file
format is simple and :ref:`fully documented <format>`; an average
programmer with access to standard libraries could write a working
decompressor in a few hours. The reference implementation is
BSD-licensed, undergoes exhaustive automated testing (>98% coverage)
after every checkin, and just in case there are any ambiguities in
the English spec, we also have a complete :ref:`file format
validator <zs validate>`, so you can confirm that your files match
the spec and be confident that they will be readable by any
compliant implementation.
* ZS files have a name **composed entirely of sibilants**: How many
file formats can say *that*?
This manual documents the reference implementation of the ZS file
format, which includes both a command-line ``zs`` tool for
manipulating ZS files and a fast and featureful Python API, and also
provides a complete specification of the ZS file format in enough
detail to allow independent implementations.
Contents:
.. toctree::
:maxdepth: 2
logistics.rst
cmdline.rst
library.rst
conventions.rst
datasets.rst
format.rst
changes.rst
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zs | /zs-0.10.0.zip/zs-0.10.0/doc/index.rst | index.rst |
Project logistics
=================
Documentation:
http://zs.readthedocs.org/
Installation:
You need either Python **2.7**, or else Python **3.3 or greater**.
Because ``zs`` includes a C extension, you'll also need a C compiler
and Python headers. On Ubuntu or Debian, for example, you get these
with::
sudo apt-get install build-essential python-dev
Once you have the ability to build C extensions, then on Python
3 you should be able to just run::
pip install zs
On Python 2.7, things are slightly more complicated: here, ``zs``
requires the ``backports.lzma`` package, which in turn requires the
liblzma library. On Ubuntu or Debian, for example, something like
this should work::
sudo apt-get install liblzma-dev
pip install backports.lzma
pip install zs
``zs`` also requires the following packages: ``six``, ``docopt``,
``requests``. However, these are all pure-Python packages which pip
will install for you automatically when you run ``pip install zs``.
Downloads:
http://pypi.python.org/pypi/zs/
Code and bug tracker:
https://github.com/njsmith/zs
Contact:
Nathaniel J. Smith <[email protected]>
Developer dependencies (only needed for hacking on source):
* Cython: needed to build from checkout
* nose: needed to run tests
* nose-cov: because we use multiprocessing, we need this package to
get useful test coverage information
* nginx: needed to run HTTP tests
License:
2-clause BSD, see LICENSE.txt for details.
| zs | /zs-0.10.0.zip/zs-0.10.0/doc/logistics.rst | logistics.rst |
The ``zs`` library for Python
=============================
.. module:: zs
Quickstart
----------
Using the example file we created when demonstrating :ref:`zs make`,
we can write:
.. ipython:: python
from zs import ZS
z = ZS("example/tiny-4grams.zs")
for record in z:
print(record.decode("utf-8"))
# Notice that on Python 3.x, we search using byte strings, and we get
# byte strings back.
# (On Python 2.x, byte strings are the same as regular strings.)
for record in z.search(prefix=b"not done extensive testing\t"):
print(record.decode("utf-8"))
for record in z.search(prefix=b"not done extensive "):
print(record.decode("utf-8"))
for record in z.search(start=b"not done ext", stop=b"not done fast"):
print(record.decode("utf-8"))
Error reporting
---------------
:mod:`zs` defines two exception types.
.. autoexception:: ZSError
.. autoexception:: ZSCorrupt
Reading
-------
Reading ZS files is accomplished by instantiating an object of type
:class:`ZS`:
.. autoclass:: ZS
Basic searches
''''''''''''''
.. class:: ZS
.. automethod:: search
.. automethod:: __iter__
File attributes and metadata
''''''''''''''''''''''''''''
:class:`ZS` objects provides a number of read-only attributes
that give general information about the ZS file:
.. class:: ZS
.. attribute:: metadata
:annotation:
A .zs file can contain arbitrary metadata in the form of a
JSON-encoded dictionary. This attribute contains this metadata in
unpacked form.
.. attribute:: root_index_offset
:annotation:
The file offset of the root index block, as stored in the
:ref:`header <format-header>`.
.. attribute:: root_index_length
:annotation:
The length of the root index block, as stored in the
:ref:`header <format-header>`.
.. attribute:: total_file_length
:annotation:
The proper length of this file, as stored in the :ref:`header
<format-header>`.
.. attribute:: codec
:annotation:
The compression codec used on this file, as a byte string.
.. attribute:: data_sha256
:annotation:
A strong hash of the underlying data records contained in this
file. If two files have the same value here, then they are
guaranteed to represent exactly the same data (i.e., return the
same records to the same queries), though they might be stored
using different compression algorithms, have different metadata,
etc.
.. autoattribute:: root_index_level
:annotation:
Fast bulk operations
''''''''''''''''''''
If you want to perform some computation on many records (e.g., all the
records in your file), then these functions are the most efficient way
to do that.
.. class:: ZS
.. automethod:: block_map
.. automethod:: block_exec
High-level operations
'''''''''''''''''''''
.. class:: ZS
.. automethod:: dump
.. automethod:: validate
Writing
-------
In case you want a little more control over ZS file writing than you
can get with the ``zs make`` command-line utility (see :ref:`zs
make`), you can also access the underlying ZS-writing code directly
from Python by instantiating a :class:`ZSWriter` object.
.. autoclass:: ZSWriter
.. automethod:: add_data_block
.. automethod:: add_file_contents
.. automethod:: finish
.. automethod:: close
.. attribute:: closed
Boolean attribute indicating whether this ZSWriter is closed.
| zs | /zs-0.10.0.zip/zs-0.10.0/doc/library.rst | library.rst |
.. _conventions:
Conventions
===========
Our experience is that most data sets have various unique features, so
ZS is an unopinionated format: we give you a hunk of JSON and a pile
of binary records, and let you figure out what to put in them. But, it
is nice to have some conventions for how to handle common
situations. As more people use the format, these will probably evolve,
but for now here are some notes.
.. _metadata-conventions:
Metadata
--------
[XX document the metadata being used in the current gbooks files]::
"build-info": {
"host": "morel.ucsd.edu",
"user": "njsmith",
"time": "2014-04-21T23:56:47.225267Z",
"version": "zs 0.0.0-dev"
},
"corpus": "google-books-eng-us-all-20120701",
"subset": "2gram",
"record-format": {
"separator": "\t",
"column-types": [
"utf8",
"int",
"int",
"int"
],
"type": "separated-values",
"column-names": [
"ngram",
"year",
"match_count",
"volume_count"
]
}
Some other items you might want to consider including:
* Information on the preprocessing pipeline that led to this file (for
answering questions like, "is this the version that had
case-sensitivity enabled, or disabled?")
* Bibliographic references for papers that users of this data might
want to refer to or cite.
* Your contact information.
* Any relevant DOIs or `ORCIDs <http://orcid.org/>`_.
.. _record-format-conventions:
Record format
-------------
The ZS format itself puts absolutely no limitations on the contents of
individual records. You can encode your data any way you feel
like. However, because indexing is done by ASCIIbetical sort order,
it's probably a good idea to choose an encoding which makes sort order
meaningful. Some general principles:
* **Put the field you want to index on first**; if you want to be able to
index on multiple fields simultaneously, put them first, second,
third, etc.
* **Beware of quoting.** This arises especially for common CSV formats,
where fields containing the characters ``,`` or ``"`` often get
special handling. For example, suppose we have some nicely organized
n-grams::
to be , or not to be
to be disjoint and out of frame
'Tis Hamlet 's character . " Naked ! "
'Tis now the very witching time
What a piece of work is a man !
If we encode these as a column in CSV format, and then sort, we end
up with::
"'Tis Hamlet 's character . "" Naked ! """
"to be , or not to be"
'Tis now the very witching time
What a piece of work is a man !
to be disjoint and out of frame
Notice that every entry that contained a ``,`` or ``"`` has been
wrapped in ``"``\'s. If we want to find n-grams beginning ``to be``
or ``'Tis`` then a simple prefix search will no longer work; when we
want to find records with the prefix ``foo`` we have to remember
always to search for both ``foo`` and ``"foo``.
Ideally there is some character that you know will never occur in
any field, and you can use that for your separator -- then no
  quoting is ever needed. This might be tab (``\t``), or if you get
  desperate then there are other options like NUL (``\00``) or newline
  (``\n``) -- though with these latter options you'll lose some of the
convenience of browsing your data with simple tools like :ref:`zs
dump`, and may have to play around a bit more with :ref:`zs make`'s
options to construct your file in the first place.
Alternatively, other quoting schemes (e.g., replacing ``,`` with
``\\,`` and ``\\`` with ``\\\\``) may not perfectly preserve
sorting, but they do preserve prefix searches, which is often the
important thing.
* **Beware of standard number formats.** String-wise, ``"10"`` is less
than ``"2"``, which is a problem if you want to be able to do range
queries on numeric data in ZS files. Some options for working around
this include using fixed-width strings (``"10"`` and ``"02"``), or
using some kind of big-endian binary encoding. Note that the ASCII
space character (0x20) sorts before all printing characters,
including digits. This means that instead of padding with zeroes
like in ``"02"``, it will also work to pad with spaces, ``"
2"``. Fixed width formats in general can be cumbersome to work with,
but they do have excellent sorting properties.
In the Google n-grams, the year field fortunately turns out to be
fixed width (at least until Google starts scanning papyruses). And
for the actual count fields, this formatting issue doesn't arise,
because we have no reason to index on them.
* **Beware of little-endian Unicode and surrogate pairs.** ASCII,
UTF-8, and UTF-32BE all have sensible sort orders (i.e.,
ASCIIbetical sort on the encoded strings is the same as
lexicographic sort on code points). This is definitely not true for
UTF-16LE or UTF-32LE, and is not *quite* true for UTF-16BE, because
of the existence of surrogate pairs (`see
e.g. <https://ssl.icu-project.org/docs/papers/utf16_code_point_order.html>`_).
Of course, if all you want are exact prefix searches, then these issues
don't really matter.
We recommend using UTF-8 unless you have a good reason not to.
Note that the ``zs`` command-line tool has a mild bias towards
UTF-8, in that if you pass it raw Unicode characters for
``--start``, ``--stop``, or ``--prefix``, then it encodes them as
UTF-8 before doing the search.
If these issues turn out to cause enough problems, it may make sense
at some point to define a revised version of the ZS format which has
an explicit schema for record contents, and uses a content-sensitive
sort order (e.g., one that performs numeric comparison on numeric
fields).
| zs | /zs-0.10.0.zip/zs-0.10.0/doc/conventions.rst | conventions.rst |
Publically available ZS datasets
================================
[XX expand]
The v2 Google Books eng-us-all n-grams are here, at least temporarily:
http://bolete.ucsd.edu/njsmith/
Note that these files use "0gram" to refer to what Google calls
"totalcounts", thus preserving the rule that n-gram counts are
normalized by (n-1)-gram counts. The simple dependency arcs that
Google calls "0grams" are not included, since they seem to have been
superseded by later data releases.
The `zscontrib <https://github.com/njsmith/zscontrib>`_ repository has
some scripts to fetch an arbitrary v2 Google Books sub-corpus and
build it into a set of .zs files.
| zs | /zs-0.10.0.zip/zs-0.10.0/doc/datasets.rst | datasets.rst |
import sys
# Bootstrap defaults: which setuptools release to fetch, and the PyPI
# download directory for it. The URL is keyed on the running
# interpreter's "major.minor" version string (e.g. "2.5").
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests for each downloadable setuptools egg, keyed by
# egg filename. _validate_md5() consults this table to detect corrupted
# or tampered downloads; eggs not listed here are accepted unchecked.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
# hashlib first appeared in Python 2.5; fall back to the deprecated
# stand-alone md5 module on older interpreters (2.3/2.4).
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Verify *data* against the recorded MD5 digest for *egg_name*.

    Eggs with no entry in ``md5_data`` are passed through unchecked.
    On a digest mismatch, a warning is written to stderr and the
    process exits with status 2; otherwise *data* is returned as-is.
    """
    expected = md5_data.get(egg_name)
    if expected is not None and md5(data).hexdigest() != expected:
        sys.stderr.write(
            "md5 validation of %s failed! (Possible download problem?)\n"
            % egg_name
        )
        sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Record whether setuptools/pkg_resources were imported before we ran:
    # if so, a conflicting version cannot safely be swapped out from under
    # the running script, and the only option below is to abort with advice.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it first on sys.path, then tell setuptools to
        # bootstrap-install itself from that file later on.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No setuptools present at all: download a suitable egg.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # An older setuptools is installed.
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Not imported by anyone else yet: drop the stale module so the
            # freshly downloaded egg is picked up on the next import.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    # Egg file names encode both the setuptools version and the major.minor
    # Python version, e.g. "setuptools-0.6c9-py2.5.egg".
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Warn the user and pause before touching the network.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Always release the network handle and the output file, even
            # if the download or the md5 check bails out part-way through.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # Bootstrap case: no setuptools at all.  Download an egg and run
        # the easy_install contained in it to install that very egg.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            # Remove the temporary egg once installation has finished.
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # Ancient placeholder release that cannot be upgraded in place.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: upgrade it via whichever
        # easy_install implementation is importable.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        # A new-enough setuptools is already present.
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    # Digest each egg file named on the command line and record it.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Render the registry as sorted dict-entry source lines.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    # Locate this script's own source file and splice the new entries into
    # the embedded ``md5_data = {...}`` literal (self-modifying code).
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        # The dict literal could not be found in our own source.
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
# Command-line entry point: "--md5update FILE..." refreshes the embedded
# digest registry; any other arguments install or upgrade setuptools.
if __name__=='__main__':
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
zs2decode
#########
.. image:: https://travis-ci.org/cpetrich/zs2decode.svg?branch=master
:target: https://travis-ci.org/cpetrich/zs2decode
zs2decode is a Python (2.7, 3.3, 3.4, 3.5, 3.6) implementation of a
decoder for Zwick ``zs2`` files.
``zs2`` files contain measurements and meta data. zs2decode is able to
parse these files. It contains support functions to output the result as
text file or XML for further processing.
The following script converts a ``zs2`` file into XML::
import zs2decode.parser
import zs2decode.util
zs2_file_name = 'my_data_file.zs2'
xml_output_file = 'my_data_file.xml'
# load and decompress file
data_stream = zs2decode.parser.load(zs2_file_name)
# separate binary data stream into chunks
raw_chunks = zs2decode.parser.data_stream_to_chunks(data_stream)
# convert binary chunk data into lists of Python objects
chunks = zs2decode.parser.parse_chunks(raw_chunks)
    # output as XML file
    with open(xml_output_file, 'wb') as f:
        f.write( zs2decode.util.chunks_to_XML(chunks) )
An example script to extract measurement time series from the XML is
provided in the ``examples`` folder.
Documentation is available at `<http://zs2decode.readthedocs.org/>`_
and source code at `<https://github.com/cpetrich/zs2decode.git>`_.
| zs2decode | /zs2decode-0.3.1.tar.gz/zs2decode-0.3.1/README.rst | README.rst |
=========
Changelog
=========
This document records all notable changes to ``zs2decode``.
`0.3.1` (2017-09-26)
------------------------
* Updated documentation of chunks with data type 0xEE, sub-type 0x0011 to correspond to the way the parser works since 0.3.0.
`0.3.0` (2017-09-22)
---------------------
* Refactored parsing of data in ``QS_`` chunks (0xEE sub-type 0x0011) to be more general. The format code lost its prominent status, affecting the structure of type and format signatures in the output. There is no syntax for unparsed data anymore.
* Utility functions return bytes/bytearray to avoid double-UTF8 encoding of strings.
* Values in XML elements are now JSON encoded rather than based on repr(). This affects the spelling of booleans, and tuples are output as lists.
* Changed order of path elements printed together with closing elements in text output.
* Added and modified format strings for ``QS_`` chunks.
* Chunk names affected by character substitution are now maintained in XML files in the ``name`` attribute.
* Added functionality to encode XML files to zs2.
* Improved platform independence with respect to integer byte lengths.
* Output of audit log decoder has been made more basic to be compatible with syntax of EE11 ``QS_`` chunks.
`0.2.1-dev` (unreleased)
-------------------------
* Example script ``raw_data_dump_from_xml.py`` can now extract data from zs2 files with slightly different structure.
* ``util.chunks_to_XML()`` will now perform character substitutions if necessary to create valid XML element names. ``util.chunks_to_text_dump()`` continues to maintain the original names.
* License included in wheel.
* Added changelog.
`0.2.0` (2017-09-15)
---------------------
* Added interpretation of data type 0xEE, sub-type 0x0005.
* Refactored xml output to perform correct entity substitution.
* Refactored interpretation of single-precision numbers. Previously, some results were not optimal.
`0.1` (2015-12-07)
---------------------
* Initial Release.
| zs2decode | /zs2decode-0.3.1.tar.gz/zs2decode-0.3.1/CHANGELOG.rst | CHANGELOG.rst |
.. _section-ee11:
Chunk type-specific data structures
===================================
Chunks with data of type ``0xEE`` and sub-type ``0x0011``
contain data organized as a `record`_.
Chunk type-specific data structures are used by the
event audit system to store the event log,
and by the `ZIMT scripting language`_ to store
properties and parameters of variables. This record format
appears to be indicated by one or more values in record data
(one of them being the first byte of the record data).
The record formats of the chunk types given here are guesses.
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| Data | Chunk data |
| type | |
+ +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| | Sub-type | Byte length of | Record data |
| | | and record data | |
+========+========+========+========+========+========+========+========+========+========+========+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | ... | n |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
|``0xEE``|``0x11``|``0x00``|``LSB`` | | |``MSB`` | | | | |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
.. _record: https://en.wikipedia.org/wiki/Record_%28computer_science%29
.. _ZIMT scripting language: http://www.zwick.com/en/products/testxpert-ii-testing-software-intelligent-and-reliable/rd-and-academia/flexible-management.html
ZIMT parameter and property chunks
----------------------------------
The record data of the chunks with data type ``0xEE`` and
sub-type ``0x0011`` is described in the tables below. For
the sake of brevity, the following definition of record
elements will be used:
byte
1 byte
byte (boolean)
1 byte with values limited to ``0x00`` and ``0x01``
word
2-byte integer
long
4-byte integer
single
4-byte single-precision float
double
8-byte double-precision float
string
variable-length unicode string (see :ref:`section-unicode-string-definition`)
list
variable-length data list (see :ref:`section-data-list-definition`)
tuple
ordered group of values
ZIMT property chunks
^^^^^^^^^^^^^^^^^^^^
+---------------------+------------------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Count | Type |
+=====================+======+===========================================================+
|``QS_Par`` | 1 + byte ``0x01`` |
+ +------+-----------------------------------------------------------+
| | 1 + byte (boolean) |
+ +------+-----------------------------------------------------------+
| | 2 | byte |
+ +------+-----------------------------------------------------------+
| | 1 | byte (boolean) |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ValPar`` | 1 | byte ``0x01`` |
+ +------+-----------------------------------------------------------+
| | 1 | double |
+ +------+-----------------------------------------------------------+
| | 1 | string |
+ +------+-----------------------------------------------------------+
| | 1 | word |
+ +------+-----------------------------------------------------------+
| | 9 | byte ``0x00`` |
+---------------------+------+-----------------------------------------------------------+
| || Record contains value, unit, and ID. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_TextPar`` | 1 | byte ``0x01`` |
+ +------+-----------------------------------------------------------+
| | 4 | string |
+---------------------+------+-----------------------------------------------------------+
| || String 2 contains language information, |
| || strings 3 and 4 are empty. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_SelPar`` | 1 | byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 1 | long |
+ +------+-----------------------------------------------------------+
| | 1 | list of longs |
+ +------+-----------------------------------------------------------+
| | 4 | string |
+---------------------+------+-----------------------------------------------------------+
| || The first long is ``0xFFFFFFFF`` if the list is not empty. |
| || String 2 contains language information, |
| || strings 3 and 4 are empty. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ValArrPar`` | 1 | byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 1 | string |
+ +------+-----------------------------------------------------------+
| | 1 | word |
+ +------+-----------------------------------------------------------+
| | 1 | byte, usually ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 1 | list of longs |
+---------------------+------+-----------------------------------------------------------+
| || The word contains an ID value. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ValArrParElem`` | 1 | byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 1 | list of tuples of type (long, double) |
+---------------------+------+-----------------------------------------------------------+
| || Tuples are pairs of index and value. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ArrPar`` | 1 | byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 1 | list of longs |
+ +------+-----------------------------------------------------------+
| | 1 | byte |
+---------------------+------+-----------------------------------------------------------+
ZIMT parameter chunks
^^^^^^^^^^^^^^^^^^^^^
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ParProp`` | 1 | byte ``0x07`` |
+ +------+-----------------------------------------------------------+
| | 9 | byte (boolean) |
+ +------+-----------------------------------------------------------+
| | 1 | word |
+ +------+-----------------------------------------------------------+
| | 9 | string |
+ +------+-----------------------------------------------------------+
| | 3 | word |
+ +------+-----------------------------------------------------------+
| | 5 | string |
+ +------+-----------------------------------------------------------+
| | 1 | long ``0x00000000`` |
+ +------+-----------------------------------------------------------+
| | 2 | word |
+ +------+-----------------------------------------------------------+
| | 1 | byte |
+ +------+-----------------------------------------------------------+
| | 1 | string |
+ +------+-----------------------------------------------------------+
| | 4 | byte (boolean) |
+---------------------+------+-----------------------------------------------------------+
| || Bytes 6 and 9 seem to always be ``0x00``. |
| || The 3 words are ``0x0000``, ``0xFFFF``, ``0xFFFF``. |
| || The last 4 bytes are ``0x00``, ``0x01``, ``0x00``, ``0x01``. |
+---------------------+------+-----------------------------------------------------------+
or
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ParProp`` | 1 | byte ``0x07`` |
+ +------+-----------------------------------------------------------+
| | 9 | byte (boolean) |
+ +------+-----------------------------------------------------------+
| | 1 | word |
+ +------+-----------------------------------------------------------+
| | 9 | string |
+ +------+-----------------------------------------------------------+
| | 3 | word |
+ +------+-----------------------------------------------------------+
| | 5 | string |
+ +------+-----------------------------------------------------------+
| | 1 | long ``0x00000002`` |
+ +------+-----------------------------------------------------------+
| | 2 | word |
+ +------+-----------------------------------------------------------+
| | 1 | byte |
+ +------+-----------------------------------------------------------+
| | 1 | long |
+ +------+-----------------------------------------------------------+
| | 1 | string |
+ +------+-----------------------------------------------------------+
| | 4 | byte (boolean) |
+---------------------+------+-----------------------------------------------------------+
| || The last 4 bytes are ``0x00``, ``0x01``, ``0x00``, ``0x01``. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------------------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ValProp`` | 1 + byte ``0x01`` |
+ +------+-----------------------------------------------------------+
| | 1 + byte (boolean) |
+ +------+-----------------------------------------------------------+
| | 2 | byte |
+ +------+-----------------------------------------------------------+
| | 1 | byte (boolean) |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------------------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_TextProp`` | 1 + byte ``0x01`` |
+ +------+-----------------------------------------------------------+
| | 4 + byte |
+ +------+-----------------------------------------------------------+
| | 4 | byte (boolean) |
+---------------------+------+-----------------------------------------------------------+
| || The last byte is ``0x01`` |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_SelProp`` | 1 | byte ``0x04`` |
+ +------+-----------------------------------------------------------+
| | 3 | byte (values) |
+ +------+-----------------------------------------------------------+
| | 1 | list of 4 strings |
+ +------+-----------------------------------------------------------+
| | 1 | list of 4 strings |
+ +------+-----------------------------------------------------------+
| | 1 | list of strings |
+ +------+-----------------------------------------------------------+
| | 1 | list of strings |
+ +------+-----------------------------------------------------------+
| | 1 | list of words |
+ +------+-----------------------------------------------------------+
| | 1 | list of longs |
+ +------+-----------------------------------------------------------+
| | 1 | list of strings |
+---------------------+------+-----------------------------------------------------------+
| || Record data may end after the first three bytes. |
| || If present, all lists are of the same length. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------------------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ValArrParProp`` | 1 + byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 4 + byte |
+ +------+-----------------------------------------------------------+
| | 1 | word |
+ +------+-----------------------------------------------------------+
| | 4 | byte |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------------------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_SkalProp`` | 1 + byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 2 + string |
+ +------+-----------------------------------------------------------+
| | 2 | byte (boolean) |
+---------------------+------+-----------------------------------------------------------+
| || First string may contain a ZIMT script. |
| || The booleans seem to indicate validity of the respective |
| | strings. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_ValSetting`` | 1 | byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 2 | string |
+ +------+-----------------------------------------------------------+
| | 1 | long |
+ +------+-----------------------------------------------------------+
| | 1 | string |
+ +------+-----------------------------------------------------------+
| | 3 | byte |
+ +------+-----------------------------------------------------------+
| | 1 | word |
+ +------+-----------------------------------------------------------+
| | 2 | byte |
+ +------+-----------------------------------------------------------+
| | 1 | list of words |
+ +------+-----------------------------------------------------------+
| | 1 | list of strings |
+ +------+-----------------------------------------------------------+
| | 1 | byte |
+ +------+-----------------------------------------------------------+
| | 10 | byte |
+---------------------+------+-----------------------------------------------------------+
| || The leading strings are usually empty. |
| || The long is small-valued. |
| || The word is either ``0x0000`` or ``0xFFFF``. |
| || If not empty, the list of words contains ID values. |
| || If not empty, the last string contains a variable name. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------------------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_NumFmt`` | 1 + byte ``0x02`` |
+ +------+-----------------------------------------------------------+
| | 4 + byte |
+ +------+-----------------------------------------------------------+
| | 1 | double |
+---------------------+------+-----------------------------------------------------------+
| || The value of the double float is usually ``0.1``. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_Plaus`` | 1 | byte ``0x01`` |
+ +------+-----------------------------------------------------------+
| | 9 | byte, usually ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 6 | byte, usually ``0xFF`` or ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 1 | word, usually ``0xFFFE`` or ``0x0000`` |
+ +------+-----------------------------------------------------------+
| | 6 | byte, usually ``0xFF`` or ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 1 | word, usually ``0x7FFE`` or ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 6 | byte, usually ``0x00`` |
+---------------------+------+-----------------------------------------------------------+
| || Note that data in this chunk differ from ``QS_Tol`` |
| || only in length. |
+---------------------+------+-----------------------------------------------------------+
+---------------------+------+-----------------------------------------------------------+
|Chunk type name |Record data |
+ +------+-----------------------------------------------------------+
| |Number| Type |
+=====================+======+===========================================================+
|``QS_Tol`` | 1 | byte ``0x01`` |
+ +------+-----------------------------------------------------------+
| | 9 | byte, usually ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 6 | byte, usually ``0xFF`` or ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 1 | word, usually ``0xFFFE`` or ``0x0000`` |
+ +------+-----------------------------------------------------------+
| | 6 | byte, usually ``0xFF`` or ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 1 | word, usually ``0x7FFE`` or ``0x00`` |
+ +------+-----------------------------------------------------------+
| | 3 | byte, usually ``0x00`` |
+---------------------+------+-----------------------------------------------------------+
| || Note that data in this chunk differ from ``QS_Plaus`` |
| || only in length. |
+---------------------+------+-----------------------------------------------------------+
Event audit chunk
-----------------
The event audit log is stored in a chunk type with name
``Entry``. The description below represents the parsing
algorithm used before version 0.3.0. In the current
implementation, the chunk is parsed heuristically as bytes
and strings.
[START OBSOLETE DESCRIPTION]
The first byte of the record (i.e., format code)
corresponding to the description here is
``0x02``. A large number of Entry--Record-Format-Codes
(ERFC) and associated records are defined.
However, it appears to be possible to split the
record data into its constituents without interpreting the
format code explicitly.
The procedure is described in the
Section :ref:`section-entry-parsing`.
In addition to strings, the following prefixed data types
are defined that are specific to ``Entry`` chunks:
+--------+---------+----------------+--------------+
| Prefix | Data block | Total length |
+ +---------+----------------+ of data type |
| | Length | Interpretation | (bytes) |
| | (bytes) | | |
+========+=========+================+==============+
|``0x07``| 8 | 1 double | 9 |
+--------+---------+----------------+--------------+
|``0x64``| 4 | 1 long | 5 |
+--------+---------+----------------+--------------+
|``0x01``| 4 | 4 bytes | 5 |
+--------+---------+----------------+--------------+
|``0x04``| 1 | 1 byte | 2 |
+--------+---------+----------------+--------------+
Data type and chunk data of an ``Entry`` chunk start
as follows:
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| Data | Chunk data |
| type | |
+ +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| | Sub-type | Byte length of format code | Record data |
| | | and record data | |
+ + + +--------+--------+--------+--------+--------+--------+
| | | | Format | ERFC | 3-tuple | String |
+========+========+========+========+========+========+========+========+========+========+========+========+========+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | ... |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
|``0xEE``|``0x11``|``0x00``|``LSB`` | | |``MSB`` |``0x02``| | | | | |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
.. _section-entry-parsing:
Parsing algorithm
^^^^^^^^^^^^^^^^^
The following algorithm appears to be able to parse record data
data into a list, regardless of record format code. The algorithm
is completely heuristic and is able to extract a lot of meaningful
information. However, it should be replaced with an algorithm
evaluating the ERFC code.
0. Go to start of record.
1. Read and output ERFC byte.
2. Interpret next 3 bytes as 3-tuple and output.
3. While there are bytes left to parse:
4. If string follows: interpret and output string, continue at 3.
5. If the next byte belongs to a prefixed data type and another
prefixed data type or string follows the current data block:
interpret prefixed data type and output,
continue at 3.
6. If another prefixed data type or string follows 4 bytes later:
interpret 4 bytes as 2 words and output, continue at 3.
7. If another prefixed data type or string follows 2 bytes later:
output 2 bytes, continue at 3.
8. Output next byte, continue at 3.
The test for follow-up prefixed data type or string needs to
verify that either the end of the string is reached **or**
1. that the following data starts with a prefix defined
for prefixed data types or with a string length followed
by ``0x00`` ``0x80``, indicating strings, **and**
2. that the following number of bytes is sufficient to hold
the entire prefixed data type or string.
The purpose of the follow-up test is to prevent the detection of
spurious unicode string markers ``LSB`` ``MSB`` ``0x00`` ``0x80``
in the binary representation of double-precision floating point
numbers.
Interpretation
^^^^^^^^^^^^^^
Each ``Entry`` record begins with a common header, followed by a
detailed, entry-specific record. The common header contains the
following entries:
1. Entry-record-format-code
2. 3-tuple
3. User name currently logged into the system
4. Time in seconds, possibly since loading/saving a file.
5. An ID (always the same)
6. Empty string
7. Another ID (always the same)
8. The value ``0``
9. A string giving a human-readable, brief description of the event
10. Internal string describing the originator of the event
[END OBSOLETE DESCRIPTION] | zs2decode | /zs2decode-0.3.1.tar.gz/zs2decode-0.3.1/docs/special_chunks.rst | special_chunks.rst |
The zs2 file format
===================
File structure
--------------
Compression
^^^^^^^^^^^
``zs2`` files are `gzip-compressed`_ binary files. The first step to
decoding a file is to unpack the ``zs2`` file with a utility program
such as ``gunzip`` or `7-zip`_, or opening the file using the
`Python module`_ ``gzip``. This results in a binary data stream.
.. cssclass:: table-bordered
+----+--------------------+----+
| gzip compression |
+----+--------------------+----+
| | | |
| | data stream | |
| | | |
+----+--------------------+----+
.. _gzip-compressed: https://en.wikipedia.org/wiki/Gzip
.. _7-zip: http://www.7-zip.org/
.. _Python module: https://docs.python.org/2/library/gzip.html
Data stream structure
^^^^^^^^^^^^^^^^^^^^^
The data stream starts with a header signature and
is followed by chunks.
A typical ``zs2`` file contains over 100,000 chunks.
+--------------------+--------+
| Header (4 bytes) |
+--------------------+--------+
| Chunk 1 |
| |
+--------------------+--------+
| Chunk 2 |
| |
+--------------------+--------+
| ... |
| |
+--------------------+--------+
| Chunk n |
| |
+--------------------+--------+
Byte order
~~~~~~~~~~
The `byte order`_ of the *binary file* is **little-endian**. For
example, a 32-bit representation of the integer value ``1``
in the data stream would be ``0x01 0x00 0x00 0x00``.
.. _byte order: https://en.wikipedia.org/wiki/Endianness
Header
^^^^^^
The *binary file* starts with the 4-byte signature ``0xAF 0xBE 0xAD 0xDE``,
i.e. ``0xDEADBEAF`` in hexadecimal or ``3735928495`` in decimal.
+----------+----------+----------+----------+
| Offset 0 | Offset 1 | Offset 2 | Offset 3 |
+==========+==========+==========+==========+
| ``0xAF`` | ``0xBE`` | ``0xAD`` | ``0xDE`` |
+----------+----------+----------+----------+
This signature is followed immediately by the first chunk.
Chunks
^^^^^^
Chunks contain information on data stream structure, metadata or data.
There are two different chunk formats. One format is used by the
"End-of-Section" chunk, the other format is used by all other chunks.
The latter has the following structure:
an ASCII-encoded name of the chunk type, starting
with one byte length giving the length of the name
in bytes (Section :ref:`section-ascii-string-definition`),
followed by the data type of the particular
chunk (Section :ref:`section-data-types`) and actual chunk
data. In contrast, the End-of-Section chunk is simply
a single byte of value ``0xFF``. Both chunk structures
can be discriminated between because chunk type names
do not start with ``0xFF``.
The following chunk structures are possible:
+------------+------------+------------+------------+
| Chunk type name | Data type | Chunk |
| | code | data |
+============+============+============+============+
| ASCII-encoded, at least | 1 byte | 1 or more |
| 2 bytes | | bytes |
+------------+------------+------------+------------+
or, for the "End-of-Section" chunk,
+------------+------------+------------+------------+
| End of | | | |
| Section | | | |
+============+============+============+============+
| ``0xFF`` | | | |
| | | | |
+------------+------------+------------+------------+
The total length of the chunk can be anywhere from 1 byte upward.
In particular, the total chunk length is generally not a
multiple of 2 or 4 bytes.
.. note:: Less than 5% of the existing chunk types have no data type.
An example of a chunk with chunk type name ``ID`` would be:
+--------+--------+--------+--------+--------+--------+
| Chunk type name | Data | Chunk data |
+--------+--------+--------+ type + +
| Length | Name | | |
+--------+--------+--------+--------+--------+--------+
| ``2`` | ``I`` | ``D`` |``0x66``| ``48154`` |
+========+========+========+========+========+========+
| 1 | 2 | 3 | 4 | 5 | 6 |
+--------+--------+--------+--------+--------+--------+
|``0x02``|``0x49``|``0x44``|``0x66``|``0x1A``|``0xBC``|
+--------+--------+--------+--------+--------+--------+
.. _section-chunk-naming:
Chunk type naming
^^^^^^^^^^^^^^^^^
Chunk type names are Pascal ShortString-style ASCII strings defined in
Section :ref:`section-ascii-string-definition`.
Chunk type names are chosen to be readable ASCII text,
consisting mostly of underscore (``_``),
digits ``0`` to ``9``, and English letters ``A`` to ``Z`` and ``a`` to ``z``.
Very few chunk type names include other characters.
The chunk type name length is limited to 254 characters since an indicated length of
``255`` (``0xFF``) represents an "End-of-Section" chunk.
Also, chunk type names of length 0 (``0x00``) do not exist.
The three chunk types ``Key``, ``Elem``, and ``Val`` represent list items.
Digits are used at the end of chunk type names to enumerate the list items.
Within each list, numbers are consecutive in decimal format,
starting with zero.
For example, the list element ``Elem0`` will be followed by ``Elem1``.
``Elem9`` will be followed by ``Elem10`` etc.
If a list has only one entry, the number will be zero (e.g. ``Key0``).
.. note:: By convention, most chunk type names start with a capital letter
``A`` to ``Z`` and use *CamelCase* spelling for compound words
(i.e., approximately 95% of all chunk type names).
Names are derived from either English or German language.
The shortest chunk type names are ``x``, ``y``, ``X``, and ``Y``.
The longest chunk type name is
``AssignmentBetweenOrganizationDataAndTestProgramParamIds``
at 55 characters.
Chunk type names with special characters are rare. Those names
may start with ``nt&)m_`` prepended to a common *CamelCase* name,
e.g. ``nt&)m_CompressionType``.
Order of chunks
^^^^^^^^^^^^^^^
The order of some chunks is significant as they can establish
a partitioning into sections (chunks of data type ``0xDD`` start
a section that corresponding "End-of-Section" chunks end), chunk
lists (starting with the ``Count`` chunk), or key-value assignment
(``Key`` chunks immediately preceding an ``Elem`` chunk).
Beyond that, chunk order seems to be free but follows predictable,
machine-generated patterns.
.. note:: The actual degree of flexibility in chunk ordering is defined
by the implementation of the ``textXpert II`` parser, which is
not known.
End-of-Section chunks
^^^^^^^^^^^^^^^^^^^^^
"End-of-Section" chunks contain only one byte, ``0xFF``.
They can be discriminated from regular chunks in that chunk type names
of length ``255`` (``0xFF``) do not exist.
End-of-Section chunks terminate the most recent section started
by a ``0xDD`` chunk.
End of data stream
^^^^^^^^^^^^^^^^^^
The end of the data stream is marked by the "End-of-Section" chunk that
terminates the root section of the data stream (the first chunk in the
data stream is of type ``0xDD``).
.. _section-data-types:
Data type codes
---------------
The 1-byte data type code determines type and, in most cases, the
length of the chunk data section in bytes. A chunk type may appear
with different data codes throughout the data stream.
The following type codes exist:
+-----------+------------+----------------------------------------+
| Data type | Length of | Type of data |
| code | chunk data | |
+===========+============+========================================+
| ``0x11`` | 4 | Integer [#intdef]_ |
+-----------+------------+----------------------------------------+
| ``0x22`` | 4 | Unsigned integer: value |
+-----------+------------+----------------------------------------+
| ``0x33`` | 4 | Signed integer: coordinates |
+-----------+------------+----------------------------------------+
| ``0x44`` | 4 | Unsigned integer: flag, color code |
+-----------+------------+----------------------------------------+
| ``0x55`` | 2 | Integer [#intdef]_ |
+-----------+------------+----------------------------------------+
| ``0x66`` | 2 | Integer [#intdef]_ |
+-----------+------------+----------------------------------------+
| ``0x88`` | 1 | Unsigned byte: type code |
+-----------+------------+----------------------------------------+
| ``0x99`` | 1 | Boolean: ``0``\ =False, ``1``\ =True |
+-----------+------------+----------------------------------------+
| ``0xAA`` | at least 4 | Unicode string [#aaee]_ |
+-----------+------------+----------------------------------------+
| ``0xBB`` | 4 | Single precision floating point number |
+-----------+------------+----------------------------------------+
| ``0xCC`` | 8 | Double precision floating point number |
+-----------+------------+----------------------------------------+
| ``0xDD`` | at least 1 | Document section start [#ddtype]_ |
+-----------+------------+----------------------------------------+
| ``0xEE`` | at least 6 | List of data [#aaee]_ |
+-----------+------------+----------------------------------------+
Data types ``0x00``, ``0x77``, and ``0xFF`` do not appear.
.. [#intdef] The interpretation of integers of data type codes
``0x11``, ``0x55`` and ``0x66`` depends on context.
They may be either signed or unsigned, depending on
the chunk type rather than the data type code.
Data type code ``0x11`` is used for a range of
purposes, including color codes (which would
typically be interpreted as unsigned
hexadecimal values) and flags of value
``0xffffffff`` (which would typically be written
as signed ``-1`` rather than unsigned ``4294967295``).
.. [#aaee] The length of the chunk data field for data types
``0xAA`` and ``0xEE`` is encoded as part of the
chunk data. See also Section
:ref:`section-data-list-definition`.
.. [#ddtype] Data type ``0xDD`` indicates that a chunk marks the
beginning of a structural or logical **section**.
The length of the chunk data field is encoded as part
of the chunk data.
Chunk data contain an ASCII-encoded section descriptor
that may be empty
(see Section :ref:`section-ascii-string-definition`).
Chunk data
----------
Data values
^^^^^^^^^^^
The chunk data section of all data types except ``0xAA``, ``0xDD``,
and ``0xEE`` contains one numerical or boolean value.
In multi-byte data sections, data are arranged ``LSB`` to ``MSB``
and interpreted according to the table on data type codes.
Data structures
^^^^^^^^^^^^^^^
All variable-length structures are stored following a common pattern.
There are three types of variable-length data structures,
* ASCII strings,
* lists, and
* unicode strings.
Each of them is preceded by the length of the structure in multiples
of the units they contain.
For example, unicode strings will be preceded by the number of logical
characters rather than bytes, and lists will be preceded by the number
of entries in the list. (List entries are either numbers, strings, or
n-tuples.) As a result, empty lists and empty strings are represented
by a length indicator of ``0``.
.. _section-ascii-string-definition:
ASCII strings
~~~~~~~~~~~~~
ASCII-encoded strings are not intended to be printed to the user but help
structure the document. They appear at two places: the chunk type name,
and the section descriptor in chunks of data type ``0xDD``.
+--------+--------+--------+--------+
| ASCII string |
+--------+--------+--------+--------+
| Length | Characters |
+========+========+========+========+
| 0 | 1 | ... | n |
+--------+--------+--------+--------+
| n | first | ... | last |
+--------+--------+--------+--------+
Chunk type names are at least one character in length while
empty ASCII strings may appear as section descriptors.
+--------+--------+--------+--------+
| Empty ASCII string |
+--------+--------+--------+--------+
| Length | Characters |
+========+========+========+========+
| 0 | | | |
+--------+--------+--------+--------+
|``0x00``| | | |
+--------+--------+--------+--------+
.. _section-data-list-definition:
Lists of data
~~~~~~~~~~~~~
Chunk data of variable length are always encoded in a particular list
format.
Lists start with an indication of the number of items in the list.
This list length is encoded as 4-byte integer and may be ``0`` if no
list items follow. Bit 31 of the list length is ``0`` as this bit is
used as a marker for strings. Hence, lists can have up to
2,147,483,647 entries.
The list length parameter is followed by exactly the number of list
items specified.
All list items have the same data type.
List items may be n-tuples with constituents comprising different
data types.
Example of an empty list:
+--------+--------+--------+--------+
| Number of items in the list |
+--------+--------+--------+--------+
| ``0`` |
+========+========+========+========+
| 1 | 2 | 3 | 4 |
+--------+--------+--------+--------+
|``0x00``|``0x00``|``0x00``|``0x00``|
+--------+--------+--------+--------+
Example of a list containing 2 single-precision floating point numbers,
``10.1`` and ``1.0``:
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| Number of items in the list | Single-precision float | Single-precision float |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| ``2`` | ``10.1`` | ``1.0`` |
+========+========+========+========+========+========+========+========+========+========+========+========+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
|``0x02``|``0x00``|``0x00``|``0x00``|``0x9A``|``0x99``|``0x21``|``0x41``|``0x00``|``0x00``|``0x80``|``0x3F``|
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
Example of a list of 2 tuples that combine a 4-byte integer with a single-precision floating point number,
``(1, 10.1)`` and ``(2, 1.0)``:
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| Number of items | Tuple 1 | Tuple 2 |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| ``2`` | ``1`` | ``10.1`` | ``2`` | ``1.0`` |
+========+========+========+========+========+========+========+========+========+========+========+========+========+========+========+========+========+========+========+========+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
|``0x02``|``0x00``|``0x00``|``0x00``|``0x01``|``0x00``|``0x00``|``0x00``|``0x9A``|``0x99``|``0x21``|``0x41``|``0x02``|``0x00``|``0x00``|``0x00``|``0x00``|``0x00``|``0x80``|``0x3F``|
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
.. _section-unicode-string-definition:
Unicode strings
~~~~~~~~~~~~~~~
All characters and strings intended to be displayed to humans
are encoded in unicode `UCS-2/UTF-16`_ format.
Each character unit is 2 bytes long.
Strings are lists of 2-byte elements with
bit 31 of the list length set to ``1`` (*"bit-31 marker"*).
For example, the Norwegian interjection *Skål* would be represented as
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| String length with bit-31 marker | | | | |
| | S | k | å | l |
+========+========+========+========+========+========+========+========+========+========+========+========+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
|``0x04``|``0x00``|``0x00``|``0x80``|``0x53``|``0x00``|``0x6B``|``0x00``|``0xE5``|``0x00``|``0x6C``|``0x00``|
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
.. _UCS-2/UTF-16: https://en.wikipedia.org/wiki/UTF-16
Data type ``0xAA``
^^^^^^^^^^^^^^^^^^
Chunk data of chunks with data type ``0xAA`` contain exactly
one unicode string (see Section :ref:`section-data-list-definition`).
For example, data type code and chunk data of the string "Hi" would be:
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| | Chunk Data |
+ +--------+--------+--------+--------+--------+--------+--------+--------+
| Data | String length with bit-31 marker | | |
| type | | H | i |
+========+========+========+========+========+========+========+========+========+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
|``0xAA``|``0x02``|``0x00``|``0x00``|``0x80``|``0x48``|``0x00``|``0x69``|``0x00``|
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
Data type ``0xDD``
^^^^^^^^^^^^^^^^^^
Chunks of type ``0xDD`` start a structural section that is ended by
a corresponding End-of-Section chunk. The chunk data contain exactly
one ASCII-encoded string that serves as a section descriptor. For example,
data type code and section descriptor "Hi" would be:
+--------+--------+--------+--------+
| Data | Chunk data |
+ +--------+--------+--------+
| type | Length | H | i |
+========+========+========+========+
| 0 | 1 | 2 | 3 |
+--------+--------+--------+--------+
|``0xDD``|``0x02``|``0x48``|``0x69``|
+--------+--------+--------+--------+
Without section descriptor, data type code and chunk data would be:
+--------+--------+
| Data | Chunk |
| type | data |
+ +--------+
| | Length |
+========+========+
| 0 | 1 |
+--------+--------+
|``0xDD``|``0x00``|
+--------+--------+
Data type ``0xEE``
^^^^^^^^^^^^^^^^^^
Chunk data of type ``0xEE`` contain one list. The chunk data
start with a 2-byte long header that specifies the type of data in
the array, followed by a list as defined in
Section :ref:`section-data-list-definition`.
There are at least five different list data types defined as part of
data type ``0xEE``, which are ``0x0000``,
``0x0004``, ``0x0005``, ``0x0011``, and ``0x0016``.
+----------+------------+-----------+---------------------------------+
| Data type| Sub-type |Byte-length| Type of list elements |
| | |of elements| |
+==========+============+===========+=================================+
| ``0xEE`` | ``0x0000`` | n/a | n/a: empty list |
+----------+------------+-----------+---------------------------------+
| ``0xEE`` | ``0x0004`` | 4 | single-precision floating point |
+----------+------------+-----------+---------------------------------+
| ``0xEE`` | ``0x0005`` | 8 | double-precision floating point |
+----------+------------+-----------+---------------------------------+
| ``0xEE`` | ``0x0011`` | 1 | bytes of structured data record |
+----------+------------+-----------+---------------------------------+
| ``0xEE`` | ``0x0016`` | 4 | integer or boolean |
+----------+------------+-----------+---------------------------------+
The byte-list of sub-type ``0x0011`` is a wrapper for a mixed-type
data record whose interpretation depends on the chunk type
(see Section :ref:`section-ee11`).
This sub-type is used by the ``ZIMT`` script for measurement parameters
and settings, and to store the event audit log.
Sub-types ``0x0004`` and ``0x0005`` are used to store measurement time series recorded by
the testing machine.
Placeholder lists have sub-type ``0x0000``, followed by an empty list.
Sub-type ``0x0016`` seems to be used only to hold boolean values, with
``0x00000000`` and ``0x00000001`` representing ``False`` and ``True``,
respectively.
For example, data type code and chunk data of a list of sub-type ``0x0016``,
representing a list with one integer element of value ``0x12345678``,
would be:
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| | Chunk Data |
+ +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| Data | Sub-type | Number of list | List element |
| type | | entries | |
+========+========+========+========+========+========+========+========+========+========+========+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
|``0xEE``|``0x16``|``0x00``|``0x01``|``0x00``|``0x00``|``0x00``|``0x78``|``0x56``|``0x34``|``0x12``|
+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
Chunk lists
-----------
Chunk lists are elements of the document structure. They consist of a
chunk of type ``Count`` specifying the number of items in the chunk list,
followed by a succession of exactly that number of list items.
Chunk lists can be nested.
The three chunk types ``Key``, ``Elem``, and ``Val`` represent list items.
They end always on an ordinal number in decimal representation (see
Section :ref:`section-chunk-naming`), i.e., ``0`` in the example in the table:
+----------+--------------------------------------------------+
| Chunk | Use |
| type | |
| name | |
+==========+==================================================+
| ``Key0`` | Singular list item with information stored |
| | in chunk data of ``Key0``. This chunk may |
| | immediately preceede an ``Elem`` chunk of the |
| | same enumeration (i.e., ``Elem0`` in this case).|
+----------+--------------------------------------------------+
| ``Elem0``| Singular list item with information stored in |
| | chunk data of ``Elem0``, or marker of the |
| | beginning of a list item with information |
| | stored in subsequent chunks |
| | (data type ``0xDD``). |
+----------+--------------------------------------------------+
| ``Val0`` | Singular list item, information is stored |
| | in chunk data of ``Val0``. |
+----------+--------------------------------------------------+
The ``Count`` chunk is preceded by a structural chunk of data type
``0xDD`` that indicates the type of content or purpose of the list.
That preceding chunk type does not need to be unique in the data stream.
| zs2decode | /zs2decode-0.3.1.tar.gz/zs2decode-0.3.1/docs/format.rst | format.rst |
# written by Chris Petrich, 2015-2017
# License: MIT
import xml.etree.ElementTree as ET
import ast
fn_in = 'my_data_file.xml'
fn_out_pattern = 'sample_data_%s.txt'
# Read the XML dump (produced by zs2decode from a zs2 file) and parse it
# into an element tree; `root` is the anchor for all lookups below.
print('Reading XML')
with open(fn_in,'rt') as f:
    data = f.read()

print('Parsing XML')
root = ET.fromstring(data)

# Fixed typo in progress message ('Findig' -> 'Finding').
print('Finding data')
def _get_list_elements(root, path, tag='Elem'):
return [element for element in root.findall(path) if element.tag.startswith(tag)]
def _get_type(root, path):
return root.find(path).attrib['type']
def _get_value(root, path):
element = root.find(path)
if element is None:
raise ValueError('No path %r' % path)
string = element.attrib['value']
dtype=_get_type(root, path)
if dtype in ('AA','DD'): return u'%s' % string
if dtype.startswith('EE11'):
# there seems to have been something gone wrong with xml entity replacement
string = string.replace("\\'","\'")
return ast.literal_eval(string)
if __name__=='__main__':
    # get a mapping between channel number IDs and clear-text names
    # (taken from the channel manager of the first test task definition)
    channel_names = {}
    xml_channels = _get_list_elements(root, './Body/batch/SeriesDef/TestTaskDefs/Elem0/ChannelManager/ChannelManager/')
    for xml_channel in xml_channels:
        ID = _get_value(xml_channel, './ID')
        name = _get_value(xml_channel, './Name/Text')
        channel_names[ID]=name
    if True:
        # check if this file uses "short" paths (i.e., the original implementation)
        # If the probe path below resolves, the data blocks live directly
        # under ./Body/batch/Series ("short" layout); otherwise fall back
        # to the generic search in the else-branch.
        xml_data = root.find('./Body/batch/Series')
        xml_samples = _get_list_elements(xml_data, './SeriesElements/')
        try:
            IndexTimeChannel = _get_value(xml_samples[0], './SeriesElements/Elem0/RealTimeCapture/Trs/SingleGroupDataBlock/IndexTimeChannel')
        except ValueError:
            # probe path missing -> not the "short" layout
            IndexTimeChannel = None
        is_short = IndexTimeChannel is not None
    if is_short:
        xml_data = root.find('./Body/batch/Series')
        data={}
        xml_samples = _get_list_elements(xml_data, './SeriesElements/')
        for sample_idx, xml_sample in enumerate(xml_samples):
            # extract data for each sample contained in the file:
            # get sample parameters, we'll only use this to get the sample name (ID 48154)
            # NOTE(review): parameter ID 48154 appears to identify the
            # sample-name parameter -- confirm against other files.
            param_data = {}  # NOTE(review): assigned but never used
            xml_parameters = _get_list_elements(xml_sample, './EvalContext/ParamContext/ParameterListe/')
            sample_name = 'no-name-defined-%i' % sample_idx
            for xml_parameter in xml_parameters:
                ID = _get_value(xml_parameter, './ID')
                if ID == 48154:
                    new_sample_name = _get_value(xml_parameter, './QS_TextPar')[0]
                    # keep the fallback name when the stored name is empty
                    sample_name = new_sample_name if new_sample_name != '' else sample_name
                    break
            # now get the data
            channel_data = {}
            # nesting is significantly deeper in some files
            IndexTimeChannel = _get_value(xml_sample, './SeriesElements/Elem0/RealTimeCapture/Trs/SingleGroupDataBlock/IndexTimeChannel')
            time_channel_ID = None
            xml_channels = _get_list_elements(xml_sample, './SeriesElements/Elem0/RealTimeCapture/Trs/SingleGroupDataBlock/DataChannels/')
            for idx, xml_channel in enumerate(xml_channels):
                data_array = _get_value(xml_channel, './DataArray')
                ID = _get_value(xml_channel, './TrsChannelId')
                channel_data[ID]={'data':data_array,
                    'name': channel_names[ID]}
                # IndexTimeChannel is a positional index into the channel list
                if idx == IndexTimeChannel: time_channel_ID = ID
            # collect in a dict
            sample_data = {}
            sample_data['channel_data']=channel_data
            sample_data['time_channel_ID']=time_channel_ID
            sample_data['sample_name']=sample_name
            data[sample_name]=sample_data
    else:
        # this is more general but doesn't extract the names
        # of the respective data series
        data={}
        # locate every data block by searching for its IndexTimeChannel child
        xml_SingleGroupDataBlocks = root.findall('.//IndexTimeChannel/..')
        for sample_idx, xml_sample in enumerate(xml_SingleGroupDataBlocks):
            # todo: find names associated with data series
            sample_name = 'data_group-%i' % sample_idx
            # now get the data
            channel_data = {}
            IndexTimeChannel = _get_value(xml_sample, './IndexTimeChannel')
            time_channel_ID = None
            xml_channels = _get_list_elements(xml_sample, './DataChannels/')
            for idx, xml_channel in enumerate(xml_channels):
                data_array = _get_value(xml_channel, './DataArray')
                ID = _get_value(xml_channel, './TrsChannelId')
                channel_data[ID]={'data':data_array,
                    'name': channel_names[ID]}
                if idx == IndexTimeChannel: time_channel_ID = ID
            # collect in a dict
            sample_data = {}
            sample_data['channel_data']=channel_data
            sample_data['time_channel_ID']=time_channel_ID
            sample_data['sample_name']=sample_name
            data[sample_name]=sample_data

    # write one file per sample, tab-separated, one column per channel
    sample_names = list(data.keys())
    sample_names.sort()
    for sample_name in sample_names:
        if sample_name =='': continue
        print(sample_name)
        channels = list(data[sample_name]['channel_data'].keys())
        if len(channels) == 0:
            print(' no data channels --> skipping')
            continue
        channels.sort()
        # move the time channel to the first column
        channels.remove(data[sample_name]['time_channel_ID'])
        channels.insert(0,data[sample_name]['time_channel_ID'])
        # number of rows, taken from the time channel
        N=len(data[sample_name]['channel_data'][data[sample_name]['time_channel_ID']]['data'])
        fn_out = fn_out_pattern % sample_name
        out = []
        # two header rows: quoted human-readable names, then channel IDs
        line = '\t'.join(['"'+str(channel_names[channel])+'"' for channel in channels])
        out.append(line)
        line = '\t'.join([str(channel) for channel in channels])
        out.append(line)
        for row in range(N):
            line=[]
            for channel in channels:
                line.append('%.9g' % data[sample_name]['channel_data'][channel]['data'][row])
            out.append('\t'.join(line))
        with open(fn_out, 'wt') as f:
            f.write('\n'.join(out))
import requests
from .logger import setup_logger
logger = setup_logger(name=__name__)
def _zia_http_codes(response: requests.Response):
    """
    Internal helper to translate HTTP error responses into exceptions.
    For more information, please refer to
    https://help.zscaler.com/zia/about-error-handling

    :param response: (requests.Response Object)

    :raises ValueError: for any non-success status code; the message always
        starts with the status code for easier troubleshooting
    """
    if response.status_code in (200, 201, 202, 204):
        return
    # Known Zscaler error codes and their meanings.
    known_errors = {
        401: "Session is not authenticated or timed out",
        403: "API key disabled or SKU subscription missing or user role has no access",
        404: "Resource does not exist",
        409: "Request could not be processed because of possible edit conflict occurred",
        415: "Unsupported media type",
        429: "Exceeded the rate limit or quota",
        500: "Unexpected error",
        503: "Service is temporarily unavailable",
    }
    message = known_errors.get(response.status_code)
    if message is None:
        # Log the response body via the module logger instead of printing to
        # stdout, so the detail is preserved in non-interactive environments.
        logger.error("Unexpected HTTP response content: %s", response.content)
        raise ValueError(f"Unexpected HTTP response code: {response.status_code}")
    raise ValueError(f"{response.status_code} :{message}")
class HttpCalls(object):
    """
    Helper class that performs HTTP calls (GET/POST/PATCH/PUT) against a
    single host, applying shared headers and the TLS-verification setting
    configured at construction time.
    """
def __init__(
self,
host: str,
header: dict = None,
verify: bool = True,
):
"""
to start this instance, host IP address or fqdn is required
:param host: (str) IP address or fqdn
:param header: (dict) HTTP header
:param verify: (bool) True to verify ssl cert with in HTTP call
"""
self.version = "1.1"
self.host = host
self.headers = {"Content-type": "application/json", "Cache-Control": "no-cache"}
if header:
self.headers.update(header)
self.cookies = None
self.verify = verify
def get_call(
self,
url: str,
cookies: dict = None,
headers: dict = None,
params: dict = None,
error_handling: bool = False,
) -> requests.Response:
"""
Method to perform a GET HTTP call
:param url: (str) url
:param cookies: (str) cookies
:param headers: (dict) Additional HTTP headers
:param params: (dict) Key,Value parameters in the URL after a question mark
:param error_handling: (bool) when TRUE will use Zscaler HTTP codes
:return: (requests.Response Object)
"""
full_url = f"{self.host}{url}"
if headers:
self.headers.update(headers)
try:
response = requests.get(
url=full_url,
headers=self.headers,
cookies=cookies,
params=params,
verify=self.verify,
)
if error_handling:
_zia_http_codes(response)
else:
if response.status_code not in [200, 201, 204]:
raise ValueError(f"{response.status_code} -> {response.content}")
return response
except requests.HTTPError as e:
raise ValueError(e)
def post_call(
self,
url: str,
payload: dict,
params: dict = None,
headers: dict = None,
cookies: dict = None,
error_handling: bool = False,
urlencoded: bool = False,
) -> requests.Response:
"""
Method to perform an HTTP POST call
:param url: (str) url
:param payload: (dict)
:param params: (dict)
:param headers: (dict)
:param cookies: (str) cookies
:param error_handling: (bool) when TRUE will use Zscaler HTTP codes
:param urlencoded: (bool)
:return: (requests.Response Object)
"""
full_url = f"{self.host}{url}"
try:
if urlencoded:
url_encoded_headers = headers
response = requests.post(
url=full_url,
params=params,
headers=url_encoded_headers,
cookies=cookies,
data=payload,
verify=self.verify,
)
else:
if headers:
self.headers.update(headers)
response = requests.post(
url=full_url,
params=params,
headers=self.headers,
cookies=cookies,
json=payload,
verify=self.verify,
)
if error_handling:
_zia_http_codes(response)
else:
if response.status_code not in [200, 201, 204]:
raise ValueError(f"{response.status_code} -> {response.content}")
return response
except requests.HTTPError as e:
raise ValueError(e)
def patch_call(
self,
url: str,
payload: dict,
cookies: dict = None,
) -> requests.Response:
"""
Method to perform an HTTP PATH call
:param url: (str) url
:param payload: (dict)
:param cookies: (str) cookies
:return: (requests.Response Object)
"""
full_url = f"{self.host}{url}"
try:
response = requests.patch(
url=full_url,
headers=self.headers,
cookies=cookies,
json=payload,
verify=self.verify,
)
if response.status_code not in [200, 201, 204]:
raise ValueError(response.status_code)
return response
except requests.HTTPError as e:
raise ValueError(e)
def put_call(
self,
url: str,
payload: dict,
params: dict = None,
headers: dict = None,
cookies: dict = None,
error_handling: bool = False,
) -> requests.Response:
"""
Method to perform an HTTP PUT call
:param url: (str) url
:param params: (dict) Parameters to add to url
:param payload: (dict)
:param headers: (dict)
:param cookies: (str) cookies
:param error_handling: (bool) when TRUE will use Zscaler HTTP codes
:return: (requests.Response Object)
"""
full_url = f"{self.host}{url}"
if headers:
self.headers.update(headers)
try:
response = requests.put(
url=full_url,
params=params,
headers=self.headers,
cookies=cookies,
json=payload,
verify=self.verify,
)
if error_handling:
_zia_http_codes(response)
else:
if response.status_code not in [200, 201, 204]:
try:
raise ValueError(
f"HTTPS Response code {response.status_code} : {response.json()}"
)
except ValueError:
raise ValueError(response.status_code)
return response
except requests.HTTPError as e:
raise ValueError(e)
def delete_call(
self,
url: str,
payload: dict = None,
headers: dict = None,
cookies: dict = None,
error_handling: bool = False,
) -> requests.Response:
"""
Method to perform an HTTP DELETE call
:param url: (str) url
:param payload: (dict) json payload
:param headers: (dict)
:param cookies: (str) cookies
:param error_handling: (bool) when TRUE will use Zscaler HTTP codes
:return: (requests.Response Object)
"""
full_url = f"{self.host}{url}"
if headers:
self.headers.update(headers)
try:
response = requests.delete(
url=full_url,
headers=self.headers,
cookies=cookies,
json=payload,
verify=self.verify,
)
if error_handling:
_zia_http_codes(response)
else:
if response.status_code not in [200, 201, 204]:
raise ValueError(response.status_code)
return response
except requests.HTTPError as e:
raise ValueError(e) | zscaler-api-talkers | /zscaler_api_talkers-6.0.0-py3-none-any.whl/zscaler_api_talkers/helpers/http_calls.py | http_calls.py |
import time
import requests
import requests.packages.urllib3.exceptions
from .logger import setup_logger
# Disable the InsecureRequestWarning
requests.packages.urllib3.disable_warnings(
category=requests.packages.urllib3.exceptions.InsecureRequestWarning
)
logger = setup_logger(name=__name__)
def get_user_agent() -> str:
    """Return the static browser User-Agent string sent with Zscaler requests."""
    user_agent = (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/46.0.2490.86 Safari/537.36"
    )
    return user_agent
def request_(
    method: str,
    url: str,
    retries: int = 10,
    wait_time: float = 5,
    silence_logs: bool = False,
    **kwargs,
) -> requests.Response:
    """
    Submit to requests module with retry and error logic.

    :param method: (str) ['get', 'put', 'post', 'delete', 'patch', 'head', 'options']
    :param url: (str) URL to call.
    :param retries: (int) If an error is reached how many times to re-attempt request.
    :param wait_time: (float) Time to wait between re-attempts, when necessary.
    :param silence_logs: (bool) Suppress error messages in logs. (Default: False)
    :param kwargs: Options forwarded to ``requests.request``:
        params, data, json, headers, cookies, files, auth, timeout,
        allow_redirects, proxies, verify, stream, cert.
        Anything else is silently dropped.  See the ``requests`` documentation
        for the meaning of each option.
    :return: :class:`Response <Response>` object :rtype: requests.Response, or
        ``None`` when every attempt raised before a response was obtained.
    """
    # Loop-invariant: only these kwargs are forwarded to requests.request().
    passable_options = [
        "params",
        "data",
        "json",
        "headers",
        "cookies",
        "files",
        "auth",
        "allow_redirects",
        "proxies",
        "verify",
        "stream",
        "cert",
        "timeout",
    ]
    retry_attempts = 0
    result = None
    while retry_attempts <= retries:
        try:
            # Filter kwargs each attempt: the SSLError handler below may have
            # injected verify=False since the previous attempt.
            result = requests.request(
                method=method.upper(),
                url=url,
                **{k: v for k, v in kwargs.items() if k in passable_options},
            )
            if result.status_code < 400:
                break  # Only stop looping if the status_code is reported as not an error.
        except requests.exceptions.SSLError:
            if not silence_logs:
                logger.debug("Disabling SSL verification for the next request attempt.")
            kwargs.update(
                {
                    "verify": False,
                }
            )
            continue  # Skip the wait and try again but with SSL verification off.
        except requests.packages.urllib3.exceptions.HTTPError as e:
            # Fix: the old code wrote ``except requests.packages.urllib3.exceptions``,
            # which names a *module* and would raise TypeError if it ever matched.
            if not silence_logs:
                logger.error(f"Encountered error: {e}")
        except requests.exceptions.RequestException as e:
            if not silence_logs:
                logger.error(f"Encountered error: {e}")
        if not silence_logs:
            logger.info(
                f"Retrying request in {wait_time}s. Retries remaining: {retries - retry_attempts}"
            )
        retry_attempts += 1
        time.sleep(wait_time)
    # Explain common client-error statuses.  Guard against result being None
    # (every attempt raised) -- the old code crashed here with AttributeError.
    status_notes = {
        400: (
            "Status code 400 indicates that the server cannot or will not process "
            "the request due to something that is perceived to be a client error."
        ),
        401: (
            "Status code 401 indicates the client request has not been completed "
            "because it lacks valid authentication credentials for the requested resource."
        ),
        404: "Status code 404 indicates that the server cannot find the requested resource.",
        405: (
            "Status code 405 indicates that the server knows the request method, "
            "but the target resource doesn't support this method."
        ),
        415: (
            "Status code 415 indicates that the server refuses to accept the request "
            "because the payload format is in an unsupported format; so stop trying!"
        ),
        429: (
            "Status code 429 indicates the user has sent too many requests in a "
            "given amount of time."
        ),
    }
    if result is not None and not silence_logs:
        note = status_notes.get(result.status_code)
        if note:
            logger.info(note)
    return result
import json
import requests
from zscaler_api_talkers.helpers import HttpCalls, setup_logger
logger = setup_logger(name=__name__)
class ZpaTalker(object):
    """
    ZPA API talker
    Documentation: https://help.zscaler.com/zpa/zpa-api/api-developer-reference-guide
    """
    def __init__(
        self,
        customer_id: int,
        cloud: str = "https://config.private.zscaler.com",
        client_id: str = None,
        client_secret: str = "",
    ):
        """
        :param cloud: (str) Example https://config.zpabeta.net
        :param customer_id: (int) The unique identifier of the ZPA tenant
        :param client_id: (str) API client id; when supplied together with
            client_secret, authenticate() is called immediately
        :param client_secret: (str) API client secret
        """
        self.base_uri = cloud
        self.hp_http = HttpCalls(
            host=self.base_uri,
            verify=True,
        )
        self.jsessionid = None
        self.version = "1.3"
        # Populated with {"Authorization": "Bearer <token>"} by authenticate().
        self.header = None
        self.customer_id = customer_id
        if client_id and client_secret:
            self.authenticate(
                client_id=client_id,
                client_secret=client_secret,
            )
    def _obtain_all_results(
        self,
        url: str,
    ) -> list:
        """
        API response can have multiple pages. This method returns the whole
        response in a single list.

        :param url: (str) url
        :return: (list)
        """
        result = []
        if "?pagesize" not in url:
            url = f"{url}?pagesize=500"  # TODO: Move to parameters
        response = self.hp_http.get_call(
            url,
            headers=self.header,
            error_handling=True,
        )
        if "list" not in response.json().keys():
            return []
        if int(response.json()["totalPages"]) > 1:
            i = 0
            # NOTE(review): pages are fetched for i in 0..totalPages inclusive;
            # confirm against the API's page indexing (this may fetch a
            # duplicate or empty extra page).
            while i <= int(response.json()["totalPages"]):
                result = (
                    result
                    + self.hp_http.get_call(
                        f"{url}&page={i}",
                        headers=self.header,
                        error_handling=True,
                    ).json()["list"]
                )
                i += 1
        else:
            result = response.json()["list"]
        return result
    def authenticate(
        self,
        client_id: str,
        client_secret: str,
    ) -> None:
        """
        Method to obtain the Bearer Token. Refer to https://help.zscaler.com/zpa/adding-api-keys

        :param client_id: (str) client id
        :param client_secret: (str) client secret
        :return: None (stores the Authorization header on self.header)
        """
        url = "/signin"
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        payload = {
            "client_id": client_id,
            "client_secret": client_secret,
        }
        response = self.hp_http.post_call(
            url,
            headers=headers,
            error_handling=True,
            payload=payload,
            urlencoded=True,
        )
        self.header = {
            "Authorization": f"{response.json()['token_type']} {response.json()['access_token']}"
        }
        return
    # app-server-controller
    def list_servers(
        self,
        query: str = False,
        server_id: int = None,
    ) -> json:
        """
        Method to obtain all the configured Servers.

        :param query: (str) Example ?page=1&pagesize=20&search=consequat
        :param server_id: (int) Unique server id number
        :return: (json)
        """
        if server_id:
            url = (
                f"/mgmtconfig/v1/admin/customers/{self.customer_id}/server/{server_id}"
            )
        else:
            if not query:
                query = "?pagesize=500"
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/server{query}"
        response = self.hp_http.get_call(
            url,
            headers=self.header,
            error_handling=True,
        )
        return response.json()
    # application-controller
    def list_application_segments(
        self,
        application_id: int = None,
    ) -> json or list:
        """
        Method to obtain application segments.

        :param application_id: (int) Application unique identifier id
        :return: (json|list)
        """
        if application_id:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/application/{application_id}"
            response = self.hp_http.get_call(
                url,
                headers=self.header,
                error_handling=True,
            )
            return response.json()
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/application"
        response = self._obtain_all_results(url)
        return response
    def add_application_segment(
        self,
        name: str,
        health_reporting: str,
        domain_names: list,
        segment_group_id: str,
        server_groups: list,
        common_apps_dto: list = None,
        segment_group_name: str = "",
        health_check_type: str = "DEFAULT",
        clientless_apps: list = None,
        inspection_apps: list = None,
        sra_apps: list = None,
        tcp_port_range: dict = None,
        tcp_port_ranges: list = None,
        udp_port_ranges: list = None,
        udp_port_range: dict = None,
        description: str = "",
        enabled: bool = True,
        icmp_access_type: str = "NONE",
        ip_anchored: bool = False,
        double_encrypt: bool = False,
        bypass_type: str = "NEVER",
        is_cname_enabled: bool = True,
        select_connector_close_to_app: bool = False,
        passive_health_enabled: bool = True,
    ) -> json:
        """
        Adds a new Application Segment for a ZPA tenant.

        :param name: (str) App Name
        :param health_reporting: (str) possible values: NONE, ON_ACCESS, CONTINUOUS
        :param domain_names: (list) List of domains or IP addresses
        :param segment_group_id: (str) Application Segment Group id
        :param server_groups: (list) List of dictionaries, where key is id and value is serverGroupId [
            {"id": "<serverGroupId>"}
        ]
        :param common_apps_dto: (list) List of dictionaries, where appsConfig will list the apps with Browser Access
            or Inspection
        :param segment_group_name: (str) Application Segment Group Name
        :param health_check_type: (str)
        :param clientless_apps: (list) List of application domains in Application Segment with Browser access enabled
        :param inspection_apps: (list) List of application domains in Application Segment with Inspection enabled
        :param sra_apps: (list) List of application domains in Application Segment with Privileged Remote Access enabled
        :param tcp_port_range: (dict) [{"from":int, "to":int}]
        :param tcp_port_ranges: (list) ["from", "to"]. This will be deprecated in the future.
        :param udp_port_range: (dict) [{"from":int, "to":int}]
        :param udp_port_ranges: (list) ["from", "to"]. This will be deprecated in the future.
        :param description: (str) Description
        :param enabled: (bool) (True|False)
        :param icmp_access_type: (str) possible values: PING_TRACEROUTING, PING, NONE
        :param ip_anchored: (bool) (True|False)
        :param double_encrypt: (bool) (True|False)
        :param bypass_type: (str) possible values ALWAYS, NEVER, ON_NET
        :param is_cname_enabled: (bool) (True|False)
        :param select_connector_close_to_app: (bool) (True|False)
        :param passive_health_enabled: (bool) (True|False)
        :return: (json)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/application"
        payload = {
            "name": name,
            "description": description,
            "enabled": enabled,
            "healthCheckType": health_check_type,
            "healthReporting": health_reporting,
            "icmpAccessType": icmp_access_type,
            "ipAnchored": ip_anchored,
            "doubleEncrypt": double_encrypt,
            "bypassType": bypass_type,
            "isCnameEnabled": is_cname_enabled,
            "clientlessApps": clientless_apps,
            "inspectionApps": inspection_apps,
            "sraApps": sra_apps,
            "commonAppsDto": common_apps_dto,
            "selectConnectorCloseToApp": select_connector_close_to_app,
            "passiveHealthEnabled": passive_health_enabled,
            "tcpPortRanges": tcp_port_ranges,
            "tcpPortRange": tcp_port_range,
            "udpPortRange": udp_port_range,
            "udpPortRanges": udp_port_ranges,
            "domainNames": domain_names,
            "segmentGroupId": segment_group_id,
            "segmentGroupName": segment_group_name,
            "serverGroups": server_groups,
        }
        response = self.hp_http.post_call(
            url=url,
            payload=payload,
            headers=self.header,
            error_handling=True,
        )
        return response.json()
    def update_application_segment(
        self,
        application_id: int,
        payload: dict,
    ) -> requests.Response:
        """
        Updates the Application Segment details for the specified ID.

        :param application_id: (int) Application ID
        :param payload: (dict)
        :return: (requests.Response Object)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/application/{application_id}"
        response = self.hp_http.put_call(
            url=url,
            payload=payload,
            headers=self.header,
            error_handling=True,
        )
        return response
    def delete_application_segment(
        self,
        application_id: int,
    ) -> requests.Response:
        """
        Deletes the Application Segment with the specified ID.

        :param application_id: (int) Application ID
        :return: (requests.Response Object)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/application/{application_id}"
        # Fix: pass the auth header (was omitted, so the call depended on a
        # previous request having merged the token into HttpCalls.headers).
        response = self.hp_http.delete_call(
            url=url,
            headers=self.header,
            error_handling=True,
        )
        return response
    # segment-group-controller
    def list_segment_group(
        self,
        segment_group_id: int = None,
        query: str = False,
    ) -> json or list:
        """
        Get all the configured Segment Groups. If segment_group_id is given,
        obtain that Segment Group's details.

        :param segment_group_id: (int) The unique identifier of the Segment Group.
        :param query: (str) Example ?page=1&pagesize=20&search=consequat
        :return: (json|list)
        """
        if segment_group_id:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/segmentGroup/{segment_group_id}"
            response = self.hp_http.get_call(
                url, headers=self.header, error_handling=True
            ).json()
        else:
            if not query:
                query = "?pagesize=500"
            url = (
                f"/mgmtconfig/v1/admin/customers/{self.customer_id}/segmentGroup{query}"
            )
            response = self._obtain_all_results(url)
        return response
    def add_segment_group(
        self,
        name: str,
        description: str,
        enabled: bool = True,
    ) -> json:
        """
        Add a new segment group.

        :param name: (str) Name of segment Group
        :param description: (str) Description
        :param enabled: (bool) True or False
        :return: (json)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/segmentGroup"
        payload = {
            "name": name,
            "description": description,
            "enabled": enabled,
        }
        response = self.hp_http.post_call(
            url,
            headers=self.header,
            error_handling=True,
            payload=payload,
        )
        return response.json()
    def delete_segment_group(self, segmentGroupId: int) -> json:
        """
        Deletes specified Segment Group.

        :param segmentGroupId: (int) The unique identifier of the Segment Group.
        :return: (requests.Response Object)
        """
        # Fix: was self.customerId (nonexistent attribute -> AttributeError);
        # also pass the auth header explicitly.
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/segmentGroup/{segmentGroupId}"
        response = self.hp_http.delete_call(
            url=url,
            headers=self.header,
            error_handling=True,
        )
        return response
    def update_segment_group(self, segmentGroupId: int, payload: object) -> json:
        """
        Update Segment Group.

        :param segmentGroupId: (int) The unique identifier of the Segment Group.
        :param payload: (dict) Segment Group details to be updated.
        :return: (requests.Response Object)
        """
        # Fix: was self.customerId (nonexistent attribute -> AttributeError).
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/segmentGroup/{segmentGroupId}"
        response = self.hp_http.put_call(
            url,
            headers=self.header,
            error_handling=True,
            payload=payload,
        )
        return response
    # connector-controller
    def list_connector(
        self,
        connector_id: int = None,
    ) -> json or list:
        """
        Get all configured App Connectors. If connector_id is given, obtain
        that App Connector's details.

        :param connector_id: (int) The unique identifier of the App Connector.
        :return: (json|list)
        """
        if connector_id:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/connector/{connector_id}"
            return self.hp_http.get_call(
                url,
                headers=self.header,
                error_handling=True,
            ).json()
        else:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/connector"
            response = self._obtain_all_results(url)
        return response
    def delete_bulk_connector(
        self,
        ids: list,
    ) -> json:
        """
        Bulk-delete the specified App Connectors.

        :param ids: (list) List of resource ids for bulk deleting the App Connectors.
        :return: (json)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/connector/bulkDelete"
        payload = {"ids": ids}
        response = self.hp_http.post_call(
            url=url,
            headers=self.header,
            error_handling=True,
            payload=payload,
        )
        return response.json()
    # Connector-group-controller
    def list_connector_group(
        self,
        app_connector_group_id: int = None,
    ) -> json or list:
        """
        Gets all configured App Connector Groups for a ZPA tenant.

        :param app_connector_group_id: (int) The unique identifier of the Connector Group.
        :return: (json|list)
        """
        if app_connector_group_id:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/appConnectorGroup/{app_connector_group_id}"
            return self.hp_http.get_call(
                url,
                headers=self.header,
                error_handling=True,
            ).json()
        else:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/appConnectorGroup"
            response = self._obtain_all_results(url)
        return response
    def add_connector_group(
        self,
        name: str,
        description: str,
        latitude: str,
        longitude: str,
        location: str,
        upgradeDay: str = "SUNDAY",
        enabled: bool = True,
        dnsQueryType: str = "IPV4_IPV6",
        upgradeTimeInSecs: int = 66600,
        overrideVersionProfile: bool = False,
        versionProfileId: int = None,
        tcpQuickAckApp: bool = False,
        tcpQuickAckAssistant: bool = False,
        tcpQuickAckReadAssistant: bool = False,
        cityCountry: str = "",
        countryCode: str = "",
        connectors: list = None,
        serverGroups: list = None,
        lssAppConnectorGroup: bool = False,
    ) -> json:
        """
        Add an App Connector Group.

        :param name: (str) Name of App Connector Group
        :param description: (str) Description
        :param latitude: (str) Latitude of App Connector Group
        :param longitude: (str) Longitude of App Connector Group
        :param location: (str) Location of the App Connector Group
        :param upgradeDay: (str) App Connectors in this group attempt to update to a newer version of the software
            during this specified day
        :param enabled: (bool) Whether the group is enabled
        :param dnsQueryType: (str) DNS query type, e.g. IPV4_IPV6
        :param upgradeTimeInSecs: (int) App Connectors in this group attempt to update to a newer version of the
            software during this specified time
        :param overrideVersionProfile: (bool) Whether the default version profile of the App Connector Group is
            applied or overridden
        :param versionProfileId: (int) ID of the version profile
        :param tcpQuickAckApp: (bool) Whether TCP Quick Acknowledgement is enabled or disabled for the application.
            The tcpQuickAckApp, tcpQuickAckAssistant, and tcpQuickAckReadAssistant fields must all share the same value.
        :param tcpQuickAckAssistant: (bool) Whether TCP Quick Acknowledgement is enabled or disabled
        :param tcpQuickAckReadAssistant: (bool) Whether TCP Quick Acknowledgement is enabled or disabled
        :param cityCountry: (str) City and country of the group
        :param countryCode: (str) Country code
        :param connectors: (list) App Connector ids that are part of the App Connector Group.
        :param serverGroups: (list) Server Groups that are part of the App Connector Group
        :param lssAppConnectorGroup: (bool) Is App Connector Group reserved for LSS
        :return: (json)
        """
        # Fix: was self.customerId (nonexistent attribute -> AttributeError);
        # also replaced mutable default arguments ([]) with None sentinels.
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/appConnectorGroup"
        payload = {
            "name": name,
            "description": description,
            "latitude": latitude,
            "longitude": longitude,
            "location": location,
            "upgradeDay": upgradeDay,
            "enabled": enabled,
            "dnsQueryType": dnsQueryType,
            "upgradeTimeInSecs": upgradeTimeInSecs,
            "overrideVersionProfile": overrideVersionProfile,
            "versionProfileId": versionProfileId,
            "tcpQuickAckApp": tcpQuickAckApp,
            "tcpQuickAckAssistant": tcpQuickAckAssistant,
            "tcpQuickAckReadAssistant": tcpQuickAckReadAssistant,
            "cityCountry": cityCountry,
            "countryCode": countryCode,
            "connectors": connectors if connectors is not None else [],
            "serverGroups": serverGroups if serverGroups is not None else [],
            "lssAppConnectorGroup": lssAppConnectorGroup,
        }
        response = self.hp_http.post_call(
            url,
            headers=self.header,
            error_handling=True,
            payload=payload,
        )
        return response.json()
    def update_connector_group(self, appConnectorGroupId: int, payload: dict) -> json:
        """
        Update configured App Connector Groups for a ZPA tenant.

        :param appConnectorGroupId: (int) The unique identifier of the Connector Group
        :param payload: (dict) Details of the App Connector group to be updated
        :return: (requests.Response Object)
        """
        # Fix: was self.customerId (nonexistent attribute -> AttributeError).
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/appConnectorGroup/{appConnectorGroupId}"
        response = self.hp_http.put_call(
            url,
            headers=self.header,
            error_handling=True,
            payload=payload,
        )
        return response
    def delete_connector_group(self, appConnectorGroupId: int) -> json:
        """
        Delete the specified App Connector Group.

        :param appConnectorGroupId: (int) The unique identifier of the Connector Group
        :return: (requests.Response Object)
        """
        # Fix: was self.customerId (nonexistent attribute -> AttributeError);
        # also pass the auth header explicitly.
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/appConnectorGroup/{appConnectorGroupId}"
        response = self.hp_http.delete_call(
            url,
            headers=self.header,
            error_handling=True,
        )
        return response
    # ba-certificate-controller-v-2
    def list_browser_access_certificates(
        self,
    ) -> list:  # FIXME: duplicate but URL is slightly different.
        """
        Get all Browser issued certificates.

        :return: (list)
        """
        url = f"/mgmtconfig/v2/admin/customers/{self.customer_id}/clientlessCertificate/issued"
        response = self._obtain_all_results(url)
        return response
    # enrollment-cert-controller
    def list_enrollment_certificates(self) -> list:
        """
        Get all the Enrollment certificates.

        :return: (list)
        """
        url = f"/mgmtconfig/v2/admin/customers/{self.customer_id}/enrollmentCert"
        response = self._obtain_all_results(url)
        return response
    def list_v1_browser_access_certificates(
        self,
    ) -> list:
        """
        Get all the issued certificates.

        :return: (list)
        """
        url = (
            f"/mgmtconfig/v1/admin/customers/{self.customer_id}/visible/versionProfiles"
        )
        response = self._obtain_all_results(url)
        return response
    # customer-version-profile-controller
    def list_customer_version_profile(
        self,
        query: str = False,
    ) -> json:
        """
        Get Version Profiles visible to a customer.

        :param query: (str) Example ?page=1&pagesize=20&search=consequat
        :return: (json)
        """
        if not query:
            query = "?pagesize=500"
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/visible/versionProfiles{query}"
        response = self.hp_http.get_call(
            url,
            headers=self.header,
            error_handling=True,
        )
        return response.json()
    # cloud - connector - group - controller
    def list_cloud_connector_group(
        self,
        group_id: int = None,
        query: str = False,
    ) -> json:
        """
        Get all configured Cloud Connector Groups. If group_id is given, get
        that Cloud Connector Group's details.

        :param group_id: (int)
        :param query: (str) Example ?page=1&pagesize=20&search=consequat
        :return: (json)
        """
        if group_id:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/cloudConnectorGroup/{group_id}"
        else:
            if not query:
                query = "?pagesize=500"
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/cloudConnectorGroup{query}"
        response = self.hp_http.get_call(
            url,
            headers=self.header,
            error_handling=True,
        )
        return response.json()
    # idp-controller-v-2
    def list_idp(
        self,
        query: str = False,
    ) -> list:
        """
        Method to get all the IdP details for a ZPA tenant.

        :param query: (str) HTTP query
        :return: (list)
        """
        if not query:
            query = "?pagesize=500"
        url = f"/mgmtconfig/v2/admin/customers/{self.customer_id}/idp{query}"
        response = self._obtain_all_results(url)
        return response
    # provisioningKey-controller
    def list_provisioning_key(
        self,
        association_type: str = "CONNECTOR_GRP",
    ) -> list:
        """
        Gets details of all the configured provisioning keys.

        :param association_type: (str) The supported values are CONNECTOR_GRP and SERVICE_EDGE_GRP.
        :return: (list)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/associationType/{association_type}/provisioningKey"
        response = self._obtain_all_results(url)
        return response
    # policy-set-controller
    # scim-attribute-header-controller
    def list_scim_attributes(
        self,
        idp_id: int,
        query: str = False,
    ) -> json:
        """
        List SCIM attributes for an IdP.

        :param idp_id: (int) The unique identifier of the IdP
        :param query: (str) ?page=1&pagesize=20&search=consequat
        :return: (json)
        """
        if not query:
            query = "?pagesize=500"
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/idp/{idp_id}/scimattribute{query}"
        response = self.hp_http.get_call(
            url,
            headers=self.header,
            error_handling=True,
        )
        return response.json()
    # scim-group-controller
    def list_scim_groups(
        self,
        idp_id: int,
        query: str = False,
    ) -> list:
        """
        Method to list all SCIM groups.

        :param idp_id: (int) The unique identifier of the IdP
        :param query: (str) ?page=1&pagesize=20&search=consequat
        :return: (list)
        """
        if not query:
            query = "?pagesize=500"
        url = f"/userconfig/v1/customers/{self.customer_id}/scimgroup/idpId/{idp_id}{query}"
        response = self._obtain_all_results(url)
        return response
    # saml-attr-controller-v-2
    def list_saml_attributes(self) -> list:
        """
        Method to get all SAML attributes.

        :return: (list)
        """
        url = f"/mgmtconfig/v2/admin/customers/{self.customer_id}/samlAttribute"
        response = self._obtain_all_results(url)
        return response
    # global-policy-controller
    def list_policies(
        self,
        policy_type: str = "ACCESS_POLICY",
    ) -> list:
        """
        List policy rules by policy type.

        :param policy_type: (str) Possible values = ACCESS_POLICY, GLOBAL_POLICY, TIMEOUT_POLICY,
            REAUTH_POLICY, SIEM_POLICY, CLIENT_FORWARDING_POLICY, BYPASS_POLICY
        :return: (list)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/policySet/rules/policyType/{policy_type}"
        response = self._obtain_all_results(url)
        return response
    def list_policy_set(
        self,
        policy_type: str = "ACCESS_POLICY",
    ) -> json:
        """
        Gets the policy set for the specified policy type.

        :param policy_type: (str) Supported values are ACCESS_POLICY, GLOBAL_POLICY, TIMEOUT_POLICY,
            REAUTH_POLICY, SIEM_POLICY, CLIENT_FORWARDING_POLICY, BYPASS_POLICY
        :return: (json)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/policySet/policyType/{policy_type}"
        response = self.hp_http.get_call(
            url,
            headers=self.header,
            error_handling=True,
        )
        return response.json()
    def add_policy_set(
        self,
        app_operands: list,
        rule_name: str,
        action: str,
        policy_set_id: int,
        operands: list,
        operator: str,
        msg_string: str = None,
    ) -> json:
        """
        Method to create a new access Policy.

        :param app_operands: (list) List of app_operands: Examples = [{
            "objectType": "APP",
            "lhs": "id",
            "rhs": applicationId,
        }]
        :param rule_name: (str) Policy set Rule Name
        :param action: (str) ALLOW / DENY
        :param policy_set_id: (int) Global Policy ID. Can be obtained from list_policy_set
        :param operands: (list) List of operands. Example = [{
            "objectType": "SAML",
            "lhs": "<samlAttrId>",
            "rhs": "<samlAttrValue>",
        },{
            "objectType": "SCIM",
            "lhs": "<scimAttrId>",
            "rhs": "<scimAttrValue>"
        }]
        :param operator: (str)
        :param msg_string: (str)
        :return: (json)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/policySet/{policy_set_id}/rule"
        payload = {
            "conditions": [
                {"operands": app_operands},
                {
                    "operands": operands,
                    "operator": operator,
                },
            ],
            # Seems here needs to be AND
            "operator": "AND",
            "name": rule_name,
            "description": "Description",
            "action": action,
            "customMsg": msg_string,
        }
        logger.info(payload)
        response = self.hp_http.post_call(
            url=url,
            headers=self.header,
            error_handling=True,
            payload=payload,
        )
        return response.json()
    # Server Group Controller
    def list_server_groups(
        self,
        group_id: int = None,
    ) -> json or list:
        """
        Method to get all configured Server Groups. If group_id is given, get
        that Server Group's details.

        :param group_id: (int) The unique identifier of the Server Group.
        :return: (json|list)
        """
        if group_id:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/serverGroup/{group_id}"
            response = self.hp_http.get_call(
                url,
                headers=self.header,
                error_handling=True,
            ).json()
        else:
            url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/serverGroup"
            response = self._obtain_all_results(url)
        return response
    def add_server_groups(
        self,
        name: str,
        description: str,
        connector_group_id: list,
    ) -> json:
        """
        Add a Server Group.

        :param name: (str) Server Group Name
        :param description: (str) Description
        :param connector_group_id: (list) List of dictionaries with key as "id" and value connector_group_id.
            [{"id": connector_group_id}]
        :return: (json)
        """
        url = f"/mgmtconfig/v1/admin/customers/{self.customer_id}/serverGroup"
        payload = {
            "enabled": True,
            "dynamicDiscovery": True,
            "name": name,
            "description": description,
            "servers": [],
            "appConnectorGroups": connector_group_id,
        }
        response = self.hp_http.post_call(
            url=url,
            headers=self.header,
            error_handling=True,
            payload=payload,
        )
        return response.json()
    def list_posture_profiles(
        self,
        query: str = False,
    ) -> list:
        """
        Method to get all the posture profiles for a ZPA tenant.

        :param query: (str) HTTP query
        :return: (list)
        """
        if not query:
            query = "?pagesize=500"
        url = f"/mgmtconfig/v2/admin/customers/{self.customer_id}/posture{query}"
        response = self._obtain_all_results(url)
        return response
    def list_privileged_consoles(
        self,
        query: str = False,
    ) -> list:
        """
        Method to get all the privileged remote consoles for a ZPA tenant.

        :param query: (str) HTTP query
        :return: (list)
        """
        if not query:
            query = "?pagesize=500"
        url = f"/mgmtconfig/v2/admin/customers/{self.customer_id}/privilegedConsoles{query}"
        response = self._obtain_all_results(url)
        return response
    def list_sra_consoles(self) -> list:
        """
        Method to obtain the list of SRA consoles from all application segments.

        :return: (list)
        """
        sra_list = []
        app_segments = self.list_application_segments()
        for apps in app_segments:
            srap = apps.get("sraApps")
            if srap is not None:
                sra_list.extend(srap)
        return sra_list
    # Certificate Controller v2
    def list_issued_certificates(
        self,
        query: str = None,
    ) -> list:
        """
        Method to get all issued certificates.

        :param query: (str) HTTP query
        :return: (list)
        """
        if not query:
            query = "?pagesize=500"  # TODO: Query never put into url.
        url = f"/mgmtconfig/v2/admin/customers/{self.customer_id}/certificate/issued"
        response = self._obtain_all_results(url)
        return response
import json
import pdb
import time
import requests
from zscaler_api_talkers.helpers.http_calls import HttpCalls
from zscaler_api_talkers.helpers.logger import setup_logger
logger = setup_logger(name=__name__)
class ClientConnectorTalker(object):
    """
    Client Connector API talker
    Documentation: under development
    Currently in beta status
    """

    def __init__(
        self,
        cloud: str,
        client_id: str = "",
        secret_key: str = "",
    ):
        """
        :param cloud: (str) Top Level Domain (TLD) of the Zscaler cloud where tenant resides.
        :param client_id: (str) Client ID
        :param secret_key: (str) Secret Key
        """
        self.base_uri = f"https://api-mobile.{cloud}/papi"
        self.hp_http = HttpCalls(
            host=self.base_uri,
            verify=True,
        )
        self.jsession_id = None
        self.version = "beta 0.1"
        self.header = {}
        # Authenticate right away when credentials are supplied up front.
        if client_id and secret_key:
            self.authenticate(
                client_id=client_id,
                secret_key=secret_key,
            )

    def authenticate(
        self,
        client_id: str,
        secret_key: str,
    ):
        """
        Method to authenticate.

        :param client_id: (str) Client id
        :param secret_key: (str) Client secret, obtained from portal.
        """
        payload = {
            "apiKey": client_id,
            "secretKey": secret_key,
        }
        url = "/auth/v1/login"
        response = self.hp_http.post_call(
            url=url,
            headers={"Accept": "*/*"},
            payload=payload,
        )
        # All subsequent calls carry the JWT in the "auth-token" header.
        self.header = {"auth-token": response.json()["jwtToken"]}

    def _obtain_all(
        self,
        url: str,
        cookies: dict = None,
        params: dict = None,
        headers: dict = None,
    ) -> json:
        """
        Internal method that queries all pages

        :param url: (str) URL; must already contain a query string ("&page=N" is appended).
        :param cookies: (dict) Cookies
        :param params: (dict) Parameters to pass in request
        :param headers: (dict) Headers to pass in request

        :return: (json) JSON of results
        """
        page = 1
        result = []
        while True:
            response = self.hp_http.get_call(
                f"{url}&page={page}",
                cookies=cookies,
                params=params,
                headers=headers,
                error_handling=True,
            )
            if response.json():
                result += response.json()
                page += 1
                time.sleep(0.5)  # small delay between pages to respect rate limits
            else:
                # An empty page signals the end of the result set.
                break
        return result

    def list_devices(
        self,
        username: str = None,
        os_type: str = None,
    ) -> json:
        """
        Gets the list of all enrolled devices of your organization and their basic details.

        :param username: (str) Username in email format
        :param os_type: (str) 1 - iOS, 2 - Android, 3 - Windows, 4 - macOS, 5 - Linux

        :return: (json) JSON of results
        """
        url = "/public/v1/getDevices?pageSize=500"
        if username:
            url += f"&username={username}"
        if os_type:
            url += f"&osType={os_type}"
        response = self._obtain_all(
            url=url,
            headers=self.header,
        )
        return response

    def list_otp(
        self,
        ud_id: int,
    ) -> json:
        """
        Method to fetch the One Time Password for a specific device. These passwords are unique and tied to a
        device UDID.

        :param ud_id: (int) User device ID

        :return: (json) JSON of results
        """
        url = "/public/v1/getOtp"
        parameters = {
            "udid": ud_id,
        }
        response = self.hp_http.get_call(
            url=url,
            params=parameters,
            headers=self.header,
        )
        return response.json()

    def list_passwords(
        self,
        ud_id: int,
    ):
        """
        Method to fetch the passwords for a specific device, keyed by the device UDID.

        NOTE(review): this method is a duplicate of list_otp and still calls the
        /public/v1/getOtp endpoint — presumably it should target a dedicated
        passwords endpoint. Behavior is kept unchanged here; confirm the intended
        endpoint against the Client Connector API documentation.

        :param ud_id: (int) User device ID

        :return: (json) JSON of results
        """
        url = "/public/v1/getOtp"
        parameters = {
            "udid": ud_id,
        }
        response = self.hp_http.get_call(
            url=url,
            params=parameters,
            headers=self.header,
        )
        return response.json()

    def remove_devices(
        self,
        username: str = None,
        client_connector_version: str = None,
        ud_ids: list = None,
        os_type: int = 0,
    ) -> json:
        """
        Method to mark the device for removal (Device Removal Pending).
        API currently can remove up to 30 devices per call

        :param username: (str) Username
        :param client_connector_version: (str) Client connector version
        :param ud_ids: (list) List of user device ids
        :param os_type: (int) 0 ALL OS types, 1 IOS, 2 Android, 3 Windows, 4 macOS, 5 Linux

        :return: (json) JSON of results
        """
        # Consistency fix: normalize the None default to an empty list, as
        # force_remove_devices already does, instead of sending "udids": null.
        if ud_ids is None:
            ud_ids = []
        url = "/public/v1/removeDevices"
        payload = {
            "userName": username,
            "clientConnectorVersion": client_connector_version,
            "udids": ud_ids,
            "osType": os_type,
        }
        response = self.hp_http.post_call(
            url=url,
            headers=self.header,
            payload=payload,
        )
        return response.json()

    def force_remove_devices(
        self,
        username: str = None,
        client_connector_version: str = None,
        ud_ids: list = None,
        os_type: int = 0,
    ) -> json:
        """
        Force Remove, has the same effect as Remove, though it additionally moves the device straight to Removed
        and also signals the cloud to invalidate the user's session.
        API currently can remove up to 30 devices per call

        :param username: (str) Username
        :param client_connector_version: (str) ZCC version
        :param ud_ids: (list) List of user device ids
        :param os_type: (int) 0 ALL OS types, 1 IOS, 2 Android, 3 Windows, 4 macOS, 5 Linux

        :return: (json) JSON of results
        """
        if ud_ids is None:
            ud_ids = []
        url = "/public/v1/forceRemoveDevices"
        payload = {
            "clientConnectorVersion": client_connector_version,
            "udids": ud_ids,
            "osType": os_type,
            "userName": username,
        }
        response = self.hp_http.post_call(
            url=url,
            headers=self.header,
            payload=payload,
        )
        return response.json()

    def list_download_service_status(
        self,
    ) -> bytes:
        """
        Method to download Service Status

        :return: (bytes) Raw response body (the service-status file content).
        """
        # Fix: the previous return annotation `requests.Response.content` evaluated
        # to a property object, not a type; the method returns raw bytes.
        url = "/public/v1/downloadServiceStatus"
        response = self.hp_http.get_call(
            url=url,
            headers=self.header,
        )
        return response.content
class ZccTalker(ClientConnectorTalker):
    """
    Deprecated name for ClientConnectorTalker; retained so existing imports keep working.
    """

    def __init__(
        self,
        cloud: str,
        client_id: str = "",
        secret_key: str = "",
    ):
        """
        Warn about the deprecation, then defer entirely to ClientConnectorTalker.

        :param cloud: (str) Top Level Domain (TLD) of the Zscaler cloud where tenant resides.
        :param client_id: (str) Client ID
        :param secret_key: (str) Secret Key
        """
        logger.warning(
            "Deprecating ZccTalker. Start using ClientConnectorTalker instead."
        )
        super().__init__(
            cloud=cloud,
            client_id=client_id,
            secret_key=secret_key,
        )
import json
import pdb # noqa
import time
import requests
from zscaler_api_talkers.helpers import HttpCalls, setup_logger
from zscaler_api_talkers.zia.models import (
super_categories,
valid_category_ids,
valid_countries,
)
from zscaler_api_talkers.zia.helpers import _obfuscate_api_key
logger = setup_logger(name=__name__)
class ZiaTalker(object):
"""
ZIA API talker
Documentation:
https://help.zscaler.com/zia/zia-api/api-developer-reference-guide
"""
def __init__(
self,
cloud_name: str,
bearer: str = None,
api_key: str = "",
username: str = "",
password: str = "",
):
"""
Method to start the class
:param cloud_name: (str) Example: zscalerbeta.net, zscalerone.net, zscalertwo.net, zscalerthree.net,
zscaler.net, zscloud.net
:param bearer: (str) OAuth2.0 Bear token
"""
self.base_uri = f"https://zsapi.{cloud_name}/api/v1"
self.hp_http = HttpCalls(
host=self.base_uri,
verify=True,
)
self.cookies = None
self.headers = None
if bearer:
self.headers = {"Authorization": f"Bearer {bearer}"}
elif username and any([password, api_key]):
self.authenticate(
username=username,
api_key=api_key,
password=password,
)
def authenticate(
self,
api_key: str,
username: str,
password: str = None,
):
"""
Method to authenticate.
:param api_key: (str) API key
:param username: (str) A string that contains the email ID of the API admin
:param password: (str) A string that contains the password for the API admin
"""
timestamp, key = _obfuscate_api_key(api_key)
payload = {
"apiKey": key,
"username": username,
"password": password,
"timestamp": timestamp,
}
url = "/authenticatedSession"
response = self.hp_http.post_call(
url=url,
payload=payload,
)
self.cookies = {"JSESSIONID": response.cookies["JSESSIONID"]}
def authenticated_session(self) -> json:
"""
Checks if there is an authenticated session
:return: (json)
"""
url = "/authenticatedSession"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
)
return response.json()
def end_session(self) -> json:
"""
Method to end an authenticated session
:return: (json)
"""
url = "/authenticatedSession"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
payload={},
)
return response.json()
def _obtain_all(
self,
url: str,
) -> list:
"""
Internal method that queries all pages
:param url: (str) URL
:return: (list) List of results
"""
page = 1
result = []
while True:
response = self.hp_http.get_call(
url=f"{url}&page={page}",
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
if response.json():
result += response.json()
page += 1
time.sleep(1)
else:
break
return result
def get_status(self) -> json:
"""
Method to obtain the activation status for a configuration change
:return: (json) JSON object with the status
"""
url = "/status"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
)
return response.json()
def activate_status(self) -> json:
"""
Method to activate configuration changes
:return: (json) JSON object with the status
"""
url = "/status/activate"
response = self.hp_http.post_call(
url,
payload={},
cookies=self.cookies,
error_handling=True,
)
return response.json()
# Admin Audit Logs
def list_auditlog_entry_report(self) -> json:
"""
Gets the status of a request for an audit log report. After sending a POST request to /auditlogEntryReport
to generate a report, you can continue to call GET /auditlogEntryReport to check whether the report has
finished generating. Once the status is COMPLETE, you can send another GET request to
/auditlogEntryReport/download to download the report as a CSV file.
:return: (json)
"""
url = "/auditlogEntryReport"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
headers=self.headers,
error_handling=True,
)
return response.json()
def download_auditlog_entry_report(self) -> requests.Response:
"""
Gets the status of a request for an audit log report. After sending a POST request to /auditlogEntryReport
to generate a report, you can continue to call GET /auditlogEntryReport to check whether the report has
finished generating. Once the status is COMPLETE, you can send another GET request to
/auditlogEntryReport/download to download the report as a CSV file.
:return: (request.Response)
"""
url = "/auditlogEntryReport/download"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
headers=self.headers,
error_handling=True,
)
return response
def add_auditlog_entry_report(
self,
start_time: int,
end_time: int,
action_types: list = None,
category: str = None,
subcategories: list = None,
action_interface: str = None,
) -> requests.Response:
"""
Creates an audit log report for the specified time period and saves it as a CSV file. The report includes
audit information for every call made to the cloud service API during the specified time period. Creating a
new audit log report will overwrite a previously-generated report.
:param start_time: (int) The timestamp, in epoch, of the admin's last login
:param end_time: (int) The timestamp, in epoch, of the admin's last logout.
:param action_types: (list) The action performed by the admin in the ZIA Admin Portal or API
:param category: (str) The location in the Zscaler Admin Portal (i.e., Admin UI) where the actionType was
performed.
:param subcategories: (list) The area within a category where the actionType was performed.
:param action_interface: (str) The interface (i.e., Admin UI or API) where the actionType was performed.
:return: (requests.Response Object) 204 Successful Operation
"""
url = "/auditlogEntryReport"
payload = {
"startTime": start_time,
"endTime": end_time,
}
if category:
payload.update(category=category)
if subcategories:
payload.update(subcategories=subcategories)
if action_interface:
payload.update(actionInterface=action_interface)
if action_types:
payload.update(actionTypes=action_types)
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
# Admin & Role Management
def list_admin_users(
self,
user_id: int = None,
query: str = None,
) -> json:
"""
Gets a list of admin users. By default, auditor user information is not included.
:param user_id: (int) user ID
:param query: (str) HTTP query # TODO: What is this? Looks like it is just parameters
:return: (json) JSON of results
"""
if user_id:
url = f"/adminUsers/{user_id}"
return self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
).json()
else:
if query:
url = f"/adminUsers?{query}?pageSize=1000"
else:
url = "/adminUsers?pageSize=1000"
return self._obtain_all(url)
def add_admin_users(self, loginName: str, userName: str, email: str, password: str, role: dict, comments: str = '',
adminScopeType: str ='ORGANIZATION',
adminScopeScopeEntities: list =[],
adminScopescopeGroupMemberEntities: list =[],
isNonEditable: bool = False,
disabled: bool = False,
isAuditor: bool = False,
isPasswordLoginAllowed: object = False,
isSecurityReportCommEnabled: object = False,
isServiceUpdateCommEnabled: object = False,
isProductUpdateCommEnabled: object = False,
isPasswordExpired: object = False,
isExecMobileAppEnabled: object = False,
execMobileAppTokens: object = []) -> json:
"""
Adds a new Admininstrator.
:param loginName: string. Admin or auditor's login name. loginName is in email format
and uses the domain name associated to the Zscaler account.
:param userName: string. UserName.
:param email: string. Email Address.
:param password: string. Password for administrator. If admin single sign-on (SSO) is disabled, then this field is mandatory
:param role : Role of the Admin
:param comments: string. Comments.
:param adminScopeType: string. Scope of the admin.
:param adminScopeScopeEntities: list: Department or Location when adminScopeType is set to Deportment or Location.
:param adminScopescopeGroupMemberEntities: list. Location Groups when adminScopeType is set to Location Group.
:param isNonEditable: boolean. Indicates whether or not the admin can be edited or deleted. default: False.
:param disabled: boolean. If admin accounts is disabled. default: False.
:param isAuditor:boolean. Indicates if user is auditor. default: False.
:param isPasswordLoginAllowed: boolean. If password login is allowed. default: False.
:param isSecurityReportCommEnabled: boolean. Communication for Security Report is enabled. default: False.
:param isServiceUpdateCommEnabled: boolean. Communication setting for Service Update. default: False.
:param isProductUpdateCommEnabled: boolean. Communication setting for Product Update. default: False.
:param isPasswordExpired: boolean. Expire password to force user to change password on logon. default: False.
:param isExecMobileAppEnabled: boolean. Indicates whether or not Executive Insights App access is enabled for the admin. default: False.
:return:json()
"""
url = "/adminUsers"
payload = {
"loginName": loginName,
"userName": userName,
"email": email,
"password": password,
"role": role,
"comments": comments,
"adminScopeType": adminScopeType,
"adminScopeScopeEntities": adminScopeScopeEntities,
"adminScopescopeGroupMemberEntities": adminScopescopeGroupMemberEntities,
"isNonEditable": isNonEditable,
"disabled": disabled,
"isAuditor": isAuditor,
"isPasswordLoginAllowed": isPasswordLoginAllowed,
"isSecurityReportCommEnabled": isSecurityReportCommEnabled,
"isServiceUpdateCommEnabled": isServiceUpdateCommEnabled,
"isProductUpdateCommEnabled": isProductUpdateCommEnabled,
"isPasswordExpired": isPasswordExpired,
"isExecMobileAppEnabled": isExecMobileAppEnabled,
"execMobileAppTokens": execMobileAppTokens
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_admin_roles(
self,
query: str = None,
) -> json:
"""
Gets a name and ID dictionary of al admin roles
:param query: (str) HTTP query # TODO: What is this? Looks like it is just parameters
:return: (json)
"""
if query:
url = f"/adminRoles/lite?{query}"
else:
url = "/adminRoles/lite"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
# URL Categories
def list_url_categories(
self,
custom: bool = False,
) -> json:
"""
Gets information about all or custom URL categories
:param custom: (bool) If True it will return custom categories only. Default is False.
:return: (json)
"""
if custom:
url = "/urlCategories?customOnly=true"
else:
url = "/urlCategories"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_url_categories_lite(self) -> json:
"""
Gets a lightweight key-value list of all or custom URL categories.
:return: (json)
"""
url = "/urlCategories/lite"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_url_categories(
self,
name: str,
super_category: str,
type_list: str = None,
urls: list = None,
db_categorized_urls: list = None,
keywords_retaining_parent_category: list = None,
keywords: list = None,
custom_category: bool = False,
ip_ranges: list = None,
ip_ranges_retaining_parent_category: list = None,
description: str = None,
) -> json:
"""
Adds a new custom URL category.
:param name: (str) Name of the custom category. Possible values URL_CATEGORY, TLD_CATEGORY, ALL
:param super_category: (str) super category
:param type_list: (list)
:param urls: (list) List of urls
:param db_categorized_urls: (list) URL retaining parent category
:param keywords_retaining_parent_category: (list) Retained custom keywords from the parent URL category that is
associated to a URL category.
:param keywords: (list) Custom keywords associated to a URL category.
:param custom_category: (bool) Default False. Set to True for custom category
:param ip_ranges: (list) Custom IP address ranges associated to a URL category
:param ip_ranges_retaining_parent_category: (list) The retaining parent custom IP address ranges associated to a
URL category.
:param description: (str) Description or notes
:return: json
"""
if not type_list:
type_list = "URL_CATEGORY"
if keywords_retaining_parent_category is None:
keywords_retaining_parent_category = []
if super_category not in super_categories:
logger.error(f"Invalid Super Category: {super_categories}")
raise ValueError("Invalid super category")
if keywords is None:
keywords = []
if ip_ranges is None:
ip_ranges = []
url = "/urlCategories"
payload = {
"configuredName": name,
"customCategory": custom_category,
"superCategory": super_category,
"keywordsRetainingParentCategory": keywords_retaining_parent_category,
"keywords": keywords,
"urls": urls,
"dbCategorizedUrls": db_categorized_urls,
"ipRanges": ip_ranges,
"ipRangesRetainingParentCategory": ip_ranges_retaining_parent_category,
"type": type_list,
"description": description,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_raw_url_categories(
self,
payload: dict,
) -> json:
"""
Adds a new custom URL category.
:param payload: (dict)
:return: (json)
"""
url = "/urlCategories"
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def update_url_categories(
self,
category_id: str,
action: str = None,
configured_name: str = None,
urls: list = None,
db_categorized_urls: list = None,
keywords: list = None,
keywords_retaining_parent_category: list = None,
) -> json:
"""
Updates the URL category for the specified ID. If keywords are included within the request, then they will
replace existing ones for the specified URL category . If the keywords attribute is not included the request,
the existing keywords are retained. You can perform a full update for the specified URL category. However,
if attributes are omitted within the update request then clear the values for those attributes.
You can also perform an incremental update, to add or remove URLs for the specified URL category using the
action parameter.
:param category_id: (str) URL id
:param action: (str) Optional parameter. ADD_TO_LIST or REMOVE_FROM_LIST
:param configured_name: (str) Name of the custom category
:param urls: (list) List of urls
:param db_categorized_urls: (list) URL retaining parent category
:param keywords: (list)
:param keywords_retaining_parent_category: (list) List of key works
:return: (json)
"""
"""if categoryId not in valid_category_ids:
print(f'Error -> Invalid category id')
raise ValueError("Invalid category id")"""
url = f"/urlCategories/{category_id}"
parameters = {}
if action and action not in ["ADD_TO_LIST", "REMOVE_FROM_LIST"]:
logger.error(f"Invalid action: {action}")
raise ValueError("Invalid action")
else:
parameters.update({"action": action})
payload = {
"configuredName": configured_name,
}
if keywords_retaining_parent_category:
payload.update(
keywordsRetainingParentCategory=keywords_retaining_parent_category
)
if keywords:
payload.update(keywords=keywords)
if configured_name:
payload.update(configuredName=configured_name)
if urls:
payload.update(urls=urls)
if db_categorized_urls:
payload.update(dbCategorizedUrls=db_categorized_urls)
response = self.hp_http.put_call(
url,
params=parameters,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_url_categories(
self,
category_id: str,
) -> requests.Response:
"""
Deletes the custom URL category for the specified ID. You cannot delete a custom category while it is being
used by a URL policy or NSS feed. Also, predefined categories cannot be deleted.
:param category_id: (inst) Category ID
:return: (requests.Response)
"""
url = f"/urlCategories/{category_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
    def delete_url_filtering_rules(
        self,
        rule_id: int,
    ) -> requests.Response:
        """
        Deletes the URL Filtering Policy rule with the specified ID.

        :param rule_id: (int) Rule Id

        :return: (request.Response)
        """
        url = f"/urlFilteringRules/{rule_id}"
        response = self.hp_http.delete_call(
            url,
            cookies=self.cookies,
            error_handling=True,
            headers=self.headers,
        )
        return response
def list_url_categories_url_quota(self) -> json:
"""
Gets information on the number of unique URLs that are currently provisioned for your organization as well as
how many URLs you can add before reaching that number.
:return: (json)
"""
url = "/urlCategories/urlQuota"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_url_categories_id(
self,
category_id: int,
) -> json:
"""
Gets the URL category information for the specified ID
:param category_id: (int)
:return: (json)
"""
url = f"/urlCategories/{category_id}"
if category_id not in valid_category_ids:
logger.error(f"Invalid Category ID: {category_id}")
raise ValueError("Invalid Category ID")
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def url_lookup(
self,
url_list: list,
) -> list:
"""
Method to look up the categorization of the given list of URLs, ["abc.com","zyz.com"]
:param url_list: (list) List of urls
:return: (list)
"""
result = []
url = "/urlLookup"
# Verify urls format
list(set(url_list))
# Rate limit 1/sec and 400 hr and 100 URLs per call
list_of_lists = [url_list[i : i + 100] for i in range(0, len(url_list), 100)]
for item in list_of_lists:
response = self.hp_http.post_call(
url,
payload=item,
cookies=self.cookies,
headers=self.headers,
error_handling=True,
)
result.append(response.json())
time.sleep(1)
final_result = []
for i in result:
for j in i:
final_result.append(j)
return final_result
# URL filtering Policies
def list_url_filtering_rules(self) -> json:
"""
Gets a list of all of URL Filtering Policy rules
:return: (json)
"""
url = "/urlFilteringRules"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
    def add_url_filtering_rules(  # FIXME: docstring lists params that aren't options and some params I don't know what their typehint should be.
        self,
        name: str,
        order: int,
        protocols: str,
        state: str,
        action: str,
        url_categories: list = None,
        request_methods: list = None,
        description=None,
        groups: list = None,
        locations: list = None,
        departments: list = None,
        users: list = None,
        rank: int = 7,
        location_groups=None,
        enforce_time_validity: bool = False,
        validity_end_time=None,
        validity_start_time=None,
        validity_time_zone_id=None,
        cbi_profile_id: int = 0,
        block_override: bool = False,
        **kwargs,
    ) -> json:
        """
        Adds a URL Filtering Policy rule. If you are using the Rank feature, refer to About Admin Rank to
        determine which value to provide for rank when adding a policy rule. If you are not using Admin Rank,
        the rank value must be 7.

        :param name: (str) Name of the rule
        :param order: (int) Rule order
        :param protocols: (str) Possible values: SMRULEF_ZPA_BROKERS_RULE, ANY_RULE, TCP_RULE, UDP_RULE,
            DOHTTPS_RULE, TUNNELSSL_RULE, HTTP_PROXY, FOHTTP_RULE, FTP_RULE, HTTPS_RULE, HTTP_RULE, SSL_RULE,
            TUNNEL_RULE
        :param state: (str) enabled/disabled
        :param action: (str) Allow, Caution, Block
        :param url_categories: (list) List of URL categories for which rule must be applied
        :param request_methods: (list) Request method for which the rule must be applied. If not set, rule
            will be applied to all methods
        :param description: (str) Additional information about the URL Filtering rule
        :param groups: (list) Name-ID pairs of groups for which rule must be applied
        :param locations: (list) Each element is a dictionary: Name-ID pairs of locations for which rule must
            be applied
        :param departments: (list) Name-ID pairs of departments for which rule will be applied
        :param users: (list) Name-ID pairs of users for which rule must be applied
        :param rank: (int) Admin rank of the admin who creates this rule
        :param location_groups: (list) Name-ID pairs of location groups for which rule must be applied
        :param enforce_time_validity: (bool) When True, the validity_* fields below are also sent.
        :param validity_end_time: Epoch timestamp at which the rule stops being enforced
        :param validity_start_time: Epoch timestamp at which the rule starts being enforced
        :param validity_time_zone_id: Time zone ID used to interpret the validity window
        :param cbi_profile_id: (int) Cloud Browser Isolation profile ID
        :param block_override: (bool) When set to true, a 'BLOCK' action triggered by the rule could be
            overridden. If true and both overrideGroup and overrideUsers are not set, the BLOCK triggered by
            this rule could be overridden for any users. If blockOverride is not set, 'BLOCK' action cannot
            be overridden.

        The following attributes are not named parameters; pass them through **kwargs with the API's
        camelCase names and they are forwarded verbatim into the request body:

        :param timeWindows: (list) Name-ID pairs of time interval during which rule must be enforced.
        :param endUserNotificationUrl: (str) URL of end user notification page to be displayed when the rule
            is matched. Not applicable if either 'overrideUsers' or 'overrideGroups' is specified.
        :param overrideUsers: Name-ID pairs of users for which this rule can be overridden. Applicable only if
            blockOverride is set to 'true', action is 'BLOCK' and overrideGroups is not set. If this
            overrideUsers is not set, 'BLOCK' action can be overridden for any user.
        :param overrideGroups: Name-ID pairs of groups for which this rule can be overridden. Applicable only
            if blockOverride is set to 'true' and action is 'BLOCK'. If this overrideGroups is not set,
            'BLOCK' action can be overridden for any group

        :return: (json)
        """
        url = "/urlFilteringRules"
        # Always-present rule attributes; optional scoping attributes are added below
        # only when supplied, so omitted scopes default to "any" on the service side.
        payload = {
            "blockOverride": block_override,
            "cbiProfileId": cbi_profile_id,
            "description": description,
            "enforceTimeValidity": enforce_time_validity,
            "name": name,
            "order": order,
            "protocols": protocols,
            "urlCategories": url_categories,
            "state": state,
            "rank": rank,
            "action": action,
        }
        # Extra attributes (e.g. timeWindows, endUserNotificationUrl, overrideUsers,
        # overrideGroups) are forwarded verbatim into the request body.
        payload.update(kwargs)
        if locations:
            payload.update(locations=locations)
        if location_groups:
            payload.update(locationGroups=location_groups)
        if groups:
            payload.update(groups=groups)
        if departments:
            payload.update(departments=departments)
        if users:
            payload.update(users=users)
        if request_methods:
            payload.update(requestMethods=request_methods)
        # Validity window fields are only meaningful when time validity is enforced.
        if enforce_time_validity:
            payload.update(validityStartTime=validity_start_time)
            payload.update(validityEndTime=validity_end_time)
            payload.update(validityTimeZoneId=validity_time_zone_id)
        response = self.hp_http.post_call(
            url,
            payload=payload,
            cookies=self.cookies,
            error_handling=True,
            headers=self.headers,
        )
        return response.json()
def update_url_filtering_rules(
self,
rule_id: int,
**kwargs,
) -> json:
url = f"/urlFilteringRules/{rule_id}"
payload = kwargs
response = self.hp_http.put_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
# User Management
def list_departments(
self,
department_id: int = None,
) -> json or list:
"""
Gets a list of departments. The search parameters find matching values within the "name" or "comments"
attributes. if ID, gets the department for the specified ID
:param department_id: (int) department ID
:return: (json or list)
"""
if not department_id:
url = "/departments?pageSize=10000"
return self._obtain_all(url)
else:
url = f"/departments/{department_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_groups(
self,
group_id: int = None,
) -> json or list:
"""
Gets a list of groups if ID, gets the group for the specified ID
:param group_id: group ID
:return: (json)
"""
if not group_id:
url = "/groups?pageSize=10000"
return self._obtain_all(url)
else:
url = f"/groups/{group_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_users(
self,
user_id: int = None,
query: str = None,
) -> json or list:
"""
Gets a list of all users and allows user filtering by name, department, or group. The name search parameter
performs a partial match. The dept and group parameters perform a 'starts with' match. if ID,
gets user information for the specified ID
:param user_id: (int) user ID
:param query: (str)
:return: (json or list)
"""
url = "/users?pageSize=1000"
if user_id:
url = f"/users/{user_id}"
return self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
).json()
elif query:
url = f"/users?{query}&pageSize=1000"
return self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
).json()
return self._obtain_all(url)
def add_users(
self,
name: str,
email: str,
groups: list,
department: dict,
comments: str,
password: str,
admin_user: bool = False,
) -> json:
"""
Adds a new user. A user can belong to multiple groups, but can only belong to one department.
:param name: (str) user name
:param email: (str) user email address
:param groups: (list) List each member is a dictionary, key id, value name [{"id":1234, "name":"guest-wifi"}]
:param department: (dict) key is the id and value is the name {"id":1234, "name":"guests"}
:param comments: (str) Comments
:param password: (str) Password,
:param admin_user: (bool) True if user is admin user. default False
:return: (json)
"""
url = "/users"
payload = {
"name": name,
"email": email,
"groups": groups,
"department": department,
"comments": comments,
"password": password,
"adminUser": admin_user,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_bulk_users(
self,
user_ids: list,
) -> json:
"""
Bulk delete users up to a maximum of 500 users per request. The response returns the user IDs that were
successfully deleted.
:param user_ids: (list) List of user IDS to be deleted. Max 500 per bulk delete.
:return: (json)
"""
url = "/users/bulkDelete"
if len(user_ids) < 500:
payload = {"ids": user_ids}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
else:
raise ValueError("Maximum 500 users per request")
# Location Management
def list_locations(
self,
location_id: int = None,
) -> json:
"""
Gets locations only, not sub-locations. When a location matches the given search parameter criteria only its
parent location is included in the result set, not its sub-locations.
:param location_id: (int) Location id
:return: (json)
"""
url = "/locations"
if location_id:
url = f"/locations/{location_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_sublocations(
self,
location_id: int,
) -> json:
"""
Gets the sub-location information for the location with the specified ID
:param location_id: (int) Location id
:return: (json)
"""
url = "/locations"
if location_id:
url = f"/locations/{location_id}/sublocations"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_locations_groups(self) -> json:
"""
Gets information on location groups
:return: (json)
"""
url = "/locations/groups"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_bulk_locations(
self,
location_ids: list,
) -> json:
"""
Bulk delete locations up to a maximum of 100 users per request. The response returns the location IDs that
were successfully deleted.
:param location_ids: (list) List of location IDs
:return: (json)
"""
url = "/locations/bulkDelete"
if len(location_ids) < 100:
payload = {"ids": location_ids}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
else:
raise ValueError("Maximum 100 locations per request")
def delete_locations(
self,
location_id: int,
) -> requests.Response:
"""
Deletes the location or sub-location for the specified ID
:param location_id: (int) location ID
:return: (request.Response object)
"""
url = f"/locations/{location_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
# Traffic Forwarding
def list_gre_tunnels(
self,
gre_tunnel_id: int = None,
) -> json:
"""
Gets the GRE tunnel information for the specified ID
:param gre_tunnel_id: (int) Optional. The unique identifier for the GRE tunnel
:return: (json)
"""
url = "/greTunnels"
if gre_tunnel_id:
url = f"/greTunnels/{gre_tunnel_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_gre_tunnels(
self,
source_ip: str,
primary_dest_vip: dict,
secondary_dest_vip: dict,
internal_ip_range: str,
within_country: bool,
comment: str,
ip_unnumbered: bool,
) -> json:
"""
Adds a GRE tunnel configuration.
:param source_ip: (str) The source IP address of the GRE tunnel. This is typically a static IP address in the
organization or SD-WAN. This IP address must be provisioned within the Zscaler service using the /staticIP
endpoint.
:param primary_dest_vip: (dict) {id:value} where value is integer: Unique identifier of the GRE primary VIP
:param secondary_dest_vip: (dict) {id:value} where value is integer: Unique identifier of the GRE secondary VIP
:param internal_ip_range: (str) The start of the internal IP address in /29 CIDR range
:param within_country: (bool) Restrict the data center virtual IP addresses (VIPs) only to those within the
same country as the source IP address
:param comment: (str) Additional information about this GRE tunnel
:param ip_unnumbered: (bool?) This is required to support the automated SD-WAN provisioning of GRE tunnels,
when set to True gre_tun_ip and gre_tun_id are set to null
:return: (json)
"""
url = "/greTunnels"
payload = {
"sourceIp": source_ip,
"primaryDestVip": primary_dest_vip,
"secondaryDestVip": secondary_dest_vip,
"internalIpRange": internal_ip_range,
"withinCountry": within_country,
"comment": comment,
"ipUnnumbered": ip_unnumbered,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_gre_validate_and_get_available_internal_ip_ranges(self) -> json:
"""
Gets the next available GRE tunnel internal IP address ranges
:return: (json) List of available IP addresses
"""
url = "/greTunnels/availableInternalIpRanges"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_gre_recommended_vips(
self,
query: str,
) -> json:
"""
Gets a list of recommended GRE tunnel virtual IP addresses (VIPs), based on source IP address or
latitude/longitude coordinates.
:param query: (str) URL query. Example:
:return: (json) List of available IP addresses
"""
url = f"/vips/recommendedList?{query}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_gre_validate_ip(
self,
ip: str,
) -> json:
"""
Gets the static IP address and location mapping information for the specified GRE tunnel IP address
:param ip: (str) IP address of the GRE tunnel.
:return: (json)
"""
url = f"/greTunnels/validateIP/{ip}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_vpn_credentials(
self,
vpn_id: int = None,
) -> json:
"""
Gets VPN credentials that can be associated to locations.
:param vpn_id: (int) Optional. If specified, get VPN credentials for the specified ID.
"""
url = "/vpnCredentials"
if vpn_id:
url = f"/vpnCredentials/{vpn_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_vpn_credentials(
self,
fqdn: str,
pre_shared_key: str,
auth_type: str = "UFQDN",
comments: str = None,
) -> json:
"""
Adds VPN credentials that can be associated to locations.
:param fqdn: (str) Example [email protected]
:param pre_shared_key: (str) Pre-shared key. This is a required field for UFQDN and IP auth type
:param auth_type: (str) VPN authentication type.
valid options CN, IP, UFQDN,XAUTH
:param comments: (str) Additional information about this VPN credential.
:return: (json)
"""
url = "/vpnCredentials"
payload = {
"type": auth_type,
"fqdn": fqdn,
"preSharedKey": pre_shared_key,
}
if comments:
payload.update(comments=comments)
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_vpn_credentials(
self,
vpn_id: int,
) -> requests.Response: # TODO: Move to returning json
"""
Deletes the VPN credentials for the specified ID.
:param vpn_id: (int) The unique identifier for the VPN credential.
:return: (requests.Response object)
"""
url = f"/vpnCredentials/{vpn_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
def list_static_ip(
self,
ip_id: int = None,
) -> json:
"""
Gets all provisioned static IP addresses.
:param ip_id: (str) Optional. If specified, get IP address for the specified id
:return: (json)
"""
url = "/staticIP"
if ip_id:
url = f"/staticIP/{ip_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_static_ip(
self,
ip_address: str,
geo_override: bool = False,
routable_ip: bool = True,
latitude: float = 0,
longitude: float = 0,
comment: str = "",
) -> json:
"""
Adds a static IP address
:param ip_address: (str) The static IP address
:param geo_override: (bool) If not set, geographic coordinates and city are automatically determined from the
IP address. Otherwise, the latitude and longitude coordinates must be provided.
:param routable_ip: (bool) Indicates whether a non-RFC 1918 IP address is publicly routable. This attribute is
ignored if there is no ZIA Private Service Edge associated to the organization.
:param latitude: (float) Required only if the geoOverride attribute is set. Latitude with 7 digit precision
after decimal point, ranges between -90 and 90 degrees.
:param longitude: (float) Required only if the geoOverride attribute is set. Longitude with 7 digit precision
after decimal point, ranges between -180 and 180 degrees.
:param comment: (str) Additional information about this static IP address
:return: (json)
"""
url = "/staticIP"
payload = {
"ipAddress": ip_address,
"latitude": latitude,
"longitude": longitude,
"routableIP": routable_ip,
"comment": comment,
}
if geo_override:
payload.update(geoOverrride=geo_override)
payload.update(latitude=latitude)
payload.update(longitude=longitude)
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_static_ip(
self,
ip_address_id: int,
) -> requests.Response:
"""
Deletes the static IP address for the specified ID.
:param ip_address_id: (int) The unique identifier for the provisioned static IP address.
:return: (request.Response object))
"""
url = f"/staticIP/{ip_address_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
# User Authentication Settings
def list_exempted_urls(self) -> json:
"""
Gets a list of URLs that were exempted from cookie authentication
:return: (json)
"""
url = "/authSettings/exemptedUrls"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_exempted_urls(
self,
urls: list,
) -> json:
"""
Adds URLs to the cookie authentication exempt list to the list
:param urls: (list) List of urls. Example ['url1','url2']
:return: (json)
"""
url = "/authSettings/exemptedUrls?action=ADD_TO_LIST"
payload = {"urls": urls}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_exempted_urls(
self,
urls: list,
) -> json:
"""
Removed URLs to the cookie authentication exempt list to the list
:param urls: (list) List of urls. Example ['url1','url2']
:return: (json)
"""
url = "/authSettings/exemptedUrls"
parameters = {"action": "REMOVE_FROM_LIST"}
payload = {"urls": urls}
response = self.hp_http.post_call(
url,
params=parameters,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
# Security Policy Settings
def list_security_whitelisted_urls(self) -> json:
"""
Gets a list of white-listed URLs
:return: (json)
"""
url = "/security"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def update_security_whitelisted_urls(
self,
urls: list,
) -> json:
"""
Updates the list of white-listed URLs. This will overwrite a previously-generated white list. If you need to
completely erase the white list, submit an empty list.
:param urls: (list) List of urls ['www.zscaler.com', 'www.example.com']
:return: (json)
"""
url = "/security"
payload = {"whitelistUrls": urls}
response = self.hp_http.put_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_security_blacklisted_urls(self) -> json:
"""
Gets a list of white-listed URLs
:return: (json)
"""
url = "/security/advanced"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def update_security_blacklisted_urls(
self,
urls: list,
) -> json:
"""
Updates the list of black-listed URLs. This will overwrite a previously-generated black list. If you need to
completely erase the black list, submit an empty list.
:param urls: (list)
:return: (json)
"""
url = "/security/advanced"
payload = {"blacklistUrls": urls}
response = self.hp_http.put_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_security_blacklist_urls(
self,
urls: list,
) -> requests.Response: # TODO: Move to return json
"""
Adds a URL from the black list. To add a URL to the black list.
:param urls: (list) List of urls
:return: (request.Response object)
"""
url = "/security/advanced/blacklistUrls"
parameters = {"action": "ADD_TO_LIST"}
payload = {"blacklistUrls": urls}
response = self.hp_http.post_call(
url,
payload=payload,
params=parameters,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
def remove_security_blacklist_urls(
self,
urls: list,
) -> json:
"""
Removes a URL from the black list.
:param urls: (list) List of urls
:return: (json)
"""
url = "/security/advanced/blacklistUrls"
parameters = {"action": "REMOVE_FROM_LIST"}
payload = {"blacklistUrls": urls}
response = self.hp_http.post_call(
url,
payload=payload,
params=parameters,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
# DLP Policies
def list_dlp_dictionaries(
self,
dlp_dic_id: int = None,
) -> json:
"""
Gets a list of all DLP Dictionaries.
:param dlp_dic_id: (int) dlp dictionary id ( optional parameter)
:return: (json)
"""
url = "/dlpDictionaries"
if dlp_dic_id:
url = f"/dlpDictionaries/{dlp_dic_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_dlp_dictionaries_lite(self) -> json:
"""
Gets a list of all DLP Dictionary names and ID's only. T
:return: (json)
"""
url = "/dlpDictionaries/lite"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def validate_dlp_pattern(
self,
pattern: str,
) -> json:
"""
Validates the pattern used by a Pattern and Phrases DLP dictionary type, and provides error information if
the pattern is invalid.
:param pattern: (str) Regex pattern
:return: (json)
"""
payload = pattern
url = "/dlpDictionaries/validateDlpPattern"
response = self.hp_http.post_call(
url,
cookies=self.cookies,
payload=payload, # TODO: payload is typically dict but here it is str?
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_dlp_dictionaries(
self,
dlp_dic_id: int,
) -> requests.Response:
"""
Deletes the custom DLP category for the specified ID. You cannot delete predefined DLP dictionaries. You
cannot delete a custom dictionary while it is being used by a DLP Engine or policy. Also, predefined DLP
dictionaries cannot be deleted.
:param dlp_dic_id: (int) dlp dictionary ID
:return: (requests.Response object)
"""
url = f"/dlpDictionaries/{dlp_dic_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
def add_dlp_dictionaries(
self,
dlp_dic_name: str,
custom_phrase_match_type: str = "MATCH_ANY_CUSTOM_PHRASE_PATTERN_DICTIONARY",
description: str = None,
phrases: list = None,
patterns: list = None,
) -> json:
"""
Adds a new custom DLP dictionary that uses either Patterns and/or Phrases.
:param dlp_dic_name: (str) Name
:param custom_phrase_match_type: (str) customPhraseMatchType
:param description: (str) description
:param phrases: (list) list of phrases. valid example:[
{"action": "PHRASE_COUNT_TYPE_UNIQUE", "phrase": "string"},
{"action": "PHRASE_COUNT_TYPE_UNIQUE", "phrase": "string"}
]
:param patterns: (list) list of patterns. valid example:[
{"action": "PATTERN_COUNT_TYPE_UNIQUE", "phrase": "string"},
{"action": "PATTERN_COUNT_TYPE_UNIQUE", "phrase": "string"}
]
:return: (json)
"""
if phrases is not None:
for i in phrases:
if i["action"] not in [
"PHRASE_COUNT_TYPE_UNIQUE",
"PHRASE_COUNT_TYPE_ALL",
]:
raise ValueError("Invalid action")
if patterns is not None:
for k in patterns:
if k["action"] not in [
"PATTERN_COUNT_TYPE_UNIQUE",
"PATTERN_COUNT_TYPE_ALL",
]:
raise ValueError("Invalid action")
if custom_phrase_match_type not in [
"MATCH_ALL_CUSTOM_PHRASE_PATTERN_DICTIONARY",
"MATCH_ANY_CUSTOM_PHRASE_PATTERN_DICTIONARY",
]:
raise ValueError("Invalid customPhraseMatchType")
url = "/dlpDictionaries"
payload = {
"name": dlp_dic_name,
"description": description,
"confidenceThreshold": None,
"customPhraseMatchType": custom_phrase_match_type,
"dictionaryType": "PATTERNS_AND_PHRASES",
"phrases": phrases,
"patterns": patterns,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_dlp_engines(
self,
dlp_engine_id: int = None,
) -> json:
"""
Get a list of DLP engines.
:param dlp_engine_id: (int) Optional value. The unique identifier for the DLP engine
:return: (json)
"""
url = "/dlpEngines"
if dlp_engine_id:
url = f"/dlpEngines/{dlp_engine_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_dlp_exact_data_match_schemas(self) -> json:
"""
Exact Data Match (EDM) templates (or EDM schemas) allow the Zscaler service to identify a record from a
structured data source that matches predefined criteria. Using the Index Tool, you can create an EDM template
that allows you to define the criteria (i.e., define the tokens) for your data records by importing a CSV
file. After the data is defined and submitted within the tool, you can then apply the EDM template to a custom
DLP dictionary or engine. This endpoint gets the EDM templates for all Index Tools used by the organization
:return: (json)
"""
url = "/dlpExactDataMatchSchemas"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_dlp_notification_templates(
self,
template_id: int = None,
) -> json:
"""
Gets a list of DLP notification templates
:param template_id: (int) Optional value. The unique identifier for a DLP notification template
:return: (json)
"""
url = "/dlpNotificationTemplates"
if template_id:
url = f"/dlpNotificationTemplates/{template_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_dlp_notification_templates(
self,
name: str,
subject: str,
plain_text_message: str,
html_message: str,
attach_content: bool = True,
tls_enabled: bool = True,
) -> json:
"""
:param name: (str) The DLP notification template name
:param subject: (str) The Subject line that is displayed within the DLP notification template
:param plain_text_message: (str) The template for the plain text UTF-8 message body that must be displayed in
the DLP notification email.
:param html_message: (str) The template for the HTML message body that myst tbe displayed in the DLP
notification email
:param attach_content: (bool) if set to True, the content that is violation is attached to the DLP
notification email
:param tls_enabled: (bool) If set to True tls will be used to send email.
:return: (json)
"""
url = "/dlpNotificationTemplates"
payload = {
"name": name,
"subject": subject,
"tlsEnabled": tls_enabled,
"attachContent": attach_content,
"plainTextMessage": plain_text_message,
"htmlMessage": html_message,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_dlp_notification_templates(
self,
template_id: int,
) -> requests.Response: # TODO: return json instead
"""
Deletes a DLP notification template
:param template_id: (int) The unique identifies for the DLP notification template
:return: (requests.Response Object)
"""
url = f"/dlpNotificationTemplates/{template_id}"
response = self.hp_http.delete_call(
url=url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
def list_icap_server(
self,
icap_server_id: int = None,
) -> json:
"""
Gets a list of DLP notification templates
:param icap_server_id: (int) Optional value. The unique identifier for the DLP server using ICAP
:return: (json)
"""
url = "/icapServers"
if icap_server_id:
url = f"/icapServers/{icap_server_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_idm_profile(
self,
profile_id: int = None,
) -> json:
"""
List all the IDM templates for all Index Tools used by the organization. If profileId, it lists the IDM
template information for the specified ID.
:param profile_id: (int) Optional value. The unique identifier for the IDM template (or profile)
:return: (json)
"""
if profile_id:
url = f"/idmprofile/{profile_id}"
else:
url = "/idmprofile"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_web_dlp_rules(
self,
rule_id: int = None,
) -> json:
"""
list DLP policy rules, excluding SaaS Security API DLP policy rules. If ruleId, list DLP policy rule
information for the specified ID
:param rule_id: (int) Optional value. The unique identifier for theDLP rule
:return: (json)
"""
url = "/webDlpRules"
if rule_id:
url = f"/webDlpRules/{rule_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_web_dlp_rules(
self,
rule_id: int,
) -> json:
"""
Deletes a DLP policy rule. This endpoint is not applicable to SaaS Security API DLP policy rules.
:param rule_id: (int) The unique identifier for the DLP policy rule.
:return: (json)
"""
url = f"/webDlpRules/{rule_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
# Firewall Policies
def list_network_services_lite(
self,
) -> json:
"""
Gets a summary list of all network service groups.
:return: (json)
"""
response = self.hp_http.get_call(
"/networkServices/lite",
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_network_services(
self,
service_id: int = None,
) -> json:
"""
Gets a list of all network service groups. The search parameters find matching values within the "name" or
"description" attributes.
:param service_id: (int)
:return: (json)
"""
url = "/networkServices"
if service_id:
url = f"/networkServices/{service_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_network_services(
self,
name: str,
tag: str = None,
src_tcp_ports: list = None,
dest_tcp_ports: list = None,
src_udp_ports: list = None,
dest_udp_ports: list = None,
service_type: str = "CUSTOM",
description: str = None,
is_name_l10n_tag: bool = False,
) -> requests.Response: # TODO: return json
"""
Adds a new network service.
:param name: (str) Name
:param tag: (str)
:param src_tcp_ports:(list) Each element is [{"start": int, "end": int}]
:param dest_tcp_ports:(list) Each element is [{"start": int, "end": int}]
:param src_udp_ports:(list) Each element is [{"start": int, "end": int}]
:param dest_udp_ports:(list) Each element is [{"start": int, "end": int}]
:param service_type: (str) STANDARD|PREDEFINE|CUSTOM
:param description: (str) Description
:param is_name_l10n_tag: (bool)
:return: (requests.Response Object)
"""
url = "/networkServices"
payload = {
"id": 0,
"name": name,
"tag": tag,
"srcTcpPorts": src_tcp_ports,
"destTcpPorts": dest_tcp_ports,
"srcUdpPorts": src_udp_ports,
"destUdpPorts": dest_udp_ports,
"type": service_type,
"description": description,
"isNameL10nTag": is_name_l10n_tag,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
def delete_network_services(
self,
service_id: int,
) -> requests.Response:
"""
:param service_id: (int) The unique identifier for the network service
:return: (requests.Response Object)
"""
url = f"/networkServices/{service_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=False,
headers=self.headers,
)
return response
def list_firewall_filtering_rules(
self,
rule_id: int = None,
) -> json:
"""
Gets all rules in the Firewall Filtering policy.
:param rule_id: (int)
:return: (json)
"""
url = "/firewallFilteringRules"
if rule_id:
url = f"/firewallFilteringRules/{rule_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_firewall_filtering_rules(
self,
name: str,
order: int,
state: str,
action: str,
description: str = None,
default_rule: bool = False,
predefined: bool = False,
src_ips: list = None,
dest_addresses: list = None,
dest_ip_groups: list = None,
src_ip_groups: list = None,
dest_ip_categories: list = None,
labels=None,
nw_services: list = None,
rank: int = 0,
) -> requests.Response:
"""
:param name: (str) Name of the Firewall Filtering policy rule ["String"]
:param order: (int), Rule order number of the Firewall Filtering policy rule
:param state: (str) Possible values : DISABLED or ENABLED
:param action: (str) Possible values: ALLOW, BLOCK_DROP, BLOCK_RESET, BLOCK_ICMP, EVAL_NWAPP
:param description: (str) Additional information about the rule
:param default_rule: (bool) Default is false.If set to true, the default rule is applied
:param predefined: (bool)
:param src_ips: (list) List of source IP addresses
:param dest_addresses: (list) List of destination addresses
:param dest_ip_groups: (list) List of user-defined destination IP address groups
:param src_ip_groups: (list) List of user defined source IP address groups
:param dest_ip_categories:(list) list of destination IP categories
:param labels: (?)
:param nw_services: (list) List of user-defined network services on with the rule is applied
:param rank: (int), Admin rank of the Firewall Filtering policy rule
:return: (requests.Response Object)
"""
url = "/firewallFilteringRules"
payload = {
"accessControl": "READ_WRITE",
"enableFullLogging": False,
"name": name,
"order": order,
"rank": rank,
"action": action,
"state": state,
"predefined": predefined,
"defaultRule": default_rule,
"description": description,
}
if src_ips:
payload.update(srcIps=src_ips)
if src_ip_groups:
payload.update(srcIpGroups=src_ip_groups)
if dest_addresses:
payload.update(destAddresses=dest_addresses)
if dest_ip_groups:
payload.update(destIpGroups=dest_ip_groups)
if labels:
payload.update(labels=labels)
if dest_ip_categories:
payload.update(destIpCategories=dest_ip_categories)
if nw_services:
payload.update(nwServices=nw_services)
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
def delete_firewall_filtering_rules(
self,
rule_id: int,
) -> requests.Response:
"""
Deletes a Firewall Filtering policy rule for the specified ID.
:param rule_id: (int) The unique identifier for the policy rule
:return: (requests.Response Object)
"""
url = f"/firewallFilteringRules/{rule_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=False,
headers=self.headers,
)
return response
def list_ip_source_groups(
self,
ip_group_id: int = None,
) -> json:
"""
Gets a list of all IP source groups. The search parameters find matching values within the "name" or
"description" attributes.
:param ip_group_id: (int) Option ip group id
:return: (json)
"""
url = "/ipSourceGroups"
if ip_group_id:
url = f"/ipSourceGroups/{ip_group_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_ip_source_groups_lite(self) -> json:
"""
Gets a name and ID dictionary of all IP source groups
:return: (json)
"""
url = "/ipSourceGroups/lite"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_ip_destination_groups(
self,
ip_group_id: int = None,
) -> json:
"""
Gets a list of all IP source groups. The search parameters find matching values within the "name" or
"description" attributes.
:param ip_group_id: (int) Option ip group id
:return: (json)
"""
url = "/ipDestinationGroups/"
if ip_group_id:
url = f"/ipDestinationGroups/{ip_group_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_ip_destination_groups_lite(self) -> json:
"""
Gets a name and ID dictionary of all IP destination groups
:return: (json)
"""
url = "/ipDestinationGroups/lite"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_ip_source_groups(
self,
name: str,
ip_addresses: list,
description: str = None,
) -> json:
"""
:param name: (str) Name
:param ip_addresses: (list) List of IP addresses
:param description: (str) description
:return: (json)
"""
url = "/ipSourceGroups"
payload = {
"name": name,
"ipAddresses": ip_addresses,
"description": description,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def delete_ip_source_groups(
self,
ip_group_id: int,
) -> requests.Response:
"""
Deletes the IP source group for the specified ID
:param ip_group_id: (int) The unique identifies for the IP source group
:return: (requests.Response Object)
"""
url = f"/ipSourceGroups/{ip_group_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
payload={},
headers=self.headers,
)
return response
def delete_ip_destination_groups(
self,
ip_group_id: int,
) -> requests.Response:
"""
Deletes the IP destination group for the specified ID
:param ip_group_id: (int) The unique identifies for the IP source group
:return: (requests.Response Object)
"""
url = f"/ipDestinationGroups/{ip_group_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
payload={},
headers=self.headers,
)
return response
def add_ip_destination_groups(
self,
name: str,
dest_ip_group_type: str,
addresses: list,
ip_categories: list = None,
countries: list = None,
description: str = None,
) -> json:
"""
:param name: (str) Name
:param dest_ip_group_type: (str) Destination IP group type. Either DSTN_IP or DSTN_FQDN or DSTN_DOMAIN
:param addresses: (list) List of Destination IP addresses within the group.
:param ip_categories: (list) List of Destination IP address URL categories. You can identify destination based
on the URL category of the domain. Default value ANY
:param countries: (list) List of destination IP address countries. You can identify destinations based on
the location of a server.Default value ANY
:param description: (str) description
"""
if dest_ip_group_type not in [
"DSTN_IP",
"DSTN_FQDN",
"DSTN_DOMAIN",
]:
raise ValueError("Invalid destination type ")
if countries:
for i in countries:
if i not in valid_countries:
raise ValueError("Invalid country ")
else:
countries = []
if ip_categories:
for j in ip_categories:
if j not in valid_category_ids:
raise ValueError("Invalid country ")
else:
ip_categories = []
url = "/ipDestinationGroups"
payload = {
"name": name,
"type": dest_ip_group_type,
"addresses": addresses,
"ipCategories": ip_categories,
"countries": countries,
"description": description,
}
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
# Device Groups
def list_devices_groups(
self,
query: str = None,
) -> json:
"""
Gets a list of device groups
:param query: (str)
:return: (json)
"""
url = "/deviceGroups"
if query:
url = f"/deviceGroups?{query}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def list_devices(
self,
query: str = None,
) -> json:
"""
Gets a list of devices. Any given search parameters will be applied during device search. Search parameters
are based on device name, model, owner, OS type, and OS version. The devices listed can also be restricted
to return information only for ones belonging to specific users.
:param query: (str)
:return: (json)
"""
url = "/deviceGroups/devices"
if query:
url = f"/deviceGroups/devices?{query}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
# Rule Labels
def list_rule_labels(
self,
rule_label_id: int = None,
) -> json:
"""
Gets rule label information for the specified ID
:param rule_label_id: (int)
:return: (json)
"""
url = "/ruleLabels?pageSize=1000"
if rule_label_id:
url = f"/ruleLabels/{rule_label_id}"
response = self.hp_http.get_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_rule_label(
self,
name: str,
description: str = "",
payload: dict = None,
) -> json:
"""
Adds new rule labels with the given name
:param name: (str) name # FIXME: Not in passed attributes.
:param description: (str) description # FIXME: Not in passed attributes.
:param payload: (dict)
"""
url = "/ruleLabels"
if not payload:
payload = {"name": name, "description": description}
response = self.hp_http.post_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
payload=payload,
)
return response.json()
def delete_rule_label(self, rule_id: str):
url = f"/ruleLabels/{rule_id}"
response = self.hp_http.delete_call(
url,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response
def update_call(
self,
url: str,
payload: json,
) -> json:
"""
Generic PUT call. This call will overwrite all the configuration with the new payload
:param url: (str) url of Zscaler API call
:param payload: (json) Payload
:return: (json)
"""
response = self.hp_http.put_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json()
def add_call(
self,
url: str,
payload: json,
) -> json:
"""
Generic POST call. This call will add all the configuration with the new payload
:param url: (str) url of Zscaler API call
:param payload: (json) Payload
:return: (json)
"""
response = self.hp_http.post_call(
url,
payload=payload,
cookies=self.cookies,
error_handling=True,
headers=self.headers,
)
return response.json() | zscaler-api-talkers | /zscaler_api_talkers-6.0.0-py3-none-any.whl/zscaler_api_talkers/zia/talker.py | talker.py |
# URL / IP-destination category IDs accepted by the ZIA API.
# NOTE(review): "TRADING_BROKARAGE_INSURANCE" matches the upstream API's own
# spelling -- confirm against the API before "correcting" it.
valid_category_ids = (
    [
        "ANY",
        "NONE",
        "OTHER_ADULT_MATERIAL",
        "ADULT_THEMES",
        "LINGERIE_BIKINI",
        "NUDITY",
        "PORNOGRAPHY",
        "SEXUALITY",
        "ADULT_SEX_EDUCATION",
        "K_12_SEX_EDUCATION",
        "SOCIAL_ADULT",
        "OTHER_BUSINESS_AND_ECONOMY",
        "CORPORATE_MARKETING",
        "FINANCE",
        "PROFESSIONAL_SERVICES",
        "CLASSIFIEDS",
        "TRADING_BROKARAGE_INSURANCE",
    ]
    # Custom categories CUSTOM_00 .. CUSTOM_256 (zero-padded to two digits
    # below 100, exactly as in the original literal list).
    + [f"CUSTOM_{i:02}" for i in range(257)]
    + [
        "OTHER_DRUGS",
        "MARIJUANA",
        "OTHER_EDUCATION",
        "CONTINUING_EDUCATION_COLLEGES",
        "HISTORY",
        "K_12",
        "REFERENCE_SITES",
        "SCIENCE_AND_TECHNOLOGY",
        "OTHER_ENTERTAINMENT_AND_RECREATION",
        "ENTERTAINMENT",
        "TELEVISION_AND_MOVIES",
        "MUSIC",
        "STREAMING_MEDIA",
        "RADIO_STATIONS",
        "GAMBLING",
        "OTHER_GAMES",
        "SOCIAL_NETWORKING_GAMES",
        "OTHER_GOVERNMENT_AND_POLITICS",
        "GOVERNMENT",
        "POLITICS",
        "HEALTH",
        "OTHER_ILLEGAL_OR_QUESTIONABLE",
        "COPYRIGHT_INFRINGEMENT",
        "COMPUTER_HACKING",
        "QUESTIONABLE",
        "PROFANITY",
        "MATURE_HUMOR",
        "ANONYMIZER",
        "OTHER_INFORMATION_TECHNOLOGY",
        "TRANSLATORS",
        "IMAGE_HOST",
        "FILE_HOST",
        "SHAREWARE_DOWNLOAD",
        "WEB_BANNERS",
        "WEB_HOST",
        "WEB_SEARCH",
        "PORTALS",
        "SAFE_SEARCH_ENGINE",
        "CDN",
        "OSS_UPDATES",
        "DNS_OVER_HTTPS",
        "OTHER_INTERNET_COMMUNICATION",
        "INTERNET_SERVICES",
        "DISCUSSION_FORUMS",
        "ONLINE_CHAT",
        "EMAIL_HOST",
        "BLOG",
        "P2P_COMMUNICATION",
        "REMOTE_ACCESS",
        "WEB_CONFERENCING",
        "ZSPROXY_IPS",
        "JOB_SEARCH",
        "MILITANCY_HATE_AND_EXTREMISM",
        "OTHER_MISCELLANEOUS",
        "MISCELLANEOUS_OR_UNKNOWN",
        "NEWLY_REG_DOMAINS",
        "NON_CATEGORIZABLE",
        "NEWS_AND_MEDIA",
        "OTHER_RELIGION",
        "TRADITIONAL_RELIGION",
        "CULT",
        "ALT_NEW_AGE",
        "OTHER_SECURITY",
        "ADWARE_OR_SPYWARE",
        "ENCR_WEB_CONTENT",
        "MALICIOUS_TLD",
        "OTHER_SHOPPING_AND_AUCTIONS",
        "SPECIALIZED_SHOPPING",
        "REAL_ESTATE",
        "ONLINE_AUCTIONS",
        "OTHER_SOCIAL_AND_FAMILY_ISSUES",
        "SOCIAL_ISSUES",
        "FAMILY_ISSUES",
        "OTHER_SOCIETY_AND_LIFESTYLE",
        "ART_CULTURE",
        "ALTERNATE_LIFESTYLE",
        "HOBBIES_AND_LEISURE",
        "DINING_AND_RESTAURANT",
        "ALCOHOL_TOBACCO",
        "SOCIAL_NETWORKING",
        "SPECIAL_INTERESTS",
        "SPORTS",
        "TASTELESS",
        "TRAVEL",
        "USER_DEFINED",
        "VEHICLES",
        "VIOLENCE",
        "WEAPONS_AND_BOMBS",
    ]
)
# Parent ("super") categories of the ZIA URL classification tree.
# NOTE(review): this list spells "TRADING_BROKERAGE_INSURANCE" and contains
# "DRUGS" and "LIFESTYLE", whereas valid_category_ids uses
# "TRADING_BROKARAGE_INSURANCE" and "OTHER_DRUGS" -- confirm against the
# upstream API before unifying the spellings.
super_categories = [
    "ENTERTAINMENT",
    "MUSIC",
    "OTHER_ENTERTAINMENT_AND_RECREATION",
    "RADIO_STATIONS",
    "STREAMING_MEDIA",
    "TELEVISION_AND_MOVIES",
    "NEWS_AND_MEDIA",
    "USER_DEFINED",
    "CLASSIFIEDS",
    "CORPORATE_MARKETING",
    "FINANCE",
    "TRADING_BROKERAGE_INSURANCE",
    "OTHER_BUSINESS_AND_ECONOMY",
    "PROFESSIONAL_SERVICES",
    "CONTINUING_EDUCATION_COLLEGES",
    "HISTORY",
    "K_12",
    "OTHER_EDUCATION",
    "REFERENCE_SITES",
    "SCIENCE_AND_TECHNOLOGY",
    "WEB_BANNERS",
    "CDN",
    "DNS_OVER_HTTPS",
    "FILE_HOST",
    "IMAGE_HOST",
    "OSS_UPDATES",
    "OTHER_INFORMATION_TECHNOLOGY",
    "PORTALS",
    "SAFE_SEARCH_ENGINE",
    "SHAREWARE_DOWNLOAD",
    "TRANSLATORS",
    "WEB_HOST",
    "WEB_SEARCH",
    "BLOG",
    "DISCUSSION_FORUMS",
    "INTERNET_SERVICES",
    "ONLINE_CHAT",
    "OTHER_INTERNET_COMMUNICATION",
    "P2P_COMMUNICATION",
    "REMOTE_ACCESS",
    "EMAIL_HOST",
    "WEB_CONFERENCING",
    "ZSPROXY_IPS",
    "JOB_SEARCH",
    "GOVERNMENT",
    "OTHER_GOVERNMENT_AND_POLITICS",
    "POLITICS",
    "MISCELLANEOUS_OR_UNKNOWN",
    "NEWLY_REG_DOMAINS",
    "NON_CATEGORIZABLE",
    "OTHER_MISCELLANEOUS",
    "TRAVEL",
    "VEHICLES",
    "ADULT_SEX_EDUCATION",
    "ADULT_THEMES",
    "K_12_SEX_EDUCATION",
    "LINGERIE_BIKINI",
    "NUDITY",
    "OTHER_ADULT_MATERIAL",
    "PORNOGRAPHY",
    "SEXUALITY",
    "SOCIAL_ADULT",
    "DRUGS",
    "MARIJUANA",
    "GAMBLING",
    "ANONYMIZER",
    "COMPUTER_HACKING",
    "COPYRIGHT_INFRINGEMENT",
    "MATURE_HUMOR",
    "OTHER_ILLEGAL_OR_QUESTIONABLE",
    "PROFANITY",
    "QUESTIONABLE",
    "MILITANCY_HATE_AND_EXTREMISM",
    "TASTELESS",
    "VIOLENCE",
    "WEAPONS_AND_BOMBS",
    "OTHER_GAMES",
    "SOCIAL_NETWORKING_GAMES",
    "HEALTH",
    "ALT_NEW_AGE",
    "CULT",
    "OTHER_RELIGION",
    "TRADITIONAL_RELIGION",
    "ONLINE_AUCTIONS",
    "SPECIALIZED_SHOPPING",
    "OTHER_SHOPPING_AND_AUCTIONS",
    "REAL_ESTATE",
    "FAMILY_ISSUES",
    "OTHER_SOCIAL_AND_FAMILY_ISSUES",
    "SOCIAL_ISSUES",
    "ALCOHOL_TOBACCO",
    "LIFESTYLE",
    "ART_CULTURE",
    "DINING_AND_RESTAURANT",
    "HOBBIES_AND_LEISURE",
    "OTHER_SOCIETY_AND_LIFESTYLE",
    "SOCIAL_NETWORKING",
    "SPECIAL_INTERESTS",
    "SPORTS",
    "ENCR_WEB_CONTENT",
    "MALICIOUS_TLD",
    "OTHER_SECURITY",
    "ADWARE_OR_SPYWARE",
]
# Country IDs accepted by the ZIA API: "ANY", "NONE", plus "COUNTRY_" followed
# by a (mostly ISO 3166-1 alpha-2) code.  The order below is preserved exactly
# from the original literal list, including historical codes such as AN and FX
# and the out-of-sequence placement of TL, RS, ME and the late additions.
_COUNTRY_CODES = (
    "AD AE AF AG AI AL AM AN AO AQ AR AS AT AU AW AZ "
    "BA BB BD BE BF BG BH BI BJ BM BN BO BR BS BT BV BW BY BZ "
    "CA CC CD CF CG CH CI CK CL CM CN CO CR CU CV CX CY CZ "
    "DE DJ DK DM DO DZ "
    "EC EE EG EH ER ES ET "
    "FI FJ FK FM FO FR FX "
    "GA GB GD GE GF GH GI GL GM GN GP GQ GR GS GT GU GW GY "
    "HK HM HN HR HT HU "
    "ID IE IL IN IO IQ IR IS IT "
    "JM JO JP "
    "KE KG KH KI KM KN KP KR KW KY KZ "
    "LA LB LC LI LK LR LS LT LU LV LY "
    "MA MC MD MG MH MK ML MM MN MO MP MQ MR MS MT MU MV MW MX MY MZ "
    "NA NC NE NF NG NI NL NO NP NR NU NZ "
    "OM "
    "PA PE PF PG PH PK PL PM PN PR PS PT PW PY "
    "QA "
    "RE RO RU RW "
    "SA SB SC SD SE SG SH SI SJ SK SL SM SN SO SR ST SV SY SZ "
    "TC TD TF TG TH TJ TK TM TN TO TL TR TT TV TW TZ "
    "UA UG UM US UY UZ "
    "VA VC VE VG VI VN VU "
    "WF WS "
    "YE YT "
    "RS "
    "ZA ZM "
    "ME "
    "ZW "
    "AX GG IM JE "
    "BL MF"
)
valid_countries = ["ANY", "NONE"] + [
    f"COUNTRY_{code}" for code in _COUNTRY_CODES.split()
]
# Zscaler Internet Access (ZIA) Resource Provider
The ZIA Resource Provider lets you manage [ZIA](http://github.com/zscaler/pulumi-zia) resources. To use
this package, please [install the Pulumi CLI first](https://pulumi.com/).
## Installing
This package is available for several languages/platforms:
### Node.js (JavaScript/TypeScript)
To use from JavaScript or TypeScript in Node.js, install using either `npm`:
```bash
npm install @zscaler/pulumi-zia
```
or `yarn`:
```bash
yarn add @zscaler/pulumi-zia
```
### Python
To use from Python, install using `pip`:
```bash
pip install zscaler-pulumi-zia
```
### Go
To use from Go, use `go get` to grab the latest version of the library:
```bash
go get github.com/zscaler/pulumi-zia/sdk/go/...
```
### .NET
To use from .NET, install using `dotnet add package`:
```bash
dotnet add package zscaler.PulumiPackage.Zia
```
## Configuration
The following configuration points are available for the `zia` provider:
- `zia:username` (environment variable: `ZIA_USERNAME`) - (Required) This is the API username to interact with the ZIA cloud.
- `zia:password` (environment variable: `ZIA_PASSWORD`) - (Required) This is the password for the API username to authenticate in the ZIA cloud.
- `zia:api_key` (environment variable: `ZIA_API_KEY`) - (Required) This is the API key used in combination with the ``username`` and ``password``.
- `zia:zia_cloud` (environment variable: `ZIA_CLOUD`) - (Required) The cloud name where the ZIA tenant is hosted. The supported values are:
- ``zscaler``
- ``zscalerone``
- ``zscalertwo``
- ``zscalerthree``
- ``zscloud``
- ``zscalerbeta``
- ``zscalergov``
## Reference
For detailed reference documentation, please visit [the Pulumi registry](https://www.pulumi.com/registry/packages/zia/api-docs/).
## Support
This template/solution are released under an as-is, best effort, support
policy. These scripts should be seen as community supported and Zscaler
Business Development Team will contribute our expertise as and when possible.
We do not provide technical support or help in using or troubleshooting the
components of this project through our normal support options, such as the
Zscaler support teams or ASC (Authorized Support Centers) partners and backline
support options. The underlying product used by the scripts or templates (the
Zscaler Internet Access API) is still supported, but that support covers only
product functionality, not help in deploying or using the template or script
itself. Unless explicitly tagged, all projects or work posted in our
GitHub repository at (<https://github.com/zscaler>) or sites other
than our official Downloads page on <https://support.zscaler.com>
are provided under the best effort policy.
| zscaler-pulumi-zia | /zscaler_pulumi_zia-0.0.3a1675095059.tar.gz/zscaler_pulumi_zia-0.0.3a1675095059/README.md | README.md |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProviderArgs', 'Provider']
@pulumi.input_type
class ProviderArgs:
    def __init__(__self__, *,
                 api_key: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 zia_cloud: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Provider resource.
        """
        # Each argument falls back to its environment variable when not passed
        # explicitly; arguments that remain unset are omitted from the bag.
        for prop_name, value, env_var in (
            ("api_key", api_key, "ZIA_API_KEY"),
            ("password", password, "ZIA_PASSWORD"),
            ("username", username, "ZIA_USERNAME"),
            ("zia_cloud", zia_cloud, "ZIA_CLOUD"),
        ):
            if value is None:
                value = _utilities.get_env(env_var)
            if value is not None:
                pulumi.set(__self__, prop_name, value)

    @property
    @pulumi.getter(name="apiKey")
    def api_key(self) -> Optional[pulumi.Input[str]]:
        """API key used together with the username/password credentials."""
        return pulumi.get(self, "api_key")

    @api_key.setter
    def api_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_key", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """Password for the API username."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """API username used to interact with the ZIA cloud."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="ziaCloud")
    def zia_cloud(self) -> Optional[pulumi.Input[str]]:
        """Name of the cloud where the ZIA tenant is hosted."""
        return pulumi.get(self, "zia_cloud")

    @zia_cloud.setter
    def zia_cloud(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "zia_cloud", value)
class Provider(pulumi.ProviderResource):
    # The provider accepts its configuration either as individual keyword
    # arguments or as a single ProviderArgs bag; the real __init__ below
    # dispatches between the two overload shapes at runtime.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_key: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 zia_cloud: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        The provider type for the zia package. By default, resources use package-wide configuration
        settings, however an explicit `Provider` instance may be created and passed during resource
        construction to achieve fine-grained programmatic control over provider settings. See the
        [documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[ProviderArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The provider type for the zia package. By default, resources use package-wide configuration
        settings, however an explicit `Provider` instance may be created and passed during resource
        construction to achieve fine-grained programmatic control over provider settings. See the
        [documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
        :param str resource_name: The name of the resource.
        :param ProviderArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used and forward to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_key: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 zia_cloud: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Merge SDK-wide defaults (version, plugin download URL) into opts.
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ProviderArgs.__new__(ProviderArgs)
            # Fall back to environment variables for any config not passed
            # explicitly; api_key and password are marked as Pulumi secrets.
            if api_key is None:
                api_key = _utilities.get_env('ZIA_API_KEY')
            __props__.__dict__["api_key"] = None if api_key is None else pulumi.Output.secret(api_key)
            if password is None:
                password = _utilities.get_env('ZIA_PASSWORD')
            __props__.__dict__["password"] = None if password is None else pulumi.Output.secret(password)
            if username is None:
                username = _utilities.get_env('ZIA_USERNAME')
            __props__.__dict__["username"] = username
            if zia_cloud is None:
                zia_cloud = _utilities.get_env('ZIA_CLOUD')
            __props__.__dict__["zia_cloud"] = zia_cloud
        # Ensure the credential outputs stay encrypted in the state file.
        secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["apiKey", "password"])
        opts = pulumi.ResourceOptions.merge(opts, secret_opts)
        super(Provider, __self__).__init__(
            'zia',
            resource_name,
            __props__,
            opts)
    @property
    @pulumi.getter(name="apiKey")
    def api_key(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "api_key")
    @property
    @pulumi.getter
    def password(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "password")
    @property
    @pulumi.getter
    def username(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "username")
    @property
    @pulumi.getter(name="ziaCloud")
    def zia_cloud(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "zia_cloud")
import importlib.util
import inspect
import json
import os
import pkg_resources
import sys
import typing
import pulumi
import pulumi.runtime
from semver import VersionInfo as SemverVersion
from parver import Version as PEP440Version
def get_env(*args):
    """Return the value of the first environment variable in *args* that is
    set, or None when none of them are."""
    return next((val for val in map(os.getenv, args) if val is not None), None)
def get_env_bool(*args):
    """Look up the first set environment variable among *args* and parse it as
    a boolean; unparseable or unset values yield None."""
    raw = get_env(*args)
    if raw is None:
        return None
    # NOTE: these values are taken from https://golang.org/src/strconv/atob.go?s=351:391#L1, which is what
    # Terraform uses internally when parsing boolean values.
    if raw in ("1", "t", "T", "true", "TRUE", "True"):
        return True
    if raw in ("0", "f", "F", "false", "FALSE", "False"):
        return False
    return None
def get_env_int(*args):
    """Look up the first set environment variable among *args* and parse it
    as an int; returns None when unset or not a valid integer."""
    raw = get_env(*args)
    if raw is not None:
        try:
            return int(raw)
        except ValueError:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only a parse failure should yield None.
            return None
    return None
def get_env_float(*args):
    """Look up the first set environment variable among *args* and parse it
    as a float; returns None when unset or not a valid float."""
    raw = get_env(*args)
    if raw is not None:
        try:
            return float(raw)
        except ValueError:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only a parse failure should yield None.
            return None
    return None
def _get_semver_version():
    """Derive a semver-compatible version object for this package from its
    installed PEP 440 version string."""
    # __name__ is set to the fully-qualified name of the current module, In our case, it will be
    # <some module>._utilities. <some module> is the module we want to query the version for.
    root_package, *rest = __name__.split('.')
    # pkg_resources uses setuptools to inspect the set of installed packages. We use it here to ask
    # for the currently installed version of the root package (i.e. us) and get its version.
    # Unfortunately, PEP440 and semver differ slightly in incompatible ways. The Pulumi engine expects
    # to receive a valid semver string when receiving requests from the language host, so it's our
    # responsibility as the library to convert our own PEP440 version into a valid semver string.
    pep440_version_string = pkg_resources.require(root_package)[0].version
    pep440_version = PEP440Version.parse(pep440_version_string)
    (major, minor, patch) = pep440_version.release
    prerelease = None
    # Map PEP 440 pre-release tags (a/b/rc/dev) onto semver prerelease labels.
    if pep440_version.pre_tag == 'a':
        prerelease = f"alpha.{pep440_version.pre}"
    elif pep440_version.pre_tag == 'b':
        prerelease = f"beta.{pep440_version.pre}"
    elif pep440_version.pre_tag == 'rc':
        prerelease = f"rc.{pep440_version.pre}"
    elif pep440_version.dev is not None:
        prerelease = f"dev.{pep440_version.dev}"
    # The only significant difference between PEP440 and semver as it pertains to us is that PEP440 has explicit support
    # for dev builds, while semver encodes them as "prerelease" versions. In order to bridge between the two, we convert
    # our dev build version into a prerelease tag. This matches what all of our other packages do when constructing
    # their own semver string.
    return SemverVersion(major=major, minor=minor, patch=patch, prerelease=prerelease)
# Determine the version once and cache the value, which measurably improves program performance.
# _version is the SemverVersion object; _version_str is its string form.
_version = _get_semver_version()
_version_str = str(_version)
def get_version():
    """Return this package's version as a semver string."""
    return _version_str
def get_resource_opts_defaults() -> pulumi.ResourceOptions:
    """Default ResourceOptions pinning this SDK's version and plugin URL."""
    defaults = pulumi.ResourceOptions(
        version=get_version(),
        plugin_download_url=get_plugin_download_url(),
    )
    return defaults
def get_invoke_opts_defaults() -> pulumi.InvokeOptions:
    """Default InvokeOptions pinning this SDK's version and plugin URL."""
    defaults = pulumi.InvokeOptions(
        version=get_version(),
        plugin_download_url=get_plugin_download_url(),
    )
    return defaults
def get_resource_args_opts(resource_args_type, resource_options_type, *args, **kwargs):
    """
    Return the resource args and options given the *args and **kwargs of a resource's
    __init__ method.
    """
    resource_args = None
    opts = None
    remaining = list(args)
    # A leading positional argument of the args type is the resource args bag.
    if remaining and isinstance(remaining[0], resource_args_type):
        resource_args = remaining.pop(0)
    # The next positional argument may be the resource options.
    if remaining and isinstance(remaining[0], resource_options_type):
        opts = remaining[0]
    # Fall back to keyword arguments for whichever was not found positionally.
    if resource_args is None:
        candidate = kwargs.get("args")
        if isinstance(candidate, resource_args_type):
            resource_args = candidate
    if opts is None:
        opts = kwargs.get("opts")
    return resource_args, opts
# Temporary: just use pulumi._utils.lazy_import once everyone upgrades.
def lazy_import(fullname):
    """Lazily import *fullname*, preferring pulumi's own lazy_import and
    falling back to the local copy when it is unavailable."""
    import pulumi._utils as u
    impl = getattr(u, 'lazy_import', None)
    return (impl if impl is not None else _lazy_import_temp)(fullname)
# Copied from pulumi._utils.lazy_import, see comments there.
def _lazy_import_temp(fullname):
m = sys.modules.get(fullname, None)
if m is not None:
return m
spec = importlib.util.find_spec(fullname)
m = sys.modules.get(fullname, None)
if m is not None:
return m
loader = importlib.util.LazyLoader(spec.loader)
spec.loader = loader
module = importlib.util.module_from_spec(spec)
m = sys.modules.get(fullname, None)
if m is not None:
return m
sys.modules[fullname] = module
loader.exec_module(module)
return module
class Package(pulumi.runtime.ResourcePackage):
    """ResourcePackage that constructs this provider from its registration info."""

    def __init__(self, pkg_info):
        super().__init__()
        self.pkg_info = pkg_info

    def version(self):
        return _version

    def construct_provider(self, name: str, typ: str, urn: str) -> pulumi.ProviderResource:
        info = self.pkg_info
        if typ != info['token']:
            raise Exception(f"unknown provider type {typ}")
        provider_cls = getattr(lazy_import(info['fqn']), info['class'])
        return provider_cls(name, pulumi.ResourceOptions(urn=urn))
class Module(pulumi.runtime.ResourceModule):
    """ResourceModule that rehydrates this SDK's resources from their URNs."""

    def __init__(self, mod_info):
        super().__init__()
        self.mod_info = mod_info

    def version(self):
        return _version

    def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
        resource_cls_name = self.mod_info['classes'].get(typ, None)
        if resource_cls_name is None:
            raise Exception(f"unknown resource type {typ}")
        resource_cls = getattr(lazy_import(self.mod_info['fqn']), resource_cls_name)
        return resource_cls(name, pulumi.ResourceOptions(urn=urn))
def register(resource_modules, resource_packages):
    """Register this SDK's resource modules and packages (given as JSON
    strings) with the pulumi runtime so the engine can rehydrate resources."""
    mods = json.loads(resource_modules)
    pkgs = json.loads(resource_packages)
    for pkg_info in pkgs:
        pulumi.runtime.register_resource_package(pkg_info['pkg'], Package(pkg_info))
    for mod_info in mods:
        pulumi.runtime.register_resource_module(
            mod_info['pkg'], mod_info['mod'], Module(mod_info)
        )
# Callable TypeVar used to type the lift_output_func decorator below.
_F = typing.TypeVar('_F', bound=typing.Callable[..., typing.Any])
def lift_output_func(func: typing.Any) -> typing.Callable[[_F], _F]:
    """Decorator internally used on {fn}_output lifted function versions
    to implement them automatically from the un-lifted function."""
    func_sig = inspect.signature(func)
    def lifted_func(*args, opts=None, **kwargs):
        bound_args = func_sig.bind(*args, **kwargs)
        # Convert tuple to list, see pulumi/pulumi#8172
        args_list = list(bound_args.args)
        # Wrap the bound arguments in an Output so any Input values are
        # resolved before the plain (un-lifted) function is invoked.
        return pulumi.Output.from_input({
            'args': args_list,
            'kwargs': bound_args.kwargs
        }).apply(lambda resolved_args: func(*resolved_args['args'],
                                            opts=opts,
                                            **resolved_args['kwargs']))
    # The decorated function's own body is discarded; lifted_func replaces it.
    return (lambda _: lifted_func)
def get_plugin_download_url():
    """Return the server URL the zia provider plugin is downloaded from."""
    download_url = "github://api.github.com/zscaler"
    return download_url
from . import _utilities
import typing
# Export this package's modules as members:
from .provider import *
# Make subpackages available:
if typing.TYPE_CHECKING:
    # For static type checkers / IDEs: real imports so symbols resolve.
    import zscaler_pulumi_zia.activation as __activation
    activation = __activation
    import zscaler_pulumi_zia.adminroles as __adminroles
    adminroles = __adminroles
    import zscaler_pulumi_zia.adminusers as __adminusers
    adminusers = __adminusers
    import zscaler_pulumi_zia.authsettingsurls as __authsettingsurls
    authsettingsurls = __authsettingsurls
    import zscaler_pulumi_zia.config as __config
    config = __config
    import zscaler_pulumi_zia.departments as __departments
    departments = __departments
    import zscaler_pulumi_zia.devicegroups as __devicegroups
    devicegroups = __devicegroups
    import zscaler_pulumi_zia.devices as __devices
    devices = __devices
    import zscaler_pulumi_zia.dlp as __dlp
    dlp = __dlp
    import zscaler_pulumi_zia.firewall as __firewall
    firewall = __firewall
    import zscaler_pulumi_zia.groups as __groups
    groups = __groups
    import zscaler_pulumi_zia.locationgroups as __locationgroups
    locationgroups = __locationgroups
    import zscaler_pulumi_zia.locationmanagement as __locationmanagement
    locationmanagement = __locationmanagement
    import zscaler_pulumi_zia.rulelabels as __rulelabels
    rulelabels = __rulelabels
    import zscaler_pulumi_zia.securitysettings as __securitysettings
    securitysettings = __securitysettings
    import zscaler_pulumi_zia.timewindow as __timewindow
    timewindow = __timewindow
    import zscaler_pulumi_zia.trafficforwarding as __trafficforwarding
    trafficforwarding = __trafficforwarding
    import zscaler_pulumi_zia.urlcategory as __urlcategory
    urlcategory = __urlcategory
    import zscaler_pulumi_zia.urlfiltering as __urlfiltering
    urlfiltering = __urlfiltering
    import zscaler_pulumi_zia.users as __users
    users = __users
else:
    # At runtime, defer submodule imports until first attribute access so
    # that importing the top-level package stays cheap.
    activation = _utilities.lazy_import('zscaler_pulumi_zia.activation')
    adminroles = _utilities.lazy_import('zscaler_pulumi_zia.adminroles')
    adminusers = _utilities.lazy_import('zscaler_pulumi_zia.adminusers')
    authsettingsurls = _utilities.lazy_import('zscaler_pulumi_zia.authsettingsurls')
    config = _utilities.lazy_import('zscaler_pulumi_zia.config')
    departments = _utilities.lazy_import('zscaler_pulumi_zia.departments')
    devicegroups = _utilities.lazy_import('zscaler_pulumi_zia.devicegroups')
    devices = _utilities.lazy_import('zscaler_pulumi_zia.devices')
    dlp = _utilities.lazy_import('zscaler_pulumi_zia.dlp')
    firewall = _utilities.lazy_import('zscaler_pulumi_zia.firewall')
    groups = _utilities.lazy_import('zscaler_pulumi_zia.groups')
    locationgroups = _utilities.lazy_import('zscaler_pulumi_zia.locationgroups')
    locationmanagement = _utilities.lazy_import('zscaler_pulumi_zia.locationmanagement')
    rulelabels = _utilities.lazy_import('zscaler_pulumi_zia.rulelabels')
    securitysettings = _utilities.lazy_import('zscaler_pulumi_zia.securitysettings')
    timewindow = _utilities.lazy_import('zscaler_pulumi_zia.timewindow')
    trafficforwarding = _utilities.lazy_import('zscaler_pulumi_zia.trafficforwarding')
    urlcategory = _utilities.lazy_import('zscaler_pulumi_zia.urlcategory')
    urlfiltering = _utilities.lazy_import('zscaler_pulumi_zia.urlfiltering')
    users = _utilities.lazy_import('zscaler_pulumi_zia.users')
_utilities.register(
resource_modules="""
[
{
"pkg": "zia",
"mod": "Activation/activationStatus",
"fqn": "zscaler_pulumi_zia.activation",
"classes": {
"zia:Activation/activationStatus:ActivationStatus": "ActivationStatus"
}
},
{
"pkg": "zia",
"mod": "AdminUsers/adminUsers",
"fqn": "zscaler_pulumi_zia.adminusers",
"classes": {
"zia:AdminUsers/adminUsers:AdminUsers": "AdminUsers"
}
},
{
"pkg": "zia",
"mod": "AuthSettingsUrls/authSettingsURLs",
"fqn": "zscaler_pulumi_zia.authsettingsurls",
"classes": {
"zia:AuthSettingsUrls/authSettingsURLs:AuthSettingsURLs": "AuthSettingsURLs"
}
},
{
"pkg": "zia",
"mod": "DLP/dLPDictionaries",
"fqn": "zscaler_pulumi_zia.dlp",
"classes": {
"zia:DLP/dLPDictionaries:DLPDictionaries": "DLPDictionaries"
}
},
{
"pkg": "zia",
"mod": "DLP/dLPNotificationTemplates",
"fqn": "zscaler_pulumi_zia.dlp",
"classes": {
"zia:DLP/dLPNotificationTemplates:DLPNotificationTemplates": "DLPNotificationTemplates"
}
},
{
"pkg": "zia",
"mod": "DLP/dLPWebRules",
"fqn": "zscaler_pulumi_zia.dlp",
"classes": {
"zia:DLP/dLPWebRules:DLPWebRules": "DLPWebRules"
}
},
{
"pkg": "zia",
"mod": "Firewall/firewallFilteringApplicationGroups",
"fqn": "zscaler_pulumi_zia.firewall",
"classes": {
"zia:Firewall/firewallFilteringApplicationGroups:FirewallFilteringApplicationGroups": "FirewallFilteringApplicationGroups"
}
},
{
"pkg": "zia",
"mod": "Firewall/firewallFilteringDestinationGroups",
"fqn": "zscaler_pulumi_zia.firewall",
"classes": {
"zia:Firewall/firewallFilteringDestinationGroups:FirewallFilteringDestinationGroups": "FirewallFilteringDestinationGroups"
}
},
{
"pkg": "zia",
"mod": "Firewall/firewallFilteringNetworkServices",
"fqn": "zscaler_pulumi_zia.firewall",
"classes": {
"zia:Firewall/firewallFilteringNetworkServices:FirewallFilteringNetworkServices": "FirewallFilteringNetworkServices"
}
},
{
"pkg": "zia",
"mod": "Firewall/firewallFilteringRule",
"fqn": "zscaler_pulumi_zia.firewall",
"classes": {
"zia:Firewall/firewallFilteringRule:FirewallFilteringRule": "FirewallFilteringRule"
}
},
{
"pkg": "zia",
"mod": "Firewall/firewallFilteringServiceGroups",
"fqn": "zscaler_pulumi_zia.firewall",
"classes": {
"zia:Firewall/firewallFilteringServiceGroups:FirewallFilteringServiceGroups": "FirewallFilteringServiceGroups"
}
},
{
"pkg": "zia",
"mod": "Firewall/firewallFilteringSourceGroups",
"fqn": "zscaler_pulumi_zia.firewall",
"classes": {
"zia:Firewall/firewallFilteringSourceGroups:FirewallFilteringSourceGroups": "FirewallFilteringSourceGroups"
}
},
{
"pkg": "zia",
"mod": "LocationManagement/locationManagement",
"fqn": "zscaler_pulumi_zia.locationmanagement",
"classes": {
"zia:LocationManagement/locationManagement:LocationManagement": "LocationManagement"
}
},
{
"pkg": "zia",
"mod": "RuleLabels/ruleLabels",
"fqn": "zscaler_pulumi_zia.rulelabels",
"classes": {
"zia:RuleLabels/ruleLabels:RuleLabels": "RuleLabels"
}
},
{
"pkg": "zia",
"mod": "SecuritySettings/securitySettings",
"fqn": "zscaler_pulumi_zia.securitysettings",
"classes": {
"zia:SecuritySettings/securitySettings:SecuritySettings": "SecuritySettings"
}
},
{
"pkg": "zia",
"mod": "TrafficForwarding/trafficForwardingGRETunnel",
"fqn": "zscaler_pulumi_zia.trafficforwarding",
"classes": {
"zia:TrafficForwarding/trafficForwardingGRETunnel:TrafficForwardingGRETunnel": "TrafficForwardingGRETunnel"
}
},
{
"pkg": "zia",
"mod": "TrafficForwarding/trafficForwardingStaticIP",
"fqn": "zscaler_pulumi_zia.trafficforwarding",
"classes": {
"zia:TrafficForwarding/trafficForwardingStaticIP:TrafficForwardingStaticIP": "TrafficForwardingStaticIP"
}
},
{
"pkg": "zia",
"mod": "TrafficForwarding/trafficForwardingVPNCredentials",
"fqn": "zscaler_pulumi_zia.trafficforwarding",
"classes": {
"zia:TrafficForwarding/trafficForwardingVPNCredentials:TrafficForwardingVPNCredentials": "TrafficForwardingVPNCredentials"
}
},
{
"pkg": "zia",
"mod": "URLCategory/uRLCategories",
"fqn": "zscaler_pulumi_zia.urlcategory",
"classes": {
"zia:URLCategory/uRLCategories:URLCategories": "URLCategories"
}
},
{
"pkg": "zia",
"mod": "URLFiltering/uRLFilteringRules",
"fqn": "zscaler_pulumi_zia.urlfiltering",
"classes": {
"zia:URLFiltering/uRLFilteringRules:URLFilteringRules": "URLFilteringRules"
}
},
{
"pkg": "zia",
"mod": "Users/userManagement",
"fqn": "zscaler_pulumi_zia.users",
"classes": {
"zia:Users/userManagement:UserManagement": "UserManagement"
}
}
]
""",
resource_packages="""
[
{
"pkg": "zia",
"token": "pulumi:providers:zia",
"fqn": "zscaler_pulumi_zia",
"class": "Provider"
}
]
"""
) | zscaler-pulumi-zia | /zscaler_pulumi_zia-0.0.3a1675095059.tar.gz/zscaler_pulumi_zia-0.0.3a1675095059/zscaler_pulumi_zia/__init__.py | __init__.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this module.
__all__ = ['AuthSettingsURLsArgs', 'AuthSettingsURLs']
@pulumi.input_type
class AuthSettingsURLsArgs:
    def __init__(__self__, *,
                 urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a AuthSettingsURLs resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] urls: The list of URLs to exempt from cookie authentication.
        """
        # Record the argument only when explicitly supplied; pulumi.set stores
        # it in the form the @pulumi.input_type machinery expects.
        if urls is not None:
            pulumi.set(__self__, "urls", urls)

    @property
    @pulumi.getter
    def urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of URLs to exempt from cookie authentication.
        """
        return pulumi.get(self, "urls")

    @urls.setter
    def urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "urls", value)
@pulumi.input_type
class _AuthSettingsURLsState:
    def __init__(__self__, *,
                 urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering AuthSettingsURLs resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] urls: The list of URLs exempted from cookie authentication.
        """
        # Only explicitly supplied state fields are recorded.
        if urls is not None:
            pulumi.set(__self__, "urls", urls)

    @property
    @pulumi.getter
    def urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of URLs exempted from cookie authentication.
        """
        return pulumi.get(self, "urls")

    @urls.setter
    def urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "urls", value)
class AuthSettingsURLs(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        The **zia_auth_settings_urls** resource allows you to add or remove a URL from the cookie authentication exempt list in the Zscaler Internet Access cloud or via the API. To learn more see [URL Format Guidelines](https://help.zscaler.com/zia/url-format-guidelines)

        ## Example Usage

        ```python
        import pulumi
        import zscaler_pulumi_zia as zia

        # ZIA User Auth Settings Data Source
        example = zia.auth_settings_urls.AuthSettingsURLs("example", urls=[
            ".okta.com",
            ".oktacdn.com",
            ".mtls.oktapreview.com",
            ".mtls.okta.com",
            "d3l44rcogcb7iv.cloudfront.net",
            "pac.zdxcloud.net",
            ".windowsazure.com",
            ".fedoraproject.org",
            "login.windows.net",
            "d32a6ru7mhaq0c.cloudfront.net",
            ".kerberos.oktapreview.com",
            ".oktapreview.com",
            "login.zdxcloud.net",
            "login.microsoftonline.com",
            "smres.zdxcloud.net",
            ".kerberos.okta.com",
        ])
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] urls: The list of URLs to exempt from cookie authentication.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[AuthSettingsURLsArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The **zia_auth_settings_urls** resource allows you to add or remove a URL from the cookie authentication exempt list in the Zscaler Internet Access cloud or via the API. To learn more see [URL Format Guidelines](https://help.zscaler.com/zia/url-format-guidelines)

        ## Example Usage

        ```python
        import pulumi
        import zscaler_pulumi_zia as zia

        # ZIA User Auth Settings Data Source
        example = zia.auth_settings_urls.AuthSettingsURLs("example", urls=[
            ".okta.com",
            ".oktacdn.com",
            ".mtls.oktapreview.com",
            ".mtls.okta.com",
            "d3l44rcogcb7iv.cloudfront.net",
            "pac.zdxcloud.net",
            ".windowsazure.com",
            ".fedoraproject.org",
            "login.windows.net",
            "d32a6ru7mhaq0c.cloudfront.net",
            ".kerberos.oktapreview.com",
            ".oktapreview.com",
            "login.zdxcloud.net",
            "login.microsoftonline.com",
            "smres.zdxcloud.net",
            ".kerberos.okta.com",
        ])
        ```

        :param str resource_name: The name of the resource.
        :param AuthSettingsURLsArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: callers may pass either an
        # AuthSettingsURLsArgs object or plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(AuthSettingsURLsArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        # Merge provider-level default options with the caller's options.
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # Creating a new resource: build the property bag from arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AuthSettingsURLsArgs.__new__(AuthSettingsURLsArgs)

            __props__.__dict__["urls"] = urls
        super(AuthSettingsURLs, __self__).__init__(
            'zia:AuthSettingsUrls/authSettingsURLs:AuthSettingsURLs',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'AuthSettingsURLs':
        """
        Get an existing AuthSettingsURLs resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] urls: The list of URLs exempted from cookie authentication.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _AuthSettingsURLsState.__new__(_AuthSettingsURLsState)

        __props__.__dict__["urls"] = urls
        return AuthSettingsURLs(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def urls(self) -> pulumi.Output[Sequence[str]]:
        """
        The list of URLs exempted from cookie authentication.
        """
        return pulumi.get(self, "urls")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this module.
__all__ = [
    'GetAuthSettingsURLsResult',
    'AwaitableGetAuthSettingsURLsResult',
    'get_auth_settings_urls',
]
@pulumi.output_type
class GetAuthSettingsURLsResult:
    """
    A collection of values returned by getAuthSettingsURLs.
    """
    def __init__(__self__, id=None, urls=None):
        # Validate the raw values handed back by the engine before storing
        # them via the @pulumi.output_type machinery.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if urls and not isinstance(urls, list):
            raise TypeError("Expected argument 'urls' to be a list")
        pulumi.set(__self__, "urls", urls)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def urls(self) -> Sequence[str]:
        """
        The list of URLs exempted from cookie authentication and SSL inspection.
        """
        return pulumi.get(self, "urls")
class AwaitableGetAuthSettingsURLsResult(GetAuthSettingsURLsResult):
    """Awaitable wrapper around ``GetAuthSettingsURLsResult``.

    Awaiting an instance completes immediately, producing a plain
    ``GetAuthSettingsURLsResult`` that carries the same values.
    """

    def __await__(self):
        # A generator that never yields: the await finishes at once and the
        # return value below becomes the result of the await expression.
        yield from ()
        return GetAuthSettingsURLsResult(id=self.id, urls=self.urls)
def get_auth_settings_urls(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAuthSettingsURLsResult:
    """
    Use the **zia_auth_settings_urls** data source to get a list of URLs that were exempted from cookie authentication and SSL Inspection in the Zscaler Internet Access cloud or via the API. To learn more see [URL Format Guidelines](https://help.zscaler.com/zia/url-format-guidelines)

    ## Example Usage

    ```python
    import pulumi
    import pulumi_zia as zia

    foo = zia.AuthSettingsUrls.get_auth_settings_urls()
    ```
    """
    # This data source takes no arguments; merge caller options onto the
    # provider-level invoke defaults before calling the engine.
    invoke_args: Mapping[str, Any] = {}
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    result = pulumi.runtime.invoke('zia:AuthSettingsUrls/getAuthSettingsURLs:getAuthSettingsURLs', invoke_args, opts=merged_opts, typ=GetAuthSettingsURLsResult).value
    return AwaitableGetAuthSettingsURLsResult(
        id=result.id,
        urls=result.urls)
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
# Public API of this module.
__all__ = ['DLPDictionariesArgs', 'DLPDictionaries']
@pulumi.input_type
class DLPDictionariesArgs:
    def __init__(__self__, *,
                 confidence_threshold: Optional[pulumi.Input[str]] = None,
                 custom_phrase_match_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 dictionary_type: Optional[pulumi.Input[str]] = None,
                 exact_data_match_details: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]]] = None,
                 idm_profile_match_accuracies: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 patterns: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]]] = None,
                 phrases: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]]] = None,
                 proximity: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a DLPDictionaries resource.
        :param pulumi.Input[str] confidence_threshold: The DLP confidence threshold. The following values are supported:
        :param pulumi.Input[str] custom_phrase_match_type: The DLP custom phrase match type. Supported values are:
        :param pulumi.Input[str] description: The description of the DLP dictionary
        :param pulumi.Input[str] dictionary_type: The DLP dictionary type. The following values are supported:
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]] exact_data_match_details: Exact Data Match (EDM) related information for custom DLP dictionaries.
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]] idm_profile_match_accuracies: List of Indexed Document Match (IDM) profiles and their corresponding match accuracy for custom DLP dictionaries.
        :param pulumi.Input[str] name: The DLP dictionary's name
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]] patterns: List containing the patterns used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]] phrases: List containing the phrases used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[int] proximity: The DLP dictionary proximity length.
        """
        # Only record arguments the caller explicitly supplied; unset values
        # are left to the provider's defaults.
        if confidence_threshold is not None:
            pulumi.set(__self__, "confidence_threshold", confidence_threshold)
        if custom_phrase_match_type is not None:
            pulumi.set(__self__, "custom_phrase_match_type", custom_phrase_match_type)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if dictionary_type is not None:
            pulumi.set(__self__, "dictionary_type", dictionary_type)
        if exact_data_match_details is not None:
            pulumi.set(__self__, "exact_data_match_details", exact_data_match_details)
        if idm_profile_match_accuracies is not None:
            pulumi.set(__self__, "idm_profile_match_accuracies", idm_profile_match_accuracies)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if patterns is not None:
            pulumi.set(__self__, "patterns", patterns)
        if phrases is not None:
            pulumi.set(__self__, "phrases", phrases)
        if proximity is not None:
            pulumi.set(__self__, "proximity", proximity)

    @property
    @pulumi.getter(name="confidenceThreshold")
    def confidence_threshold(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP confidence threshold. The following values are supported:
        """
        return pulumi.get(self, "confidence_threshold")

    @confidence_threshold.setter
    def confidence_threshold(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "confidence_threshold", value)

    @property
    @pulumi.getter(name="customPhraseMatchType")
    def custom_phrase_match_type(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP custom phrase match type. Supported values are:
        """
        return pulumi.get(self, "custom_phrase_match_type")

    @custom_phrase_match_type.setter
    def custom_phrase_match_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_phrase_match_type", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the DLP dictionary
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="dictionaryType")
    def dictionary_type(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP dictionary type. The following values are supported:
        """
        return pulumi.get(self, "dictionary_type")

    @dictionary_type.setter
    def dictionary_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dictionary_type", value)

    @property
    @pulumi.getter(name="exactDataMatchDetails")
    def exact_data_match_details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]]]:
        """
        Exact Data Match (EDM) related information for custom DLP dictionaries.
        """
        return pulumi.get(self, "exact_data_match_details")

    @exact_data_match_details.setter
    def exact_data_match_details(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]]]):
        pulumi.set(self, "exact_data_match_details", value)

    @property
    @pulumi.getter(name="idmProfileMatchAccuracies")
    def idm_profile_match_accuracies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]]]:
        """
        List of Indexed Document Match (IDM) profiles and their corresponding match accuracy for custom DLP dictionaries.
        """
        return pulumi.get(self, "idm_profile_match_accuracies")

    @idm_profile_match_accuracies.setter
    def idm_profile_match_accuracies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]]]):
        pulumi.set(self, "idm_profile_match_accuracies", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP dictionary's name
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def patterns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]]]:
        """
        List containing the patterns used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        """
        return pulumi.get(self, "patterns")

    @patterns.setter
    def patterns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]]]):
        pulumi.set(self, "patterns", value)

    @property
    @pulumi.getter
    def phrases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]]]:
        """
        List containing the phrases used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        """
        return pulumi.get(self, "phrases")

    @phrases.setter
    def phrases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]]]):
        pulumi.set(self, "phrases", value)

    @property
    @pulumi.getter
    def proximity(self) -> Optional[pulumi.Input[int]]:
        """
        The DLP dictionary proximity length.
        """
        return pulumi.get(self, "proximity")

    @proximity.setter
    def proximity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "proximity", value)
@pulumi.input_type
class _DLPDictionariesState:
    def __init__(__self__, *,
                 confidence_threshold: Optional[pulumi.Input[str]] = None,
                 custom_phrase_match_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 dictionary_id: Optional[pulumi.Input[int]] = None,
                 dictionary_type: Optional[pulumi.Input[str]] = None,
                 exact_data_match_details: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]]] = None,
                 idm_profile_match_accuracies: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 name_l10n_tag: Optional[pulumi.Input[bool]] = None,
                 patterns: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]]] = None,
                 phrases: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]]] = None,
                 proximity: Optional[pulumi.Input[int]] = None,
                 threshold_type: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering DLPDictionaries resources.
        :param pulumi.Input[str] confidence_threshold: The DLP confidence threshold. The following values are supported:
        :param pulumi.Input[str] custom_phrase_match_type: The DLP custom phrase match type. Supported values are:
        :param pulumi.Input[str] description: The description of the DLP dictionary
        :param pulumi.Input[str] dictionary_type: The DLP dictionary type. The following values are supported:
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]] exact_data_match_details: Exact Data Match (EDM) related information for custom DLP dictionaries.
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]] idm_profile_match_accuracies: List of Indexed Document Match (IDM) profiles and their corresponding match accuracy for custom DLP dictionaries.
        :param pulumi.Input[str] name: The DLP dictionary's name
        :param pulumi.Input[bool] name_l10n_tag: Indicates whether the name is localized or not. This is always set to True for predefined DLP dictionaries.
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]] patterns: List containing the patterns used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]] phrases: List containing the phrases used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[int] proximity: The DLP dictionary proximity length.
        :param pulumi.Input[str] threshold_type: The DLP threshold type. The following values are supported:
        """
        # Only record state fields that were explicitly supplied.
        if confidence_threshold is not None:
            pulumi.set(__self__, "confidence_threshold", confidence_threshold)
        if custom_phrase_match_type is not None:
            pulumi.set(__self__, "custom_phrase_match_type", custom_phrase_match_type)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if dictionary_id is not None:
            pulumi.set(__self__, "dictionary_id", dictionary_id)
        if dictionary_type is not None:
            pulumi.set(__self__, "dictionary_type", dictionary_type)
        if exact_data_match_details is not None:
            pulumi.set(__self__, "exact_data_match_details", exact_data_match_details)
        if idm_profile_match_accuracies is not None:
            pulumi.set(__self__, "idm_profile_match_accuracies", idm_profile_match_accuracies)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if name_l10n_tag is not None:
            pulumi.set(__self__, "name_l10n_tag", name_l10n_tag)
        if patterns is not None:
            pulumi.set(__self__, "patterns", patterns)
        if phrases is not None:
            pulumi.set(__self__, "phrases", phrases)
        if proximity is not None:
            pulumi.set(__self__, "proximity", proximity)
        if threshold_type is not None:
            pulumi.set(__self__, "threshold_type", threshold_type)

    @property
    @pulumi.getter(name="confidenceThreshold")
    def confidence_threshold(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP confidence threshold. The following values are supported:
        """
        return pulumi.get(self, "confidence_threshold")

    @confidence_threshold.setter
    def confidence_threshold(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "confidence_threshold", value)

    @property
    @pulumi.getter(name="customPhraseMatchType")
    def custom_phrase_match_type(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP custom phrase match type. Supported values are:
        """
        return pulumi.get(self, "custom_phrase_match_type")

    @custom_phrase_match_type.setter
    def custom_phrase_match_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_phrase_match_type", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the DLP dictionary
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="dictionaryId")
    def dictionary_id(self) -> Optional[pulumi.Input[int]]:
        """
        Numeric identifier of the dictionary — presumably assigned by the ZIA
        service on creation (TODO confirm against the provider schema).
        """
        return pulumi.get(self, "dictionary_id")

    @dictionary_id.setter
    def dictionary_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dictionary_id", value)

    @property
    @pulumi.getter(name="dictionaryType")
    def dictionary_type(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP dictionary type. The following values are supported:
        """
        return pulumi.get(self, "dictionary_type")

    @dictionary_type.setter
    def dictionary_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dictionary_type", value)

    @property
    @pulumi.getter(name="exactDataMatchDetails")
    def exact_data_match_details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]]]:
        """
        Exact Data Match (EDM) related information for custom DLP dictionaries.
        """
        return pulumi.get(self, "exact_data_match_details")

    @exact_data_match_details.setter
    def exact_data_match_details(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesExactDataMatchDetailArgs']]]]):
        pulumi.set(self, "exact_data_match_details", value)

    @property
    @pulumi.getter(name="idmProfileMatchAccuracies")
    def idm_profile_match_accuracies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]]]:
        """
        List of Indexed Document Match (IDM) profiles and their corresponding match accuracy for custom DLP dictionaries.
        """
        return pulumi.get(self, "idm_profile_match_accuracies")

    @idm_profile_match_accuracies.setter
    def idm_profile_match_accuracies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyArgs']]]]):
        pulumi.set(self, "idm_profile_match_accuracies", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP dictionary's name
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nameL10nTag")
    def name_l10n_tag(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the name is localized or not. This is always set to True for predefined DLP dictionaries.
        """
        return pulumi.get(self, "name_l10n_tag")

    @name_l10n_tag.setter
    def name_l10n_tag(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "name_l10n_tag", value)

    @property
    @pulumi.getter
    def patterns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]]]:
        """
        List containing the patterns used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        """
        return pulumi.get(self, "patterns")

    @patterns.setter
    def patterns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPatternArgs']]]]):
        pulumi.set(self, "patterns", value)

    @property
    @pulumi.getter
    def phrases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]]]:
        """
        List containing the phrases used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        """
        return pulumi.get(self, "phrases")

    @phrases.setter
    def phrases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DLPDictionariesPhraseArgs']]]]):
        pulumi.set(self, "phrases", value)

    @property
    @pulumi.getter
    def proximity(self) -> Optional[pulumi.Input[int]]:
        """
        The DLP dictionary proximity length.
        """
        return pulumi.get(self, "proximity")

    @proximity.setter
    def proximity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "proximity", value)

    @property
    @pulumi.getter(name="thresholdType")
    def threshold_type(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP threshold type. The following values are supported:
        """
        return pulumi.get(self, "threshold_type")

    @threshold_type.setter
    def threshold_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "threshold_type", value)
class DLPDictionaries(pulumi.CustomResource):
    """Pulumi resource managing a ZIA DLP dictionary; see the overloaded
    ``__init__`` docstrings below for usage and argument details."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 confidence_threshold: Optional[pulumi.Input[str]] = None,
                 custom_phrase_match_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 dictionary_type: Optional[pulumi.Input[str]] = None,
                 exact_data_match_details: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesExactDataMatchDetailArgs']]]]] = None,
                 idm_profile_match_accuracies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesIdmProfileMatchAccuracyArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 patterns: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPatternArgs']]]]] = None,
                 phrases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPhraseArgs']]]]] = None,
                 proximity: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        The **zia_dlp_dictionaries** resource allows the creation and management of ZIA DLP dictionaries in the Zscaler Internet Access cloud or via the API.
        ## Example Usage
        ```python
        import pulumi
        import zscaler_pulumi_zia as zia
        example = zia.dlp.DLPDictionaries("example",
            custom_phrase_match_type="MATCH_ALL_CUSTOM_PHRASE_PATTERN_DICTIONARY",
            description="Your Description",
            dictionary_type="PATTERNS_AND_PHRASES",
            patterns=[zia.dlp.DLPDictionariesPatternArgs(
                action="PATTERN_COUNT_TYPE_UNIQUE",
                pattern="YourPattern",
            )],
            phrases=[zia.dlp.DLPDictionariesPhraseArgs(
                action="PHRASE_COUNT_TYPE_ALL",
                phrase="YourPhrase",
            )])
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] confidence_threshold: The DLP confidence threshold. The following values are supported:
        :param pulumi.Input[str] custom_phrase_match_type: The DLP custom phrase match type. Supported values are:
        :param pulumi.Input[str] description: The description of the DLP dictionary
        :param pulumi.Input[str] dictionary_type: The DLP dictionary type. The following values are supported:
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesExactDataMatchDetailArgs']]]] exact_data_match_details: Exact Data Match (EDM) related information for custom DLP dictionaries.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesIdmProfileMatchAccuracyArgs']]]] idm_profile_match_accuracies: List of Indexed Document Match (IDM) profiles and their corresponding match accuracy for custom DLP dictionaries.
        :param pulumi.Input[str] name: The DLP dictionary's name
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPatternArgs']]]] patterns: List containing the patterns used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPhraseArgs']]]] phrases: List containing the phrases used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[int] proximity: The DLP dictionary proximity length.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[DLPDictionariesArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The **zia_dlp_dictionaries** resource allows the creation and management of ZIA DLP dictionaries in the Zscaler Internet Access cloud or via the API.
        ## Example Usage
        ```python
        import pulumi
        import zscaler_pulumi_zia as zia
        example = zia.dlp.DLPDictionaries("example",
            custom_phrase_match_type="MATCH_ALL_CUSTOM_PHRASE_PATTERN_DICTIONARY",
            description="Your Description",
            dictionary_type="PATTERNS_AND_PHRASES",
            patterns=[zia.dlp.DLPDictionariesPatternArgs(
                action="PATTERN_COUNT_TYPE_UNIQUE",
                pattern="YourPattern",
            )],
            phrases=[zia.dlp.DLPDictionariesPhraseArgs(
                action="PHRASE_COUNT_TYPE_ALL",
                phrase="YourPhrase",
            )])
        ```
        :param str resource_name: The name of the resource.
        :param DLPDictionariesArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch to the matching overload: args-object form vs. keyword form.
        resource_args, opts = _utilities.get_resource_args_opts(DLPDictionariesArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 confidence_threshold: Optional[pulumi.Input[str]] = None,
                 custom_phrase_match_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 dictionary_type: Optional[pulumi.Input[str]] = None,
                 exact_data_match_details: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesExactDataMatchDetailArgs']]]]] = None,
                 idm_profile_match_accuracies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesIdmProfileMatchAccuracyArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 patterns: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPatternArgs']]]]] = None,
                 phrases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPhraseArgs']]]]] = None,
                 proximity: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """Shared initializer backing both ``__init__`` overloads."""
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DLPDictionariesArgs.__new__(DLPDictionariesArgs)
            __props__.__dict__["confidence_threshold"] = confidence_threshold
            __props__.__dict__["custom_phrase_match_type"] = custom_phrase_match_type
            __props__.__dict__["description"] = description
            __props__.__dict__["dictionary_type"] = dictionary_type
            __props__.__dict__["exact_data_match_details"] = exact_data_match_details
            __props__.__dict__["idm_profile_match_accuracies"] = idm_profile_match_accuracies
            __props__.__dict__["name"] = name
            __props__.__dict__["patterns"] = patterns
            __props__.__dict__["phrases"] = phrases
            __props__.__dict__["proximity"] = proximity
            # Output-only attributes; populated by the provider after creation.
            __props__.__dict__["dictionary_id"] = None
            __props__.__dict__["name_l10n_tag"] = None
            __props__.__dict__["threshold_type"] = None
        super(DLPDictionaries, __self__).__init__(
            'zia:DLP/dLPDictionaries:DLPDictionaries',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            confidence_threshold: Optional[pulumi.Input[str]] = None,
            custom_phrase_match_type: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            dictionary_id: Optional[pulumi.Input[int]] = None,
            dictionary_type: Optional[pulumi.Input[str]] = None,
            exact_data_match_details: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesExactDataMatchDetailArgs']]]]] = None,
            idm_profile_match_accuracies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesIdmProfileMatchAccuracyArgs']]]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            name_l10n_tag: Optional[pulumi.Input[bool]] = None,
            patterns: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPatternArgs']]]]] = None,
            phrases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPhraseArgs']]]]] = None,
            proximity: Optional[pulumi.Input[int]] = None,
            threshold_type: Optional[pulumi.Input[str]] = None) -> 'DLPDictionaries':
        """
        Get an existing DLPDictionaries resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] confidence_threshold: The DLP confidence threshold. The following values are supported:
        :param pulumi.Input[str] custom_phrase_match_type: The DLP custom phrase match type. Supported values are:
        :param pulumi.Input[str] description: The description of the DLP dictionary
        :param pulumi.Input[str] dictionary_type: The DLP dictionary type. The following values are supported:
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesExactDataMatchDetailArgs']]]] exact_data_match_details: Exact Data Match (EDM) related information for custom DLP dictionaries.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesIdmProfileMatchAccuracyArgs']]]] idm_profile_match_accuracies: List of Indexed Document Match (IDM) profiles and their corresponding match accuracy for custom DLP dictionaries.
        :param pulumi.Input[str] name: The DLP dictionary's name
        :param pulumi.Input[bool] name_l10n_tag: Indicates whether the name is localized or not. This is always set to True for predefined DLP dictionaries.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPatternArgs']]]] patterns: List containing the patterns used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DLPDictionariesPhraseArgs']]]] phrases: List containing the phrases used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        :param pulumi.Input[int] proximity: The DLP dictionary proximity length.
        :param pulumi.Input[str] threshold_type: The DLP threshold type. The following values are supported:
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _DLPDictionariesState.__new__(_DLPDictionariesState)
        __props__.__dict__["confidence_threshold"] = confidence_threshold
        __props__.__dict__["custom_phrase_match_type"] = custom_phrase_match_type
        __props__.__dict__["description"] = description
        __props__.__dict__["dictionary_id"] = dictionary_id
        __props__.__dict__["dictionary_type"] = dictionary_type
        __props__.__dict__["exact_data_match_details"] = exact_data_match_details
        __props__.__dict__["idm_profile_match_accuracies"] = idm_profile_match_accuracies
        __props__.__dict__["name"] = name
        __props__.__dict__["name_l10n_tag"] = name_l10n_tag
        __props__.__dict__["patterns"] = patterns
        __props__.__dict__["phrases"] = phrases
        __props__.__dict__["proximity"] = proximity
        __props__.__dict__["threshold_type"] = threshold_type
        return DLPDictionaries(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="confidenceThreshold")
    def confidence_threshold(self) -> pulumi.Output[Optional[str]]:
        """
        The DLP confidence threshold. The following values are supported:
        """
        return pulumi.get(self, "confidence_threshold")
    @property
    @pulumi.getter(name="customPhraseMatchType")
    def custom_phrase_match_type(self) -> pulumi.Output[Optional[str]]:
        """
        The DLP custom phrase match type. Supported values are:
        """
        return pulumi.get(self, "custom_phrase_match_type")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the DLP dictionary
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="dictionaryId")
    def dictionary_id(self) -> pulumi.Output[int]:
        """The unique identifier of the dictionary (output only; assigned by the provider)."""
        return pulumi.get(self, "dictionary_id")
    @property
    @pulumi.getter(name="dictionaryType")
    def dictionary_type(self) -> pulumi.Output[Optional[str]]:
        """
        The DLP dictionary type. The following values are supported:
        """
        return pulumi.get(self, "dictionary_type")
    @property
    @pulumi.getter(name="exactDataMatchDetails")
    def exact_data_match_details(self) -> pulumi.Output[Optional[Sequence['outputs.DLPDictionariesExactDataMatchDetail']]]:
        """
        Exact Data Match (EDM) related information for custom DLP dictionaries.
        """
        return pulumi.get(self, "exact_data_match_details")
    @property
    @pulumi.getter(name="idmProfileMatchAccuracies")
    def idm_profile_match_accuracies(self) -> pulumi.Output[Sequence['outputs.DLPDictionariesIdmProfileMatchAccuracy']]:
        """
        List of Indexed Document Match (IDM) profiles and their corresponding match accuracy for custom DLP dictionaries.
        """
        return pulumi.get(self, "idm_profile_match_accuracies")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The DLP dictionary's name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nameL10nTag")
    def name_l10n_tag(self) -> pulumi.Output[bool]:
        """
        Indicates whether the name is localized or not. This is always set to True for predefined DLP dictionaries.
        """
        return pulumi.get(self, "name_l10n_tag")
    @property
    @pulumi.getter
    def patterns(self) -> pulumi.Output[Optional[Sequence['outputs.DLPDictionariesPattern']]]:
        """
        List containing the patterns used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        """
        return pulumi.get(self, "patterns")
    @property
    @pulumi.getter
    def phrases(self) -> pulumi.Output[Optional[Sequence['outputs.DLPDictionariesPhrase']]]:
        """
        List containing the phrases used within a custom DLP dictionary. This attribute is not applicable to predefined DLP dictionaries. Required when `dictionary_type` is `PATTERNS_AND_PHRASES`
        """
        return pulumi.get(self, "phrases")
    @property
    @pulumi.getter
    def proximity(self) -> pulumi.Output[Optional[int]]:
        """
        The DLP dictionary proximity length.
        """
        return pulumi.get(self, "proximity")
    @property
    @pulumi.getter(name="thresholdType")
    def threshold_type(self) -> pulumi.Output[str]:
        """
        The DLP threshold type. The following values are supported:
        """
        return pulumi.get(self, "threshold_type")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public names of this module, exported via `from ... import *`.
__all__ = [
    'DLPDictionariesExactDataMatchDetailArgs',
    'DLPDictionariesIdmProfileMatchAccuracyArgs',
    'DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileArgs',
    'DLPDictionariesPatternArgs',
    'DLPDictionariesPhraseArgs',
    'DLPWebRulesAuditorArgs',
    'DLPWebRulesDepartmentsArgs',
    'DLPWebRulesDlpEnginesArgs',
    'DLPWebRulesExcludedDepartmentsArgs',
    'DLPWebRulesExcludedGroupsArgs',
    'DLPWebRulesExcludedUsersArgs',
    'DLPWebRulesGroupsArgs',
    'DLPWebRulesIcapServerArgs',
    'DLPWebRulesLabelsArgs',
    'DLPWebRulesLocationGroupsArgs',
    'DLPWebRulesLocationsArgs',
    'DLPWebRulesNotificationTemplateArgs',
    'DLPWebRulesTimeWindowsArgs',
    'DLPWebRulesUrlCategoriesArgs',
    'DLPWebRulesUsersArgs',
]
@pulumi.input_type
class DLPDictionariesExactDataMatchDetailArgs:
    """Input type describing Exact Data Match (EDM) mapping details for a custom DLP dictionary."""
    def __init__(__self__, *,
                 dictionary_edm_mapping_id: Optional[pulumi.Input[int]] = None,
                 primary_field: Optional[pulumi.Input[int]] = None,
                 schema_id: Optional[pulumi.Input[int]] = None,
                 secondary_field_match_on: Optional[pulumi.Input[str]] = None,
                 secondary_fields: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None):
        """
        :param pulumi.Input[int] dictionary_edm_mapping_id: The unique identifier for the EDM mapping.
        :param pulumi.Input[int] primary_field: The EDM template's primary field.
        :param pulumi.Input[int] schema_id: The unique identifier for the EDM template (or schema).
        :param pulumi.Input[str] secondary_field_match_on: The EDM secondary field to match on.
               - `"MATCHON_NONE"`
               - `"MATCHON_ANY_1"`
               - `"MATCHON_ANY_2"`
               - `"MATCHON_ANY_3"`
               - `"MATCHON_ANY_4"`
               - `"MATCHON_ANY_5"`
               - `"MATCHON_ANY_6"`
               - `"MATCHON_ANY_7"`
               - `"MATCHON_ANY_8"`
               - `"MATCHON_ANY_9"`
               - `"MATCHON_ANY_10"`
               - `"MATCHON_ANY_11"`
               - `"MATCHON_ANY_12"`
               - `"MATCHON_ANY_13"`
               - `"MATCHON_ANY_14"`
               - `"MATCHON_ANY_15"`
               - `"MATCHON_ALL"`
        :param pulumi.Input[Sequence[pulumi.Input[int]]] secondary_fields: The EDM template's secondary fields.
        """
        if dictionary_edm_mapping_id is not None:
            pulumi.set(__self__, "dictionary_edm_mapping_id", dictionary_edm_mapping_id)
        if primary_field is not None:
            pulumi.set(__self__, "primary_field", primary_field)
        if schema_id is not None:
            pulumi.set(__self__, "schema_id", schema_id)
        if secondary_field_match_on is not None:
            pulumi.set(__self__, "secondary_field_match_on", secondary_field_match_on)
        if secondary_fields is not None:
            pulumi.set(__self__, "secondary_fields", secondary_fields)
    @property
    @pulumi.getter(name="dictionaryEdmMappingId")
    def dictionary_edm_mapping_id(self) -> Optional[pulumi.Input[int]]:
        """
        The unique identifier for the EDM mapping.
        """
        return pulumi.get(self, "dictionary_edm_mapping_id")
    @dictionary_edm_mapping_id.setter
    def dictionary_edm_mapping_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dictionary_edm_mapping_id", value)
    @property
    @pulumi.getter(name="primaryField")
    def primary_field(self) -> Optional[pulumi.Input[int]]:
        """
        The EDM template's primary field.
        """
        return pulumi.get(self, "primary_field")
    @primary_field.setter
    def primary_field(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "primary_field", value)
    @property
    @pulumi.getter(name="schemaId")
    def schema_id(self) -> Optional[pulumi.Input[int]]:
        """
        The unique identifier for the EDM template (or schema).
        """
        return pulumi.get(self, "schema_id")
    @schema_id.setter
    def schema_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "schema_id", value)
    @property
    @pulumi.getter(name="secondaryFieldMatchOn")
    def secondary_field_match_on(self) -> Optional[pulumi.Input[str]]:
        """
        The EDM secondary field to match on.
        - `"MATCHON_NONE"`
        - `"MATCHON_ANY_1"`
        - `"MATCHON_ANY_2"`
        - `"MATCHON_ANY_3"`
        - `"MATCHON_ANY_4"`
        - `"MATCHON_ANY_5"`
        - `"MATCHON_ANY_6"`
        - `"MATCHON_ANY_7"`
        - `"MATCHON_ANY_8"`
        - `"MATCHON_ANY_9"`
        - `"MATCHON_ANY_10"`
        - `"MATCHON_ANY_11"`
        - `"MATCHON_ANY_12"`
        - `"MATCHON_ANY_13"`
        - `"MATCHON_ANY_14"`
        - `"MATCHON_ANY_15"`
        - `"MATCHON_ALL"`
        """
        return pulumi.get(self, "secondary_field_match_on")
    @secondary_field_match_on.setter
    def secondary_field_match_on(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_field_match_on", value)
    @property
    @pulumi.getter(name="secondaryFields")
    def secondary_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        """
        The EDM template's secondary fields.
        """
        return pulumi.get(self, "secondary_fields")
    @secondary_fields.setter
    def secondary_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "secondary_fields", value)
@pulumi.input_type
class DLPDictionariesIdmProfileMatchAccuracyArgs:
    """Input type pairing an IDM profile reference with its match accuracy for a custom DLP dictionary."""
    def __init__(__self__, *,
                 adp_idm_profile: Optional[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileArgs']] = None,
                 match_accuracy: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileArgs'] adp_idm_profile: The IDM template reference.
        :param pulumi.Input[str] match_accuracy: The IDM template match accuracy.
               - `"LOW"`
               - `"MEDIUM"`
               - `"HEAVY"`
        """
        if adp_idm_profile is not None:
            pulumi.set(__self__, "adp_idm_profile", adp_idm_profile)
        if match_accuracy is not None:
            pulumi.set(__self__, "match_accuracy", match_accuracy)
    @property
    @pulumi.getter(name="adpIdmProfile")
    def adp_idm_profile(self) -> Optional[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileArgs']]:
        """
        The IDM template reference.
        """
        return pulumi.get(self, "adp_idm_profile")
    @adp_idm_profile.setter
    def adp_idm_profile(self, value: Optional[pulumi.Input['DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileArgs']]):
        pulumi.set(self, "adp_idm_profile", value)
    @property
    @pulumi.getter(name="matchAccuracy")
    def match_accuracy(self) -> Optional[pulumi.Input[str]]:
        """
        The IDM template match accuracy.
        - `"LOW"`
        - `"MEDIUM"`
        - `"HEAVY"`
        """
        return pulumi.get(self, "match_accuracy")
    @match_accuracy.setter
    def match_accuracy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "match_accuracy", value)
@pulumi.input_type
class DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileArgs:
    """Input type referencing an ADP IDM profile by ``id``; the upstream schema
    provides no field descriptions for this type."""
    def __init__(__self__, *,
                 extensions: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[int]] = None):
        if extensions is not None:
            pulumi.set(__self__, "extensions", extensions)
        if id is not None:
            pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def extensions(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Free-form string-to-string map (undocumented upstream — confirm semantics against the ZIA API)."""
        return pulumi.get(self, "extensions")
    @extensions.setter
    def extensions(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "extensions", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[int]]:
        """Identifier of the referenced IDM profile (undocumented upstream — confirm against the ZIA API)."""
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class DLPDictionariesPatternArgs:
    """Input type for a single pattern entry (action + pattern) in a custom DLP dictionary."""
    def __init__(__self__, *,
                 action: Optional[pulumi.Input[str]] = None,
                 pattern: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] action: The action applied to a DLP dictionary using patterns. The following values are supported:
        :param pulumi.Input[str] pattern: DLP dictionary pattern
        """
        if action is not None:
            pulumi.set(__self__, "action", action)
        if pattern is not None:
            pulumi.set(__self__, "pattern", pattern)
    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The action applied to a DLP dictionary using patterns. The following values are supported:
        """
        return pulumi.get(self, "action")
    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)
    @property
    @pulumi.getter
    def pattern(self) -> Optional[pulumi.Input[str]]:
        """
        DLP dictionary pattern
        """
        return pulumi.get(self, "pattern")
    @pattern.setter
    def pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pattern", value)
@pulumi.input_type
class DLPDictionariesPhraseArgs:
    """Input type for a single phrase entry (action + phrase) in a custom DLP dictionary."""
    def __init__(__self__, *,
                 action: Optional[pulumi.Input[str]] = None,
                 phrase: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] action: The action applied to a DLP dictionary using phrases. The following values are supported:
        :param pulumi.Input[str] phrase: DLP dictionary phrase
        """
        if action is not None:
            pulumi.set(__self__, "action", action)
        if phrase is not None:
            pulumi.set(__self__, "phrase", phrase)
    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The action applied to a DLP dictionary using phrases. The following values are supported:
        """
        return pulumi.get(self, "action")
    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)
    @property
    @pulumi.getter
    def phrase(self) -> Optional[pulumi.Input[str]]:
        """
        DLP dictionary phrase
        """
        return pulumi.get(self, "phrase")
    @phrase.setter
    def phrase(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "phrase", value)
@pulumi.input_type
class DLPWebRulesAuditorArgs:
    """Input type holding the single entity ``id`` for the DLP web rule ``auditor`` block."""
    def __init__(__self__, *,
                 id: pulumi.Input[int]):
        """
        :param pulumi.Input[int] id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: pulumi.Input[int]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class DLPWebRulesDepartmentsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``departments`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesDlpEnginesArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``dlp_engines`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesExcludedDepartmentsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``excluded_departments`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesExcludedGroupsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``excluded_groups`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesExcludedUsersArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``excluded_users`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesGroupsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``groups`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesIcapServerArgs:
    """Input type holding the single entity ``id`` for the DLP web rule ``icap_server`` block."""
    def __init__(__self__, *,
                 id: pulumi.Input[int]):
        """
        :param pulumi.Input[int] id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: pulumi.Input[int]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class DLPWebRulesLabelsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``labels`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesLocationGroupsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``location_groups`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesLocationsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``locations`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesNotificationTemplateArgs:
    """Input type holding the single entity ``id`` for the DLP web rule ``notification_template`` block."""
    def __init__(__self__, *,
                 id: pulumi.Input[int]):
        """
        :param pulumi.Input[int] id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: pulumi.Input[int]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class DLPWebRulesTimeWindowsArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``time_windows`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesUrlCategoriesArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``url_categories`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class DLPWebRulesUsersArgs:
    """Input type holding the entity ``ids`` list for the DLP web rule ``users`` block."""
    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)
    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public names of this module, exported via `from ... import *`.
__all__ = [
    'GetDLPNotificationTemplatesResult',
    'AwaitableGetDLPNotificationTemplatesResult',
    'get_dlp_notification_templates',
    'get_dlp_notification_templates_output',
]
@pulumi.output_type
class GetDLPNotificationTemplatesResult:
    """
    A collection of values returned by getDLPNotificationTemplates.
    """
    def __init__(__self__, attach_content=None, html_message=None, id=None, name=None, plain_test_message=None, subject=None, tls_enabled=None):
        # Defensive type checks: instances are built from raw invoke output
        # (see pulumi.runtime.invoke(..., typ=GetDLPNotificationTemplatesResult)).
        if attach_content and not isinstance(attach_content, bool):
            raise TypeError("Expected argument 'attach_content' to be a bool")
        pulumi.set(__self__, "attach_content", attach_content)
        if html_message and not isinstance(html_message, str):
            raise TypeError("Expected argument 'html_message' to be a str")
        pulumi.set(__self__, "html_message", html_message)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if plain_test_message and not isinstance(plain_test_message, str):
            raise TypeError("Expected argument 'plain_test_message' to be a str")
        pulumi.set(__self__, "plain_test_message", plain_test_message)
        if subject and not isinstance(subject, str):
            raise TypeError("Expected argument 'subject' to be a str")
        pulumi.set(__self__, "subject", subject)
        if tls_enabled and not isinstance(tls_enabled, bool):
            raise TypeError("Expected argument 'tls_enabled' to be a bool")
        pulumi.set(__self__, "tls_enabled", tls_enabled)
    @property
    @pulumi.getter(name="attachContent")
    def attach_content(self) -> bool:
        """The ``attachContent`` flag returned by the API."""
        return pulumi.get(self, "attach_content")
    @property
    @pulumi.getter(name="htmlMessage")
    def html_message(self) -> str:
        """The ``htmlMessage`` body of the notification template."""
        return pulumi.get(self, "html_message")
    @property
    @pulumi.getter
    def id(self) -> int:
        """The unique identifier of the notification template."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """The notification template name."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="plainTestMessage")
    def plain_test_message(self) -> str:
        """The ``plainTestMessage`` body returned by the API.
        (NOTE(review): "test" presumably mirrors an upstream field-name typo of
        "text"; do not rename — it must match the API wire name.)"""
        return pulumi.get(self, "plain_test_message")
    @property
    @pulumi.getter
    def subject(self) -> str:
        """The notification email subject line."""
        return pulumi.get(self, "subject")
    @property
    @pulumi.getter(name="tlsEnabled")
    def tls_enabled(self) -> bool:
        """The ``tlsEnabled`` flag returned by the API."""
        return pulumi.get(self, "tls_enabled")
class AwaitableGetDLPNotificationTemplatesResult(GetDLPNotificationTemplatesResult):
    """Awaitable wrapper so the invoke result can be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator function
        # (required by the await protocol) without ever actually yielding;
        # awaiting immediately returns a plain result object.
        if False:
            yield self
        return GetDLPNotificationTemplatesResult(
            attach_content=self.attach_content,
            html_message=self.html_message,
            id=self.id,
            name=self.name,
            plain_test_message=self.plain_test_message,
            subject=self.subject,
            tls_enabled=self.tls_enabled)
def get_dlp_notification_templates(id: Optional[int] = None,
                                   name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDLPNotificationTemplatesResult:
    """
    Use the **zia_dlp_notification_templates** data source to get information about a ZIA DLP Notification Templates in the Zscaler Internet Access cloud or via the API.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_zia as zia
    example = zia.DLP.get_dlp_notification_templates(name="DLP Auditor Template Test")
    ```
    :param int id: The unique identifier for a DLP notification template.
    :param str name: The DLP policy rule name.
    """
    # Raw invoke arguments, keyed exactly as the provider expects them.
    invoke_args = {
        'id': id,
        'name': name,
    }
    # Layer caller-supplied options on top of the package-wide defaults.
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    result = pulumi.runtime.invoke('zia:DLP/getDLPNotificationTemplates:getDLPNotificationTemplates', invoke_args, opts=merged_opts, typ=GetDLPNotificationTemplatesResult).value
    # Re-wrap the typed result in its awaitable counterpart for async callers.
    return AwaitableGetDLPNotificationTemplatesResult(
        attach_content=result.attach_content,
        html_message=result.html_message,
        id=result.id,
        name=result.name,
        plain_test_message=result.plain_test_message,
        subject=result.subject,
        tls_enabled=result.tls_enabled)
@_utilities.lift_output_func(get_dlp_notification_templates)
def get_dlp_notification_templates_output(id: Optional[pulumi.Input[Optional[int]]] = None,
                                          name: Optional[pulumi.Input[Optional[str]]] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDLPNotificationTemplatesResult]:
    """
    Use the **zia_dlp_notification_templates** data source to get information about a ZIA DLP Notification Templates in the Zscaler Internet Access cloud or via the API.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_zia as zia
    example = zia.DLP.get_dlp_notification_templates(name="DLP Auditor Template Test")
    ```
    :param int id: The unique identifier for a DLP notification template.
    :param str name: The DLP policy rule name.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke above
    # so the arguments may be pulumi.Input values and the result is an Output.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DLPNotificationTemplatesArgs', 'DLPNotificationTemplates']
@pulumi.input_type
class DLPNotificationTemplatesArgs:
    # @pulumi.input_type derives the resource's input schema from this class;
    # property names and @pulumi.getter(name=...) markers must stay in sync
    # with the provider's camelCase wire names.
    def __init__(__self__, *,
                 html_message: pulumi.Input[str],
                 plain_text_message: pulumi.Input[str],
                 attach_content: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 tls_enabled: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a DLPNotificationTemplates resource.
        :param pulumi.Input[str] html_message: The template for the HTML message body that must be displayed in the DLP notification email.
        :param pulumi.Input[str] plain_text_message: The template for the plain text UTF-8 message body that must be displayed in the DLP notification email.
        :param pulumi.Input[bool] attach_content: If set to true, the content that is violation is attached to the DLP notification email.
        :param pulumi.Input[str] name: The DLP policy rule name.
        :param pulumi.Input[str] subject: The Subject line that is displayed within the DLP notification email.
        :param pulumi.Input[bool] tls_enabled: If set to true, the content that is violation is attached to the DLP notification email.
        """
        # NOTE(review): the tls_enabled description above duplicates
        # attach_content's text upstream; the field presumably toggles TLS for
        # email delivery — confirm against the ZIA API before correcting.
        # Required inputs are always stored; optional ones only when provided,
        # so unset values stay absent from the serialized inputs.
        pulumi.set(__self__, "html_message", html_message)
        pulumi.set(__self__, "plain_text_message", plain_text_message)
        if attach_content is not None:
            pulumi.set(__self__, "attach_content", attach_content)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if subject is not None:
            pulumi.set(__self__, "subject", subject)
        if tls_enabled is not None:
            pulumi.set(__self__, "tls_enabled", tls_enabled)

    @property
    @pulumi.getter(name="htmlMessage")
    def html_message(self) -> pulumi.Input[str]:
        """
        The template for the HTML message body that must be displayed in the DLP notification email.
        """
        return pulumi.get(self, "html_message")

    @html_message.setter
    def html_message(self, value: pulumi.Input[str]):
        pulumi.set(self, "html_message", value)

    @property
    @pulumi.getter(name="plainTextMessage")
    def plain_text_message(self) -> pulumi.Input[str]:
        """
        The template for the plain text UTF-8 message body that must be displayed in the DLP notification email.
        """
        return pulumi.get(self, "plain_text_message")

    @plain_text_message.setter
    def plain_text_message(self, value: pulumi.Input[str]):
        pulumi.set(self, "plain_text_message", value)

    @property
    @pulumi.getter(name="attachContent")
    def attach_content(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, the content that is violation is attached to the DLP notification email.
        """
        return pulumi.get(self, "attach_content")

    @attach_content.setter
    def attach_content(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "attach_content", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP policy rule name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def subject(self) -> Optional[pulumi.Input[str]]:
        """
        The Subject line that is displayed within the DLP notification email.
        """
        return pulumi.get(self, "subject")

    @subject.setter
    def subject(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subject", value)

    @property
    @pulumi.getter(name="tlsEnabled")
    def tls_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, the content that is violation is attached to the DLP notification email.
        """
        return pulumi.get(self, "tls_enabled")

    @tls_enabled.setter
    def tls_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "tls_enabled", value)
@pulumi.input_type
class _DLPNotificationTemplatesState:
    # State inputs mirror DLPNotificationTemplatesArgs but every field is
    # optional and the server-assigned template_id is included, because this
    # shape is used for lookups/imports rather than creation.
    def __init__(__self__, *,
                 attach_content: Optional[pulumi.Input[bool]] = None,
                 html_message: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 plain_text_message: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 template_id: Optional[pulumi.Input[int]] = None,
                 tls_enabled: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering DLPNotificationTemplates resources.
        :param pulumi.Input[bool] attach_content: If set to true, the content that is violation is attached to the DLP notification email.
        :param pulumi.Input[str] html_message: The template for the HTML message body that must be displayed in the DLP notification email.
        :param pulumi.Input[str] name: The DLP policy rule name.
        :param pulumi.Input[str] plain_text_message: The template for the plain text UTF-8 message body that must be displayed in the DLP notification email.
        :param pulumi.Input[str] subject: The Subject line that is displayed within the DLP notification email.
        :param pulumi.Input[bool] tls_enabled: If set to true, the content that is violation is attached to the DLP notification email.
        """
        # Only provided values are stored, keeping unset fields out of the
        # serialized state inputs.
        if attach_content is not None:
            pulumi.set(__self__, "attach_content", attach_content)
        if html_message is not None:
            pulumi.set(__self__, "html_message", html_message)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if plain_text_message is not None:
            pulumi.set(__self__, "plain_text_message", plain_text_message)
        if subject is not None:
            pulumi.set(__self__, "subject", subject)
        if template_id is not None:
            pulumi.set(__self__, "template_id", template_id)
        if tls_enabled is not None:
            pulumi.set(__self__, "tls_enabled", tls_enabled)

    @property
    @pulumi.getter(name="attachContent")
    def attach_content(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, the content that is violation is attached to the DLP notification email.
        """
        return pulumi.get(self, "attach_content")

    @attach_content.setter
    def attach_content(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "attach_content", value)

    @property
    @pulumi.getter(name="htmlMessage")
    def html_message(self) -> Optional[pulumi.Input[str]]:
        """
        The template for the HTML message body that must be displayed in the DLP notification email.
        """
        return pulumi.get(self, "html_message")

    @html_message.setter
    def html_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "html_message", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP policy rule name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="plainTextMessage")
    def plain_text_message(self) -> Optional[pulumi.Input[str]]:
        """
        The template for the plain text UTF-8 message body that must be displayed in the DLP notification email.
        """
        return pulumi.get(self, "plain_text_message")

    @plain_text_message.setter
    def plain_text_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "plain_text_message", value)

    @property
    @pulumi.getter
    def subject(self) -> Optional[pulumi.Input[str]]:
        """
        The Subject line that is displayed within the DLP notification email.
        """
        return pulumi.get(self, "subject")

    @subject.setter
    def subject(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subject", value)

    @property
    @pulumi.getter(name="templateId")
    def template_id(self) -> Optional[pulumi.Input[int]]:
        # Server-assigned template identifier (see _internal_init, which
        # initializes it to None on create).
        return pulumi.get(self, "template_id")

    @template_id.setter
    def template_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "template_id", value)

    @property
    @pulumi.getter(name="tlsEnabled")
    def tls_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, the content that is violation is attached to the DLP notification email.
        """
        # NOTE(review): docstring duplicated from attach_content upstream;
        # presumably controls TLS for the notification email — confirm.
        return pulumi.get(self, "tls_enabled")

    @tls_enabled.setter
    def tls_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "tls_enabled", value)
class DLPNotificationTemplates(pulumi.CustomResource):
    """Pulumi resource managing a ZIA DLP notification template."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 attach_content: Optional[pulumi.Input[bool]] = None,
                 html_message: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 plain_text_message: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 tls_enabled: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """
        The **zia_dlp_notification_templates** resource allows the creation and management of ZIA DLP Notification Templates in the Zscaler Internet Access cloud or via the API.
        ## Example Usage
        ```python
        import pulumi
        import zscaler_pulumi_zia as zia
        example = zia.dlp.DLPNotificationTemplates("example",
            subject=f"DLP Violation: {transactio_n__id} {engines}",
            attach_content=True,
            tls_enabled=True,
            html_message=(lambda path: open(path).read())("./index.html"),
            plain_text_message=(lambda path: open(path).read())("./dlp.txt"))
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] attach_content: If set to true, the content that is violation is attached to the DLP notification email.
        :param pulumi.Input[str] html_message: The template for the HTML message body that must be displayed in the DLP notification email.
        :param pulumi.Input[str] name: The DLP policy rule name.
        :param pulumi.Input[str] plain_text_message: The template for the plain text UTF-8 message body that must be displayed in the DLP notification email.
        :param pulumi.Input[str] subject: The Subject line that is displayed within the DLP notification email.
        :param pulumi.Input[bool] tls_enabled: If set to true, the content that is violation is attached to the DLP notification email.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DLPNotificationTemplatesArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The **zia_dlp_notification_templates** resource allows the creation and management of ZIA DLP Notification Templates in the Zscaler Internet Access cloud or via the API.
        ## Example Usage
        ```python
        import pulumi
        import zscaler_pulumi_zia as zia
        example = zia.dlp.DLPNotificationTemplates("example",
            subject=f"DLP Violation: {transactio_n__id} {engines}",
            attach_content=True,
            tls_enabled=True,
            html_message=(lambda path: open(path).read())("./index.html"),
            plain_text_message=(lambda path: open(path).read())("./dlp.txt"))
        ```
        :param str resource_name: The name of the resource.
        :param DLPNotificationTemplatesArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an args object or
        # individual keyword properties; both funnel into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(DLPNotificationTemplatesArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       attach_content: Optional[pulumi.Input[bool]] = None,
                       html_message: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       plain_text_message: Optional[pulumi.Input[str]] = None,
                       subject: Optional[pulumi.Input[str]] = None,
                       tls_enabled: Optional[pulumi.Input[bool]] = None,
                       __props__=None):
        # Single real constructor: validates options, enforces required
        # properties, and registers the resource with the engine.
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # Creating (not adopting) a resource: __props__ must be built here.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DLPNotificationTemplatesArgs.__new__(DLPNotificationTemplatesArgs)

            __props__.__dict__["attach_content"] = attach_content
            # Required properties are only enforced for fresh resources
            # (opts.urn set means the engine is rehydrating existing state).
            if html_message is None and not opts.urn:
                raise TypeError("Missing required property 'html_message'")
            __props__.__dict__["html_message"] = html_message
            __props__.__dict__["name"] = name
            if plain_text_message is None and not opts.urn:
                raise TypeError("Missing required property 'plain_text_message'")
            __props__.__dict__["plain_text_message"] = plain_text_message
            __props__.__dict__["subject"] = subject
            __props__.__dict__["tls_enabled"] = tls_enabled
            # template_id is an output assigned by the service after creation.
            __props__.__dict__["template_id"] = None
        super(DLPNotificationTemplates, __self__).__init__(
            'zia:DLP/dLPNotificationTemplates:DLPNotificationTemplates',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            attach_content: Optional[pulumi.Input[bool]] = None,
            html_message: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            plain_text_message: Optional[pulumi.Input[str]] = None,
            subject: Optional[pulumi.Input[str]] = None,
            template_id: Optional[pulumi.Input[int]] = None,
            tls_enabled: Optional[pulumi.Input[bool]] = None) -> 'DLPNotificationTemplates':
        """
        Get an existing DLPNotificationTemplates resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] attach_content: If set to true, the content that is violation is attached to the DLP notification email.
        :param pulumi.Input[str] html_message: The template for the HTML message body that must be displayed in the DLP notification email.
        :param pulumi.Input[str] name: The DLP policy rule name.
        :param pulumi.Input[str] plain_text_message: The template for the plain text UTF-8 message body that must be displayed in the DLP notification email.
        :param pulumi.Input[str] subject: The Subject line that is displayed within the DLP notification email.
        :param pulumi.Input[bool] tls_enabled: If set to true, the content that is violation is attached to the DLP notification email.
        """
        # Setting opts.id routes construction through the "adopt existing
        # resource" branch of _internal_init (see opts.id check there).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _DLPNotificationTemplatesState.__new__(_DLPNotificationTemplatesState)

        __props__.__dict__["attach_content"] = attach_content
        __props__.__dict__["html_message"] = html_message
        __props__.__dict__["name"] = name
        __props__.__dict__["plain_text_message"] = plain_text_message
        __props__.__dict__["subject"] = subject
        __props__.__dict__["template_id"] = template_id
        __props__.__dict__["tls_enabled"] = tls_enabled
        return DLPNotificationTemplates(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="attachContent")
    def attach_content(self) -> pulumi.Output[Optional[bool]]:
        """
        If set to true, the content that is violation is attached to the DLP notification email.
        """
        return pulumi.get(self, "attach_content")

    @property
    @pulumi.getter(name="htmlMessage")
    def html_message(self) -> pulumi.Output[str]:
        """
        The template for the HTML message body that must be displayed in the DLP notification email.
        """
        return pulumi.get(self, "html_message")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The DLP policy rule name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="plainTextMessage")
    def plain_text_message(self) -> pulumi.Output[str]:
        """
        The template for the plain text UTF-8 message body that must be displayed in the DLP notification email.
        """
        return pulumi.get(self, "plain_text_message")

    @property
    @pulumi.getter
    def subject(self) -> pulumi.Output[Optional[str]]:
        """
        The Subject line that is displayed within the DLP notification email.
        """
        return pulumi.get(self, "subject")

    @property
    @pulumi.getter(name="templateId")
    def template_id(self) -> pulumi.Output[int]:
        """The DLP notification template's numeric identifier (output)."""
        return pulumi.get(self, "template_id")

    @property
    @pulumi.getter(name="tlsEnabled")
    def tls_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        If set to true, the content that is violation is attached to the DLP notification email.
        """
        # NOTE(review): docstring duplicated from attach_content upstream;
        # presumably controls TLS for the notification email — confirm.
        return pulumi.get(self, "tls_enabled")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'DLPDictionariesExactDataMatchDetail',
'DLPDictionariesIdmProfileMatchAccuracy',
'DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfile',
'DLPDictionariesPattern',
'DLPDictionariesPhrase',
'DLPWebRulesAuditor',
'DLPWebRulesDepartments',
'DLPWebRulesDlpEngines',
'DLPWebRulesExcludedDepartments',
'DLPWebRulesExcludedGroups',
'DLPWebRulesExcludedUsers',
'DLPWebRulesGroups',
'DLPWebRulesIcapServer',
'DLPWebRulesLabels',
'DLPWebRulesLocationGroups',
'DLPWebRulesLocations',
'DLPWebRulesNotificationTemplate',
'DLPWebRulesTimeWindows',
'DLPWebRulesUrlCategories',
'DLPWebRulesUsers',
'GetDLPDictionariesExactDataMatchDetailResult',
'GetDLPDictionariesIdmProfileMatchAccuracyResult',
'GetDLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileResult',
'GetDLPDictionariesPatternResult',
'GetDLPDictionariesPhraseResult',
'GetDLPWebRulesAuditorResult',
'GetDLPWebRulesDepartmentResult',
'GetDLPWebRulesDlpEngineResult',
'GetDLPWebRulesExcludedDepartmentResult',
'GetDLPWebRulesExcludedGroupResult',
'GetDLPWebRulesExcludedUserResult',
'GetDLPWebRulesGroupResult',
'GetDLPWebRulesIcapServerResult',
'GetDLPWebRulesLabelResult',
'GetDLPWebRulesLastModifiedByResult',
'GetDLPWebRulesLocationResult',
'GetDLPWebRulesLocationGroupResult',
'GetDLPWebRulesNotificationTemplateResult',
'GetDLPWebRulesTimeWindowResult',
'GetDLPWebRulesUrlCategoryResult',
'GetDLPWebRulesUserResult',
]
@pulumi.output_type
class DLPDictionariesExactDataMatchDetail(dict):
    """Exact Data Match (EDM) mapping details attached to a DLP dictionary."""
    @staticmethod
    def __key_warning(key: str):
        # Map the provider's camelCase wire keys to the snake_case property
        # names; dict-style access with a camelCase key still works but warns.
        suggest = None
        if key == "dictionaryEdmMappingId":
            suggest = "dictionary_edm_mapping_id"
        elif key == "primaryField":
            suggest = "primary_field"
        elif key == "schemaId":
            suggest = "schema_id"
        elif key == "secondaryFieldMatchOn":
            suggest = "secondary_field_match_on"
        elif key == "secondaryFields":
            suggest = "secondary_fields"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DLPDictionariesExactDataMatchDetail. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DLPDictionariesExactDataMatchDetail.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DLPDictionariesExactDataMatchDetail.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 dictionary_edm_mapping_id: Optional[int] = None,
                 primary_field: Optional[int] = None,
                 schema_id: Optional[int] = None,
                 secondary_field_match_on: Optional[str] = None,
                 secondary_fields: Optional[Sequence[int]] = None):
        """
        :param int dictionary_edm_mapping_id: The unique identifier for the EDM mapping.
        :param int primary_field: The EDM template's primary field.
        :param int schema_id: The unique identifier for the EDM template (or schema).
        :param str secondary_field_match_on: The EDM secondary field to match on.
               - `"MATCHON_NONE"`
               - `"MATCHON_ANY_1"`
               - `"MATCHON_ANY_2"`
               - `"MATCHON_ANY_3"`
               - `"MATCHON_ANY_4"`
               - `"MATCHON_ANY_5"`
               - `"MATCHON_ANY_6"`
               - `"MATCHON_ANY_7"`
               - `"MATCHON_ANY_8"`
               - `"MATCHON_ANY_9"`
               - `"MATCHON_ANY_10"`
               - `"MATCHON_ANY_11"`
               - `"MATCHON_ANY_12"`
               - `"MATCHON_ANY_13"`
               - `"MATCHON_ANY_14"`
               - `"MATCHON_ANY_15"`
               - `"MATCHON_ALL"`
        :param Sequence[int] secondary_fields: The EDM template's secondary fields.
        """
        # Store only the provided values so unset fields stay absent.
        if dictionary_edm_mapping_id is not None:
            pulumi.set(__self__, "dictionary_edm_mapping_id", dictionary_edm_mapping_id)
        if primary_field is not None:
            pulumi.set(__self__, "primary_field", primary_field)
        if schema_id is not None:
            pulumi.set(__self__, "schema_id", schema_id)
        if secondary_field_match_on is not None:
            pulumi.set(__self__, "secondary_field_match_on", secondary_field_match_on)
        if secondary_fields is not None:
            pulumi.set(__self__, "secondary_fields", secondary_fields)

    @property
    @pulumi.getter(name="dictionaryEdmMappingId")
    def dictionary_edm_mapping_id(self) -> Optional[int]:
        """
        The unique identifier for the EDM mapping.
        """
        return pulumi.get(self, "dictionary_edm_mapping_id")

    @property
    @pulumi.getter(name="primaryField")
    def primary_field(self) -> Optional[int]:
        """
        The EDM template's primary field.
        """
        return pulumi.get(self, "primary_field")

    @property
    @pulumi.getter(name="schemaId")
    def schema_id(self) -> Optional[int]:
        """
        The unique identifier for the EDM template (or schema).
        """
        return pulumi.get(self, "schema_id")

    @property
    @pulumi.getter(name="secondaryFieldMatchOn")
    def secondary_field_match_on(self) -> Optional[str]:
        """
        The EDM secondary field to match on.
        - `"MATCHON_NONE"`
        - `"MATCHON_ANY_1"`
        - `"MATCHON_ANY_2"`
        - `"MATCHON_ANY_3"`
        - `"MATCHON_ANY_4"`
        - `"MATCHON_ANY_5"`
        - `"MATCHON_ANY_6"`
        - `"MATCHON_ANY_7"`
        - `"MATCHON_ANY_8"`
        - `"MATCHON_ANY_9"`
        - `"MATCHON_ANY_10"`
        - `"MATCHON_ANY_11"`
        - `"MATCHON_ANY_12"`
        - `"MATCHON_ANY_13"`
        - `"MATCHON_ANY_14"`
        - `"MATCHON_ANY_15"`
        - `"MATCHON_ALL"`
        """
        return pulumi.get(self, "secondary_field_match_on")

    @property
    @pulumi.getter(name="secondaryFields")
    def secondary_fields(self) -> Optional[Sequence[int]]:
        """
        The EDM template's secondary fields.
        """
        return pulumi.get(self, "secondary_fields")
@pulumi.output_type
class DLPDictionariesIdmProfileMatchAccuracy(dict):
    """IDM template reference together with its match-accuracy level."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when the provider's camelCase key is used instead of the
        # snake_case property.
        suggest = None
        if key == "adpIdmProfile":
            suggest = "adp_idm_profile"
        elif key == "matchAccuracy":
            suggest = "match_accuracy"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DLPDictionariesIdmProfileMatchAccuracy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DLPDictionariesIdmProfileMatchAccuracy.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DLPDictionariesIdmProfileMatchAccuracy.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 adp_idm_profile: Optional['outputs.DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfile'] = None,
                 match_accuracy: Optional[str] = None):
        """
        :param 'DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileArgs' adp_idm_profile: The IDM template reference.
        :param str match_accuracy: The IDM template match accuracy.
               - `"LOW"`
               - `"MEDIUM"`
               - `"HEAVY"`
        """
        if adp_idm_profile is not None:
            pulumi.set(__self__, "adp_idm_profile", adp_idm_profile)
        if match_accuracy is not None:
            pulumi.set(__self__, "match_accuracy", match_accuracy)

    @property
    @pulumi.getter(name="adpIdmProfile")
    def adp_idm_profile(self) -> Optional['outputs.DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfile']:
        """
        The IDM template reference.
        """
        return pulumi.get(self, "adp_idm_profile")

    @property
    @pulumi.getter(name="matchAccuracy")
    def match_accuracy(self) -> Optional[str]:
        """
        The IDM template match accuracy.
        - `"LOW"`
        - `"MEDIUM"`
        - `"HEAVY"`
        """
        return pulumi.get(self, "match_accuracy")
@pulumi.output_type
class DLPDictionariesIdmProfileMatchAccuracyAdpIdmProfile(dict):
    """IDM template reference: an extensions map plus a numeric ID."""
    def __init__(__self__, *,
                 extensions: Optional[Mapping[str, str]] = None,
                 id: Optional[int] = None):
        # Store only the provided values so unset fields stay absent.
        if extensions is not None:
            pulumi.set(__self__, "extensions", extensions)
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def extensions(self) -> Optional[Mapping[str, str]]:
        """Free-form string-to-string extension attributes of the reference."""
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> Optional[int]:
        """The IDM template's numeric identifier."""
        return pulumi.get(self, "id")
@pulumi.output_type
class DLPDictionariesPattern(dict):
    """A single pattern entry of a DLP dictionary, with its applied action."""
    def __init__(__self__, *,
                 action: Optional[str] = None,
                 pattern: Optional[str] = None):
        """
        :param str action: The action applied to a DLP dictionary using patterns. The following values are supported:
        :param str pattern: DLP dictionary pattern
        """
        if action is not None:
            pulumi.set(__self__, "action", action)
        if pattern is not None:
            pulumi.set(__self__, "pattern", pattern)

    @property
    @pulumi.getter
    def action(self) -> Optional[str]:
        """
        The action applied to a DLP dictionary using patterns. The following values are supported:
        """
        # NOTE(review): the upstream docstring truncates the list of
        # supported action values — see the provider schema for them.
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def pattern(self) -> Optional[str]:
        """
        DLP dictionary pattern
        """
        return pulumi.get(self, "pattern")
@pulumi.output_type
class DLPDictionariesPhrase(dict):
    """A single phrase entry of a DLP dictionary, with its applied action."""
    def __init__(__self__, *,
                 action: Optional[str] = None,
                 phrase: Optional[str] = None):
        """
        :param str action: The action applied to a DLP dictionary using patterns. The following values are supported:
        :param str phrase: DLP dictionary phrase
        """
        if action is not None:
            pulumi.set(__self__, "action", action)
        if phrase is not None:
            pulumi.set(__self__, "phrase", phrase)

    @property
    @pulumi.getter
    def action(self) -> Optional[str]:
        """
        The action applied to a DLP dictionary using patterns. The following values are supported:
        """
        # NOTE(review): the upstream docstring truncates the list of
        # supported action values — see the provider schema for them.
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def phrase(self) -> Optional[str]:
        """
        DLP dictionary phrase
        """
        return pulumi.get(self, "phrase")
@pulumi.output_type
class DLPWebRulesAuditor(dict):
    """Single-ID reference: the auditor entity assigned to a DLP web rule."""
    def __init__(__self__, *,
                 id: int):
        """
        :param int id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class DLPWebRulesDepartments(dict):
    """ID-list reference: departments a DLP web rule applies to."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesDlpEngines(dict):
    """ID-list reference: DLP engines attached to a DLP web rule."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesExcludedDepartments(dict):
    """ID-list reference: departments excluded from a DLP web rule."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesExcludedGroups(dict):
    """ID-list reference: groups excluded from a DLP web rule."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesExcludedUsers(dict):
    """ID-list reference: users excluded from a DLP web rule."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesGroups(dict):
    """ID-list reference: groups a DLP web rule applies to."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesIcapServer(dict):
    """Single-ID reference: the ICAP server used by a DLP web rule."""
    def __init__(__self__, *,
                 id: int):
        """
        :param int id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class DLPWebRulesLabels(dict):
    """ID-list reference: labels attached to a DLP web rule."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesLocationGroups(dict):
    """ID-list reference: location groups a DLP web rule applies to."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesLocations(dict):
    """ID-list reference: locations a DLP web rule applies to."""
    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesNotificationTemplate(dict):
    """Single-ID reference: the notification template used by a DLP web rule."""
    def __init__(__self__, *,
                 id: int):
        """
        :param int id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class DLPWebRulesTimeWindows(dict):
    """IDs of the time windows a DLP web rule applies to."""

    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesUrlCategories(dict):
    """IDs of the URL categories a DLP web rule applies to."""

    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class DLPWebRulesUsers(dict):
    """IDs of the users a DLP web rule applies to."""

    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "ids")
@pulumi.output_type
class GetDLPDictionariesExactDataMatchDetailResult(dict):
    """Exact Data Match (EDM) mapping entry returned by the getDLPDictionaries data source."""

    def __init__(__self__, *,
                 dictionary_edm_mapping_id: int,
                 primary_field: int,
                 schema_id: int,
                 secondary_field_match_on: str,
                 secondary_fields: Sequence[int]):
        pulumi.set(__self__, "dictionary_edm_mapping_id", dictionary_edm_mapping_id)
        pulumi.set(__self__, "primary_field", primary_field)
        pulumi.set(__self__, "schema_id", schema_id)
        pulumi.set(__self__, "secondary_field_match_on", secondary_field_match_on)
        pulumi.set(__self__, "secondary_fields", secondary_fields)

    @property
    @pulumi.getter(name="dictionaryEdmMappingId")
    def dictionary_edm_mapping_id(self) -> int:
        return pulumi.get(self, "dictionary_edm_mapping_id")

    @property
    @pulumi.getter(name="primaryField")
    def primary_field(self) -> int:
        return pulumi.get(self, "primary_field")

    @property
    @pulumi.getter(name="schemaId")
    def schema_id(self) -> int:
        return pulumi.get(self, "schema_id")

    @property
    @pulumi.getter(name="secondaryFieldMatchOn")
    def secondary_field_match_on(self) -> str:
        return pulumi.get(self, "secondary_field_match_on")

    @property
    @pulumi.getter(name="secondaryFields")
    def secondary_fields(self) -> Sequence[int]:
        return pulumi.get(self, "secondary_fields")
@pulumi.output_type
class GetDLPDictionariesIdmProfileMatchAccuracyResult(dict):
    """IDM profile match-accuracy entry returned by the getDLPDictionaries data source."""

    def __init__(__self__, *,
                 adp_idm_profiles: Sequence['outputs.GetDLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileResult'],
                 match_accuracy: str):
        pulumi.set(__self__, "adp_idm_profiles", adp_idm_profiles)
        pulumi.set(__self__, "match_accuracy", match_accuracy)

    @property
    @pulumi.getter(name="adpIdmProfiles")
    def adp_idm_profiles(self) -> Sequence['outputs.GetDLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileResult']:
        return pulumi.get(self, "adp_idm_profiles")

    @property
    @pulumi.getter(name="matchAccuracy")
    def match_accuracy(self) -> str:
        return pulumi.get(self, "match_accuracy")
@pulumi.output_type
class GetDLPDictionariesIdmProfileMatchAccuracyAdpIdmProfileResult(dict):
    """ADP IDM profile entry nested inside an IDM profile match-accuracy result."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int):
        """
        :param int id: Unique identifier for the DLP dictionary
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Unique identifier for the DLP dictionary
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class GetDLPDictionariesPatternResult(dict):
    """Pattern entry returned by the getDLPDictionaries data source."""

    def __init__(__self__, *,
                 action: str,
                 pattern: str):
        """
        :param str action: (String) The action applied to a DLP dictionary using patterns
        :param str pattern: (String) DLP dictionary pattern
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "pattern", pattern)

    @property
    @pulumi.getter
    def action(self) -> str:
        """
        (String) The action applied to a DLP dictionary using patterns
        """
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def pattern(self) -> str:
        """
        (String) DLP dictionary pattern
        """
        return pulumi.get(self, "pattern")
@pulumi.output_type
class GetDLPDictionariesPhraseResult(dict):
    """Phrase entry returned by the getDLPDictionaries data source."""

    def __init__(__self__, *,
                 action: str,
                 phrase: str):
        """
        :param str action: (String) The action applied to a DLP dictionary using patterns
        :param str phrase: (String) DLP dictionary phrase
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "phrase", phrase)

    @property
    @pulumi.getter
    def action(self) -> str:
        """
        (String) The action applied to a DLP dictionary using patterns
        """
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def phrase(self) -> str:
        """
        (String) DLP dictionary phrase
        """
        return pulumi.get(self, "phrase")
@pulumi.output_type
class GetDLPWebRulesAuditorResult(dict):
    """Auditor entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesDepartmentResult(dict):
    """Department entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesDlpEngineResult(dict):
    """DLP engine entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesExcludedDepartmentResult(dict):
    """Excluded-department entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int):
        """
        :param int id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class GetDLPWebRulesExcludedGroupResult(dict):
    """Excluded-group entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int):
        """
        :param int id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class GetDLPWebRulesExcludedUserResult(dict):
    """Excluded-user entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int):
        """
        :param int id: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class GetDLPWebRulesGroupResult(dict):
    """Group entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesIcapServerResult(dict):
    """ICAP server entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesLabelResult(dict):
    """Label entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesLastModifiedByResult(dict):
    """Last-modified-by entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesLocationResult(dict):
    """Location entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesLocationGroupResult(dict):
    """Location-group entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesNotificationTemplateResult(dict):
    """Notification-template entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesTimeWindowResult(dict):
    """Time-window entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesUrlCategoryResult(dict):
    """URL-category entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetDLPWebRulesUserResult(dict):
    """User entry returned by the getDLPWebRules data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDLPEnginesResult',
'AwaitableGetDLPEnginesResult',
'get_dlp_engines',
'get_dlp_engines_output',
]
@pulumi.output_type
class GetDLPEnginesResult:
    """
    A collection of values returned by getDLPEngines.
    """
    def __init__(__self__, custom_dlp_engine=None, description=None, engine_expression=None, id=None, name=None, predefined_engine_name=None):
        # Generated validation: each argument is type-checked and then stored
        # on the output object via pulumi.set.  Note that falsy values (None,
        # "", 0, False) skip the isinstance check entirely.
        if custom_dlp_engine and not isinstance(custom_dlp_engine, bool):
            raise TypeError("Expected argument 'custom_dlp_engine' to be a bool")
        pulumi.set(__self__, "custom_dlp_engine", custom_dlp_engine)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if engine_expression and not isinstance(engine_expression, str):
            raise TypeError("Expected argument 'engine_expression' to be a str")
        pulumi.set(__self__, "engine_expression", engine_expression)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if predefined_engine_name and not isinstance(predefined_engine_name, str):
            raise TypeError("Expected argument 'predefined_engine_name' to be a str")
        pulumi.set(__self__, "predefined_engine_name", predefined_engine_name)

    @property
    @pulumi.getter(name="customDlpEngine")
    def custom_dlp_engine(self) -> bool:
        return pulumi.get(self, "custom_dlp_engine")

    @property
    @pulumi.getter
    def description(self) -> str:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="engineExpression")
    def engine_expression(self) -> str:
        return pulumi.get(self, "engine_expression")

    @property
    @pulumi.getter
    def id(self) -> int:
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="predefinedEngineName")
    def predefined_engine_name(self) -> str:
        return pulumi.get(self, "predefined_engine_name")
class AwaitableGetDLPEnginesResult(GetDLPEnginesResult):
    """Awaitable variant of GetDLPEnginesResult so the data source can be used
    from async code; awaiting it yields a plain GetDLPEnginesResult."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` makes this a generator function without
        # ever yielding, so `await` completes immediately with the result.
        if False:
            yield self
        return GetDLPEnginesResult(
            custom_dlp_engine=self.custom_dlp_engine,
            description=self.description,
            engine_expression=self.engine_expression,
            id=self.id,
            name=self.name,
            predefined_engine_name=self.predefined_engine_name)
def get_dlp_engines(name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDLPEnginesResult:
    """
    Use the **zia_dlp_engines** data source to get information about a ZIA DLP Engines in the Zscaler Internet Access cloud or via the API.

    :param str name: The DLP engine name as configured by the admin. This attribute is required in POST and PUT requests for custom DLP engines.
    """
    # Build the invoke arguments, merge caller options with the provider
    # defaults, and perform the synchronous invoke against the engine.
    invoke_args = {'name': name}
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    result = pulumi.runtime.invoke('zia:DLP/getDLPEngines:getDLPEngines', invoke_args, opts=merged_opts, typ=GetDLPEnginesResult).value

    # Repackage the raw result into the awaitable wrapper expected by callers.
    return AwaitableGetDLPEnginesResult(
        custom_dlp_engine=result.custom_dlp_engine,
        description=result.description,
        engine_expression=result.engine_expression,
        id=result.id,
        name=result.name,
        predefined_engine_name=result.predefined_engine_name)
@_utilities.lift_output_func(get_dlp_engines)
def get_dlp_engines_output(name: Optional[pulumi.Input[Optional[str]]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDLPEnginesResult]:
    """
    Use the **zia_dlp_engines** data source to get information about a ZIA DLP Engines in the Zscaler Internet Access cloud or via the API.

    :param str name: The DLP engine name as configured by the admin. This attribute is required in POST and PUT requests for custom DLP engines.
    """
    # Body intentionally empty: lift_output_func wraps get_dlp_engines so it
    # accepts pulumi Inputs and returns a pulumi Output.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDLPWebRulesResult',
'AwaitableGetDLPWebRulesResult',
'get_dlp_web_rules',
'get_dlp_web_rules_output',
]
@pulumi.output_type
class GetDLPWebRulesResult:
    """
    A collection of values returned by getDLPWebRules.
    """
    def __init__(__self__, access_control=None, action=None, auditors=None, cloud_applications=None, departments=None, description=None, dlp_engines=None, excluded_departments=None, excluded_groups=None, excluded_users=None, external_auditor_email=None, file_types=None, groups=None, icap_servers=None, id=None, labels=None, last_modified_bies=None, last_modified_time=None, location_groups=None, locations=None, match_only=None, min_size=None, name=None, notification_templates=None, ocr_enabled=None, order=None, protocols=None, rank=None, state=None, time_windows=None, url_categories=None, users=None, without_content_inspection=None, zscaler_incident_reciever=None):
        # Generated validation: each argument is type-checked and then stored
        # on the output object via pulumi.set.  Falsy values (None, "", 0,
        # False, []) skip the isinstance check entirely.
        if access_control and not isinstance(access_control, str):
            raise TypeError("Expected argument 'access_control' to be a str")
        pulumi.set(__self__, "access_control", access_control)
        if action and not isinstance(action, str):
            raise TypeError("Expected argument 'action' to be a str")
        pulumi.set(__self__, "action", action)
        if auditors and not isinstance(auditors, list):
            raise TypeError("Expected argument 'auditors' to be a list")
        pulumi.set(__self__, "auditors", auditors)
        if cloud_applications and not isinstance(cloud_applications, list):
            raise TypeError("Expected argument 'cloud_applications' to be a list")
        pulumi.set(__self__, "cloud_applications", cloud_applications)
        if departments and not isinstance(departments, list):
            raise TypeError("Expected argument 'departments' to be a list")
        pulumi.set(__self__, "departments", departments)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if dlp_engines and not isinstance(dlp_engines, list):
            raise TypeError("Expected argument 'dlp_engines' to be a list")
        pulumi.set(__self__, "dlp_engines", dlp_engines)
        if excluded_departments and not isinstance(excluded_departments, list):
            raise TypeError("Expected argument 'excluded_departments' to be a list")
        pulumi.set(__self__, "excluded_departments", excluded_departments)
        if excluded_groups and not isinstance(excluded_groups, list):
            raise TypeError("Expected argument 'excluded_groups' to be a list")
        pulumi.set(__self__, "excluded_groups", excluded_groups)
        if excluded_users and not isinstance(excluded_users, list):
            raise TypeError("Expected argument 'excluded_users' to be a list")
        pulumi.set(__self__, "excluded_users", excluded_users)
        if external_auditor_email and not isinstance(external_auditor_email, str):
            raise TypeError("Expected argument 'external_auditor_email' to be a str")
        pulumi.set(__self__, "external_auditor_email", external_auditor_email)
        if file_types and not isinstance(file_types, list):
            raise TypeError("Expected argument 'file_types' to be a list")
        pulumi.set(__self__, "file_types", file_types)
        if groups and not isinstance(groups, list):
            raise TypeError("Expected argument 'groups' to be a list")
        pulumi.set(__self__, "groups", groups)
        if icap_servers and not isinstance(icap_servers, list):
            raise TypeError("Expected argument 'icap_servers' to be a list")
        pulumi.set(__self__, "icap_servers", icap_servers)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if labels and not isinstance(labels, list):
            raise TypeError("Expected argument 'labels' to be a list")
        pulumi.set(__self__, "labels", labels)
        if last_modified_bies and not isinstance(last_modified_bies, list):
            raise TypeError("Expected argument 'last_modified_bies' to be a list")
        pulumi.set(__self__, "last_modified_bies", last_modified_bies)
        if last_modified_time and not isinstance(last_modified_time, int):
            raise TypeError("Expected argument 'last_modified_time' to be a int")
        pulumi.set(__self__, "last_modified_time", last_modified_time)
        if location_groups and not isinstance(location_groups, list):
            raise TypeError("Expected argument 'location_groups' to be a list")
        pulumi.set(__self__, "location_groups", location_groups)
        if locations and not isinstance(locations, list):
            raise TypeError("Expected argument 'locations' to be a list")
        pulumi.set(__self__, "locations", locations)
        if match_only and not isinstance(match_only, bool):
            raise TypeError("Expected argument 'match_only' to be a bool")
        pulumi.set(__self__, "match_only", match_only)
        if min_size and not isinstance(min_size, int):
            raise TypeError("Expected argument 'min_size' to be a int")
        pulumi.set(__self__, "min_size", min_size)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if notification_templates and not isinstance(notification_templates, list):
            raise TypeError("Expected argument 'notification_templates' to be a list")
        pulumi.set(__self__, "notification_templates", notification_templates)
        if ocr_enabled and not isinstance(ocr_enabled, bool):
            raise TypeError("Expected argument 'ocr_enabled' to be a bool")
        pulumi.set(__self__, "ocr_enabled", ocr_enabled)
        if order and not isinstance(order, int):
            raise TypeError("Expected argument 'order' to be a int")
        pulumi.set(__self__, "order", order)
        if protocols and not isinstance(protocols, list):
            raise TypeError("Expected argument 'protocols' to be a list")
        pulumi.set(__self__, "protocols", protocols)
        if rank and not isinstance(rank, int):
            raise TypeError("Expected argument 'rank' to be a int")
        pulumi.set(__self__, "rank", rank)
        if state and not isinstance(state, str):
            raise TypeError("Expected argument 'state' to be a str")
        pulumi.set(__self__, "state", state)
        if time_windows and not isinstance(time_windows, list):
            raise TypeError("Expected argument 'time_windows' to be a list")
        pulumi.set(__self__, "time_windows", time_windows)
        if url_categories and not isinstance(url_categories, list):
            raise TypeError("Expected argument 'url_categories' to be a list")
        pulumi.set(__self__, "url_categories", url_categories)
        if users and not isinstance(users, list):
            raise TypeError("Expected argument 'users' to be a list")
        pulumi.set(__self__, "users", users)
        if without_content_inspection and not isinstance(without_content_inspection, bool):
            raise TypeError("Expected argument 'without_content_inspection' to be a bool")
        pulumi.set(__self__, "without_content_inspection", without_content_inspection)
        # NOTE: "reciever" spelling is part of the generated wire/property
        # name and must not be corrected here.
        if zscaler_incident_reciever and not isinstance(zscaler_incident_reciever, bool):
            raise TypeError("Expected argument 'zscaler_incident_reciever' to be a bool")
        pulumi.set(__self__, "zscaler_incident_reciever", zscaler_incident_reciever)

    @property
    @pulumi.getter(name="accessControl")
    def access_control(self) -> str:
        return pulumi.get(self, "access_control")

    @property
    @pulumi.getter
    def action(self) -> str:
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def auditors(self) -> Sequence['outputs.GetDLPWebRulesAuditorResult']:
        return pulumi.get(self, "auditors")

    @property
    @pulumi.getter(name="cloudApplications")
    def cloud_applications(self) -> Sequence[str]:
        return pulumi.get(self, "cloud_applications")

    @property
    @pulumi.getter
    def departments(self) -> Sequence['outputs.GetDLPWebRulesDepartmentResult']:
        return pulumi.get(self, "departments")

    @property
    @pulumi.getter
    def description(self) -> str:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="dlpEngines")
    def dlp_engines(self) -> Sequence['outputs.GetDLPWebRulesDlpEngineResult']:
        return pulumi.get(self, "dlp_engines")

    @property
    @pulumi.getter(name="excludedDepartments")
    def excluded_departments(self) -> Sequence['outputs.GetDLPWebRulesExcludedDepartmentResult']:
        return pulumi.get(self, "excluded_departments")

    @property
    @pulumi.getter(name="excludedGroups")
    def excluded_groups(self) -> Sequence['outputs.GetDLPWebRulesExcludedGroupResult']:
        return pulumi.get(self, "excluded_groups")

    @property
    @pulumi.getter(name="excludedUsers")
    def excluded_users(self) -> Sequence['outputs.GetDLPWebRulesExcludedUserResult']:
        return pulumi.get(self, "excluded_users")

    @property
    @pulumi.getter(name="externalAuditorEmail")
    def external_auditor_email(self) -> str:
        return pulumi.get(self, "external_auditor_email")

    @property
    @pulumi.getter(name="fileTypes")
    def file_types(self) -> Sequence[str]:
        return pulumi.get(self, "file_types")

    @property
    @pulumi.getter
    def groups(self) -> Sequence['outputs.GetDLPWebRulesGroupResult']:
        return pulumi.get(self, "groups")

    @property
    @pulumi.getter(name="icapServers")
    def icap_servers(self) -> Sequence['outputs.GetDLPWebRulesIcapServerResult']:
        return pulumi.get(self, "icap_servers")

    @property
    @pulumi.getter
    def id(self) -> int:
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def labels(self) -> Sequence['outputs.GetDLPWebRulesLabelResult']:
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter(name="lastModifiedBies")
    def last_modified_bies(self) -> Sequence['outputs.GetDLPWebRulesLastModifiedByResult']:
        return pulumi.get(self, "last_modified_bies")

    @property
    @pulumi.getter(name="lastModifiedTime")
    def last_modified_time(self) -> int:
        return pulumi.get(self, "last_modified_time")

    @property
    @pulumi.getter(name="locationGroups")
    def location_groups(self) -> Sequence['outputs.GetDLPWebRulesLocationGroupResult']:
        return pulumi.get(self, "location_groups")

    @property
    @pulumi.getter
    def locations(self) -> Sequence['outputs.GetDLPWebRulesLocationResult']:
        return pulumi.get(self, "locations")

    @property
    @pulumi.getter(name="matchOnly")
    def match_only(self) -> bool:
        return pulumi.get(self, "match_only")

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> int:
        return pulumi.get(self, "min_size")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="notificationTemplates")
    def notification_templates(self) -> Sequence['outputs.GetDLPWebRulesNotificationTemplateResult']:
        return pulumi.get(self, "notification_templates")

    @property
    @pulumi.getter(name="ocrEnabled")
    def ocr_enabled(self) -> bool:
        return pulumi.get(self, "ocr_enabled")

    @property
    @pulumi.getter
    def order(self) -> int:
        return pulumi.get(self, "order")

    @property
    @pulumi.getter
    def protocols(self) -> Sequence[str]:
        return pulumi.get(self, "protocols")

    @property
    @pulumi.getter
    def rank(self) -> int:
        return pulumi.get(self, "rank")

    @property
    @pulumi.getter
    def state(self) -> str:
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="timeWindows")
    def time_windows(self) -> Sequence['outputs.GetDLPWebRulesTimeWindowResult']:
        return pulumi.get(self, "time_windows")

    @property
    @pulumi.getter(name="urlCategories")
    def url_categories(self) -> Sequence['outputs.GetDLPWebRulesUrlCategoryResult']:
        return pulumi.get(self, "url_categories")

    @property
    @pulumi.getter
    def users(self) -> Sequence['outputs.GetDLPWebRulesUserResult']:
        return pulumi.get(self, "users")

    @property
    @pulumi.getter(name="withoutContentInspection")
    def without_content_inspection(self) -> bool:
        return pulumi.get(self, "without_content_inspection")

    @property
    @pulumi.getter(name="zscalerIncidentReciever")
    def zscaler_incident_reciever(self) -> bool:
        return pulumi.get(self, "zscaler_incident_reciever")
class AwaitableGetDLPWebRulesResult(GetDLPWebRulesResult):
    """Awaitable variant of GetDLPWebRulesResult so the data source can be used
    from async code; awaiting it yields a plain GetDLPWebRulesResult."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` makes this a generator function without
        # ever yielding, so `await` completes immediately with the result.
        if False:
            yield self
        return GetDLPWebRulesResult(
            access_control=self.access_control,
            action=self.action,
            auditors=self.auditors,
            cloud_applications=self.cloud_applications,
            departments=self.departments,
            description=self.description,
            dlp_engines=self.dlp_engines,
            excluded_departments=self.excluded_departments,
            excluded_groups=self.excluded_groups,
            excluded_users=self.excluded_users,
            external_auditor_email=self.external_auditor_email,
            file_types=self.file_types,
            groups=self.groups,
            icap_servers=self.icap_servers,
            id=self.id,
            labels=self.labels,
            last_modified_bies=self.last_modified_bies,
            last_modified_time=self.last_modified_time,
            location_groups=self.location_groups,
            locations=self.locations,
            match_only=self.match_only,
            min_size=self.min_size,
            name=self.name,
            notification_templates=self.notification_templates,
            ocr_enabled=self.ocr_enabled,
            order=self.order,
            protocols=self.protocols,
            rank=self.rank,
            state=self.state,
            time_windows=self.time_windows,
            url_categories=self.url_categories,
            users=self.users,
            without_content_inspection=self.without_content_inspection,
            zscaler_incident_reciever=self.zscaler_incident_reciever)
def get_dlp_web_rules(id: Optional[int] = None,
                      name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDLPWebRulesResult:
    """
    Use the **zia_dlp_web_rules** data source to get information about a ZIA DLP Web Rules in the Zscaler Internet Access cloud or via the API.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_zia as zia

    example = zia.DLP.get_dlp_web_rules(name="Example")
    ```

    :param int id: Identifier that uniquely identifies an entity
    :param str name: The DLP policy rule name.
    :param pulumi.InvokeOptions opts: Options controlling how the invoke is performed.
    """
    __args__ = dict()
    __args__['id'] = id
    __args__['name'] = name
    # Merge provider-wide invoke defaults with any caller-supplied options.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the provider; the typed result is re-wrapped
    # in the awaitable subclass so callers may also `await` it.
    __ret__ = pulumi.runtime.invoke('zia:DLP/getDLPWebRules:getDLPWebRules', __args__, opts=opts, typ=GetDLPWebRulesResult).value

    return AwaitableGetDLPWebRulesResult(
        access_control=__ret__.access_control,
        action=__ret__.action,
        auditors=__ret__.auditors,
        cloud_applications=__ret__.cloud_applications,
        departments=__ret__.departments,
        description=__ret__.description,
        dlp_engines=__ret__.dlp_engines,
        excluded_departments=__ret__.excluded_departments,
        excluded_groups=__ret__.excluded_groups,
        excluded_users=__ret__.excluded_users,
        external_auditor_email=__ret__.external_auditor_email,
        file_types=__ret__.file_types,
        groups=__ret__.groups,
        icap_servers=__ret__.icap_servers,
        id=__ret__.id,
        labels=__ret__.labels,
        last_modified_bies=__ret__.last_modified_bies,
        last_modified_time=__ret__.last_modified_time,
        location_groups=__ret__.location_groups,
        locations=__ret__.locations,
        match_only=__ret__.match_only,
        min_size=__ret__.min_size,
        name=__ret__.name,
        notification_templates=__ret__.notification_templates,
        ocr_enabled=__ret__.ocr_enabled,
        order=__ret__.order,
        protocols=__ret__.protocols,
        rank=__ret__.rank,
        state=__ret__.state,
        time_windows=__ret__.time_windows,
        url_categories=__ret__.url_categories,
        users=__ret__.users,
        without_content_inspection=__ret__.without_content_inspection,
        zscaler_incident_reciever=__ret__.zscaler_incident_reciever)
@_utilities.lift_output_func(get_dlp_web_rules)
def get_dlp_web_rules_output(id: Optional[pulumi.Input[Optional[int]]] = None,
                             name: Optional[pulumi.Input[Optional[str]]] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDLPWebRulesResult]:
    """
    Use the **zia_dlp_web_rules** data source to get information about a ZIA DLP Web Rules in the Zscaler Internet Access cloud or via the API.

    Output-typed variant of ``get_dlp_web_rules``: accepts ``pulumi.Input``
    arguments and returns a ``pulumi.Output`` wrapping the result.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_zia as zia

    example = zia.DLP.get_dlp_web_rules(name="Example")
    ```

    :param int id: Identifier that uniquely identifies an entity
    :param str name: The DLP policy rule name.
    :param pulumi.InvokeOptions opts: Options controlling how the invoke is performed.
    """
    # Body intentionally empty: the lift_output_func decorator supplies the
    # implementation by delegating to get_dlp_web_rules.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DLPWebRulesArgs', 'DLPWebRules']
@pulumi.input_type
class DLPWebRulesArgs:
    """Input argument bag for constructing a ``DLPWebRules`` resource.

    Only ``order`` is required; every other argument is optional and is
    forwarded to the engine (via ``pulumi.set``) only when it is not ``None``.
    """
    def __init__(__self__, *,
                 order: pulumi.Input[int],
                 access_control: Optional[pulumi.Input[str]] = None,
                 action: Optional[pulumi.Input[str]] = None,
                 auditor: Optional[pulumi.Input['DLPWebRulesAuditorArgs']] = None,
                 cloud_applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 departments: Optional[pulumi.Input['DLPWebRulesDepartmentsArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 dlp_engines: Optional[pulumi.Input['DLPWebRulesDlpEnginesArgs']] = None,
                 excluded_departments: Optional[pulumi.Input['DLPWebRulesExcludedDepartmentsArgs']] = None,
                 excluded_groups: Optional[pulumi.Input['DLPWebRulesExcludedGroupsArgs']] = None,
                 excluded_users: Optional[pulumi.Input['DLPWebRulesExcludedUsersArgs']] = None,
                 external_auditor_email: Optional[pulumi.Input[str]] = None,
                 file_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 groups: Optional[pulumi.Input['DLPWebRulesGroupsArgs']] = None,
                 icap_server: Optional[pulumi.Input['DLPWebRulesIcapServerArgs']] = None,
                 labels: Optional[pulumi.Input['DLPWebRulesLabelsArgs']] = None,
                 location_groups: Optional[pulumi.Input['DLPWebRulesLocationGroupsArgs']] = None,
                 locations: Optional[pulumi.Input['DLPWebRulesLocationsArgs']] = None,
                 match_only: Optional[pulumi.Input[bool]] = None,
                 min_size: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 notification_template: Optional[pulumi.Input['DLPWebRulesNotificationTemplateArgs']] = None,
                 ocr_enabled: Optional[pulumi.Input[bool]] = None,
                 protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 rank: Optional[pulumi.Input[int]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 time_windows: Optional[pulumi.Input['DLPWebRulesTimeWindowsArgs']] = None,
                 url_categories: Optional[pulumi.Input['DLPWebRulesUrlCategoriesArgs']] = None,
                 users: Optional[pulumi.Input['DLPWebRulesUsersArgs']] = None,
                 without_content_inspection: Optional[pulumi.Input[bool]] = None,
                 zscaler_incident_reciever: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a DLPWebRules resource.
        :param pulumi.Input[int] order: The rule order of execution for the DLP policy rule with respect to other rules.
        :param pulumi.Input[str] access_control: The access privilege for this DLP policy rule based on the admin's state. The supported values are:
        :param pulumi.Input[str] action: The action taken when traffic matches the DLP policy rule criteria. The supported values are:
        :param pulumi.Input['DLPWebRulesAuditorArgs'] auditor: The auditor to which the DLP policy rule must be applied.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_applications: The list of cloud applications to which the DLP policy rule must be applied. For the complete list of supported cloud applications refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        :param pulumi.Input['DLPWebRulesDepartmentsArgs'] departments: The name-ID pairs of the departments that are excluded from the DLP policy rule.
        :param pulumi.Input[str] description: The description of the DLP policy rule.
        :param pulumi.Input['DLPWebRulesDlpEnginesArgs'] dlp_engines: The list of DLP engines to which the DLP policy rule must be applied.
        :param pulumi.Input['DLPWebRulesExcludedDepartmentsArgs'] excluded_departments: The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` departments.
        :param pulumi.Input['DLPWebRulesExcludedGroupsArgs'] excluded_groups: The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` groups.
        :param pulumi.Input['DLPWebRulesExcludedUsersArgs'] excluded_users: The name-ID pairs of the users that are excluded from the DLP policy rule. Maximum of up to `256` users.
        :param pulumi.Input[str] external_auditor_email: The email address of an external auditor to whom DLP email notifications are sent.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] file_types: The list of file types to which the DLP policy rule must be applied. For the complete list of supported file types refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        :param pulumi.Input['DLPWebRulesGroupsArgs'] groups: The Name-ID pairs of groups to which the DLP policy rule must be applied. Maximum of up to `8` groups. When not used it implies `Any` to apply the rule to all groups.
        :param pulumi.Input['DLPWebRulesIcapServerArgs'] icap_server: The DLP server, using ICAP, to which the transaction content is forwarded.
        :param pulumi.Input['DLPWebRulesLabelsArgs'] labels: The Name-ID pairs of rule labels associated to the DLP policy rule.
        :param pulumi.Input['DLPWebRulesLocationGroupsArgs'] location_groups: The Name-ID pairs of locations groups to which the DLP policy rule must be applied. Maximum of up to `32` location groups. When not used it implies `Any` to apply the rule to all location groups.
        :param pulumi.Input['DLPWebRulesLocationsArgs'] locations: The Name-ID pairs of locations to which the DLP policy rule must be applied. Maximum of up to `8` locations. When not used it implies `Any` to apply the rule to all locations.
        :param pulumi.Input[bool] match_only: The match only criteria for DLP engines.
        :param pulumi.Input[int] min_size: The minimum file size (in KB) used for evaluation of the DLP policy rule.
        :param pulumi.Input[str] name: The DLP policy rule name.
        :param pulumi.Input['DLPWebRulesNotificationTemplateArgs'] notification_template: The template used for DLP notification emails.
        :param pulumi.Input[bool] ocr_enabled: Enables or disables image file scanning.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] protocols: The protocol criteria specified for the DLP policy rule.
        :param pulumi.Input[int] rank: Admin rank of the admin who creates this rule
        :param pulumi.Input[str] state: Enables or disables the DLP policy rule.. The supported values are:
        :param pulumi.Input['DLPWebRulesTimeWindowsArgs'] time_windows: The Name-ID pairs of time windows to which the DLP policy rule must be applied. Maximum of up to `2` time intervals. When not used it implies `always` to apply the rule to all time intervals.
        :param pulumi.Input['DLPWebRulesUrlCategoriesArgs'] url_categories: The list of URL categories to which the DLP policy rule must be applied.
        :param pulumi.Input['DLPWebRulesUsersArgs'] users: The Name-ID pairs of users to which the DLP policy rule must be applied. Maximum of up to `4` users. When not used it implies `Any` to apply the rule to all users.
        :param pulumi.Input[bool] without_content_inspection: Indicates a DLP policy rule without content inspection, when the value is set to true.
        :param pulumi.Input[bool] zscaler_incident_reciever: Indicates whether a Zscaler Incident Receiver is associated to the DLP policy rule.
        """
        # `order` is the only required argument; it is always recorded.
        pulumi.set(__self__, "order", order)
        # Optional arguments are stored only when explicitly provided, so the
        # provider can distinguish "unset" from an explicit value.
        if access_control is not None:
            pulumi.set(__self__, "access_control", access_control)
        if action is not None:
            pulumi.set(__self__, "action", action)
        if auditor is not None:
            pulumi.set(__self__, "auditor", auditor)
        if cloud_applications is not None:
            pulumi.set(__self__, "cloud_applications", cloud_applications)
        if departments is not None:
            pulumi.set(__self__, "departments", departments)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if dlp_engines is not None:
            pulumi.set(__self__, "dlp_engines", dlp_engines)
        if excluded_departments is not None:
            pulumi.set(__self__, "excluded_departments", excluded_departments)
        if excluded_groups is not None:
            pulumi.set(__self__, "excluded_groups", excluded_groups)
        if excluded_users is not None:
            pulumi.set(__self__, "excluded_users", excluded_users)
        if external_auditor_email is not None:
            pulumi.set(__self__, "external_auditor_email", external_auditor_email)
        if file_types is not None:
            pulumi.set(__self__, "file_types", file_types)
        if groups is not None:
            pulumi.set(__self__, "groups", groups)
        if icap_server is not None:
            pulumi.set(__self__, "icap_server", icap_server)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if location_groups is not None:
            pulumi.set(__self__, "location_groups", location_groups)
        if locations is not None:
            pulumi.set(__self__, "locations", locations)
        if match_only is not None:
            pulumi.set(__self__, "match_only", match_only)
        if min_size is not None:
            pulumi.set(__self__, "min_size", min_size)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if notification_template is not None:
            pulumi.set(__self__, "notification_template", notification_template)
        if ocr_enabled is not None:
            pulumi.set(__self__, "ocr_enabled", ocr_enabled)
        if protocols is not None:
            pulumi.set(__self__, "protocols", protocols)
        if rank is not None:
            pulumi.set(__self__, "rank", rank)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if time_windows is not None:
            pulumi.set(__self__, "time_windows", time_windows)
        if url_categories is not None:
            pulumi.set(__self__, "url_categories", url_categories)
        if users is not None:
            pulumi.set(__self__, "users", users)
        if without_content_inspection is not None:
            pulumi.set(__self__, "without_content_inspection", without_content_inspection)
        if zscaler_incident_reciever is not None:
            pulumi.set(__self__, "zscaler_incident_reciever", zscaler_incident_reciever)

    @property
    @pulumi.getter
    def order(self) -> pulumi.Input[int]:
        """
        The rule order of execution for the DLP policy rule with respect to other rules.
        """
        return pulumi.get(self, "order")

    @order.setter
    def order(self, value: pulumi.Input[int]):
        pulumi.set(self, "order", value)

    @property
    @pulumi.getter(name="accessControl")
    def access_control(self) -> Optional[pulumi.Input[str]]:
        """
        The access privilege for this DLP policy rule based on the admin's state. The supported values are:
        """
        return pulumi.get(self, "access_control")

    @access_control.setter
    def access_control(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_control", value)

    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The action taken when traffic matches the DLP policy rule criteria. The supported values are:
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def auditor(self) -> Optional[pulumi.Input['DLPWebRulesAuditorArgs']]:
        """
        The auditor to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "auditor")

    @auditor.setter
    def auditor(self, value: Optional[pulumi.Input['DLPWebRulesAuditorArgs']]):
        pulumi.set(self, "auditor", value)

    @property
    @pulumi.getter(name="cloudApplications")
    def cloud_applications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of cloud applications to which the DLP policy rule must be applied. For the complete list of supported cloud applications refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        """
        return pulumi.get(self, "cloud_applications")

    @cloud_applications.setter
    def cloud_applications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cloud_applications", value)

    @property
    @pulumi.getter
    def departments(self) -> Optional[pulumi.Input['DLPWebRulesDepartmentsArgs']]:
        """
        The name-ID pairs of the departments that are excluded from the DLP policy rule.
        """
        return pulumi.get(self, "departments")

    @departments.setter
    def departments(self, value: Optional[pulumi.Input['DLPWebRulesDepartmentsArgs']]):
        pulumi.set(self, "departments", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the DLP policy rule.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="dlpEngines")
    def dlp_engines(self) -> Optional[pulumi.Input['DLPWebRulesDlpEnginesArgs']]:
        """
        The list of DLP engines to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "dlp_engines")

    @dlp_engines.setter
    def dlp_engines(self, value: Optional[pulumi.Input['DLPWebRulesDlpEnginesArgs']]):
        pulumi.set(self, "dlp_engines", value)

    @property
    @pulumi.getter(name="excludedDepartments")
    def excluded_departments(self) -> Optional[pulumi.Input['DLPWebRulesExcludedDepartmentsArgs']]:
        """
        The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` departments.
        """
        return pulumi.get(self, "excluded_departments")

    @excluded_departments.setter
    def excluded_departments(self, value: Optional[pulumi.Input['DLPWebRulesExcludedDepartmentsArgs']]):
        pulumi.set(self, "excluded_departments", value)

    @property
    @pulumi.getter(name="excludedGroups")
    def excluded_groups(self) -> Optional[pulumi.Input['DLPWebRulesExcludedGroupsArgs']]:
        """
        The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` groups.
        """
        return pulumi.get(self, "excluded_groups")

    @excluded_groups.setter
    def excluded_groups(self, value: Optional[pulumi.Input['DLPWebRulesExcludedGroupsArgs']]):
        pulumi.set(self, "excluded_groups", value)

    @property
    @pulumi.getter(name="excludedUsers")
    def excluded_users(self) -> Optional[pulumi.Input['DLPWebRulesExcludedUsersArgs']]:
        """
        The name-ID pairs of the users that are excluded from the DLP policy rule. Maximum of up to `256` users.
        """
        return pulumi.get(self, "excluded_users")

    @excluded_users.setter
    def excluded_users(self, value: Optional[pulumi.Input['DLPWebRulesExcludedUsersArgs']]):
        pulumi.set(self, "excluded_users", value)

    @property
    @pulumi.getter(name="externalAuditorEmail")
    def external_auditor_email(self) -> Optional[pulumi.Input[str]]:
        """
        The email address of an external auditor to whom DLP email notifications are sent.
        """
        return pulumi.get(self, "external_auditor_email")

    @external_auditor_email.setter
    def external_auditor_email(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_auditor_email", value)

    @property
    @pulumi.getter(name="fileTypes")
    def file_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of file types to which the DLP policy rule must be applied. For the complete list of supported file types refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        """
        return pulumi.get(self, "file_types")

    @file_types.setter
    def file_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "file_types", value)

    @property
    @pulumi.getter
    def groups(self) -> Optional[pulumi.Input['DLPWebRulesGroupsArgs']]:
        """
        The Name-ID pairs of groups to which the DLP policy rule must be applied. Maximum of up to `8` groups. When not used it implies `Any` to apply the rule to all groups.
        """
        return pulumi.get(self, "groups")

    @groups.setter
    def groups(self, value: Optional[pulumi.Input['DLPWebRulesGroupsArgs']]):
        pulumi.set(self, "groups", value)

    @property
    @pulumi.getter(name="icapServer")
    def icap_server(self) -> Optional[pulumi.Input['DLPWebRulesIcapServerArgs']]:
        """
        The DLP server, using ICAP, to which the transaction content is forwarded.
        """
        return pulumi.get(self, "icap_server")

    @icap_server.setter
    def icap_server(self, value: Optional[pulumi.Input['DLPWebRulesIcapServerArgs']]):
        pulumi.set(self, "icap_server", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input['DLPWebRulesLabelsArgs']]:
        """
        The Name-ID pairs of rule labels associated to the DLP policy rule.
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input['DLPWebRulesLabelsArgs']]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="locationGroups")
    def location_groups(self) -> Optional[pulumi.Input['DLPWebRulesLocationGroupsArgs']]:
        """
        The Name-ID pairs of locations groups to which the DLP policy rule must be applied. Maximum of up to `32` location groups. When not used it implies `Any` to apply the rule to all location groups.
        """
        return pulumi.get(self, "location_groups")

    @location_groups.setter
    def location_groups(self, value: Optional[pulumi.Input['DLPWebRulesLocationGroupsArgs']]):
        pulumi.set(self, "location_groups", value)

    @property
    @pulumi.getter
    def locations(self) -> Optional[pulumi.Input['DLPWebRulesLocationsArgs']]:
        """
        The Name-ID pairs of locations to which the DLP policy rule must be applied. Maximum of up to `8` locations. When not used it implies `Any` to apply the rule to all locations.
        """
        return pulumi.get(self, "locations")

    @locations.setter
    def locations(self, value: Optional[pulumi.Input['DLPWebRulesLocationsArgs']]):
        pulumi.set(self, "locations", value)

    @property
    @pulumi.getter(name="matchOnly")
    def match_only(self) -> Optional[pulumi.Input[bool]]:
        """
        The match only criteria for DLP engines.
        """
        return pulumi.get(self, "match_only")

    @match_only.setter
    def match_only(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "match_only", value)

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum file size (in KB) used for evaluation of the DLP policy rule.
        """
        return pulumi.get(self, "min_size")

    @min_size.setter
    def min_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_size", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP policy rule name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="notificationTemplate")
    def notification_template(self) -> Optional[pulumi.Input['DLPWebRulesNotificationTemplateArgs']]:
        """
        The template used for DLP notification emails.
        """
        return pulumi.get(self, "notification_template")

    @notification_template.setter
    def notification_template(self, value: Optional[pulumi.Input['DLPWebRulesNotificationTemplateArgs']]):
        pulumi.set(self, "notification_template", value)

    @property
    @pulumi.getter(name="ocrEnabled")
    def ocr_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables or disables image file scanning.
        """
        return pulumi.get(self, "ocr_enabled")

    @ocr_enabled.setter
    def ocr_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ocr_enabled", value)

    @property
    @pulumi.getter
    def protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The protocol criteria specified for the DLP policy rule.
        """
        return pulumi.get(self, "protocols")

    @protocols.setter
    def protocols(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "protocols", value)

    @property
    @pulumi.getter
    def rank(self) -> Optional[pulumi.Input[int]]:
        """
        Admin rank of the admin who creates this rule
        """
        return pulumi.get(self, "rank")

    @rank.setter
    def rank(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "rank", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        Enables or disables the DLP policy rule.. The supported values are:
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="timeWindows")
    def time_windows(self) -> Optional[pulumi.Input['DLPWebRulesTimeWindowsArgs']]:
        """
        The Name-ID pairs of time windows to which the DLP policy rule must be applied. Maximum of up to `2` time intervals. When not used it implies `always` to apply the rule to all time intervals.
        """
        return pulumi.get(self, "time_windows")

    @time_windows.setter
    def time_windows(self, value: Optional[pulumi.Input['DLPWebRulesTimeWindowsArgs']]):
        pulumi.set(self, "time_windows", value)

    @property
    @pulumi.getter(name="urlCategories")
    def url_categories(self) -> Optional[pulumi.Input['DLPWebRulesUrlCategoriesArgs']]:
        """
        The list of URL categories to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "url_categories")

    @url_categories.setter
    def url_categories(self, value: Optional[pulumi.Input['DLPWebRulesUrlCategoriesArgs']]):
        pulumi.set(self, "url_categories", value)

    @property
    @pulumi.getter
    def users(self) -> Optional[pulumi.Input['DLPWebRulesUsersArgs']]:
        """
        The Name-ID pairs of users to which the DLP policy rule must be applied. Maximum of up to `4` users. When not used it implies `Any` to apply the rule to all users.
        """
        return pulumi.get(self, "users")

    @users.setter
    def users(self, value: Optional[pulumi.Input['DLPWebRulesUsersArgs']]):
        pulumi.set(self, "users", value)

    @property
    @pulumi.getter(name="withoutContentInspection")
    def without_content_inspection(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates a DLP policy rule without content inspection, when the value is set to true.
        """
        return pulumi.get(self, "without_content_inspection")

    @without_content_inspection.setter
    def without_content_inspection(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "without_content_inspection", value)

    @property
    @pulumi.getter(name="zscalerIncidentReciever")
    def zscaler_incident_reciever(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether a Zscaler Incident Receiver is associated to the DLP policy rule.
        """
        # NOTE: the 'reciever' spelling mirrors the provider schema name
        # ``zscalerIncidentReciever`` and must not be "corrected" here.
        return pulumi.get(self, "zscaler_incident_reciever")

    @zscaler_incident_reciever.setter
    def zscaler_incident_reciever(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "zscaler_incident_reciever", value)
@pulumi.input_type
class _DLPWebRulesState:
    """State container for a ``DLPWebRules`` resource.

    Input properties used for looking up and filtering DLPWebRules
    resources. Every field is optional; only values explicitly supplied by
    the caller are recorded (see the ``is not None`` guards in ``__init__``),
    so Pulumi can distinguish "not provided" from an explicit value.

    NOTE: ``zscaler_incident_reciever`` intentionally preserves the
    'reciever' misspelling — it is part of the published provider schema.
    """
    def __init__(__self__, *,
                 access_control: Optional[pulumi.Input[str]] = None,
                 action: Optional[pulumi.Input[str]] = None,
                 auditor: Optional[pulumi.Input['DLPWebRulesAuditorArgs']] = None,
                 cloud_applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 departments: Optional[pulumi.Input['DLPWebRulesDepartmentsArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 dlp_engines: Optional[pulumi.Input['DLPWebRulesDlpEnginesArgs']] = None,
                 excluded_departments: Optional[pulumi.Input['DLPWebRulesExcludedDepartmentsArgs']] = None,
                 excluded_groups: Optional[pulumi.Input['DLPWebRulesExcludedGroupsArgs']] = None,
                 excluded_users: Optional[pulumi.Input['DLPWebRulesExcludedUsersArgs']] = None,
                 external_auditor_email: Optional[pulumi.Input[str]] = None,
                 file_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 groups: Optional[pulumi.Input['DLPWebRulesGroupsArgs']] = None,
                 icap_server: Optional[pulumi.Input['DLPWebRulesIcapServerArgs']] = None,
                 labels: Optional[pulumi.Input['DLPWebRulesLabelsArgs']] = None,
                 location_groups: Optional[pulumi.Input['DLPWebRulesLocationGroupsArgs']] = None,
                 locations: Optional[pulumi.Input['DLPWebRulesLocationsArgs']] = None,
                 match_only: Optional[pulumi.Input[bool]] = None,
                 min_size: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 notification_template: Optional[pulumi.Input['DLPWebRulesNotificationTemplateArgs']] = None,
                 ocr_enabled: Optional[pulumi.Input[bool]] = None,
                 order: Optional[pulumi.Input[int]] = None,
                 protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 rank: Optional[pulumi.Input[int]] = None,
                 rule_id: Optional[pulumi.Input[int]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 time_windows: Optional[pulumi.Input['DLPWebRulesTimeWindowsArgs']] = None,
                 url_categories: Optional[pulumi.Input['DLPWebRulesUrlCategoriesArgs']] = None,
                 users: Optional[pulumi.Input['DLPWebRulesUsersArgs']] = None,
                 without_content_inspection: Optional[pulumi.Input[bool]] = None,
                 zscaler_incident_reciever: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering DLPWebRules resources.
        :param pulumi.Input[str] access_control: The access privilege for this DLP policy rule based on the admin's state. The supported values are:
        :param pulumi.Input[str] action: The action taken when traffic matches the DLP policy rule criteria. The supported values are:
        :param pulumi.Input['DLPWebRulesAuditorArgs'] auditor: The auditor to which the DLP policy rule must be applied.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_applications: The list of cloud applications to which the DLP policy rule must be applied. For the complete list of supported cloud applications refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        :param pulumi.Input['DLPWebRulesDepartmentsArgs'] departments: The name-ID pairs of the departments that are excluded from the DLP policy rule.
        :param pulumi.Input[str] description: The description of the DLP policy rule.
        :param pulumi.Input['DLPWebRulesDlpEnginesArgs'] dlp_engines: The list of DLP engines to which the DLP policy rule must be applied.
        :param pulumi.Input['DLPWebRulesExcludedDepartmentsArgs'] excluded_departments: The name-ID pairs of the departments that are excluded from the DLP policy rule. Maximum of up to `256` departments.
        :param pulumi.Input['DLPWebRulesExcludedGroupsArgs'] excluded_groups: The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` groups.
        :param pulumi.Input['DLPWebRulesExcludedUsersArgs'] excluded_users: The name-ID pairs of the users that are excluded from the DLP policy rule. Maximum of up to `256` users.
        :param pulumi.Input[str] external_auditor_email: The email address of an external auditor to whom DLP email notifications are sent.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] file_types: The list of file types to which the DLP policy rule must be applied. For the complete list of supported file types refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        :param pulumi.Input['DLPWebRulesGroupsArgs'] groups: The Name-ID pairs of groups to which the DLP policy rule must be applied. Maximum of up to `8` groups. When not used it implies `Any` to apply the rule to all groups.
        :param pulumi.Input['DLPWebRulesIcapServerArgs'] icap_server: The DLP server, using ICAP, to which the transaction content is forwarded.
        :param pulumi.Input['DLPWebRulesLabelsArgs'] labels: The Name-ID pairs of rule labels associated to the DLP policy rule.
        :param pulumi.Input['DLPWebRulesLocationGroupsArgs'] location_groups: The Name-ID pairs of locations groups to which the DLP policy rule must be applied. Maximum of up to `32` location groups. When not used it implies `Any` to apply the rule to all location groups.
        :param pulumi.Input['DLPWebRulesLocationsArgs'] locations: The Name-ID pairs of locations to which the DLP policy rule must be applied. Maximum of up to `8` locations. When not used it implies `Any` to apply the rule to all locations.
        :param pulumi.Input[bool] match_only: The match only criteria for DLP engines.
        :param pulumi.Input[int] min_size: The minimum file size (in KB) used for evaluation of the DLP policy rule.
        :param pulumi.Input[str] name: The DLP policy rule name.
        :param pulumi.Input['DLPWebRulesNotificationTemplateArgs'] notification_template: The template used for DLP notification emails.
        :param pulumi.Input[bool] ocr_enabled: Enables or disables image file scanning.
        :param pulumi.Input[int] order: The rule order of execution for the DLP policy rule with respect to other rules.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] protocols: The protocol criteria specified for the DLP policy rule.
        :param pulumi.Input[int] rank: Admin rank of the admin who creates this rule
        :param pulumi.Input[int] rule_id: The ID of the DLP policy rule.
        :param pulumi.Input[str] state: Enables or disables the DLP policy rule. The supported values are:
        :param pulumi.Input['DLPWebRulesTimeWindowsArgs'] time_windows: The Name-ID pairs of time windows to which the DLP policy rule must be applied. Maximum of up to `2` time intervals. When not used it implies `always` to apply the rule to all time intervals.
        :param pulumi.Input['DLPWebRulesUrlCategoriesArgs'] url_categories: The list of URL categories to which the DLP policy rule must be applied.
        :param pulumi.Input['DLPWebRulesUsersArgs'] users: The Name-ID pairs of users to which the DLP policy rule must be applied. Maximum of up to `4` users. When not used it implies `Any` to apply the rule to all users.
        :param pulumi.Input[bool] without_content_inspection: Indicates a DLP policy rule without content inspection, when the value is set to true.
        :param pulumi.Input[bool] zscaler_incident_reciever: Indicates whether a Zscaler Incident Receiver is associated to the DLP policy rule.
        """
        # Record only explicitly-supplied values; omitted (None) arguments
        # are left unset in the Pulumi property bag.
        if access_control is not None:
            pulumi.set(__self__, "access_control", access_control)
        if action is not None:
            pulumi.set(__self__, "action", action)
        if auditor is not None:
            pulumi.set(__self__, "auditor", auditor)
        if cloud_applications is not None:
            pulumi.set(__self__, "cloud_applications", cloud_applications)
        if departments is not None:
            pulumi.set(__self__, "departments", departments)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if dlp_engines is not None:
            pulumi.set(__self__, "dlp_engines", dlp_engines)
        if excluded_departments is not None:
            pulumi.set(__self__, "excluded_departments", excluded_departments)
        if excluded_groups is not None:
            pulumi.set(__self__, "excluded_groups", excluded_groups)
        if excluded_users is not None:
            pulumi.set(__self__, "excluded_users", excluded_users)
        if external_auditor_email is not None:
            pulumi.set(__self__, "external_auditor_email", external_auditor_email)
        if file_types is not None:
            pulumi.set(__self__, "file_types", file_types)
        if groups is not None:
            pulumi.set(__self__, "groups", groups)
        if icap_server is not None:
            pulumi.set(__self__, "icap_server", icap_server)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if location_groups is not None:
            pulumi.set(__self__, "location_groups", location_groups)
        if locations is not None:
            pulumi.set(__self__, "locations", locations)
        if match_only is not None:
            pulumi.set(__self__, "match_only", match_only)
        if min_size is not None:
            pulumi.set(__self__, "min_size", min_size)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if notification_template is not None:
            pulumi.set(__self__, "notification_template", notification_template)
        if ocr_enabled is not None:
            pulumi.set(__self__, "ocr_enabled", ocr_enabled)
        if order is not None:
            pulumi.set(__self__, "order", order)
        if protocols is not None:
            pulumi.set(__self__, "protocols", protocols)
        if rank is not None:
            pulumi.set(__self__, "rank", rank)
        if rule_id is not None:
            pulumi.set(__self__, "rule_id", rule_id)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if time_windows is not None:
            pulumi.set(__self__, "time_windows", time_windows)
        if url_categories is not None:
            pulumi.set(__self__, "url_categories", url_categories)
        if users is not None:
            pulumi.set(__self__, "users", users)
        if without_content_inspection is not None:
            pulumi.set(__self__, "without_content_inspection", without_content_inspection)
        if zscaler_incident_reciever is not None:
            pulumi.set(__self__, "zscaler_incident_reciever", zscaler_incident_reciever)

    @property
    @pulumi.getter(name="accessControl")
    def access_control(self) -> Optional[pulumi.Input[str]]:
        """
        The access privilege for this DLP policy rule based on the admin's state. The supported values are:
        """
        return pulumi.get(self, "access_control")

    @access_control.setter
    def access_control(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_control", value)

    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The action taken when traffic matches the DLP policy rule criteria. The supported values are:
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def auditor(self) -> Optional[pulumi.Input['DLPWebRulesAuditorArgs']]:
        """
        The auditor to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "auditor")

    @auditor.setter
    def auditor(self, value: Optional[pulumi.Input['DLPWebRulesAuditorArgs']]):
        pulumi.set(self, "auditor", value)

    @property
    @pulumi.getter(name="cloudApplications")
    def cloud_applications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of cloud applications to which the DLP policy rule must be applied. For the complete list of supported cloud applications refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        """
        return pulumi.get(self, "cloud_applications")

    @cloud_applications.setter
    def cloud_applications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cloud_applications", value)

    @property
    @pulumi.getter
    def departments(self) -> Optional[pulumi.Input['DLPWebRulesDepartmentsArgs']]:
        """
        The name-ID pairs of the departments that are excluded from the DLP policy rule.
        """
        return pulumi.get(self, "departments")

    @departments.setter
    def departments(self, value: Optional[pulumi.Input['DLPWebRulesDepartmentsArgs']]):
        pulumi.set(self, "departments", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the DLP policy rule.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="dlpEngines")
    def dlp_engines(self) -> Optional[pulumi.Input['DLPWebRulesDlpEnginesArgs']]:
        """
        The list of DLP engines to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "dlp_engines")

    @dlp_engines.setter
    def dlp_engines(self, value: Optional[pulumi.Input['DLPWebRulesDlpEnginesArgs']]):
        pulumi.set(self, "dlp_engines", value)

    @property
    @pulumi.getter(name="excludedDepartments")
    def excluded_departments(self) -> Optional[pulumi.Input['DLPWebRulesExcludedDepartmentsArgs']]:
        """
        The name-ID pairs of the departments that are excluded from the DLP policy rule. Maximum of up to `256` departments.
        """
        return pulumi.get(self, "excluded_departments")

    @excluded_departments.setter
    def excluded_departments(self, value: Optional[pulumi.Input['DLPWebRulesExcludedDepartmentsArgs']]):
        pulumi.set(self, "excluded_departments", value)

    @property
    @pulumi.getter(name="excludedGroups")
    def excluded_groups(self) -> Optional[pulumi.Input['DLPWebRulesExcludedGroupsArgs']]:
        """
        The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` groups.
        """
        return pulumi.get(self, "excluded_groups")

    @excluded_groups.setter
    def excluded_groups(self, value: Optional[pulumi.Input['DLPWebRulesExcludedGroupsArgs']]):
        pulumi.set(self, "excluded_groups", value)

    @property
    @pulumi.getter(name="excludedUsers")
    def excluded_users(self) -> Optional[pulumi.Input['DLPWebRulesExcludedUsersArgs']]:
        """
        The name-ID pairs of the users that are excluded from the DLP policy rule. Maximum of up to `256` users.
        """
        return pulumi.get(self, "excluded_users")

    @excluded_users.setter
    def excluded_users(self, value: Optional[pulumi.Input['DLPWebRulesExcludedUsersArgs']]):
        pulumi.set(self, "excluded_users", value)

    @property
    @pulumi.getter(name="externalAuditorEmail")
    def external_auditor_email(self) -> Optional[pulumi.Input[str]]:
        """
        The email address of an external auditor to whom DLP email notifications are sent.
        """
        return pulumi.get(self, "external_auditor_email")

    @external_auditor_email.setter
    def external_auditor_email(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_auditor_email", value)

    @property
    @pulumi.getter(name="fileTypes")
    def file_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of file types to which the DLP policy rule must be applied. For the complete list of supported file types refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        """
        return pulumi.get(self, "file_types")

    @file_types.setter
    def file_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "file_types", value)

    @property
    @pulumi.getter
    def groups(self) -> Optional[pulumi.Input['DLPWebRulesGroupsArgs']]:
        """
        The Name-ID pairs of groups to which the DLP policy rule must be applied. Maximum of up to `8` groups. When not used it implies `Any` to apply the rule to all groups.
        """
        return pulumi.get(self, "groups")

    @groups.setter
    def groups(self, value: Optional[pulumi.Input['DLPWebRulesGroupsArgs']]):
        pulumi.set(self, "groups", value)

    @property
    @pulumi.getter(name="icapServer")
    def icap_server(self) -> Optional[pulumi.Input['DLPWebRulesIcapServerArgs']]:
        """
        The DLP server, using ICAP, to which the transaction content is forwarded.
        """
        return pulumi.get(self, "icap_server")

    @icap_server.setter
    def icap_server(self, value: Optional[pulumi.Input['DLPWebRulesIcapServerArgs']]):
        pulumi.set(self, "icap_server", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input['DLPWebRulesLabelsArgs']]:
        """
        The Name-ID pairs of rule labels associated to the DLP policy rule.
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input['DLPWebRulesLabelsArgs']]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="locationGroups")
    def location_groups(self) -> Optional[pulumi.Input['DLPWebRulesLocationGroupsArgs']]:
        """
        The Name-ID pairs of locations groups to which the DLP policy rule must be applied. Maximum of up to `32` location groups. When not used it implies `Any` to apply the rule to all location groups.
        """
        return pulumi.get(self, "location_groups")

    @location_groups.setter
    def location_groups(self, value: Optional[pulumi.Input['DLPWebRulesLocationGroupsArgs']]):
        pulumi.set(self, "location_groups", value)

    @property
    @pulumi.getter
    def locations(self) -> Optional[pulumi.Input['DLPWebRulesLocationsArgs']]:
        """
        The Name-ID pairs of locations to which the DLP policy rule must be applied. Maximum of up to `8` locations. When not used it implies `Any` to apply the rule to all locations.
        """
        return pulumi.get(self, "locations")

    @locations.setter
    def locations(self, value: Optional[pulumi.Input['DLPWebRulesLocationsArgs']]):
        pulumi.set(self, "locations", value)

    @property
    @pulumi.getter(name="matchOnly")
    def match_only(self) -> Optional[pulumi.Input[bool]]:
        """
        The match only criteria for DLP engines.
        """
        return pulumi.get(self, "match_only")

    @match_only.setter
    def match_only(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "match_only", value)

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum file size (in KB) used for evaluation of the DLP policy rule.
        """
        return pulumi.get(self, "min_size")

    @min_size.setter
    def min_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_size", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The DLP policy rule name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="notificationTemplate")
    def notification_template(self) -> Optional[pulumi.Input['DLPWebRulesNotificationTemplateArgs']]:
        """
        The template used for DLP notification emails.
        """
        return pulumi.get(self, "notification_template")

    @notification_template.setter
    def notification_template(self, value: Optional[pulumi.Input['DLPWebRulesNotificationTemplateArgs']]):
        pulumi.set(self, "notification_template", value)

    @property
    @pulumi.getter(name="ocrEnabled")
    def ocr_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables or disables image file scanning.
        """
        return pulumi.get(self, "ocr_enabled")

    @ocr_enabled.setter
    def ocr_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ocr_enabled", value)

    @property
    @pulumi.getter
    def order(self) -> Optional[pulumi.Input[int]]:
        """
        The rule order of execution for the DLP policy rule with respect to other rules.
        """
        return pulumi.get(self, "order")

    @order.setter
    def order(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "order", value)

    @property
    @pulumi.getter
    def protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The protocol criteria specified for the DLP policy rule.
        """
        return pulumi.get(self, "protocols")

    @protocols.setter
    def protocols(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "protocols", value)

    @property
    @pulumi.getter
    def rank(self) -> Optional[pulumi.Input[int]]:
        """
        Admin rank of the admin who creates this rule
        """
        return pulumi.get(self, "rank")

    @rank.setter
    def rank(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "rank", value)

    @property
    @pulumi.getter(name="ruleId")
    def rule_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the DLP policy rule.
        """
        return pulumi.get(self, "rule_id")

    @rule_id.setter
    def rule_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "rule_id", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        Enables or disables the DLP policy rule. The supported values are:
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="timeWindows")
    def time_windows(self) -> Optional[pulumi.Input['DLPWebRulesTimeWindowsArgs']]:
        """
        The Name-ID pairs of time windows to which the DLP policy rule must be applied. Maximum of up to `2` time intervals. When not used it implies `always` to apply the rule to all time intervals.
        """
        return pulumi.get(self, "time_windows")

    @time_windows.setter
    def time_windows(self, value: Optional[pulumi.Input['DLPWebRulesTimeWindowsArgs']]):
        pulumi.set(self, "time_windows", value)

    @property
    @pulumi.getter(name="urlCategories")
    def url_categories(self) -> Optional[pulumi.Input['DLPWebRulesUrlCategoriesArgs']]:
        """
        The list of URL categories to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "url_categories")

    @url_categories.setter
    def url_categories(self, value: Optional[pulumi.Input['DLPWebRulesUrlCategoriesArgs']]):
        pulumi.set(self, "url_categories", value)

    @property
    @pulumi.getter
    def users(self) -> Optional[pulumi.Input['DLPWebRulesUsersArgs']]:
        """
        The Name-ID pairs of users to which the DLP policy rule must be applied. Maximum of up to `4` users. When not used it implies `Any` to apply the rule to all users.
        """
        return pulumi.get(self, "users")

    @users.setter
    def users(self, value: Optional[pulumi.Input['DLPWebRulesUsersArgs']]):
        pulumi.set(self, "users", value)

    @property
    @pulumi.getter(name="withoutContentInspection")
    def without_content_inspection(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates a DLP policy rule without content inspection, when the value is set to true.
        """
        return pulumi.get(self, "without_content_inspection")

    @without_content_inspection.setter
    def without_content_inspection(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "without_content_inspection", value)

    @property
    @pulumi.getter(name="zscalerIncidentReciever")
    def zscaler_incident_reciever(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether a Zscaler Incident Receiver is associated to the DLP policy rule.
        """
        # 'reciever' misspelling is part of the published provider schema.
        return pulumi.get(self, "zscaler_incident_reciever")

    @zscaler_incident_reciever.setter
    def zscaler_incident_reciever(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "zscaler_incident_reciever", value)
class DLPWebRules(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_control: Optional[pulumi.Input[str]] = None,
action: Optional[pulumi.Input[str]] = None,
auditor: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesAuditorArgs']]] = None,
cloud_applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
departments: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesDepartmentsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dlp_engines: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesDlpEnginesArgs']]] = None,
excluded_departments: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedDepartmentsArgs']]] = None,
excluded_groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedGroupsArgs']]] = None,
excluded_users: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedUsersArgs']]] = None,
external_auditor_email: Optional[pulumi.Input[str]] = None,
file_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesGroupsArgs']]] = None,
icap_server: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesIcapServerArgs']]] = None,
labels: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLabelsArgs']]] = None,
location_groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLocationGroupsArgs']]] = None,
locations: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLocationsArgs']]] = None,
match_only: Optional[pulumi.Input[bool]] = None,
min_size: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_template: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesNotificationTemplateArgs']]] = None,
ocr_enabled: Optional[pulumi.Input[bool]] = None,
order: Optional[pulumi.Input[int]] = None,
protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rank: Optional[pulumi.Input[int]] = None,
state: Optional[pulumi.Input[str]] = None,
time_windows: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesTimeWindowsArgs']]] = None,
url_categories: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesUrlCategoriesArgs']]] = None,
users: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesUsersArgs']]] = None,
without_content_inspection: Optional[pulumi.Input[bool]] = None,
zscaler_incident_reciever: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
The **zia_dlp_web_rules** resource allows the creation and management of ZIA DLP Web Rules in the Zscaler Internet Access cloud or via the API.
## Example Usage
```python
import pulumi
import zscaler_pulumi_zia as zia
test = zia.dlp.DLPWebRules("test",
action="ALLOW",
cloud_applications=[
"ZENDESK",
"LUCKY_ORANGE",
"MICROSOFT_POWERAPPS",
"MICROSOFTLIVEMEETING",
],
description="Test",
file_types=[],
match_only=False,
min_size=20,
ocr_enabled=False,
order=1,
protocols=[
"HTTPS_RULE",
"HTTP_RULE",
],
rank=7,
state="ENABLED",
without_content_inspection=False,
zscaler_incident_reciever=True)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_control: The access privilege for this DLP policy rule based on the admin's state. The supported values are:
:param pulumi.Input[str] action: The action taken when traffic matches the DLP policy rule criteria. The supported values are:
:param pulumi.Input[pulumi.InputType['DLPWebRulesAuditorArgs']] auditor: The auditor to which the DLP policy rule must be applied.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_applications: The list of cloud applications to which the DLP policy rule must be applied. For the complete list of supported cloud applications refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
:param pulumi.Input[pulumi.InputType['DLPWebRulesDepartmentsArgs']] departments: The name-ID pairs of the departments that are excluded from the DLP policy rule.
:param pulumi.Input[str] description: The description of the DLP policy rule.
:param pulumi.Input[pulumi.InputType['DLPWebRulesDlpEnginesArgs']] dlp_engines: The list of DLP engines to which the DLP policy rule must be applied.
:param pulumi.Input[pulumi.InputType['DLPWebRulesExcludedDepartmentsArgs']] excluded_departments: The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` departments.
:param pulumi.Input[pulumi.InputType['DLPWebRulesExcludedGroupsArgs']] excluded_groups: The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` groups.
:param pulumi.Input[pulumi.InputType['DLPWebRulesExcludedUsersArgs']] excluded_users: The name-ID pairs of the users that are excluded from the DLP policy rule. Maximum of up to `256` users.
:param pulumi.Input[str] external_auditor_email: The email address of an external auditor to whom DLP email notifications are sent.
:param pulumi.Input[Sequence[pulumi.Input[str]]] file_types: The list of file types to which the DLP policy rule must be applied. For the complete list of supported file types refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
:param pulumi.Input[pulumi.InputType['DLPWebRulesGroupsArgs']] groups: The Name-ID pairs of groups to which the DLP policy rule must be applied. Maximum of up to `8` groups. When not used it implies `Any` to apply the rule to all groups.
:param pulumi.Input[pulumi.InputType['DLPWebRulesIcapServerArgs']] icap_server: The DLP server, using ICAP, to which the transaction content is forwarded.
:param pulumi.Input[pulumi.InputType['DLPWebRulesLabelsArgs']] labels: The Name-ID pairs of rule labels associated to the DLP policy rule.
:param pulumi.Input[pulumi.InputType['DLPWebRulesLocationGroupsArgs']] location_groups: The Name-ID pairs of locations groups to which the DLP policy rule must be applied. Maximum of up to `32` location groups. When not used it implies `Any` to apply the rule to all location groups.
:param pulumi.Input[pulumi.InputType['DLPWebRulesLocationsArgs']] locations: The Name-ID pairs of locations to which the DLP policy rule must be applied. Maximum of up to `8` locations. When not used it implies `Any` to apply the rule to all locations.
:param pulumi.Input[bool] match_only: The match only criteria for DLP engines.
:param pulumi.Input[int] min_size: The minimum file size (in KB) used for evaluation of the DLP policy rule.
:param pulumi.Input[str] name: The DLP policy rule name.
:param pulumi.Input[pulumi.InputType['DLPWebRulesNotificationTemplateArgs']] notification_template: The template used for DLP notification emails.
:param pulumi.Input[bool] ocr_enabled: Enables or disables image file scanning.
:param pulumi.Input[int] order: The rule order of execution for the DLP policy rule with respect to other rules.
:param pulumi.Input[Sequence[pulumi.Input[str]]] protocols: The protocol criteria specified for the DLP policy rule.
:param pulumi.Input[int] rank: Admin rank of the admin who creates this rule
:param pulumi.Input[str] state: Enables or disables the DLP policy rule.. The supported values are:
:param pulumi.Input[pulumi.InputType['DLPWebRulesTimeWindowsArgs']] time_windows: The Name-ID pairs of time windows to which the DLP policy rule must be applied. Maximum of up to `2` time intervals. When not used it implies `always` to apply the rule to all time intervals.
:param pulumi.Input[pulumi.InputType['DLPWebRulesUrlCategoriesArgs']] url_categories: The list of URL categories to which the DLP policy rule must be applied.
:param pulumi.Input[pulumi.InputType['DLPWebRulesUsersArgs']] users: The Name-ID pairs of users to which the DLP policy rule must be applied. Maximum of up to `4` users. When not used it implies `Any` to apply the rule to all users.
:param pulumi.Input[bool] without_content_inspection: Indicates a DLP policy rule without content inspection, when the value is set to true.
:param pulumi.Input[bool] zscaler_incident_reciever: Indicates whether a Zscaler Incident Receiver is associated to the DLP policy rule.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DLPWebRulesArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The **zia_dlp_web_rules** resource allows the creation and management of ZIA DLP Web Rules in the Zscaler Internet Access cloud or via the API.
## Example Usage
```python
import pulumi
import zscaler_pulumi_zia as zia
test = zia.dlp.DLPWebRules("test",
action="ALLOW",
cloud_applications=[
"ZENDESK",
"LUCKY_ORANGE",
"MICROSOFT_POWERAPPS",
"MICROSOFTLIVEMEETING",
],
description="Test",
file_types=[],
match_only=False,
min_size=20,
ocr_enabled=False,
order=1,
protocols=[
"HTTPS_RULE",
"HTTP_RULE",
],
rank=7,
state="ENABLED",
without_content_inspection=False,
zscaler_incident_reciever=True)
```
:param str resource_name: The name of the resource.
:param DLPWebRulesArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DLPWebRulesArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_control: Optional[pulumi.Input[str]] = None,
action: Optional[pulumi.Input[str]] = None,
auditor: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesAuditorArgs']]] = None,
cloud_applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
departments: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesDepartmentsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dlp_engines: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesDlpEnginesArgs']]] = None,
excluded_departments: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedDepartmentsArgs']]] = None,
excluded_groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedGroupsArgs']]] = None,
excluded_users: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedUsersArgs']]] = None,
external_auditor_email: Optional[pulumi.Input[str]] = None,
file_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesGroupsArgs']]] = None,
icap_server: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesIcapServerArgs']]] = None,
labels: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLabelsArgs']]] = None,
location_groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLocationGroupsArgs']]] = None,
locations: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLocationsArgs']]] = None,
match_only: Optional[pulumi.Input[bool]] = None,
min_size: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_template: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesNotificationTemplateArgs']]] = None,
ocr_enabled: Optional[pulumi.Input[bool]] = None,
order: Optional[pulumi.Input[int]] = None,
protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rank: Optional[pulumi.Input[int]] = None,
state: Optional[pulumi.Input[str]] = None,
time_windows: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesTimeWindowsArgs']]] = None,
url_categories: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesUrlCategoriesArgs']]] = None,
users: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesUsersArgs']]] = None,
without_content_inspection: Optional[pulumi.Input[bool]] = None,
zscaler_incident_reciever: Optional[pulumi.Input[bool]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DLPWebRulesArgs.__new__(DLPWebRulesArgs)
__props__.__dict__["access_control"] = access_control
__props__.__dict__["action"] = action
__props__.__dict__["auditor"] = auditor
__props__.__dict__["cloud_applications"] = cloud_applications
__props__.__dict__["departments"] = departments
__props__.__dict__["description"] = description
__props__.__dict__["dlp_engines"] = dlp_engines
__props__.__dict__["excluded_departments"] = excluded_departments
__props__.__dict__["excluded_groups"] = excluded_groups
__props__.__dict__["excluded_users"] = excluded_users
__props__.__dict__["external_auditor_email"] = external_auditor_email
__props__.__dict__["file_types"] = file_types
__props__.__dict__["groups"] = groups
__props__.__dict__["icap_server"] = icap_server
__props__.__dict__["labels"] = labels
__props__.__dict__["location_groups"] = location_groups
__props__.__dict__["locations"] = locations
__props__.__dict__["match_only"] = match_only
__props__.__dict__["min_size"] = min_size
__props__.__dict__["name"] = name
__props__.__dict__["notification_template"] = notification_template
__props__.__dict__["ocr_enabled"] = ocr_enabled
if order is None and not opts.urn:
raise TypeError("Missing required property 'order'")
__props__.__dict__["order"] = order
__props__.__dict__["protocols"] = protocols
__props__.__dict__["rank"] = rank
__props__.__dict__["state"] = state
__props__.__dict__["time_windows"] = time_windows
__props__.__dict__["url_categories"] = url_categories
__props__.__dict__["users"] = users
__props__.__dict__["without_content_inspection"] = without_content_inspection
__props__.__dict__["zscaler_incident_reciever"] = zscaler_incident_reciever
__props__.__dict__["rule_id"] = None
super(DLPWebRules, __self__).__init__(
'zia:DLP/dLPWebRules:DLPWebRules',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_control: Optional[pulumi.Input[str]] = None,
action: Optional[pulumi.Input[str]] = None,
auditor: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesAuditorArgs']]] = None,
cloud_applications: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
departments: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesDepartmentsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dlp_engines: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesDlpEnginesArgs']]] = None,
excluded_departments: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedDepartmentsArgs']]] = None,
excluded_groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedGroupsArgs']]] = None,
excluded_users: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesExcludedUsersArgs']]] = None,
external_auditor_email: Optional[pulumi.Input[str]] = None,
file_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesGroupsArgs']]] = None,
icap_server: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesIcapServerArgs']]] = None,
labels: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLabelsArgs']]] = None,
location_groups: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLocationGroupsArgs']]] = None,
locations: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesLocationsArgs']]] = None,
match_only: Optional[pulumi.Input[bool]] = None,
min_size: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_template: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesNotificationTemplateArgs']]] = None,
ocr_enabled: Optional[pulumi.Input[bool]] = None,
order: Optional[pulumi.Input[int]] = None,
protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rank: Optional[pulumi.Input[int]] = None,
rule_id: Optional[pulumi.Input[int]] = None,
state: Optional[pulumi.Input[str]] = None,
time_windows: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesTimeWindowsArgs']]] = None,
url_categories: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesUrlCategoriesArgs']]] = None,
users: Optional[pulumi.Input[pulumi.InputType['DLPWebRulesUsersArgs']]] = None,
without_content_inspection: Optional[pulumi.Input[bool]] = None,
zscaler_incident_reciever: Optional[pulumi.Input[bool]] = None) -> 'DLPWebRules':
"""
Get an existing DLPWebRules resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_control: The access privilege for this DLP policy rule based on the admin's state. The supported values are:
:param pulumi.Input[str] action: The action taken when traffic matches the DLP policy rule criteria. The supported values are:
:param pulumi.Input[pulumi.InputType['DLPWebRulesAuditorArgs']] auditor: The auditor to which the DLP policy rule must be applied.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_applications: The list of cloud applications to which the DLP policy rule must be applied. For the complete list of supported cloud applications refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
:param pulumi.Input[pulumi.InputType['DLPWebRulesDepartmentsArgs']] departments: The name-ID pairs of the departments that are excluded from the DLP policy rule.
:param pulumi.Input[str] description: The description of the DLP policy rule.
:param pulumi.Input[pulumi.InputType['DLPWebRulesDlpEnginesArgs']] dlp_engines: The list of DLP engines to which the DLP policy rule must be applied.
:param pulumi.Input[pulumi.InputType['DLPWebRulesExcludedDepartmentsArgs']] excluded_departments: The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` departments.
:param pulumi.Input[pulumi.InputType['DLPWebRulesExcludedGroupsArgs']] excluded_groups: The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` groups.
:param pulumi.Input[pulumi.InputType['DLPWebRulesExcludedUsersArgs']] excluded_users: The name-ID pairs of the users that are excluded from the DLP policy rule. Maximum of up to `256` users.
:param pulumi.Input[str] external_auditor_email: The email address of an external auditor to whom DLP email notifications are sent.
:param pulumi.Input[Sequence[pulumi.Input[str]]] file_types: The list of file types to which the DLP policy rule must be applied. For the complete list of supported file types refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
:param pulumi.Input[pulumi.InputType['DLPWebRulesGroupsArgs']] groups: The Name-ID pairs of groups to which the DLP policy rule must be applied. Maximum of up to `8` groups. When not used it implies `Any` to apply the rule to all groups.
:param pulumi.Input[pulumi.InputType['DLPWebRulesIcapServerArgs']] icap_server: The DLP server, using ICAP, to which the transaction content is forwarded.
:param pulumi.Input[pulumi.InputType['DLPWebRulesLabelsArgs']] labels: The Name-ID pairs of rule labels associated to the DLP policy rule.
:param pulumi.Input[pulumi.InputType['DLPWebRulesLocationGroupsArgs']] location_groups: The Name-ID pairs of locations groups to which the DLP policy rule must be applied. Maximum of up to `32` location groups. When not used it implies `Any` to apply the rule to all location groups.
:param pulumi.Input[pulumi.InputType['DLPWebRulesLocationsArgs']] locations: The Name-ID pairs of locations to which the DLP policy rule must be applied. Maximum of up to `8` locations. When not used it implies `Any` to apply the rule to all locations.
:param pulumi.Input[bool] match_only: The match only criteria for DLP engines.
:param pulumi.Input[int] min_size: The minimum file size (in KB) used for evaluation of the DLP policy rule.
:param pulumi.Input[str] name: The DLP policy rule name.
:param pulumi.Input[pulumi.InputType['DLPWebRulesNotificationTemplateArgs']] notification_template: The template used for DLP notification emails.
:param pulumi.Input[bool] ocr_enabled: Enables or disables image file scanning.
:param pulumi.Input[int] order: The rule order of execution for the DLP policy rule with respect to other rules.
:param pulumi.Input[Sequence[pulumi.Input[str]]] protocols: The protocol criteria specified for the DLP policy rule.
:param pulumi.Input[int] rank: Admin rank of the admin who creates this rule
:param pulumi.Input[str] state: Enables or disables the DLP policy rule.. The supported values are:
:param pulumi.Input[pulumi.InputType['DLPWebRulesTimeWindowsArgs']] time_windows: The Name-ID pairs of time windows to which the DLP policy rule must be applied. Maximum of up to `2` time intervals. When not used it implies `always` to apply the rule to all time intervals.
:param pulumi.Input[pulumi.InputType['DLPWebRulesUrlCategoriesArgs']] url_categories: The list of URL categories to which the DLP policy rule must be applied.
:param pulumi.Input[pulumi.InputType['DLPWebRulesUsersArgs']] users: The Name-ID pairs of users to which the DLP policy rule must be applied. Maximum of up to `4` users. When not used it implies `Any` to apply the rule to all users.
:param pulumi.Input[bool] without_content_inspection: Indicates a DLP policy rule without content inspection, when the value is set to true.
:param pulumi.Input[bool] zscaler_incident_reciever: Indicates whether a Zscaler Incident Receiver is associated to the DLP policy rule.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DLPWebRulesState.__new__(_DLPWebRulesState)
__props__.__dict__["access_control"] = access_control
__props__.__dict__["action"] = action
__props__.__dict__["auditor"] = auditor
__props__.__dict__["cloud_applications"] = cloud_applications
__props__.__dict__["departments"] = departments
__props__.__dict__["description"] = description
__props__.__dict__["dlp_engines"] = dlp_engines
__props__.__dict__["excluded_departments"] = excluded_departments
__props__.__dict__["excluded_groups"] = excluded_groups
__props__.__dict__["excluded_users"] = excluded_users
__props__.__dict__["external_auditor_email"] = external_auditor_email
__props__.__dict__["file_types"] = file_types
__props__.__dict__["groups"] = groups
__props__.__dict__["icap_server"] = icap_server
__props__.__dict__["labels"] = labels
__props__.__dict__["location_groups"] = location_groups
__props__.__dict__["locations"] = locations
__props__.__dict__["match_only"] = match_only
__props__.__dict__["min_size"] = min_size
__props__.__dict__["name"] = name
__props__.__dict__["notification_template"] = notification_template
__props__.__dict__["ocr_enabled"] = ocr_enabled
__props__.__dict__["order"] = order
__props__.__dict__["protocols"] = protocols
__props__.__dict__["rank"] = rank
__props__.__dict__["rule_id"] = rule_id
__props__.__dict__["state"] = state
__props__.__dict__["time_windows"] = time_windows
__props__.__dict__["url_categories"] = url_categories
__props__.__dict__["users"] = users
__props__.__dict__["without_content_inspection"] = without_content_inspection
__props__.__dict__["zscaler_incident_reciever"] = zscaler_incident_reciever
return DLPWebRules(resource_name, opts=opts, __props__=__props__)
    # --- Output property accessors (one per state field) ---
    @property
    @pulumi.getter(name="accessControl")
    def access_control(self) -> pulumi.Output[str]:
        """
        The access privilege for this DLP policy rule based on the admin's state. The supported values are:
        """
        return pulumi.get(self, "access_control")
    @property
    @pulumi.getter
    def action(self) -> pulumi.Output[str]:
        """
        The action taken when traffic matches the DLP policy rule criteria. The supported values are:
        """
        return pulumi.get(self, "action")
    @property
    @pulumi.getter
    def auditor(self) -> pulumi.Output['outputs.DLPWebRulesAuditor']:
        """
        The auditor to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "auditor")
    @property
    @pulumi.getter(name="cloudApplications")
    def cloud_applications(self) -> pulumi.Output[Sequence[str]]:
        """
        The list of cloud applications to which the DLP policy rule must be applied. For the complete list of supported cloud applications refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        """
        return pulumi.get(self, "cloud_applications")
    @property
    @pulumi.getter
    def departments(self) -> pulumi.Output['outputs.DLPWebRulesDepartments']:
        """
        The name-ID pairs of the departments that are excluded from the DLP policy rule.
        """
        return pulumi.get(self, "departments")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        The description of the DLP policy rule.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="dlpEngines")
    def dlp_engines(self) -> pulumi.Output['outputs.DLPWebRulesDlpEngines']:
        """
        The list of DLP engines to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "dlp_engines")
    @property
    @pulumi.getter(name="excludedDepartments")
    def excluded_departments(self) -> pulumi.Output['outputs.DLPWebRulesExcludedDepartments']:
        """
        The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` departments.
        """
        return pulumi.get(self, "excluded_departments")
    @property
    @pulumi.getter(name="excludedGroups")
    def excluded_groups(self) -> pulumi.Output['outputs.DLPWebRulesExcludedGroups']:
        """
        The name-ID pairs of the groups that are excluded from the DLP policy rule. Maximum of up to `256` groups.
        """
        return pulumi.get(self, "excluded_groups")
    @property
    @pulumi.getter(name="excludedUsers")
    def excluded_users(self) -> pulumi.Output['outputs.DLPWebRulesExcludedUsers']:
        """
        The name-ID pairs of the users that are excluded from the DLP policy rule. Maximum of up to `256` users.
        """
        return pulumi.get(self, "excluded_users")
    @property
    @pulumi.getter(name="externalAuditorEmail")
    def external_auditor_email(self) -> pulumi.Output[str]:
        """
        The email address of an external auditor to whom DLP email notifications are sent.
        """
        return pulumi.get(self, "external_auditor_email")
    @property
    @pulumi.getter(name="fileTypes")
    def file_types(self) -> pulumi.Output[Sequence[str]]:
        """
        The list of file types to which the DLP policy rule must be applied. For the complete list of supported file types refer to the [ZIA API documentation](https://help.zscaler.com/zia/data-loss-prevention#/webDlpRules-post)
        """
        return pulumi.get(self, "file_types")
    @property
    @pulumi.getter
    def groups(self) -> pulumi.Output['outputs.DLPWebRulesGroups']:
        """
        The Name-ID pairs of groups to which the DLP policy rule must be applied. Maximum of up to `8` groups. When not used it implies `Any` to apply the rule to all groups.
        """
        return pulumi.get(self, "groups")
    @property
    @pulumi.getter(name="icapServer")
    def icap_server(self) -> pulumi.Output['outputs.DLPWebRulesIcapServer']:
        """
        The DLP server, using ICAP, to which the transaction content is forwarded.
        """
        return pulumi.get(self, "icap_server")
    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output['outputs.DLPWebRulesLabels']:
        """
        The Name-ID pairs of rule labels associated to the DLP policy rule.
        """
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter(name="locationGroups")
    def location_groups(self) -> pulumi.Output['outputs.DLPWebRulesLocationGroups']:
        """
        The Name-ID pairs of locations groups to which the DLP policy rule must be applied. Maximum of up to `32` location groups. When not used it implies `Any` to apply the rule to all location groups.
        """
        return pulumi.get(self, "location_groups")
    @property
    @pulumi.getter
    def locations(self) -> pulumi.Output['outputs.DLPWebRulesLocations']:
        """
        The Name-ID pairs of locations to which the DLP policy rule must be applied. Maximum of up to `8` locations. When not used it implies `Any` to apply the rule to all locations.
        """
        return pulumi.get(self, "locations")
    @property
    @pulumi.getter(name="matchOnly")
    def match_only(self) -> pulumi.Output[bool]:
        """
        The match only criteria for DLP engines.
        """
        return pulumi.get(self, "match_only")
    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> pulumi.Output[int]:
        """
        The minimum file size (in KB) used for evaluation of the DLP policy rule.
        """
        return pulumi.get(self, "min_size")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The DLP policy rule name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="notificationTemplate")
    def notification_template(self) -> pulumi.Output['outputs.DLPWebRulesNotificationTemplate']:
        """
        The template used for DLP notification emails.
        """
        return pulumi.get(self, "notification_template")
    @property
    @pulumi.getter(name="ocrEnabled")
    def ocr_enabled(self) -> pulumi.Output[bool]:
        """
        Enables or disables image file scanning.
        """
        return pulumi.get(self, "ocr_enabled")
    @property
    @pulumi.getter
    def order(self) -> pulumi.Output[int]:
        """
        The rule order of execution for the DLP policy rule with respect to other rules.
        """
        return pulumi.get(self, "order")
    @property
    @pulumi.getter
    def protocols(self) -> pulumi.Output[Sequence[str]]:
        """
        The protocol criteria specified for the DLP policy rule.
        """
        return pulumi.get(self, "protocols")
    @property
    @pulumi.getter
    def rank(self) -> pulumi.Output[Optional[int]]:
        """
        Admin rank of the admin who creates this rule
        """
        return pulumi.get(self, "rank")
    @property
    @pulumi.getter(name="ruleId")
    def rule_id(self) -> pulumi.Output[int]:
        """
        Identifier of the DLP policy rule (output-only; `_internal_init` always
        creates it as ``None``, so it is never settable at creation time).
        """
        return pulumi.get(self, "rule_id")
    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        Enables or disables the DLP policy rule.. The supported values are:
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="timeWindows")
    def time_windows(self) -> pulumi.Output['outputs.DLPWebRulesTimeWindows']:
        """
        The Name-ID pairs of time windows to which the DLP policy rule must be applied. Maximum of up to `2` time intervals. When not used it implies `always` to apply the rule to all time intervals.
        """
        return pulumi.get(self, "time_windows")
    @property
    @pulumi.getter(name="urlCategories")
    def url_categories(self) -> pulumi.Output['outputs.DLPWebRulesUrlCategories']:
        """
        The list of URL categories to which the DLP policy rule must be applied.
        """
        return pulumi.get(self, "url_categories")
    @property
    @pulumi.getter
    def users(self) -> pulumi.Output['outputs.DLPWebRulesUsers']:
        """
        The Name-ID pairs of users to which the DLP policy rule must be applied. Maximum of up to `4` users. When not used it implies `Any` to apply the rule to all users.
        """
        return pulumi.get(self, "users")
    @property
    @pulumi.getter(name="withoutContentInspection")
    def without_content_inspection(self) -> pulumi.Output[bool]:
        """
        Indicates a DLP policy rule without content inspection, when the value is set to true.
        """
        return pulumi.get(self, "without_content_inspection")
    @property
    @pulumi.getter(name="zscalerIncidentReciever")
    def zscaler_incident_reciever(self) -> pulumi.Output[bool]:
        """
        Indicates whether a Zscaler Incident Receiver is associated to the DLP policy rule.
        """
        return pulumi.get(self, "zscaler_incident_reciever")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Public API of this module.
__all__ = [
    'GetDLPDictionariesResult',
    'AwaitableGetDLPDictionariesResult',
    'get_dlp_dictionaries',
    'get_dlp_dictionaries_output',
]
@pulumi.output_type
class GetDLPDictionariesResult:
    """
    A collection of values returned by getDLPDictionaries.
    """
    def __init__(__self__, confidence_threshold=None, custom_phrase_match_type=None, description=None, dictionary_type=None, exact_data_match_details=None, id=None, idm_profile_match_accuracies=None, name=None, name_l10n_tag=None, patterns=None, phrases=None, proximity=None, threshold_type=None):
        # Note: each check uses truthiness first, so falsy values
        # (None, "", 0, [], False) skip type validation entirely.
        if confidence_threshold and not isinstance(confidence_threshold, str):
            raise TypeError("Expected argument 'confidence_threshold' to be a str")
        pulumi.set(__self__, "confidence_threshold", confidence_threshold)
        if custom_phrase_match_type and not isinstance(custom_phrase_match_type, str):
            raise TypeError("Expected argument 'custom_phrase_match_type' to be a str")
        pulumi.set(__self__, "custom_phrase_match_type", custom_phrase_match_type)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if dictionary_type and not isinstance(dictionary_type, str):
            raise TypeError("Expected argument 'dictionary_type' to be a str")
        pulumi.set(__self__, "dictionary_type", dictionary_type)
        if exact_data_match_details and not isinstance(exact_data_match_details, list):
            raise TypeError("Expected argument 'exact_data_match_details' to be a list")
        pulumi.set(__self__, "exact_data_match_details", exact_data_match_details)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if idm_profile_match_accuracies and not isinstance(idm_profile_match_accuracies, list):
            raise TypeError("Expected argument 'idm_profile_match_accuracies' to be a list")
        pulumi.set(__self__, "idm_profile_match_accuracies", idm_profile_match_accuracies)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if name_l10n_tag and not isinstance(name_l10n_tag, bool):
            raise TypeError("Expected argument 'name_l10n_tag' to be a bool")
        pulumi.set(__self__, "name_l10n_tag", name_l10n_tag)
        if patterns and not isinstance(patterns, list):
            raise TypeError("Expected argument 'patterns' to be a list")
        pulumi.set(__self__, "patterns", patterns)
        if phrases and not isinstance(phrases, list):
            raise TypeError("Expected argument 'phrases' to be a list")
        pulumi.set(__self__, "phrases", phrases)
        if proximity and not isinstance(proximity, int):
            raise TypeError("Expected argument 'proximity' to be a int")
        pulumi.set(__self__, "proximity", proximity)
        if threshold_type and not isinstance(threshold_type, str):
            raise TypeError("Expected argument 'threshold_type' to be a str")
        pulumi.set(__self__, "threshold_type", threshold_type)
    @property
    @pulumi.getter(name="confidenceThreshold")
    def confidence_threshold(self) -> str:
        """
        (String) he DLP confidence threshold. [`CONFIDENCE_LEVEL_LOW`, `CONFIDENCE_LEVEL_MEDIUM` `CONFIDENCE_LEVEL_HIGH` ]
        """
        return pulumi.get(self, "confidence_threshold")
    @property
    @pulumi.getter(name="customPhraseMatchType")
    def custom_phrase_match_type(self) -> str:
        """
        (String) The DLP custom phrase match type. [ `MATCH_ALL_CUSTOM_PHRASE_PATTERN_DICTIONARY`, `MATCH_ANY_CUSTOM_PHRASE_PATTERN_DICTIONARY` ]
        """
        return pulumi.get(self, "custom_phrase_match_type")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        (String) The description of the DLP dictionary.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="dictionaryType")
    def dictionary_type(self) -> str:
        """
        (String) The DLP dictionary type. The cloud service API only supports custom DLP dictionaries that are using the `PATTERNS_AND_PHRASES` type.
        """
        return pulumi.get(self, "dictionary_type")
    @property
    @pulumi.getter(name="exactDataMatchDetails")
    def exact_data_match_details(self) -> Sequence['outputs.GetDLPDictionariesExactDataMatchDetailResult']:
        """
        (List) Exact-data-match detail entries returned for the dictionary.
        """
        return pulumi.get(self, "exact_data_match_details")
    @property
    @pulumi.getter
    def id(self) -> int:
        """
        (Number) Unique identifier for the DLP dictionary.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idmProfileMatchAccuracies")
    def idm_profile_match_accuracies(self) -> Sequence['outputs.GetDLPDictionariesIdmProfileMatchAccuracyResult']:
        """
        (List) IDM profile match-accuracy entries returned for the dictionary.
        """
        return pulumi.get(self, "idm_profile_match_accuracies")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        (String) DLP dictionary name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nameL10nTag")
    def name_l10n_tag(self) -> bool:
        """
        (Boolean) Indicates whether the name is localized or not. This is always set to True for predefined DLP dictionaries.
        """
        return pulumi.get(self, "name_l10n_tag")
    @property
    @pulumi.getter
    def patterns(self) -> Sequence['outputs.GetDLPDictionariesPatternResult']:
        """
        (List) Pattern entries of the dictionary.
        """
        return pulumi.get(self, "patterns")
    @property
    @pulumi.getter
    def phrases(self) -> Sequence['outputs.GetDLPDictionariesPhraseResult']:
        """
        (List) Phrase entries of the dictionary.
        """
        return pulumi.get(self, "phrases")
    @property
    @pulumi.getter
    def proximity(self) -> int:
        """
        (Number) Proximity value of the dictionary.
        """
        return pulumi.get(self, "proximity")
    @property
    @pulumi.getter(name="thresholdType")
    def threshold_type(self) -> str:
        """
        (String) Threshold type of the dictionary.
        """
        return pulumi.get(self, "threshold_type")
class AwaitableGetDLPDictionariesResult(GetDLPDictionariesResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator, which is what
        # ``await`` requires; the result itself is already resolved, so awaiting
        # simply returns a copy of the values.
        if False:
            yield self
        return GetDLPDictionariesResult(
            confidence_threshold=self.confidence_threshold,
            custom_phrase_match_type=self.custom_phrase_match_type,
            description=self.description,
            dictionary_type=self.dictionary_type,
            exact_data_match_details=self.exact_data_match_details,
            id=self.id,
            idm_profile_match_accuracies=self.idm_profile_match_accuracies,
            name=self.name,
            name_l10n_tag=self.name_l10n_tag,
            patterns=self.patterns,
            phrases=self.phrases,
            proximity=self.proximity,
            threshold_type=self.threshold_type)
def get_dlp_dictionaries(id: Optional[int] = None,
                         name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDLPDictionariesResult:
    """
    Use the **zia_dlp_dictionaries** data source to get information about a DLP dictionary option available in the Zscaler Internet Access.
    ```python
    import pulumi
    import pulumi_zia as zia
    example = zia.DLP.get_dlp_dictionaries(name="SALESFORCE_REPORT_LEAKAGE")
    ```
    :param int id: Unique identifier for the DLP dictionary
    :param str name: DLP dictionary name
    """
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    invoke_result = pulumi.runtime.invoke(
        'zia:DLP/getDLPDictionaries:getDLPDictionaries',
        {'id': id, 'name': name},
        opts=opts,
        typ=GetDLPDictionariesResult).value
    # Fan the raw invoke result out into the awaitable wrapper, field by field.
    result_fields = (
        'confidence_threshold', 'custom_phrase_match_type', 'description',
        'dictionary_type', 'exact_data_match_details', 'id',
        'idm_profile_match_accuracies', 'name', 'name_l10n_tag',
        'patterns', 'phrases', 'proximity', 'threshold_type')
    return AwaitableGetDLPDictionariesResult(
        **{field: getattr(invoke_result, field) for field in result_fields})
@_utilities.lift_output_func(get_dlp_dictionaries)
def get_dlp_dictionaries_output(id: Optional[pulumi.Input[Optional[int]]] = None,
                                name: Optional[pulumi.Input[Optional[str]]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDLPDictionariesResult]:
    """
    Use the **zia_dlp_dictionaries** data source to get information about a DLP dictionary option available in the Zscaler Internet Access.
    ```python
    import pulumi
    import pulumi_zia as zia
    example = zia.DLP.get_dlp_dictionaries(name="SALESFORCE_REPORT_LEAKAGE")
    ```
    :param int id: Unique identifier for the DLP dictionary
    :param str name: DLP dictionary name
    """
    # Body intentionally empty: ``lift_output_func`` wraps ``get_dlp_dictionaries``
    # so that both plain and Output-wrapped arguments are accepted.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this module.
__all__ = [
    'GetGroupManagementResult',
    'AwaitableGetGroupManagementResult',
    'get_group_management',
    'get_group_management_output',
]
@pulumi.output_type
class GetGroupManagementResult:
    """
    A collection of values returned by getGroupManagement.
    """
    def __init__(__self__, comments=None, id=None, idp_id=None, name=None):
        # Validate and store each field in turn. The truthiness check means
        # falsy values (None, "", 0) bypass type validation, matching how the
        # invoke layer populates output types.
        for field, value, expected in (
                ('comments', comments, str),
                ('id', id, int),
                ('idp_id', idp_id, int),
                ('name', name, str)):
            if value and not isinstance(value, expected):
                raise TypeError(f"Expected argument '{field}' to be a {expected.__name__}")
            pulumi.set(__self__, field, value)
    @property
    @pulumi.getter
    def comments(self) -> str:
        """The ``comments`` value returned by the lookup."""
        return pulumi.get(self, "comments")
    @property
    @pulumi.getter
    def id(self) -> int:
        """The ``id`` value returned by the lookup."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idpId")
    def idp_id(self) -> int:
        """The ``idp_id`` value returned by the lookup."""
        return pulumi.get(self, "idp_id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The ``name`` value returned by the lookup (may be absent)."""
        return pulumi.get(self, "name")
class AwaitableGetGroupManagementResult(GetGroupManagementResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns ``__await__`` into a generator so the
        # (already-resolved) result can be awaited.
        if False:
            yield self
        return GetGroupManagementResult(
            comments=self.comments,
            id=self.id,
            idp_id=self.idp_id,
            name=self.name)
def get_group_management(name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupManagementResult:
    """
    Use this data source to access information about an existing resource.
    """
    # Merge caller-supplied invoke options onto the provider defaults,
    # perform the lookup, and repackage the raw result as an awaitable.
    invoke_args = {'name': name}
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    raw = pulumi.runtime.invoke('zia:Groups/getGroupManagement:getGroupManagement', invoke_args, opts=merged_opts, typ=GetGroupManagementResult).value
    return AwaitableGetGroupManagementResult(
        comments=raw.comments,
        id=raw.id,
        idp_id=raw.idp_id,
        name=raw.name)
@_utilities.lift_output_func(get_group_management)
def get_group_management_output(name: Optional[pulumi.Input[Optional[str]]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupManagementResult]:
    """
    Use this data source to access information about an existing resource.
    """
    # Body is intentionally `...`: the decorator wraps get_group_management
    # to produce this Output-returning variant.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDepartmentManagementResult',
'AwaitableGetDepartmentManagementResult',
'get_department_management',
'get_department_management_output',
]
@pulumi.output_type
class GetDepartmentManagementResult:
    """
    A collection of values returned by getDepartmentManagement.
    """
    def __init__(__self__, comments=None, deleted=None, id=None, idp_id=None, name=None):
        # Validate each field's type before storing it via pulumi.set.
        # NOTE(review): a falsy value (None, 0, False, "") skips its isinstance
        # check; this is the generated-SDK convention for optional results.
        if comments and not isinstance(comments, str):
            raise TypeError("Expected argument 'comments' to be a str")
        pulumi.set(__self__, "comments", comments)
        if deleted and not isinstance(deleted, bool):
            raise TypeError("Expected argument 'deleted' to be a bool")
        pulumi.set(__self__, "deleted", deleted)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if idp_id and not isinstance(idp_id, int):
            raise TypeError("Expected argument 'idp_id' to be a int")
        pulumi.set(__self__, "idp_id", idp_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def comments(self) -> str:
        """The `comments` value returned for this department."""
        return pulumi.get(self, "comments")
    @property
    @pulumi.getter
    def deleted(self) -> bool:
        """Whether the department is marked deleted."""
        return pulumi.get(self, "deleted")
    @property
    @pulumi.getter
    def id(self) -> int:
        """Unique integer identifier of the department."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idpId")
    def idp_id(self) -> int:
        """Identifier of the associated IdP (wire name ``idpId``)."""
        return pulumi.get(self, "idp_id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the department, if one was supplied to the lookup."""
        return pulumi.get(self, "name")
class AwaitableGetDepartmentManagementResult(GetDepartmentManagementResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function, which is
        # what `await` requires; the awaited value is the returned result.
        if False:
            yield self
        return GetDepartmentManagementResult(
            comments=self.comments,
            deleted=self.deleted,
            id=self.id,
            idp_id=self.idp_id,
            name=self.name)
def get_department_management(name: Optional[str] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDepartmentManagementResult:
    """
    Use this data source to access information about an existing resource.
    """
    # Merge caller-supplied invoke options onto the provider defaults,
    # perform the lookup, and repackage the raw result as an awaitable.
    invoke_args = {'name': name}
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    raw = pulumi.runtime.invoke('zia:Departments/getDepartmentManagement:getDepartmentManagement', invoke_args, opts=merged_opts, typ=GetDepartmentManagementResult).value
    return AwaitableGetDepartmentManagementResult(
        comments=raw.comments,
        deleted=raw.deleted,
        id=raw.id,
        idp_id=raw.idp_id,
        name=raw.name)
@_utilities.lift_output_func(get_department_management)
def get_department_management_output(name: Optional[pulumi.Input[Optional[str]]] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDepartmentManagementResult]:
    """
    Use this data source to access information about an existing resource.
    """
    # Body is intentionally `...`: the decorator wraps get_department_management
    # to produce this Output-returning variant.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ActivationStatusArgs', 'ActivationStatus']
@pulumi.input_type
class ActivationStatusArgs:
    def __init__(__self__, *,
                 status: pulumi.Input[str]):
        """
        The set of arguments for constructing a ActivationStatus resource.
        :param pulumi.Input[str] status: Activates configuration changes.
        """
        # `status` is the only (required, keyword-only) argument.
        pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter
    def status(self) -> pulumi.Input[str]:
        """
        Activates configuration changes.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: pulumi.Input[str]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class _ActivationStatusState:
    def __init__(__self__, *,
                 status: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ActivationStatus resources.
        :param pulumi.Input[str] status: Activates configuration changes.
        """
        # Unlike ActivationStatusArgs, every field is optional here: state
        # lookups may be partial.
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Activates configuration changes.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
class ActivationStatus(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## Example Usage
        ```python
        import pulumi
        import pulumi_zia as zia
        import zscaler_pulumi_zia as zia
        activation_activation_status = zia.Activation.get_activation_status()
        activation_activation_activation_status_activation_status = zia.activation.ActivationStatus("activationActivation/activationStatusActivationStatus", status="ACTIVE")
        ```
        ## Import
        Activation is not an importable resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] status: Activates configuration changes.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ActivationStatusArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Example Usage
        ```python
        import pulumi
        import pulumi_zia as zia
        import zscaler_pulumi_zia as zia
        activation_activation_status = zia.Activation.get_activation_status()
        activation_activation_activation_status_activation_status = zia.activation.ActivationStatus("activationActivation/activationStatusActivationStatus", status="ACTIVE")
        ```
        ## Import
        Activation is not an importable resource.
        :param str resource_name: The name of the resource.
        :param ActivationStatusArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typed overloads above: if the caller
        # passed an ActivationStatusArgs bundle, unpack it into keyword args.
        resource_args, opts = _utilities.get_resource_args_opts(ActivationStatusArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared constructor body: validate options, build the props bag, and
        # register the resource with the Pulumi engine under its typed token.
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ActivationStatusArgs.__new__(ActivationStatusArgs)
            # `status` is required unless the resource is being looked up by URN.
            if status is None and not opts.urn:
                raise TypeError("Missing required property 'status'")
            __props__.__dict__["status"] = status
        super(ActivationStatus, __self__).__init__(
            'zia:Activation/activationStatus:ActivationStatus',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            status: Optional[pulumi.Input[str]] = None) -> 'ActivationStatus':
        """
        Get an existing ActivationStatus resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] status: Activates configuration changes.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ActivationStatusState.__new__(_ActivationStatusState)
        __props__.__dict__["status"] = status
        return ActivationStatus(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        Activates configuration changes.
        """
        return pulumi.get(self, "status")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetTimeWindowResult',
'AwaitableGetTimeWindowResult',
'get_time_window',
'get_time_window_output',
]
@pulumi.output_type
class GetTimeWindowResult:
    """
    A collection of values returned by getTimeWindow.
    """
    def __init__(__self__, day_of_weeks=None, end_time=None, id=None, name=None, start_time=None):
        # Validate each field's type before storing it via pulumi.set.
        # NOTE(review): a falsy value (None, 0, []) skips its isinstance check;
        # this is the generated-SDK convention for optional results.
        if day_of_weeks and not isinstance(day_of_weeks, list):
            raise TypeError("Expected argument 'day_of_weeks' to be a list")
        pulumi.set(__self__, "day_of_weeks", day_of_weeks)
        if end_time and not isinstance(end_time, int):
            raise TypeError("Expected argument 'end_time' to be a int")
        pulumi.set(__self__, "end_time", end_time)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if start_time and not isinstance(start_time, int):
            raise TypeError("Expected argument 'start_time' to be a int")
        pulumi.set(__self__, "start_time", start_time)
    @property
    @pulumi.getter(name="dayOfWeeks")
    def day_of_weeks(self) -> Sequence[str]:
        """
        (List of String) Day-of-week names for this time window.
        """
        return pulumi.get(self, "day_of_weeks")
    @property
    @pulumi.getter(name="endTime")
    def end_time(self) -> int:
        """
        (Number) End of the time window.
        """
        return pulumi.get(self, "end_time")
    @property
    @pulumi.getter
    def id(self) -> int:
        """Unique integer identifier of the time window."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the time window, if one was supplied to the lookup."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> int:
        """
        (Number) Start of the time window.
        """
        return pulumi.get(self, "start_time")
class AwaitableGetTimeWindowResult(GetTimeWindowResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function, which is
        # what `await` requires; the awaited value is the returned result.
        if False:
            yield self
        return GetTimeWindowResult(
            day_of_weeks=self.day_of_weeks,
            end_time=self.end_time,
            id=self.id,
            name=self.name,
            start_time=self.start_time)
def get_time_window(name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTimeWindowResult:
    """
    Use the **zia_firewall_filtering_time_window** data source to get information about a time window option available in the Zscaler Internet Access cloud firewall. This data source can then be associated with a ZIA firewall filtering rule.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_zia as zia
    work_hours = zia.TimeWindow.get_time_window(name="Work hours")
    ```
    ```python
    import pulumi
    import pulumi_zia as zia
    weekends = zia.TimeWindow.get_time_window(name="Weekends")
    ```
    ```python
    import pulumi
    import pulumi_zia as zia
    off_hours = zia.TimeWindow.get_time_window(name="Off hours")
    ```
    :param str name: The name of the time window to be exported.
    """
    # Merge caller-supplied invoke options onto the provider defaults,
    # perform the lookup, and repackage the raw result as an awaitable.
    invoke_args = {'name': name}
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    raw = pulumi.runtime.invoke('zia:TimeWindow/getTimeWindow:getTimeWindow', invoke_args, opts=merged_opts, typ=GetTimeWindowResult).value
    return AwaitableGetTimeWindowResult(
        day_of_weeks=raw.day_of_weeks,
        end_time=raw.end_time,
        id=raw.id,
        name=raw.name,
        start_time=raw.start_time)
@_utilities.lift_output_func(get_time_window)
def get_time_window_output(name: Optional[pulumi.Input[Optional[str]]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTimeWindowResult]:
    """
    Use the **zia_firewall_filtering_time_window** data source to get information about a time window option available in the Zscaler Internet Access cloud firewall. This data source can then be associated with a ZIA firewall filtering rule.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_zia as zia
    work_hours = zia.TimeWindow.get_time_window(name="Work hours")
    ```
    ```python
    import pulumi
    import pulumi_zia as zia
    weekends = zia.TimeWindow.get_time_window(name="Weekends")
    ```
    ```python
    import pulumi
    import pulumi_zia as zia
    off_hours = zia.TimeWindow.get_time_window(name="Off hours")
    ```
    :param str name: The name of the time window to be exported.
    """
    # Body is intentionally `...`: the decorator wraps get_time_window to
    # produce this Output-returning variant.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetSecuritySettingsResult',
'AwaitableGetSecuritySettingsResult',
'get_security_settings',
]
@pulumi.output_type
class GetSecuritySettingsResult:
    """
    A collection of values returned by getSecuritySettings.
    """
    def __init__(__self__, blacklist_urls=None, id=None, whitelist_urls=None):
        # Validate each field's type before storing it via pulumi.set.
        # NOTE(review): a falsy value (None, []) skips its isinstance check;
        # this is the generated-SDK convention for optional results.
        if blacklist_urls and not isinstance(blacklist_urls, list):
            raise TypeError("Expected argument 'blacklist_urls' to be a list")
        pulumi.set(__self__, "blacklist_urls", blacklist_urls)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if whitelist_urls and not isinstance(whitelist_urls, list):
            raise TypeError("Expected argument 'whitelist_urls' to be a list")
        pulumi.set(__self__, "whitelist_urls", whitelist_urls)
    @property
    @pulumi.getter(name="blacklistUrls")
    def blacklist_urls(self) -> Sequence[str]:
        """URLs on the organization's denylist."""
        return pulumi.get(self, "blacklist_urls")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="whitelistUrls")
    def whitelist_urls(self) -> Sequence[str]:
        """Allowlist URLs whose contents are not scanned."""
        return pulumi.get(self, "whitelist_urls")
class AwaitableGetSecuritySettingsResult(GetSecuritySettingsResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function, which is
        # what `await` requires; the awaited value is the returned result.
        if False:
            yield self
        return GetSecuritySettingsResult(
            blacklist_urls=self.blacklist_urls,
            id=self.id,
            whitelist_urls=self.whitelist_urls)
def get_security_settings(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecuritySettingsResult:
    """
    Use this data source to access information about an existing resource.
    """
    # This data source takes no arguments; merge the invoke options onto the
    # provider defaults, fetch, and repackage the raw result as an awaitable.
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    raw = pulumi.runtime.invoke('zia:SecuritySettings/getSecuritySettings:getSecuritySettings', {}, opts=merged_opts, typ=GetSecuritySettingsResult).value
    return AwaitableGetSecuritySettingsResult(
        blacklist_urls=raw.blacklist_urls,
        id=raw.id,
        whitelist_urls=raw.whitelist_urls)
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecuritySettingsArgs', 'SecuritySettings']
@pulumi.input_type
class SecuritySettingsArgs:
    def __init__(__self__, *,
                 blacklist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 whitelist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a SecuritySettings resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] blacklist_urls: URLs on the denylist for your organization. Allow up to 25000 URLs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] whitelist_urls: Allowlist URLs whose contents will not be scanned. Allows up to 255 URLs.
        """
        # Both arguments are optional; only set the ones actually provided.
        if blacklist_urls is not None:
            pulumi.set(__self__, "blacklist_urls", blacklist_urls)
        if whitelist_urls is not None:
            pulumi.set(__self__, "whitelist_urls", whitelist_urls)
    @property
    @pulumi.getter(name="blacklistUrls")
    def blacklist_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        URLs on the denylist for your organization. Allow up to 25000 URLs.
        """
        return pulumi.get(self, "blacklist_urls")
    @blacklist_urls.setter
    def blacklist_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "blacklist_urls", value)
    @property
    @pulumi.getter(name="whitelistUrls")
    def whitelist_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Allowlist URLs whose contents will not be scanned. Allows up to 255 URLs.
        """
        return pulumi.get(self, "whitelist_urls")
    @whitelist_urls.setter
    def whitelist_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "whitelist_urls", value)
@pulumi.input_type
class _SecuritySettingsState:
    def __init__(__self__, *,
                 blacklist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 whitelist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering SecuritySettings resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] blacklist_urls: URLs on the denylist for your organization. Allow up to 25000 URLs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] whitelist_urls: Allowlist URLs whose contents will not be scanned. Allows up to 255 URLs.
        """
        # State lookups may be partial; only set the fields actually provided.
        if blacklist_urls is not None:
            pulumi.set(__self__, "blacklist_urls", blacklist_urls)
        if whitelist_urls is not None:
            pulumi.set(__self__, "whitelist_urls", whitelist_urls)
    @property
    @pulumi.getter(name="blacklistUrls")
    def blacklist_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        URLs on the denylist for your organization. Allow up to 25000 URLs.
        """
        return pulumi.get(self, "blacklist_urls")
    @blacklist_urls.setter
    def blacklist_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "blacklist_urls", value)
    @property
    @pulumi.getter(name="whitelistUrls")
    def whitelist_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Allowlist URLs whose contents will not be scanned. Allows up to 255 URLs.
        """
        return pulumi.get(self, "whitelist_urls")
    @whitelist_urls.setter
    def whitelist_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "whitelist_urls", value)
class SecuritySettings(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 blacklist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 whitelist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Create a SecuritySettings resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] blacklist_urls: URLs on the denylist for your organization. Allow up to 25000 URLs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] whitelist_urls: Allowlist URLs whose contents will not be scanned. Allows up to 255 URLs.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[SecuritySettingsArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a SecuritySettings resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param SecuritySettingsArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typed overloads above: if the caller
        # passed a SecuritySettingsArgs bundle, unpack it into keyword args.
        resource_args, opts = _utilities.get_resource_args_opts(SecuritySettingsArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 blacklist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 whitelist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        # Shared constructor body: validate options, build the props bag, and
        # register the resource with the Pulumi engine under its typed token.
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SecuritySettingsArgs.__new__(SecuritySettingsArgs)
            __props__.__dict__["blacklist_urls"] = blacklist_urls
            __props__.__dict__["whitelist_urls"] = whitelist_urls
        super(SecuritySettings, __self__).__init__(
            'zia:SecuritySettings/securitySettings:SecuritySettings',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            blacklist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            whitelist_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SecuritySettings':
        """
        Get an existing SecuritySettings resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] blacklist_urls: URLs on the denylist for your organization. Allow up to 25000 URLs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] whitelist_urls: Allowlist URLs whose contents will not be scanned. Allows up to 255 URLs.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _SecuritySettingsState.__new__(_SecuritySettingsState)
        __props__.__dict__["blacklist_urls"] = blacklist_urls
        __props__.__dict__["whitelist_urls"] = whitelist_urls
        return SecuritySettings(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="blacklistUrls")
    def blacklist_urls(self) -> pulumi.Output[Sequence[str]]:
        """
        URLs on the denylist for your organization. Allow up to 25000 URLs.
        """
        return pulumi.get(self, "blacklist_urls")
    @property
    @pulumi.getter(name="whitelistUrls")
    def whitelist_urls(self) -> pulumi.Output[Sequence[str]]:
        """
        Allowlist URLs whose contents will not be scanned. Allows up to 255 URLs.
        """
        return pulumi.get(self, "whitelist_urls")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAdminUsersResult',
'AwaitableGetAdminUsersResult',
'get_admin_users',
'get_admin_users_output',
]
@pulumi.output_type
class GetAdminUsersResult:
    """
    A collection of values returned by getAdminUsers.
    """
    def __init__(__self__, admin_scopes=None, comments=None, disabled=None, email=None, exec_mobile_app_tokens=None, id=None, is_auditor=None, is_exec_mobile_app_enabled=None, is_non_editable=None, is_password_expired=None, is_password_login_allowed=None, is_product_update_comm_enabled=None, is_security_report_comm_enabled=None, is_service_update_comm_enabled=None, login_name=None, pwd_last_modified_time=None, roles=None, username=None):
        # Validate each field's type before storing it via pulumi.set.
        # NOTE(review): a falsy value (None, False, 0, [], "") skips its
        # isinstance check; this is the generated-SDK convention.
        if admin_scopes and not isinstance(admin_scopes, list):
            raise TypeError("Expected argument 'admin_scopes' to be a list")
        pulumi.set(__self__, "admin_scopes", admin_scopes)
        if comments and not isinstance(comments, str):
            raise TypeError("Expected argument 'comments' to be a str")
        pulumi.set(__self__, "comments", comments)
        if disabled and not isinstance(disabled, bool):
            raise TypeError("Expected argument 'disabled' to be a bool")
        pulumi.set(__self__, "disabled", disabled)
        if email and not isinstance(email, str):
            raise TypeError("Expected argument 'email' to be a str")
        pulumi.set(__self__, "email", email)
        if exec_mobile_app_tokens and not isinstance(exec_mobile_app_tokens, list):
            raise TypeError("Expected argument 'exec_mobile_app_tokens' to be a list")
        pulumi.set(__self__, "exec_mobile_app_tokens", exec_mobile_app_tokens)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if is_auditor and not isinstance(is_auditor, bool):
            raise TypeError("Expected argument 'is_auditor' to be a bool")
        pulumi.set(__self__, "is_auditor", is_auditor)
        if is_exec_mobile_app_enabled and not isinstance(is_exec_mobile_app_enabled, bool):
            raise TypeError("Expected argument 'is_exec_mobile_app_enabled' to be a bool")
        pulumi.set(__self__, "is_exec_mobile_app_enabled", is_exec_mobile_app_enabled)
        if is_non_editable and not isinstance(is_non_editable, bool):
            raise TypeError("Expected argument 'is_non_editable' to be a bool")
        pulumi.set(__self__, "is_non_editable", is_non_editable)
        if is_password_expired and not isinstance(is_password_expired, bool):
            raise TypeError("Expected argument 'is_password_expired' to be a bool")
        pulumi.set(__self__, "is_password_expired", is_password_expired)
        if is_password_login_allowed and not isinstance(is_password_login_allowed, bool):
            raise TypeError("Expected argument 'is_password_login_allowed' to be a bool")
        pulumi.set(__self__, "is_password_login_allowed", is_password_login_allowed)
        if is_product_update_comm_enabled and not isinstance(is_product_update_comm_enabled, bool):
            raise TypeError("Expected argument 'is_product_update_comm_enabled' to be a bool")
        pulumi.set(__self__, "is_product_update_comm_enabled", is_product_update_comm_enabled)
        if is_security_report_comm_enabled and not isinstance(is_security_report_comm_enabled, bool):
            raise TypeError("Expected argument 'is_security_report_comm_enabled' to be a bool")
        pulumi.set(__self__, "is_security_report_comm_enabled", is_security_report_comm_enabled)
        if is_service_update_comm_enabled and not isinstance(is_service_update_comm_enabled, bool):
            raise TypeError("Expected argument 'is_service_update_comm_enabled' to be a bool")
        pulumi.set(__self__, "is_service_update_comm_enabled", is_service_update_comm_enabled)
        if login_name and not isinstance(login_name, str):
            raise TypeError("Expected argument 'login_name' to be a str")
        pulumi.set(__self__, "login_name", login_name)
        if pwd_last_modified_time and not isinstance(pwd_last_modified_time, int):
            raise TypeError("Expected argument 'pwd_last_modified_time' to be a int")
        pulumi.set(__self__, "pwd_last_modified_time", pwd_last_modified_time)
        if roles and not isinstance(roles, list):
            raise TypeError("Expected argument 'roles' to be a list")
        pulumi.set(__self__, "roles", roles)
        if username and not isinstance(username, str):
            raise TypeError("Expected argument 'username' to be a str")
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="adminScopes")
    def admin_scopes(self) -> Sequence['outputs.GetAdminUsersAdminScopeResult']:
        """
        (Set of Object) The admin's scope. Only applicable for the LOCATION_GROUP admin scope type, in which case this attribute gives the list of ID/name pairs of locations within the location group.
        """
        return pulumi.get(self, "admin_scopes")
    @property
    @pulumi.getter
    def comments(self) -> str:
        """
        (String) Additional information about the admin or auditor.
        """
        return pulumi.get(self, "comments")
    @property
    @pulumi.getter
    def disabled(self) -> bool:
        """
        (Boolean) Indicates whether or not the admin account is disabled.
        """
        return pulumi.get(self, "disabled")
    @property
    @pulumi.getter
    def email(self) -> str:
        """
        (String) Admin or auditor's email address.
        """
        return pulumi.get(self, "email")
    @property
    @pulumi.getter(name="execMobileAppTokens")
    def exec_mobile_app_tokens(self) -> Sequence['outputs.GetAdminUsersExecMobileAppTokenResult']:
        """
        (List of Object)
        """
        return pulumi.get(self, "exec_mobile_app_tokens")
    @property
    @pulumi.getter
    def id(self) -> int:
        """
        (Number) Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="isAuditor")
    def is_auditor(self) -> bool:
        """
        (Boolean) Indicates whether the user is an auditor. This attribute is subject to change.
        """
        return pulumi.get(self, "is_auditor")
    @property
    @pulumi.getter(name="isExecMobileAppEnabled")
    def is_exec_mobile_app_enabled(self) -> bool:
        """
        (Boolean) Indicates whether or not Executive Insights App access is enabled for the admin.
        """
        return pulumi.get(self, "is_exec_mobile_app_enabled")
    @property
    @pulumi.getter(name="isNonEditable")
    def is_non_editable(self) -> bool:
        """
        (Boolean) Indicates whether or not the admin can be edited or deleted.
        """
        return pulumi.get(self, "is_non_editable")
    @property
    @pulumi.getter(name="isPasswordExpired")
    def is_password_expired(self) -> bool:
        """
        (Boolean) Indicates whether or not an admin's password has expired.
        """
        return pulumi.get(self, "is_password_expired")
    @property
    @pulumi.getter(name="isPasswordLoginAllowed")
    def is_password_login_allowed(self) -> bool:
        """
        (Boolean) The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only.
        """
        return pulumi.get(self, "is_password_login_allowed")
    @property
    @pulumi.getter(name="isProductUpdateCommEnabled")
    def is_product_update_comm_enabled(self) -> bool:
        """
        (Boolean) Communication setting for Product Update.
        """
        return pulumi.get(self, "is_product_update_comm_enabled")
    @property
    @pulumi.getter(name="isSecurityReportCommEnabled")
    def is_security_report_comm_enabled(self) -> bool:
        """
        (Boolean) Communication for Security Report is enabled.
        """
        return pulumi.get(self, "is_security_report_comm_enabled")
    @property
    @pulumi.getter(name="isServiceUpdateCommEnabled")
    def is_service_update_comm_enabled(self) -> bool:
        """
        (Boolean) Communication setting for Service Update.
        """
        return pulumi.get(self, "is_service_update_comm_enabled")
    @property
    @pulumi.getter(name="loginName")
    def login_name(self) -> str:
        """(String) The admin's login/email name used for the lookup."""
        return pulumi.get(self, "login_name")
    @property
    @pulumi.getter(name="pwdLastModifiedTime")
    def pwd_last_modified_time(self) -> int:
        """(Number) Timestamp of the last password modification."""
        return pulumi.get(self, "pwd_last_modified_time")
    @property
    @pulumi.getter
    def roles(self) -> Sequence['outputs.GetAdminUsersRoleResult']:
        """
        (Set of Object) Role of the admin. This is not required for an auditor.
        """
        return pulumi.get(self, "roles")
    @property
    @pulumi.getter
    def username(self) -> str:
        """(String) The admin's display username used for the lookup."""
        return pulumi.get(self, "username")
class AwaitableGetAdminUsersResult(GetAdminUsersResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function, which is
        # what `await` requires; the awaited value is the returned result.
        if False:
            yield self
        return GetAdminUsersResult(
            admin_scopes=self.admin_scopes,
            comments=self.comments,
            disabled=self.disabled,
            email=self.email,
            exec_mobile_app_tokens=self.exec_mobile_app_tokens,
            id=self.id,
            is_auditor=self.is_auditor,
            is_exec_mobile_app_enabled=self.is_exec_mobile_app_enabled,
            is_non_editable=self.is_non_editable,
            is_password_expired=self.is_password_expired,
            is_password_login_allowed=self.is_password_login_allowed,
            is_product_update_comm_enabled=self.is_product_update_comm_enabled,
            is_security_report_comm_enabled=self.is_security_report_comm_enabled,
            is_service_update_comm_enabled=self.is_service_update_comm_enabled,
            login_name=self.login_name,
            pwd_last_modified_time=self.pwd_last_modified_time,
            roles=self.roles,
            username=self.username)
def get_admin_users(id: Optional[int] = None,
                    login_name: Optional[str] = None,
                    username: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAdminUsersResult:
    """
    Use the **zia_admin_users** data source to get information about an admin user account created in the Zscaler Internet Access cloud or via the API. This data source can then be associated with a ZIA administrator role.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_zia as zia
    john_doe = zia.AdminUsers.get_admin_users(login_name="[email protected]")
    ```
    ```python
    import pulumi
    import pulumi_zia as zia
    john_doe = zia.AdminUsers.get_admin_users(username="John Doe")
    ```
    :param int id: The ID of the admin user to be exported.
    :param str login_name: The email address of the admin user to be exported.
    :param str username: The username of the admin user to be exported.
    """
    # Wire-format argument names are camelCase.
    __args__ = {
        'id': id,
        'loginName': login_name,
        'username': username,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('zia:AdminUsers/getAdminUsers:getAdminUsers', __args__, opts=opts, typ=GetAdminUsersResult).value
    # Copy every field of the resolved result onto the awaitable wrapper.
    _fields = ('admin_scopes', 'comments', 'disabled', 'email',
               'exec_mobile_app_tokens', 'id', 'is_auditor',
               'is_exec_mobile_app_enabled', 'is_non_editable',
               'is_password_expired', 'is_password_login_allowed',
               'is_product_update_comm_enabled', 'is_security_report_comm_enabled',
               'is_service_update_comm_enabled', 'login_name',
               'pwd_last_modified_time', 'roles', 'username')
    return AwaitableGetAdminUsersResult(**{f: getattr(__ret__, f) for f in _fields})
@_utilities.lift_output_func(get_admin_users)
def get_admin_users_output(id: Optional[pulumi.Input[Optional[int]]] = None,
                           login_name: Optional[pulumi.Input[Optional[str]]] = None,
                           username: Optional[pulumi.Input[Optional[str]]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAdminUsersResult]:
    """
    Use the **zia_admin_users** data source to get information about an admin user account created in the Zscaler Internet Access cloud or via the API. This data source can then be associated with a ZIA administrator role.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_zia as zia
    john_doe = zia.AdminUsers.get_admin_users(login_name="[email protected]")
    ```
    ```python
    import pulumi
    import pulumi_zia as zia
    john_doe = zia.AdminUsers.get_admin_users(username="John Doe")
    ```
    :param int id: The ID of the admin user to be exported.
    :param str login_name: The email address of the admin user to be exported.
    :param str username: The username of the admin user to be exported.
    """
    # Body intentionally empty: @_utilities.lift_output_func wraps
    # get_admin_users above so each argument may also be a pulumi.Output.
    ...
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AdminUsersAdminScopeArgs',
'AdminUsersAdminScopeScopeEntitiesArgs',
'AdminUsersAdminScopeScopeGroupMemberEntitiesArgs',
'AdminUsersRoleArgs',
]
@pulumi.input_type
class AdminUsersAdminScopeArgs:
    """
    Nested input describing the admin's scope. A scope is required for
    admins, but not applicable to auditors. This attribute is subject to
    change.
    """
    def __init__(__self__, *,
                 scope_entities: Optional[pulumi.Input['AdminUsersAdminScopeScopeEntitiesArgs']] = None,
                 scope_group_member_entities: Optional[pulumi.Input['AdminUsersAdminScopeScopeGroupMemberEntitiesArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input['AdminUsersAdminScopeScopeEntitiesArgs'] scope_entities: Based on the admin scope type, the entities can be the ID/name pair of departments, locations, or location groups.
        :param pulumi.Input['AdminUsersAdminScopeScopeGroupMemberEntitiesArgs'] scope_group_member_entities: Only applicable for the LOCATION_GROUP admin scope type, in which case this attribute gives the list of ID/name pairs of locations within the location group.
        :param pulumi.Input[str] type: The admin scope type. The attribute name is subject to change.
        """
        if scope_entities is not None:
            pulumi.set(__self__, "scope_entities", scope_entities)
        if scope_group_member_entities is not None:
            pulumi.set(__self__, "scope_group_member_entities", scope_group_member_entities)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="scopeEntities")
    def scope_entities(self) -> Optional[pulumi.Input['AdminUsersAdminScopeScopeEntitiesArgs']]:
        """
        Based on the admin scope type, the entities can be the ID/name pair of departments, locations, or location groups.
        """
        return pulumi.get(self, "scope_entities")
    @scope_entities.setter
    def scope_entities(self, value: Optional[pulumi.Input['AdminUsersAdminScopeScopeEntitiesArgs']]):
        pulumi.set(self, "scope_entities", value)
    @property
    @pulumi.getter(name="scopeGroupMemberEntities")
    def scope_group_member_entities(self) -> Optional[pulumi.Input['AdminUsersAdminScopeScopeGroupMemberEntitiesArgs']]:
        """
        Only applicable for the LOCATION_GROUP admin scope type, in which case this attribute gives the list of ID/name pairs of locations within the location group.
        """
        return pulumi.get(self, "scope_group_member_entities")
    @scope_group_member_entities.setter
    def scope_group_member_entities(self, value: Optional[pulumi.Input['AdminUsersAdminScopeScopeGroupMemberEntitiesArgs']]):
        pulumi.set(self, "scope_group_member_entities", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The admin scope type. The attribute name is subject to change.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class AdminUsersAdminScopeScopeEntitiesArgs:
    """Input list of entity IDs making up an admin scope."""

    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """Identifier that uniquely identifies an entity."""
        entity_ids = pulumi.get(self, "ids")
        return entity_ids

    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class AdminUsersAdminScopeScopeGroupMemberEntitiesArgs:
    """Input list of entity IDs for the members of a location group scope."""

    def __init__(__self__, *,
                 ids: pulumi.Input[Sequence[pulumi.Input[int]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """Identifier that uniquely identifies an entity."""
        entity_ids = pulumi.get(self, "ids")
        return entity_ids

    @ids.setter
    def ids(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "ids", value)
@pulumi.input_type
class AdminUsersRoleArgs:
    """
    Role of the admin (ID/name pair). This is not required for an auditor.
    """
    def __init__(__self__, *,
                 extensions: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[int]] = None,
                 is_name_l10n_tag: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] id: Identifier that uniquely identifies an entity
        :param pulumi.Input[str] name: The configured name of the entity
        """
        if extensions is not None:
            pulumi.set(__self__, "extensions", extensions)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_name_l10n_tag is not None:
            pulumi.set(__self__, "is_name_l10n_tag", is_name_l10n_tag)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def extensions(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Additional key/value metadata for the role — semantics not shown
        here; TODO confirm against the ZIA API docs.
        """
        return pulumi.get(self, "extensions")
    @extensions.setter
    def extensions(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "extensions", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[int]]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="isNameL10nTag")
    def is_name_l10n_tag(self) -> Optional[pulumi.Input[bool]]:
        """
        Presumably marks the role name as a localization (l10n) tag rather
        than a literal name — verify against the API docs.
        """
        return pulumi.get(self, "is_name_l10n_tag")
    @is_name_l10n_tag.setter
    def is_name_l10n_tag(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_name_l10n_tag", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'AdminUsersAdminScope',
'AdminUsersAdminScopeScopeEntities',
'AdminUsersAdminScopeScopeGroupMemberEntities',
'AdminUsersRole',
'GetAdminUsersAdminScopeResult',
'GetAdminUsersAdminScopeScopeEntityResult',
'GetAdminUsersAdminScopeScopeGroupMemberEntityResult',
'GetAdminUsersExecMobileAppTokenResult',
'GetAdminUsersRoleResult',
]
@pulumi.output_type
class AdminUsersAdminScope(dict):
    """
    Output form of the admin's scope. Subclasses ``dict``: camelCase keys
    are still readable via ``__getitem__``/``get``, but doing so logs a
    warning steering callers to the snake_case property getters.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the wire-format (camelCase) key to its Python property name.
        suggest = None
        if key == "scopeEntities":
            suggest = "scope_entities"
        elif key == "scopeGroupMemberEntities":
            suggest = "scope_group_member_entities"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AdminUsersAdminScope. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AdminUsersAdminScope.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AdminUsersAdminScope.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 scope_entities: Optional['outputs.AdminUsersAdminScopeScopeEntities'] = None,
                 scope_group_member_entities: Optional['outputs.AdminUsersAdminScopeScopeGroupMemberEntities'] = None,
                 type: Optional[str] = None):
        """
        :param 'AdminUsersAdminScopeScopeEntitiesArgs' scope_entities: Based on the admin scope type, the entities can be the ID/name pair of departments, locations, or location groups.
        :param 'AdminUsersAdminScopeScopeGroupMemberEntitiesArgs' scope_group_member_entities: Only applicable for the LOCATION_GROUP admin scope type, in which case this attribute gives the list of ID/name pairs of locations within the location group.
        :param str type: The admin scope type. The attribute name is subject to change.
        """
        if scope_entities is not None:
            pulumi.set(__self__, "scope_entities", scope_entities)
        if scope_group_member_entities is not None:
            pulumi.set(__self__, "scope_group_member_entities", scope_group_member_entities)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="scopeEntities")
    def scope_entities(self) -> Optional['outputs.AdminUsersAdminScopeScopeEntities']:
        """
        Based on the admin scope type, the entities can be the ID/name pair of departments, locations, or location groups.
        """
        return pulumi.get(self, "scope_entities")
    @property
    @pulumi.getter(name="scopeGroupMemberEntities")
    def scope_group_member_entities(self) -> Optional['outputs.AdminUsersAdminScopeScopeGroupMemberEntities']:
        """
        Only applicable for the LOCATION_GROUP admin scope type, in which case this attribute gives the list of ID/name pairs of locations within the location group.
        """
        return pulumi.get(self, "scope_group_member_entities")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The admin scope type. The attribute name is subject to change.
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class AdminUsersAdminScopeScopeEntities(dict):
    """Output list of entity IDs making up an admin scope."""

    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """Identifier that uniquely identifies an entity."""
        entity_ids: Sequence[int] = pulumi.get(self, "ids")
        return entity_ids
@pulumi.output_type
class AdminUsersAdminScopeScopeGroupMemberEntities(dict):
    """Output list of entity IDs for the members of a location group scope."""

    def __init__(__self__, *,
                 ids: Sequence[int]):
        """
        :param Sequence[int] ids: Identifier that uniquely identifies an entity
        """
        pulumi.set(__self__, "ids", ids)

    @property
    @pulumi.getter
    def ids(self) -> Sequence[int]:
        """Identifier that uniquely identifies an entity."""
        entity_ids: Sequence[int] = pulumi.get(self, "ids")
        return entity_ids
@pulumi.output_type
class AdminUsersRole(dict):
    """
    Output form of the admin's role. Subclasses ``dict``: camelCase keys
    are still readable via ``__getitem__``/``get``, but doing so logs a
    warning steering callers to the snake_case property getters.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the wire-format (camelCase) key to its Python property name.
        suggest = None
        if key == "isNameL10nTag":
            suggest = "is_name_l10n_tag"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AdminUsersRole. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AdminUsersRole.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AdminUsersRole.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 extensions: Optional[Mapping[str, str]] = None,
                 id: Optional[int] = None,
                 is_name_l10n_tag: Optional[bool] = None,
                 name: Optional[str] = None):
        """
        :param int id: Identifier that uniquely identifies an entity
        :param str name: The configured name of the entity
        """
        if extensions is not None:
            pulumi.set(__self__, "extensions", extensions)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_name_l10n_tag is not None:
            pulumi.set(__self__, "is_name_l10n_tag", is_name_l10n_tag)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def extensions(self) -> Optional[Mapping[str, str]]:
        """
        Additional key/value metadata for the role — semantics not shown
        here; TODO confirm against the ZIA API docs.
        """
        return pulumi.get(self, "extensions")
    @property
    @pulumi.getter
    def id(self) -> Optional[int]:
        """
        Identifier that uniquely identifies an entity
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="isNameL10nTag")
    def is_name_l10n_tag(self) -> Optional[bool]:
        """
        Presumably marks the role name as a localization (l10n) tag rather
        than a literal name — verify against the API docs.
        """
        return pulumi.get(self, "is_name_l10n_tag")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The configured name of the entity
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetAdminUsersAdminScopeResult(dict):
    """Admin-scope record returned by the get_admin_users data source."""

    def __init__(__self__, *,
                 scope_entities: Sequence['outputs.GetAdminUsersAdminScopeScopeEntityResult'],
                 scope_group_member_entities: Sequence['outputs.GetAdminUsersAdminScopeScopeGroupMemberEntityResult'],
                 type: str):
        """
        :param Sequence['GetAdminUsersAdminScopeScopeEntityArgs'] scope_entities: (String) Based on the admin scope type, the entities can be the ID/name pair of departments, locations, or location groups.
        :param Sequence['GetAdminUsersAdminScopeScopeGroupMemberEntityArgs'] scope_group_member_entities: (Number) Only applicable for the LOCATION_GROUP admin scope type, in which case this attribute gives the list of ID/name pairs of locations within the location group.
        :param str type: (String) The admin scope type. The attribute name is subject to change.
        """
        for key, value in (("scope_entities", scope_entities),
                           ("scope_group_member_entities", scope_group_member_entities),
                           ("type", type)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="scopeEntities")
    def scope_entities(self) -> Sequence['outputs.GetAdminUsersAdminScopeScopeEntityResult']:
        """(String) Based on the admin scope type, the entities can be the ID/name pair of departments, locations, or location groups."""
        return pulumi.get(self, "scope_entities")

    @property
    @pulumi.getter(name="scopeGroupMemberEntities")
    def scope_group_member_entities(self) -> Sequence['outputs.GetAdminUsersAdminScopeScopeGroupMemberEntityResult']:
        """(Number) Only applicable for the LOCATION_GROUP admin scope type, in which case this attribute gives the list of ID/name pairs of locations within the location group."""
        return pulumi.get(self, "scope_group_member_entities")

    @property
    @pulumi.getter
    def type(self) -> str:
        """(String) The admin scope type. The attribute name is subject to change."""
        return pulumi.get(self, "type")
@pulumi.output_type
class GetAdminUsersAdminScopeScopeEntityResult(dict):
    """ID/name entity record inside an admin scope."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: The ID of the admin user to be exported.
        :param str name: (String)
        """
        for key, value in (("extensions", extensions), ("id", id), ("name", name)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """The ID of the admin user to be exported."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """(String)"""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetAdminUsersAdminScopeScopeGroupMemberEntityResult(dict):
    """ID/name entity record for a member of a location group scope."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: The ID of the admin user to be exported.
        :param str name: (String)
        """
        for key, value in (("extensions", extensions), ("id", id), ("name", name)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """The ID of the admin user to be exported."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """(String)"""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetAdminUsersExecMobileAppTokenResult(dict):
    """Executive mobile-app token record attached to an admin user."""

    def __init__(__self__, *,
                 cloud: str,
                 create_time: int,
                 device_id: str,
                 device_name: str,
                 name: str,
                 org_id: int,
                 token: str,
                 token_expiry: int,
                 token_id: str):
        """
        :param str cloud: (String)
        :param int create_time: (Number)
        :param str device_id: (String)
        :param str device_name: (String)
        :param str name: (String)
        :param int org_id: (Number)
        :param str token: (String)
        :param int token_expiry: (Number)
        :param str token_id: (String)
        """
        for key, value in (("cloud", cloud), ("create_time", create_time),
                           ("device_id", device_id), ("device_name", device_name),
                           ("name", name), ("org_id", org_id), ("token", token),
                           ("token_expiry", token_expiry), ("token_id", token_id)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def cloud(self) -> str:
        """(String)"""
        return pulumi.get(self, "cloud")

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> int:
        """(Number)"""
        return pulumi.get(self, "create_time")

    @property
    @pulumi.getter(name="deviceId")
    def device_id(self) -> str:
        """(String)"""
        return pulumi.get(self, "device_id")

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> str:
        """(String)"""
        return pulumi.get(self, "device_name")

    @property
    @pulumi.getter
    def name(self) -> str:
        """(String)"""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="orgId")
    def org_id(self) -> int:
        """(Number)"""
        return pulumi.get(self, "org_id")

    @property
    @pulumi.getter
    def token(self) -> str:
        """(String)"""
        return pulumi.get(self, "token")

    @property
    @pulumi.getter(name="tokenExpiry")
    def token_expiry(self) -> int:
        """(Number)"""
        return pulumi.get(self, "token_expiry")

    @property
    @pulumi.getter(name="tokenId")
    def token_id(self) -> str:
        """(String)"""
        return pulumi.get(self, "token_id")
@pulumi.output_type
class GetAdminUsersRoleResult(dict):
    """Role record (ID/name pair) returned by the get_admin_users data source."""

    def __init__(__self__, *,
                 extensions: Mapping[str, str],
                 id: int,
                 name: str):
        """
        :param int id: The ID of the admin user to be exported.
        :param str name: (String)
        """
        for key, value in (("extensions", extensions), ("id", id), ("name", name)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def extensions(self) -> Mapping[str, str]:
        return pulumi.get(self, "extensions")

    @property
    @pulumi.getter
    def id(self) -> int:
        """The ID of the admin user to be exported."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """(String)"""
        return pulumi.get(self, "name")
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['AdminUsersArgs', 'AdminUsers']
@pulumi.input_type
class AdminUsersArgs:
    """The set of arguments for constructing a AdminUsers resource."""

    def __init__(__self__, *,
                 email: pulumi.Input[str],
                 login_name: pulumi.Input[str],
                 username: pulumi.Input[str],
                 admin_scopes: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]]] = None,
                 comments: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 is_auditor: Optional[pulumi.Input[bool]] = None,
                 is_exec_mobile_app_enabled: Optional[pulumi.Input[bool]] = None,
                 is_non_editable: Optional[pulumi.Input[bool]] = None,
                 is_password_expired: Optional[pulumi.Input[bool]] = None,
                 is_password_login_allowed: Optional[pulumi.Input[bool]] = None,
                 is_product_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
                 is_security_report_comm_enabled: Optional[pulumi.Input[bool]] = None,
                 is_service_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 roles: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]]] = None):
        """
        The set of arguments for constructing a AdminUsers resource.

        :param pulumi.Input[str] email: Admin or auditor's email address.
        :param pulumi.Input[str] login_name: The email address of the admin user to be exported.
        :param pulumi.Input[str] username: The username of the admin user to be exported.
        :param pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]] admin_scopes: The admin's scope. A scope is required for admins, but not applicable to auditors. This attribute is subject to change.
        :param pulumi.Input[str] comments: Additional information about the admin or auditor.
        :param pulumi.Input[bool] disabled: Indicates whether or not the admin account is disabled.
        :param pulumi.Input[bool] is_auditor: Indicates whether the user is an auditor. This attribute is subject to change.
        :param pulumi.Input[bool] is_exec_mobile_app_enabled: Indicates whether or not Executive Insights App access is enabled for the admin.
        :param pulumi.Input[bool] is_non_editable: Indicates whether or not the admin can be edited or deleted.
        :param pulumi.Input[bool] is_password_expired: Indicates whether or not an admin's password has expired.
        :param pulumi.Input[bool] is_password_login_allowed: The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only.
        :param pulumi.Input[bool] is_product_update_comm_enabled: Communication setting for Product Update.
        :param pulumi.Input[bool] is_security_report_comm_enabled: Communication for Security Report is enabled.
        :param pulumi.Input[bool] is_service_update_comm_enabled: Communication setting for Service Update.
        :param pulumi.Input[str] password: The password of the admin user.
        :param pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]] roles: Role of the admin. This is not required for an auditor.
        """
        # Required arguments are always stored.
        for key, value in (("email", email), ("login_name", login_name),
                           ("username", username)):
            pulumi.set(__self__, key, value)
        # Optional arguments are stored only when explicitly provided.
        for key, value in (
                ("admin_scopes", admin_scopes),
                ("comments", comments),
                ("disabled", disabled),
                ("is_auditor", is_auditor),
                ("is_exec_mobile_app_enabled", is_exec_mobile_app_enabled),
                ("is_non_editable", is_non_editable),
                ("is_password_expired", is_password_expired),
                ("is_password_login_allowed", is_password_login_allowed),
                ("is_product_update_comm_enabled", is_product_update_comm_enabled),
                ("is_security_report_comm_enabled", is_security_report_comm_enabled),
                ("is_service_update_comm_enabled", is_service_update_comm_enabled),
                ("password", password),
                ("roles", roles)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def email(self) -> pulumi.Input[str]:
        """Admin or auditor's email address."""
        return pulumi.get(self, "email")

    @email.setter
    def email(self, value: pulumi.Input[str]):
        pulumi.set(self, "email", value)

    @property
    @pulumi.getter(name="loginName")
    def login_name(self) -> pulumi.Input[str]:
        """The email address of the admin user to be exported."""
        return pulumi.get(self, "login_name")

    @login_name.setter
    def login_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "login_name", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The username of the admin user to be exported."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="adminScopes")
    def admin_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]]]:
        """The admin's scope. A scope is required for admins, but not applicable to auditors. This attribute is subject to change."""
        return pulumi.get(self, "admin_scopes")

    @admin_scopes.setter
    def admin_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]]]):
        pulumi.set(self, "admin_scopes", value)

    @property
    @pulumi.getter
    def comments(self) -> Optional[pulumi.Input[str]]:
        """Additional information about the admin or auditor."""
        return pulumi.get(self, "comments")

    @comments.setter
    def comments(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comments", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether or not the admin account is disabled."""
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="isAuditor")
    def is_auditor(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether the user is an auditor. This attribute is subject to change."""
        return pulumi.get(self, "is_auditor")

    @is_auditor.setter
    def is_auditor(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_auditor", value)

    @property
    @pulumi.getter(name="isExecMobileAppEnabled")
    def is_exec_mobile_app_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether or not Executive Insights App access is enabled for the admin."""
        return pulumi.get(self, "is_exec_mobile_app_enabled")

    @is_exec_mobile_app_enabled.setter
    def is_exec_mobile_app_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_exec_mobile_app_enabled", value)

    @property
    @pulumi.getter(name="isNonEditable")
    def is_non_editable(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether or not the admin can be edited or deleted."""
        return pulumi.get(self, "is_non_editable")

    @is_non_editable.setter
    def is_non_editable(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_non_editable", value)

    @property
    @pulumi.getter(name="isPasswordExpired")
    def is_password_expired(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether or not an admin's password has expired."""
        return pulumi.get(self, "is_password_expired")

    @is_password_expired.setter
    def is_password_expired(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_password_expired", value)

    @property
    @pulumi.getter(name="isPasswordLoginAllowed")
    def is_password_login_allowed(self) -> Optional[pulumi.Input[bool]]:
        """The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only."""
        return pulumi.get(self, "is_password_login_allowed")

    @is_password_login_allowed.setter
    def is_password_login_allowed(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_password_login_allowed", value)

    @property
    @pulumi.getter(name="isProductUpdateCommEnabled")
    def is_product_update_comm_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Communication setting for Product Update."""
        return pulumi.get(self, "is_product_update_comm_enabled")

    @is_product_update_comm_enabled.setter
    def is_product_update_comm_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_product_update_comm_enabled", value)

    @property
    @pulumi.getter(name="isSecurityReportCommEnabled")
    def is_security_report_comm_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Communication for Security Report is enabled."""
        return pulumi.get(self, "is_security_report_comm_enabled")

    @is_security_report_comm_enabled.setter
    def is_security_report_comm_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_security_report_comm_enabled", value)

    @property
    @pulumi.getter(name="isServiceUpdateCommEnabled")
    def is_service_update_comm_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Communication setting for Service Update."""
        return pulumi.get(self, "is_service_update_comm_enabled")

    @is_service_update_comm_enabled.setter
    def is_service_update_comm_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_service_update_comm_enabled", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The password of the admin user."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]]]:
        """Role of the admin. This is not required for an auditor."""
        return pulumi.get(self, "roles")

    @roles.setter
    def roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]]]):
        pulumi.set(self, "roles", value)
@pulumi.input_type
class _AdminUsersState:
def __init__(__self__, *,
admin_id: Optional[pulumi.Input[int]] = None,
admin_scopes: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]]] = None,
comments: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
email: Optional[pulumi.Input[str]] = None,
is_auditor: Optional[pulumi.Input[bool]] = None,
is_exec_mobile_app_enabled: Optional[pulumi.Input[bool]] = None,
is_non_editable: Optional[pulumi.Input[bool]] = None,
is_password_expired: Optional[pulumi.Input[bool]] = None,
is_password_login_allowed: Optional[pulumi.Input[bool]] = None,
is_product_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_security_report_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_service_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
login_name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
roles: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AdminUsers resources.
:param pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]] admin_scopes: The admin's scope. A scope is required for admins, but not applicable to auditors. This attribute is subject to change.
:param pulumi.Input[str] comments: Additional information about the admin or auditor.
:param pulumi.Input[bool] disabled: Indicates whether or not the admin account is disabled.
:param pulumi.Input[str] email: Admin or auditor's email address.
:param pulumi.Input[bool] is_auditor: Indicates whether the user is an auditor. This attribute is subject to change.
:param pulumi.Input[bool] is_exec_mobile_app_enabled: Indicates whether or not Executive Insights App access is enabled for the admin.
:param pulumi.Input[bool] is_non_editable: Indicates whether or not the admin can be edited or deleted.
:param pulumi.Input[bool] is_password_expired: Indicates whether or not an admin's password has expired.
:param pulumi.Input[bool] is_password_login_allowed: The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only.
:param pulumi.Input[bool] is_product_update_comm_enabled: Communication setting for Product Update.
:param pulumi.Input[bool] is_security_report_comm_enabled: Communication for Security Report is enabled.
:param pulumi.Input[bool] is_service_update_comm_enabled: Communication setting for Service Update.
:param pulumi.Input[str] login_name: The email address of the admin user to be exported.
:param pulumi.Input[str] password: The username of the admin user to be exported.
:param pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]] roles: Role of the admin. This is not required for an auditor.
:param pulumi.Input[str] username: The username of the admin user to be exported.
"""
if admin_id is not None:
pulumi.set(__self__, "admin_id", admin_id)
if admin_scopes is not None:
pulumi.set(__self__, "admin_scopes", admin_scopes)
if comments is not None:
pulumi.set(__self__, "comments", comments)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if email is not None:
pulumi.set(__self__, "email", email)
if is_auditor is not None:
pulumi.set(__self__, "is_auditor", is_auditor)
if is_exec_mobile_app_enabled is not None:
pulumi.set(__self__, "is_exec_mobile_app_enabled", is_exec_mobile_app_enabled)
if is_non_editable is not None:
pulumi.set(__self__, "is_non_editable", is_non_editable)
if is_password_expired is not None:
pulumi.set(__self__, "is_password_expired", is_password_expired)
if is_password_login_allowed is not None:
pulumi.set(__self__, "is_password_login_allowed", is_password_login_allowed)
if is_product_update_comm_enabled is not None:
pulumi.set(__self__, "is_product_update_comm_enabled", is_product_update_comm_enabled)
if is_security_report_comm_enabled is not None:
pulumi.set(__self__, "is_security_report_comm_enabled", is_security_report_comm_enabled)
if is_service_update_comm_enabled is not None:
pulumi.set(__self__, "is_service_update_comm_enabled", is_service_update_comm_enabled)
if login_name is not None:
pulumi.set(__self__, "login_name", login_name)
if password is not None:
pulumi.set(__self__, "password", password)
if roles is not None:
pulumi.set(__self__, "roles", roles)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="adminId")
def admin_id(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "admin_id")
@admin_id.setter
def admin_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "admin_id", value)
@property
@pulumi.getter(name="adminScopes")
def admin_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]]]:
"""
The admin's scope. A scope is required for admins, but not applicable to auditors. This attribute is subject to change.
"""
return pulumi.get(self, "admin_scopes")
@admin_scopes.setter
def admin_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersAdminScopeArgs']]]]):
pulumi.set(self, "admin_scopes", value)
@property
@pulumi.getter
def comments(self) -> Optional[pulumi.Input[str]]:
"""
Additional information about the admin or auditor.
"""
return pulumi.get(self, "comments")
@comments.setter
def comments(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comments", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether or not the admin account is disabled.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter
def email(self) -> Optional[pulumi.Input[str]]:
"""
Admin or auditor's email address.
"""
return pulumi.get(self, "email")
@email.setter
def email(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email", value)
@property
@pulumi.getter(name="isAuditor")
def is_auditor(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the user is an auditor. This attribute is subject to change.
"""
return pulumi.get(self, "is_auditor")
@is_auditor.setter
def is_auditor(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_auditor", value)
@property
@pulumi.getter(name="isExecMobileAppEnabled")
def is_exec_mobile_app_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether or not Executive Insights App access is enabled for the admin.
"""
return pulumi.get(self, "is_exec_mobile_app_enabled")
@is_exec_mobile_app_enabled.setter
def is_exec_mobile_app_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_exec_mobile_app_enabled", value)
@property
@pulumi.getter(name="isNonEditable")
def is_non_editable(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether or not the admin can be edited or deleted.
"""
return pulumi.get(self, "is_non_editable")
@is_non_editable.setter
def is_non_editable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_non_editable", value)
@property
@pulumi.getter(name="isPasswordExpired")
def is_password_expired(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether or not an admin's password has expired.
"""
return pulumi.get(self, "is_password_expired")
@is_password_expired.setter
def is_password_expired(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_password_expired", value)
@property
@pulumi.getter(name="isPasswordLoginAllowed")
def is_password_login_allowed(self) -> Optional[pulumi.Input[bool]]:
"""
The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only.
"""
return pulumi.get(self, "is_password_login_allowed")
@is_password_login_allowed.setter
def is_password_login_allowed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_password_login_allowed", value)
@property
@pulumi.getter(name="isProductUpdateCommEnabled")
def is_product_update_comm_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Communication setting for Product Update.
"""
return pulumi.get(self, "is_product_update_comm_enabled")
@is_product_update_comm_enabled.setter
def is_product_update_comm_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_product_update_comm_enabled", value)
@property
@pulumi.getter(name="isSecurityReportCommEnabled")
def is_security_report_comm_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Communication for Security Report is enabled.
"""
return pulumi.get(self, "is_security_report_comm_enabled")
@is_security_report_comm_enabled.setter
def is_security_report_comm_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_security_report_comm_enabled", value)
@property
@pulumi.getter(name="isServiceUpdateCommEnabled")
def is_service_update_comm_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Communication setting for Service Update.
"""
return pulumi.get(self, "is_service_update_comm_enabled")
@is_service_update_comm_enabled.setter
def is_service_update_comm_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_service_update_comm_enabled", value)
@property
@pulumi.getter(name="loginName")
def login_name(self) -> Optional[pulumi.Input[str]]:
"""
The email address of the admin user to be exported.
"""
return pulumi.get(self, "login_name")
@login_name.setter
def login_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "login_name", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The username of the admin user to be exported.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]]]:
"""
Role of the admin. This is not required for an auditor.
"""
return pulumi.get(self, "roles")
@roles.setter
def roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AdminUsersRoleArgs']]]]):
pulumi.set(self, "roles", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
The username of the admin user to be exported.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
class AdminUsers(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersAdminScopeArgs']]]]] = None,
comments: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
email: Optional[pulumi.Input[str]] = None,
is_auditor: Optional[pulumi.Input[bool]] = None,
is_exec_mobile_app_enabled: Optional[pulumi.Input[bool]] = None,
is_non_editable: Optional[pulumi.Input[bool]] = None,
is_password_expired: Optional[pulumi.Input[bool]] = None,
is_password_login_allowed: Optional[pulumi.Input[bool]] = None,
is_product_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_security_report_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_service_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
login_name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
roles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersRoleArgs']]]]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The **zia_admin_users** resource allows the creation and management of ZIA admin user account created in the Zscaler Internet Access cloud or via the API.
## Example Usage
```python
import pulumi
import pulumi_zia as zia
import zscaler_pulumi_zia as zia
super_admin = zia.AdminRoles.get_admin_roles(name="Super Admin")
engineering = zia.Departments.get_department_management(name="Engineering")
john_smith = zia.admin_users.AdminUsers("johnSmith",
login_name="[email protected]",
user_name="John Smith",
email="[email protected]",
is_password_login_allowed=True,
password="AeQ9E5w8B$",
is_security_report_comm_enabled=True,
is_service_update_comm_enabled=True,
is_product_update_comm_enabled=True,
comments="Administrator User",
roles=[zia.admin_users.AdminUsersRoleArgs(
id=super_admin.id,
)],
admin_scopes=[zia.admin_users.AdminUsersAdminScopeArgs(
type="DEPARTMENT",
scope_entities=zia.admin_users.AdminUsersAdminScopeScopeEntitiesArgs(
ids=[engineering.id],
),
)])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersAdminScopeArgs']]]] admin_scopes: The admin's scope. A scope is required for admins, but not applicable to auditors. This attribute is subject to change.
:param pulumi.Input[str] comments: Additional information about the admin or auditor.
:param pulumi.Input[bool] disabled: Indicates whether or not the admin account is disabled.
:param pulumi.Input[str] email: Admin or auditor's email address.
:param pulumi.Input[bool] is_auditor: Indicates whether the user is an auditor. This attribute is subject to change.
:param pulumi.Input[bool] is_exec_mobile_app_enabled: Indicates whether or not Executive Insights App access is enabled for the admin.
:param pulumi.Input[bool] is_non_editable: Indicates whether or not the admin can be edited or deleted.
:param pulumi.Input[bool] is_password_expired: Indicates whether or not an admin's password has expired.
:param pulumi.Input[bool] is_password_login_allowed: The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only.
:param pulumi.Input[bool] is_product_update_comm_enabled: Communication setting for Product Update.
:param pulumi.Input[bool] is_security_report_comm_enabled: Communication for Security Report is enabled.
:param pulumi.Input[bool] is_service_update_comm_enabled: Communication setting for Service Update.
:param pulumi.Input[str] login_name: The email address of the admin user to be exported.
:param pulumi.Input[str] password: The username of the admin user to be exported.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersRoleArgs']]]] roles: Role of the admin. This is not required for an auditor.
:param pulumi.Input[str] username: The username of the admin user to be exported.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AdminUsersArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The **zia_admin_users** resource allows the creation and management of ZIA admin user account created in the Zscaler Internet Access cloud or via the API.
## Example Usage
```python
import pulumi
import pulumi_zia as zia
import zscaler_pulumi_zia as zia
super_admin = zia.AdminRoles.get_admin_roles(name="Super Admin")
engineering = zia.Departments.get_department_management(name="Engineering")
john_smith = zia.admin_users.AdminUsers("johnSmith",
login_name="[email protected]",
user_name="John Smith",
email="[email protected]",
is_password_login_allowed=True,
password="AeQ9E5w8B$",
is_security_report_comm_enabled=True,
is_service_update_comm_enabled=True,
is_product_update_comm_enabled=True,
comments="Administrator User",
roles=[zia.admin_users.AdminUsersRoleArgs(
id=super_admin.id,
)],
admin_scopes=[zia.admin_users.AdminUsersAdminScopeArgs(
type="DEPARTMENT",
scope_entities=zia.admin_users.AdminUsersAdminScopeScopeEntitiesArgs(
ids=[engineering.id],
),
)])
```
:param str resource_name: The name of the resource.
:param AdminUsersArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AdminUsersArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersAdminScopeArgs']]]]] = None,
comments: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
email: Optional[pulumi.Input[str]] = None,
is_auditor: Optional[pulumi.Input[bool]] = None,
is_exec_mobile_app_enabled: Optional[pulumi.Input[bool]] = None,
is_non_editable: Optional[pulumi.Input[bool]] = None,
is_password_expired: Optional[pulumi.Input[bool]] = None,
is_password_login_allowed: Optional[pulumi.Input[bool]] = None,
is_product_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_security_report_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_service_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
login_name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
roles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersRoleArgs']]]]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AdminUsersArgs.__new__(AdminUsersArgs)
__props__.__dict__["admin_scopes"] = admin_scopes
__props__.__dict__["comments"] = comments
__props__.__dict__["disabled"] = disabled
if email is None and not opts.urn:
raise TypeError("Missing required property 'email'")
__props__.__dict__["email"] = email
__props__.__dict__["is_auditor"] = is_auditor
__props__.__dict__["is_exec_mobile_app_enabled"] = is_exec_mobile_app_enabled
__props__.__dict__["is_non_editable"] = is_non_editable
__props__.__dict__["is_password_expired"] = is_password_expired
__props__.__dict__["is_password_login_allowed"] = is_password_login_allowed
__props__.__dict__["is_product_update_comm_enabled"] = is_product_update_comm_enabled
__props__.__dict__["is_security_report_comm_enabled"] = is_security_report_comm_enabled
__props__.__dict__["is_service_update_comm_enabled"] = is_service_update_comm_enabled
if login_name is None and not opts.urn:
raise TypeError("Missing required property 'login_name'")
__props__.__dict__["login_name"] = login_name
__props__.__dict__["password"] = None if password is None else pulumi.Output.secret(password)
__props__.__dict__["roles"] = roles
if username is None and not opts.urn:
raise TypeError("Missing required property 'username'")
__props__.__dict__["username"] = username
__props__.__dict__["admin_id"] = None
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["password"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(AdminUsers, __self__).__init__(
'zia:AdminUsers/adminUsers:AdminUsers',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
admin_id: Optional[pulumi.Input[int]] = None,
admin_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersAdminScopeArgs']]]]] = None,
comments: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
email: Optional[pulumi.Input[str]] = None,
is_auditor: Optional[pulumi.Input[bool]] = None,
is_exec_mobile_app_enabled: Optional[pulumi.Input[bool]] = None,
is_non_editable: Optional[pulumi.Input[bool]] = None,
is_password_expired: Optional[pulumi.Input[bool]] = None,
is_password_login_allowed: Optional[pulumi.Input[bool]] = None,
is_product_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_security_report_comm_enabled: Optional[pulumi.Input[bool]] = None,
is_service_update_comm_enabled: Optional[pulumi.Input[bool]] = None,
login_name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
roles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersRoleArgs']]]]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'AdminUsers':
"""
Get an existing AdminUsers resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersAdminScopeArgs']]]] admin_scopes: The admin's scope. A scope is required for admins, but not applicable to auditors. This attribute is subject to change.
:param pulumi.Input[str] comments: Additional information about the admin or auditor.
:param pulumi.Input[bool] disabled: Indicates whether or not the admin account is disabled.
:param pulumi.Input[str] email: Admin or auditor's email address.
:param pulumi.Input[bool] is_auditor: Indicates whether the user is an auditor. This attribute is subject to change.
:param pulumi.Input[bool] is_exec_mobile_app_enabled: Indicates whether or not Executive Insights App access is enabled for the admin.
:param pulumi.Input[bool] is_non_editable: Indicates whether or not the admin can be edited or deleted.
:param pulumi.Input[bool] is_password_expired: Indicates whether or not an admin's password has expired.
:param pulumi.Input[bool] is_password_login_allowed: The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only.
:param pulumi.Input[bool] is_product_update_comm_enabled: Communication setting for Product Update.
:param pulumi.Input[bool] is_security_report_comm_enabled: Communication for Security Report is enabled.
:param pulumi.Input[bool] is_service_update_comm_enabled: Communication setting for Service Update.
:param pulumi.Input[str] login_name: The email address of the admin user to be exported.
:param pulumi.Input[str] password: The username of the admin user to be exported.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AdminUsersRoleArgs']]]] roles: Role of the admin. This is not required for an auditor.
:param pulumi.Input[str] username: The username of the admin user to be exported.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AdminUsersState.__new__(_AdminUsersState)
__props__.__dict__["admin_id"] = admin_id
__props__.__dict__["admin_scopes"] = admin_scopes
__props__.__dict__["comments"] = comments
__props__.__dict__["disabled"] = disabled
__props__.__dict__["email"] = email
__props__.__dict__["is_auditor"] = is_auditor
__props__.__dict__["is_exec_mobile_app_enabled"] = is_exec_mobile_app_enabled
__props__.__dict__["is_non_editable"] = is_non_editable
__props__.__dict__["is_password_expired"] = is_password_expired
__props__.__dict__["is_password_login_allowed"] = is_password_login_allowed
__props__.__dict__["is_product_update_comm_enabled"] = is_product_update_comm_enabled
__props__.__dict__["is_security_report_comm_enabled"] = is_security_report_comm_enabled
__props__.__dict__["is_service_update_comm_enabled"] = is_service_update_comm_enabled
__props__.__dict__["login_name"] = login_name
__props__.__dict__["password"] = password
__props__.__dict__["roles"] = roles
__props__.__dict__["username"] = username
return AdminUsers(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="adminId")
def admin_id(self) -> pulumi.Output[int]:
return pulumi.get(self, "admin_id")
@property
@pulumi.getter(name="adminScopes")
def admin_scopes(self) -> pulumi.Output[Sequence['outputs.AdminUsersAdminScope']]:
"""
The admin's scope. A scope is required for admins, but not applicable to auditors. This attribute is subject to change.
"""
return pulumi.get(self, "admin_scopes")
@property
@pulumi.getter
def comments(self) -> pulumi.Output[Optional[str]]:
"""
Additional information about the admin or auditor.
"""
return pulumi.get(self, "comments")
@property
@pulumi.getter
def disabled(self) -> pulumi.Output[bool]:
"""
Indicates whether or not the admin account is disabled.
"""
return pulumi.get(self, "disabled")
@property
@pulumi.getter
def email(self) -> pulumi.Output[str]:
"""
Admin or auditor's email address.
"""
return pulumi.get(self, "email")
@property
@pulumi.getter(name="isAuditor")
def is_auditor(self) -> pulumi.Output[bool]:
"""
Indicates whether the user is an auditor. This attribute is subject to change.
"""
return pulumi.get(self, "is_auditor")
@property
@pulumi.getter(name="isExecMobileAppEnabled")
def is_exec_mobile_app_enabled(self) -> pulumi.Output[bool]:
"""
Indicates whether or not Executive Insights App access is enabled for the admin.
"""
return pulumi.get(self, "is_exec_mobile_app_enabled")
@property
@pulumi.getter(name="isNonEditable")
def is_non_editable(self) -> pulumi.Output[bool]:
"""
Indicates whether or not the admin can be edited or deleted.
"""
return pulumi.get(self, "is_non_editable")
@property
@pulumi.getter(name="isPasswordExpired")
def is_password_expired(self) -> pulumi.Output[bool]:
"""
Indicates whether or not an admin's password has expired.
"""
return pulumi.get(self, "is_password_expired")
@property
@pulumi.getter(name="isPasswordLoginAllowed")
def is_password_login_allowed(self) -> pulumi.Output[bool]:
"""
The default is true when SAML Authentication is disabled. When SAML Authentication is enabled, this can be set to false in order to force the admin to login via SSO only.
"""
return pulumi.get(self, "is_password_login_allowed")
@property
@pulumi.getter(name="isProductUpdateCommEnabled")
def is_product_update_comm_enabled(self) -> pulumi.Output[bool]:
"""
Communication setting for Product Update.
"""
return pulumi.get(self, "is_product_update_comm_enabled")
@property
@pulumi.getter(name="isSecurityReportCommEnabled")
def is_security_report_comm_enabled(self) -> pulumi.Output[bool]:
"""
Communication for Security Report is enabled.
"""
return pulumi.get(self, "is_security_report_comm_enabled")
@property
@pulumi.getter(name="isServiceUpdateCommEnabled")
def is_service_update_comm_enabled(self) -> pulumi.Output[bool]:
"""
Communication setting for Service Update.
"""
return pulumi.get(self, "is_service_update_comm_enabled")
@property
@pulumi.getter(name="loginName")
def login_name(self) -> pulumi.Output[str]:
"""
The email address of the admin user to be exported.
"""
return pulumi.get(self, "login_name")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
The username of the admin user to be exported.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def roles(self) -> pulumi.Output[Sequence['outputs.AdminUsersRole']]:
"""
Role of the admin. This is not required for an auditor.
"""
return pulumi.get(self, "roles")
@property
@pulumi.getter
def username(self) -> pulumi.Output[str]:
"""
The username of the admin user to be exported.
"""
return pulumi.get(self, "username") | zscaler-pulumi-zia | /zscaler_pulumi_zia-0.0.3a1675095059.tar.gz/zscaler_pulumi_zia-0.0.3a1675095059/zscaler_pulumi_zia/adminusers/admin_users.py | admin_users.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public names exported when this module is star-imported.
__all__ = [
    'GetDevicesResult',
    'AwaitableGetDevicesResult',
    'get_devices',
    'get_devices_output',
]
@pulumi.output_type
class GetDevicesResult:
    """
    A collection of values returned by getDevices.
    """
    def __init__(__self__, description=None, device_group_type=None, device_model=None, id=None, name=None, os_type=None, os_version=None, owner_name=None, owner_user_id=None):
        # Validate the runtime type of each supplied field, then store it via
        # pulumi.set. A falsy value (None, "", 0) short-circuits the `and`
        # guard, so only truthy values are actually type-checked.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if device_group_type and not isinstance(device_group_type, str):
            raise TypeError("Expected argument 'device_group_type' to be a str")
        pulumi.set(__self__, "device_group_type", device_group_type)
        if device_model and not isinstance(device_model, str):
            raise TypeError("Expected argument 'device_model' to be a str")
        pulumi.set(__self__, "device_model", device_model)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if os_type and not isinstance(os_type, str):
            raise TypeError("Expected argument 'os_type' to be a str")
        pulumi.set(__self__, "os_type", os_type)
        if os_version and not isinstance(os_version, str):
            raise TypeError("Expected argument 'os_version' to be a str")
        pulumi.set(__self__, "os_version", os_version)
        if owner_name and not isinstance(owner_name, str):
            raise TypeError("Expected argument 'owner_name' to be a str")
        pulumi.set(__self__, "owner_name", owner_name)
        if owner_user_id and not isinstance(owner_user_id, int):
            raise TypeError("Expected argument 'owner_user_id' to be a int")
        pulumi.set(__self__, "owner_user_id", owner_user_id)
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        (String) The device's description.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="deviceGroupType")
    def device_group_type(self) -> str:
        """
        (String) The device group type. i.e ``ZCC_OS``, ``NON_ZCC``, ``CBI``
        """
        return pulumi.get(self, "device_group_type")
    @property
    @pulumi.getter(name="deviceModel")
    def device_model(self) -> str:
        """
        (String) The device model.
        """
        return pulumi.get(self, "device_model")
    @property
    @pulumi.getter
    def id(self) -> int:
        """
        (int) The unique identifier for the device group.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        (String) The device name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> str:
        """
        (String) The operating system (OS). ``ANY``, ``OTHER_OS``, ``IOS``, ``ANDROID_OS``, ``WINDOWS_OS``, ``MAC_OS``, ``LINUX``
        """
        return pulumi.get(self, "os_type")
    @property
    @pulumi.getter(name="osVersion")
    def os_version(self) -> str:
        """
        (String) The operating system version.
        """
        return pulumi.get(self, "os_version")
    @property
    @pulumi.getter(name="ownerName")
    def owner_name(self) -> str:
        """
        (String) The device owner's user name.
        """
        return pulumi.get(self, "owner_name")
    @property
    @pulumi.getter(name="ownerUserId")
    def owner_user_id(self) -> int:
        """
        (int) The unique identifier of the device owner (i.e., user).
        """
        return pulumi.get(self, "owner_user_id")
class AwaitableGetDevicesResult(GetDevicesResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Support ``await`` on an already-resolved result.

        The unreachable ``yield`` makes this method a generator, which is
        what the awaitable protocol requires; the generator yields nothing
        and immediately returns a plain ``GetDevicesResult`` carrying this
        instance's field values.
        """
        if False:
            yield self
        field_names = (
            'description',
            'device_group_type',
            'device_model',
            'id',
            'name',
            'os_type',
            'os_version',
            'owner_name',
            'owner_user_id',
        )
        return GetDevicesResult(**{attr: getattr(self, attr) for attr in field_names})
def get_devices(device_group_type: Optional[str] = None,
                device_model: Optional[str] = None,
                name: Optional[str] = None,
                os_type: Optional[str] = None,
                os_version: Optional[str] = None,
                owner_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDevicesResult:
    """
    Use the **zia_devices** data source to get information about a device in the Zscaler Internet Access cloud or via the API. This data source can then be associated with resources such as: URL Filtering Rules

    ## Example Usage

    ```python
    import pulumi
    import pulumi_zia as zia

    device = zia.Devices.get_devices(name="administrator")
    ```


    :param str device_group_type: (String) The device group type. i.e ``ZCC_OS``, ``NON_ZCC``, ``CBI``
    :param str device_model: (String) The device model.
    :param str name: The name of the devices to be exported.
    :param str os_type: (String) The operating system (OS). ``ANY``, ``OTHER_OS``, ``IOS``, ``ANDROID_OS``, ``WINDOWS_OS``, ``MAC_OS``, ``LINUX``
    :param str os_version: (String) The operating system version.
    :param str owner_name: (String) The device owner's user name.
    """
    # Map the snake_case keyword arguments onto the camelCase names the
    # provider's invoke endpoint expects.
    __args__ = {
        'deviceGroupType': device_group_type,
        'deviceModel': device_model,
        'name': name,
        'osType': os_type,
        'osVersion': os_version,
        'ownerName': owner_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    raw = pulumi.runtime.invoke('zia:Devices/getDevices:getDevices', __args__, opts=opts, typ=GetDevicesResult).value
    # Re-wrap the resolved value in the awaitable flavour of the result type.
    return AwaitableGetDevicesResult(
        description=raw.description,
        device_group_type=raw.device_group_type,
        device_model=raw.device_model,
        id=raw.id,
        name=raw.name,
        os_type=raw.os_type,
        os_version=raw.os_version,
        owner_name=raw.owner_name,
        owner_user_id=raw.owner_user_id)
@_utilities.lift_output_func(get_devices)
def get_devices_output(device_group_type: Optional[pulumi.Input[Optional[str]]] = None,
                       device_model: Optional[pulumi.Input[Optional[str]]] = None,
                       name: Optional[pulumi.Input[Optional[str]]] = None,
                       os_type: Optional[pulumi.Input[Optional[str]]] = None,
                       os_version: Optional[pulumi.Input[Optional[str]]] = None,
                       owner_name: Optional[pulumi.Input[Optional[str]]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDevicesResult]:
    """
    Use the **zia_devices** data source to get information about a device in the Zscaler Internet Access cloud or via the API. This data source can then be associated with resources such as: URL Filtering Rules

    ## Example Usage

    ```python
    import pulumi
    import pulumi_zia as zia

    device = zia.Devices.get_devices(name="administrator")
    ```

    :param str device_group_type: (String) The device group type. i.e ``ZCC_OS``, ``NON_ZCC``, ``CBI``
    :param str device_model: (String) The device model.
    :param str name: The name of the devices to be exported.
    :param str os_type: (String) The operating system (OS). ``ANY``, ``OTHER_OS``, ``IOS``, ``ANDROID_OS``, ``WINDOWS_OS``, ``MAC_OS``, ``LINUX``
    :param str os_version: (String) The operating system version.
    :param str owner_name: (String) The device owner's user name.
    """
    # Intentional stub: ``lift_output_func`` generates the implementation by
    # lifting ``get_devices`` so it accepts pulumi.Input values and returns an Output.
    ...
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.